Diffstat (limited to 'drivers')
393 files changed, 7869 insertions, 6669 deletions
diff --git a/drivers/Makefile b/drivers/Makefile index 73d039156ea7..795d0ca714bf 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -63,6 +63,7 @@ obj-$(CONFIG_FB_I810) += video/fbdev/i810/ obj-$(CONFIG_FB_INTEL) += video/fbdev/intelfb/ obj-$(CONFIG_PARPORT) += parport/ +obj-$(CONFIG_NVM) += lightnvm/ obj-y += base/ block/ misc/ mfd/ nfc/ obj-$(CONFIG_LIBNVDIMM) += nvdimm/ obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf/ @@ -70,7 +71,6 @@ obj-$(CONFIG_NUBUS) += nubus/ obj-y += macintosh/ obj-$(CONFIG_IDE) += ide/ obj-$(CONFIG_SCSI) += scsi/ -obj-$(CONFIG_NVM) += lightnvm/ obj-y += nvme/ obj-$(CONFIG_ATA) += ata/ obj-$(CONFIG_TARGET_CORE) += target/ diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index 25dbb76c02cc..5eef4cb4f70e 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -58,10 +58,10 @@ config ACPI_CCA_REQUIRED bool config ACPI_DEBUGGER - bool "In-kernel debugger (EXPERIMENTAL)" + bool "AML debugger interface (EXPERIMENTAL)" select ACPI_DEBUG help - Enable in-kernel debugging facilities: statistics, internal + Enable in-kernel debugging of AML facilities: statistics, internal object dump, single step control method execution. This is still under development, currently enabling this only results in the compilation of the ACPICA debugger files. diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c index 3c083d2cc434..6730f965b379 100644 --- a/drivers/acpi/cppc_acpi.c +++ b/drivers/acpi/cppc_acpi.c @@ -304,7 +304,7 @@ EXPORT_SYMBOL_GPL(acpi_get_psd_map); static int register_pcc_channel(int pcc_subspace_idx) { - struct acpi_pcct_subspace *cppc_ss; + struct acpi_pcct_hw_reduced *cppc_ss; unsigned int len; if (pcc_subspace_idx >= 0) { diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index f61a7c834540..b420fb46669d 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -1103,7 +1103,7 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 *data) } err_exit: - if (result && q) + if (result) acpi_ec_delete_query(q); if (data) *data = value; diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c index f7dab53b352a..e7ed39bab97d 100644 --- a/drivers/acpi/nfit.c +++ b/drivers/acpi/nfit.c @@ -233,11 +233,12 @@ static bool add_spa(struct acpi_nfit_desc *acpi_desc, struct nfit_table_prev *prev, struct acpi_nfit_system_address *spa) { + size_t length = min_t(size_t, sizeof(*spa), spa->header.length); struct device *dev = acpi_desc->dev; struct nfit_spa *nfit_spa; list_for_each_entry(nfit_spa, &prev->spas, list) { - if (memcmp(nfit_spa->spa, spa, sizeof(*spa)) == 0) { + if (memcmp(nfit_spa->spa, spa, length) == 0) { list_move_tail(&nfit_spa->list, &acpi_desc->spas); return true; } @@ -259,11 +260,12 @@ static bool add_memdev(struct acpi_nfit_desc *acpi_desc, struct nfit_table_prev *prev, struct acpi_nfit_memory_map *memdev) { + size_t length = min_t(size_t, sizeof(*memdev), memdev->header.length); struct device *dev = acpi_desc->dev; struct nfit_memdev *nfit_memdev; list_for_each_entry(nfit_memdev, &prev->memdevs, list) - if (memcmp(nfit_memdev->memdev, memdev, sizeof(*memdev)) == 0) { + if (memcmp(nfit_memdev->memdev, memdev, length) == 0) { list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs); return true; } @@ -284,11 +286,12 @@ static bool add_dcr(struct acpi_nfit_desc *acpi_desc, struct nfit_table_prev *prev, struct acpi_nfit_control_region *dcr) { + size_t length = min_t(size_t, sizeof(*dcr), dcr->header.length); struct device *dev = acpi_desc->dev; struct nfit_dcr *nfit_dcr; list_for_each_entry(nfit_dcr, &prev->dcrs, list) - if (memcmp(nfit_dcr->dcr, 
dcr, sizeof(*dcr)) == 0) { + if (memcmp(nfit_dcr->dcr, dcr, length) == 0) { list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs); return true; } @@ -308,11 +311,12 @@ static bool add_bdw(struct acpi_nfit_desc *acpi_desc, struct nfit_table_prev *prev, struct acpi_nfit_data_region *bdw) { + size_t length = min_t(size_t, sizeof(*bdw), bdw->header.length); struct device *dev = acpi_desc->dev; struct nfit_bdw *nfit_bdw; list_for_each_entry(nfit_bdw, &prev->bdws, list) - if (memcmp(nfit_bdw->bdw, bdw, sizeof(*bdw)) == 0) { + if (memcmp(nfit_bdw->bdw, bdw, length) == 0) { list_move_tail(&nfit_bdw->list, &acpi_desc->bdws); return true; } @@ -332,11 +336,12 @@ static bool add_idt(struct acpi_nfit_desc *acpi_desc, struct nfit_table_prev *prev, struct acpi_nfit_interleave *idt) { + size_t length = min_t(size_t, sizeof(*idt), idt->header.length); struct device *dev = acpi_desc->dev; struct nfit_idt *nfit_idt; list_for_each_entry(nfit_idt, &prev->idts, list) - if (memcmp(nfit_idt->idt, idt, sizeof(*idt)) == 0) { + if (memcmp(nfit_idt->idt, idt, length) == 0) { list_move_tail(&nfit_idt->list, &acpi_desc->idts); return true; } @@ -356,11 +361,12 @@ static bool add_flush(struct acpi_nfit_desc *acpi_desc, struct nfit_table_prev *prev, struct acpi_nfit_flush_address *flush) { + size_t length = min_t(size_t, sizeof(*flush), flush->header.length); struct device *dev = acpi_desc->dev; struct nfit_flush *nfit_flush; list_for_each_entry(nfit_flush, &prev->flushes, list) - if (memcmp(nfit_flush->flush, flush, sizeof(*flush)) == 0) { + if (memcmp(nfit_flush->flush, flush, length) == 0) { list_move_tail(&nfit_flush->list, &acpi_desc->flushes); return true; } @@ -655,7 +661,7 @@ static ssize_t revision_show(struct device *dev, struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); - return sprintf(buf, "%d\n", acpi_desc->nfit->header.revision); + return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision); } static DEVICE_ATTR_RO(revision); @@ -1652,7 +1658,6 @@ int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, acpi_size sz) data = (u8 *) acpi_desc->nfit; end = data + sz; - data += sizeof(struct acpi_table_nfit); while (!IS_ERR_OR_NULL(data)) data = add_table(acpi_desc, &prev, data, end); @@ -1748,13 +1753,29 @@ static int acpi_nfit_add(struct acpi_device *adev) return PTR_ERR(acpi_desc); } - acpi_desc->nfit = (struct acpi_table_nfit *) tbl; + /* + * Save the acpi header for later and then skip it, + * making nfit point to the first nfit table header. 
+ */ + acpi_desc->acpi_header = *tbl; + acpi_desc->nfit = (void *) tbl + sizeof(struct acpi_table_nfit); + sz -= sizeof(struct acpi_table_nfit); /* Evaluate _FIT and override with that if present */ status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf); if (ACPI_SUCCESS(status) && buf.length > 0) { - acpi_desc->nfit = (struct acpi_table_nfit *)buf.pointer; - sz = buf.length; + union acpi_object *obj; + /* + * Adjust for the acpi_object header of the _FIT + */ + obj = buf.pointer; + if (obj->type == ACPI_TYPE_BUFFER) { + acpi_desc->nfit = + (struct acpi_nfit_header *)obj->buffer.pointer; + sz = obj->buffer.length; + } else + dev_dbg(dev, "%s invalid type %d, ignoring _FIT\n", + __func__, (int) obj->type); } rc = acpi_nfit_init(acpi_desc, sz); @@ -1777,7 +1798,8 @@ static void acpi_nfit_notify(struct acpi_device *adev, u32 event) { struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev); struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; - struct acpi_table_nfit *nfit_saved; + struct acpi_nfit_header *nfit_saved; + union acpi_object *obj; struct device *dev = &adev->dev; acpi_status status; int ret; @@ -1808,12 +1830,19 @@ static void acpi_nfit_notify(struct acpi_device *adev, u32 event) } nfit_saved = acpi_desc->nfit; - acpi_desc->nfit = (struct acpi_table_nfit *)buf.pointer; - ret = acpi_nfit_init(acpi_desc, buf.length); - if (!ret) { - /* Merge failed, restore old nfit, and exit */ - acpi_desc->nfit = nfit_saved; - dev_err(dev, "failed to merge updated NFIT\n"); + obj = buf.pointer; + if (obj->type == ACPI_TYPE_BUFFER) { + acpi_desc->nfit = + (struct acpi_nfit_header *)obj->buffer.pointer; + ret = acpi_nfit_init(acpi_desc, obj->buffer.length); + if (ret) { + /* Merge failed, restore old nfit, and exit */ + acpi_desc->nfit = nfit_saved; + dev_err(dev, "failed to merge updated NFIT\n"); + } + } else { + /* Bad _FIT, restore old nfit */ + dev_err(dev, "Invalid _FIT\n"); } kfree(buf.pointer); diff --git a/drivers/acpi/nfit.h b/drivers/acpi/nfit.h index 2ea5c0797c8f..3d549a383659 100644 --- a/drivers/acpi/nfit.h +++ b/drivers/acpi/nfit.h @@ -96,7 +96,8 @@ struct nfit_mem { struct acpi_nfit_desc { struct nvdimm_bus_descriptor nd_desc; - struct acpi_table_nfit *nfit; + struct acpi_table_header acpi_header; + struct acpi_nfit_header *nfit; struct mutex spa_map_mutex; struct mutex init_mutex; struct list_head spa_maps; diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index 850d7bf0c873..ae3fe4e64203 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c @@ -768,6 +768,13 @@ static void pci_acpi_root_add_resources(struct acpi_pci_root_info *info) else continue; + /* + * Some legacy x86 host bridge drivers use iomem_resource and + * ioport_resource as default resource pool, skip it. 
+ */ + if (res == root) + continue; + conflict = insert_resource_conflict(root, res); if (conflict) { dev_info(&info->bridge->dev, diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c index bf034f8b7c1a..2fa8304171e0 100644 --- a/drivers/acpi/sbshc.c +++ b/drivers/acpi/sbshc.c @@ -14,7 +14,6 @@ #include <linux/delay.h> #include <linux/module.h> #include <linux/interrupt.h> -#include <linux/dmi.h> #include "sbshc.h" #define PREFIX "ACPI: " @@ -30,6 +29,7 @@ struct acpi_smb_hc { u8 query_bit; smbus_alarm_callback callback; void *context; + bool done; }; static int acpi_smbus_hc_add(struct acpi_device *device); @@ -88,8 +88,6 @@ enum acpi_smb_offset { ACPI_SMB_ALARM_DATA = 0x26, /* 2 bytes alarm data */ }; -static bool macbook; - static inline int smb_hc_read(struct acpi_smb_hc *hc, u8 address, u8 *data) { return ec_read(hc->offset + address, data); @@ -100,27 +98,11 @@ static inline int smb_hc_write(struct acpi_smb_hc *hc, u8 address, u8 data) return ec_write(hc->offset + address, data); } -static inline int smb_check_done(struct acpi_smb_hc *hc) -{ - union acpi_smb_status status = {.raw = 0}; - smb_hc_read(hc, ACPI_SMB_STATUS, &status.raw); - return status.fields.done && (status.fields.status == SMBUS_OK); -} - static int wait_transaction_complete(struct acpi_smb_hc *hc, int timeout) { - if (wait_event_timeout(hc->wait, smb_check_done(hc), - msecs_to_jiffies(timeout))) + if (wait_event_timeout(hc->wait, hc->done, msecs_to_jiffies(timeout))) return 0; - /* - * After the timeout happens, OS will try to check the status of SMbus. - * If the status is what OS expected, it will be regarded as the bogus - * timeout. - */ - if (smb_check_done(hc)) - return 0; - else - return -ETIME; + return -ETIME; } static int acpi_smbus_transaction(struct acpi_smb_hc *hc, u8 protocol, @@ -135,8 +117,7 @@ static int acpi_smbus_transaction(struct acpi_smb_hc *hc, u8 protocol, } mutex_lock(&hc->lock); - if (macbook) - udelay(5); + hc->done = false; if (smb_hc_read(hc, ACPI_SMB_PROTOCOL, &temp)) goto end; if (temp) { @@ -235,8 +216,10 @@ static int smbus_alarm(void *context) if (smb_hc_read(hc, ACPI_SMB_STATUS, &status.raw)) return 0; /* Check if it is only a completion notify */ - if (status.fields.done) + if (status.fields.done && status.fields.status == SMBUS_OK) { + hc->done = true; wake_up(&hc->wait); + } if (!status.fields.alarm) return 0; mutex_lock(&hc->lock); @@ -262,29 +245,12 @@ extern int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit, acpi_handle handle, acpi_ec_query_func func, void *data); -static int macbook_dmi_match(const struct dmi_system_id *d) -{ - pr_debug("Detected MacBook, enabling workaround\n"); - macbook = true; - return 0; -} - -static struct dmi_system_id acpi_smbus_dmi_table[] = { - { macbook_dmi_match, "Apple MacBook", { - DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), - DMI_MATCH(DMI_PRODUCT_NAME, "MacBook") }, - }, - { }, -}; - static int acpi_smbus_hc_add(struct acpi_device *device) { int status; unsigned long long val; struct acpi_smb_hc *hc; - dmi_check_system(acpi_smbus_dmi_table); - if (!device) return -EINVAL; diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index e03b1ad25a90..167418e73445 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -1775,10 +1775,10 @@ int genpd_dev_pm_attach(struct device *dev) } pd = of_genpd_get_from_provider(&pd_args); + of_node_put(pd_args.np); if (IS_ERR(pd)) { dev_dbg(dev, "%s() failed to find PM domain: %ld\n", __func__, PTR_ERR(pd)); - of_node_put(dev->of_node); return -EPROBE_DEFER; } 
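Both genpd_dev_pm_attach() hunks in the domain.c diff (the one above and the one that follows) fix a device-tree refcount imbalance: the error paths dropped a reference on dev->of_node that was never taken, while the reference of_parse_phandle_with_args() does take on pd_args.np was never released. After the fix, pd_args.np is put exactly once, immediately after the provider lookup. A condensed sketch of the corrected flow (a fragment, assuming the function's usual phandle lookup just before it):

        ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
                                         "#power-domain-cells", 0, &pd_args);
        if (ret < 0)
                return ret;

        pd = of_genpd_get_from_provider(&pd_args);
        of_node_put(pd_args.np);        /* drop the phandle reference exactly once */
        if (IS_ERR(pd))
                return -EPROBE_DEFER;   /* no of_node_put(dev->of_node) here */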
@@ -1796,7 +1796,6 @@ int genpd_dev_pm_attach(struct device *dev) if (ret < 0) { dev_err(dev, "failed to add to PM domain %s: %d", pd->name, ret); - of_node_put(dev->of_node); goto out; } diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c index e60dd12e23aa..1e937ac5f456 100644 --- a/drivers/base/power/domain_governor.c +++ b/drivers/base/power/domain_governor.c @@ -160,9 +160,6 @@ static bool default_power_down_ok(struct dev_pm_domain *pd) struct gpd_timing_data *td; s64 constraint_ns; - if (!pdd->dev->driver) - continue; - /* * Check if the device is allowed to be off long enough for the * domain to turn off and on (that's how much time it will diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c index eb6e67451dec..0d77cd6fd8d1 100644 --- a/drivers/base/power/wakeirq.c +++ b/drivers/base/power/wakeirq.c @@ -68,6 +68,9 @@ int dev_pm_set_wake_irq(struct device *dev, int irq) struct wake_irq *wirq; int err; + if (irq < 0) + return -EINVAL; + wirq = kzalloc(sizeof(*wirq), GFP_KERNEL); if (!wirq) return -ENOMEM; @@ -167,6 +170,9 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq) struct wake_irq *wirq; int err; + if (irq < 0) + return -EINVAL; + wirq = kzalloc(sizeof(*wirq), GFP_KERNEL); if (!wirq) return -ENOMEM; diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index a28a562f7b7f..3457ac8c03e2 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -3810,7 +3810,6 @@ static int mtip_block_initialize(struct driver_data *dd) sector_t capacity; unsigned int index = 0; struct kobject *kobj; - unsigned char thd_name[16]; if (dd->disk) goto skip_create_disk; /* hw init done, before rebuild */ @@ -3958,10 +3957,9 @@ skip_create_disk: } start_service_thread: - sprintf(thd_name, "mtip_svc_thd_%02d", index); dd->mtip_svc_handler = kthread_create_on_node(mtip_service_thread, - dd, dd->numa_node, "%s", - thd_name); + dd, dd->numa_node, + "mtip_svc_thd_%02d", index); if (IS_ERR(dd->mtip_svc_handler)) { dev_err(&dd->pdev->dev, "service thread failed to start\n"); diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index 6255d1c4bba4..0c3940ec5e62 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -8,6 +8,7 @@ #include <linux/slab.h> #include <linux/blk-mq.h> #include <linux/hrtimer.h> +#include <linux/lightnvm.h> struct nullb_cmd { struct list_head list; @@ -17,6 +18,7 @@ struct nullb_cmd { struct bio *bio; unsigned int tag; struct nullb_queue *nq; + struct hrtimer timer; }; struct nullb_queue { @@ -39,23 +41,14 @@ struct nullb { struct nullb_queue *queues; unsigned int nr_queues; + char disk_name[DISK_NAME_LEN]; }; static LIST_HEAD(nullb_list); static struct mutex lock; static int null_major; static int nullb_indexes; - -struct completion_queue { - struct llist_head list; - struct hrtimer timer; -}; - -/* - * These are per-cpu for now, they will need to be configured by the - * complete_queues parameter and appropriately mapped. 
- */ -static DEFINE_PER_CPU(struct completion_queue, completion_queues); +static struct kmem_cache *ppa_cache; enum { NULL_IRQ_NONE = 0, @@ -119,6 +112,10 @@ static int nr_devices = 2; module_param(nr_devices, int, S_IRUGO); MODULE_PARM_DESC(nr_devices, "Number of devices to register"); +static bool use_lightnvm; +module_param(use_lightnvm, bool, S_IRUGO); +MODULE_PARM_DESC(use_lightnvm, "Register as a LightNVM device"); + static int irqmode = NULL_IRQ_SOFTIRQ; static int null_set_irqmode(const char *str, const struct kernel_param *kp) @@ -135,8 +132,8 @@ static const struct kernel_param_ops null_irqmode_param_ops = { device_param_cb(irqmode, &null_irqmode_param_ops, &irqmode, S_IRUGO); MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer"); -static int completion_nsec = 10000; -module_param(completion_nsec, int, S_IRUGO); +static unsigned long completion_nsec = 10000; +module_param(completion_nsec, ulong, S_IRUGO); MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns"); static int hw_queue_depth = 64; @@ -173,6 +170,8 @@ static void free_cmd(struct nullb_cmd *cmd) put_tag(cmd->nq, cmd->tag); } +static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer); + static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq) { struct nullb_cmd *cmd; @@ -183,6 +182,11 @@ static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq) cmd = &nq->cmds[tag]; cmd->tag = tag; cmd->nq = nq; + if (irqmode == NULL_IRQ_TIMER) { + hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, + HRTIMER_MODE_REL); + cmd->timer.function = null_cmd_timer_expired; + } return cmd; } @@ -213,6 +217,8 @@ static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait) static void end_cmd(struct nullb_cmd *cmd) { + struct request_queue *q = NULL; + switch (queue_mode) { case NULL_Q_MQ: blk_mq_end_request(cmd->rq, 0); @@ -223,55 +229,37 @@ static void end_cmd(struct nullb_cmd *cmd) break; case NULL_Q_BIO: bio_endio(cmd->bio); - break; + goto free_cmd; } + if (cmd->rq) + q = cmd->rq->q; + + /* Restart queue if needed, as we are freeing a tag */ + if (q && !q->mq_ops && blk_queue_stopped(q)) { + unsigned long flags; + + spin_lock_irqsave(q->queue_lock, flags); + if (blk_queue_stopped(q)) + blk_start_queue(q); + spin_unlock_irqrestore(q->queue_lock, flags); + } +free_cmd: free_cmd(cmd); } static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer) { - struct completion_queue *cq; - struct llist_node *entry; - struct nullb_cmd *cmd; - - cq = &per_cpu(completion_queues, smp_processor_id()); - - while ((entry = llist_del_all(&cq->list)) != NULL) { - entry = llist_reverse_order(entry); - do { - struct request_queue *q = NULL; - - cmd = container_of(entry, struct nullb_cmd, ll_list); - entry = entry->next; - if (cmd->rq) - q = cmd->rq->q; - end_cmd(cmd); - - if (q && !q->mq_ops && blk_queue_stopped(q)) { - spin_lock(q->queue_lock); - if (blk_queue_stopped(q)) - blk_start_queue(q); - spin_unlock(q->queue_lock); - } - } while (entry); - } + end_cmd(container_of(timer, struct nullb_cmd, timer)); return HRTIMER_NORESTART; } static void null_cmd_end_timer(struct nullb_cmd *cmd) { - struct completion_queue *cq = &per_cpu(completion_queues, get_cpu()); - - cmd->ll_list.next = NULL; - if (llist_add(&cmd->ll_list, &cq->list)) { - ktime_t kt = ktime_set(0, completion_nsec); + ktime_t kt = ktime_set(0, completion_nsec); - hrtimer_start(&cq->timer, kt, HRTIMER_MODE_REL_PINNED); - } - - put_cpu(); + hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL); } 
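The null_blk rework above replaces the shared per-CPU completion_queues (an llist drained by one hrtimer per CPU) with an hrtimer embedded in each nullb_cmd, so an expiring timer completes exactly one command via container_of(). Condensed from the hunks above and the null_queue_rq() hunk that follows (a sketch of the pattern, not standalone code):

        /* on expiry, recover the owning command and finish it */
        static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
        {
                end_cmd(container_of(timer, struct nullb_cmd, timer));
                return HRTIMER_NORESTART;
        }

        /* arm this command's own timer; cmd->timer was set up at allocation
         * time with hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL) */
        static void null_cmd_end_timer(struct nullb_cmd *cmd)
        {
                hrtimer_start(&cmd->timer, ktime_set(0, completion_nsec),
                              HRTIMER_MODE_REL);
        }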
static void null_softirq_done_fn(struct request *rq) @@ -369,6 +357,10 @@ static int null_queue_rq(struct blk_mq_hw_ctx *hctx, { struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq); + if (irqmode == NULL_IRQ_TIMER) { + hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + cmd->timer.function = null_cmd_timer_expired; + } cmd->rq = bd->rq; cmd->nq = hctx->driver_data; @@ -427,15 +419,156 @@ static void null_del_dev(struct nullb *nullb) { list_del_init(&nullb->list); - del_gendisk(nullb->disk); + if (use_lightnvm) + nvm_unregister(nullb->disk_name); + else + del_gendisk(nullb->disk); blk_cleanup_queue(nullb->q); if (queue_mode == NULL_Q_MQ) blk_mq_free_tag_set(&nullb->tag_set); - put_disk(nullb->disk); + if (!use_lightnvm) + put_disk(nullb->disk); cleanup_queues(nullb); kfree(nullb); } +#ifdef CONFIG_NVM + +static void null_lnvm_end_io(struct request *rq, int error) +{ + struct nvm_rq *rqd = rq->end_io_data; + struct nvm_dev *dev = rqd->dev; + + dev->mt->end_io(rqd, error); + + blk_put_request(rq); +} + +static int null_lnvm_submit_io(struct request_queue *q, struct nvm_rq *rqd) +{ + struct request *rq; + struct bio *bio = rqd->bio; + + rq = blk_mq_alloc_request(q, bio_rw(bio), GFP_KERNEL, 0); + if (IS_ERR(rq)) + return -ENOMEM; + + rq->cmd_type = REQ_TYPE_DRV_PRIV; + rq->__sector = bio->bi_iter.bi_sector; + rq->ioprio = bio_prio(bio); + + if (bio_has_data(bio)) + rq->nr_phys_segments = bio_phys_segments(q, bio); + + rq->__data_len = bio->bi_iter.bi_size; + rq->bio = rq->biotail = bio; + + rq->end_io_data = rqd; + + blk_execute_rq_nowait(q, NULL, rq, 0, null_lnvm_end_io); + + return 0; +} + +static int null_lnvm_id(struct request_queue *q, struct nvm_id *id) +{ + sector_t size = gb * 1024 * 1024 * 1024ULL; + sector_t blksize; + struct nvm_id_group *grp; + + id->ver_id = 0x1; + id->vmnt = 0; + id->cgrps = 1; + id->cap = 0x3; + id->dom = 0x1; + + id->ppaf.blk_offset = 0; + id->ppaf.blk_len = 16; + id->ppaf.pg_offset = 16; + id->ppaf.pg_len = 16; + id->ppaf.sect_offset = 32; + id->ppaf.sect_len = 8; + id->ppaf.pln_offset = 40; + id->ppaf.pln_len = 8; + id->ppaf.lun_offset = 48; + id->ppaf.lun_len = 8; + id->ppaf.ch_offset = 56; + id->ppaf.ch_len = 8; + + do_div(size, bs); /* convert size to pages */ + do_div(size, 256); /* convert size to pages per block */ + grp = &id->groups[0]; + grp->mtype = 0; + grp->fmtype = 0; + grp->num_ch = 1; + grp->num_pg = 256; + blksize = size; + do_div(size, (1 << 16)); + grp->num_lun = size + 1; + do_div(blksize, grp->num_lun); + grp->num_blk = blksize; + grp->num_pln = 1; + + grp->fpg_sz = bs; + grp->csecs = bs; + grp->trdt = 25000; + grp->trdm = 25000; + grp->tprt = 500000; + grp->tprm = 500000; + grp->tbet = 1500000; + grp->tbem = 1500000; + grp->mpos = 0x010101; /* single plane rwe */ + grp->cpar = hw_queue_depth; + + return 0; +} + +static void *null_lnvm_create_dma_pool(struct request_queue *q, char *name) +{ + mempool_t *virtmem_pool; + + virtmem_pool = mempool_create_slab_pool(64, ppa_cache); + if (!virtmem_pool) { + pr_err("null_blk: Unable to create virtual memory pool\n"); + return NULL; + } + + return virtmem_pool; +} + +static void null_lnvm_destroy_dma_pool(void *pool) +{ + mempool_destroy(pool); +} + +static void *null_lnvm_dev_dma_alloc(struct request_queue *q, void *pool, + gfp_t mem_flags, dma_addr_t *dma_handler) +{ + return mempool_alloc(pool, mem_flags); +} + +static void null_lnvm_dev_dma_free(void *pool, void *entry, + dma_addr_t dma_handler) +{ + mempool_free(entry, pool); +} + +static struct nvm_dev_ops null_lnvm_dev_ops = { + .identity 
= null_lnvm_id, + .submit_io = null_lnvm_submit_io, + + .create_dma_pool = null_lnvm_create_dma_pool, + .destroy_dma_pool = null_lnvm_destroy_dma_pool, + .dev_dma_alloc = null_lnvm_dev_dma_alloc, + .dev_dma_free = null_lnvm_dev_dma_free, + + /* Simulate nvme protocol restriction */ + .max_phys_sect = 64, +}; +#else +static struct nvm_dev_ops null_lnvm_dev_ops; +#endif /* CONFIG_NVM */ + static int null_open(struct block_device *bdev, fmode_t mode) { return 0; @@ -575,11 +708,6 @@ static int null_add_dev(void) queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q); queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q); - disk = nullb->disk = alloc_disk_node(1, home_node); - if (!disk) { - rv = -ENOMEM; - goto out_cleanup_blk_queue; - } mutex_lock(&lock); list_add_tail(&nullb->list, &nullb_list); @@ -589,6 +717,21 @@ static int null_add_dev(void) blk_queue_logical_block_size(nullb->q, bs); blk_queue_physical_block_size(nullb->q, bs); + sprintf(nullb->disk_name, "nullb%d", nullb->index); + + if (use_lightnvm) { + rv = nvm_register(nullb->q, nullb->disk_name, + &null_lnvm_dev_ops); + if (rv) + goto out_cleanup_blk_queue; + goto done; + } + + disk = nullb->disk = alloc_disk_node(1, home_node); + if (!disk) { + rv = -ENOMEM; + goto out_cleanup_lightnvm; + } size = gb * 1024 * 1024 * 1024ULL; set_capacity(disk, size >> 9); @@ -598,10 +741,15 @@ static int null_add_dev(void) disk->fops = &null_fops; disk->private_data = nullb; disk->queue = nullb->q; - sprintf(disk->disk_name, "nullb%d", nullb->index); + strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN); + add_disk(disk); +done: return 0; +out_cleanup_lightnvm: + if (use_lightnvm) + nvm_unregister(nullb->disk_name); out_cleanup_blk_queue: blk_cleanup_queue(nullb->q); out_cleanup_tags: @@ -625,6 +773,18 @@ static int __init null_init(void) bs = PAGE_SIZE; } + if (use_lightnvm && bs != 4096) { + pr_warn("null_blk: LightNVM only supports 4k block size\n"); + pr_warn("null_blk: defaults block size to 4k\n"); + bs = 4096; + } + + if (use_lightnvm && queue_mode != NULL_Q_MQ) { + pr_warn("null_blk: LightNVM only supported for blk-mq\n"); + pr_warn("null_blk: defaults queue mode to blk-mq\n"); + queue_mode = NULL_Q_MQ; + } + if (queue_mode == NULL_Q_MQ && use_per_node_hctx) { if (submit_queues < nr_online_nodes) { pr_warn("null_blk: submit_queues param is set to %u.", @@ -638,32 +798,31 @@ static int __init null_init(void) mutex_init(&lock); - /* Initialize a separate list for each CPU for issuing softirqs */ - for_each_possible_cpu(i) { - struct completion_queue *cq = &per_cpu(completion_queues, i); - - init_llist_head(&cq->list); - - if (irqmode != NULL_IRQ_TIMER) - continue; - - hrtimer_init(&cq->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); - cq->timer.function = null_cmd_timer_expired; - } - null_major = register_blkdev(0, "nullb"); if (null_major < 0) return null_major; + if (use_lightnvm) { + ppa_cache = kmem_cache_create("ppa_cache", 64 * sizeof(u64), + 0, 0, NULL); + if (!ppa_cache) { + pr_err("null_blk: unable to create ppa cache\n"); + return -ENOMEM; + } + } + for (i = 0; i < nr_devices; i++) { if (null_add_dev()) { unregister_blkdev(null_major, "nullb"); - return -EINVAL; + goto err_ppa; } } pr_info("null: module loaded\n"); return 0; +err_ppa: + kmem_cache_destroy(ppa_cache); + return -EINVAL; } static void __exit null_exit(void) @@ -678,6 +837,8 @@ static void __exit null_exit(void) null_del_dev(nullb); } mutex_unlock(&lock); + + kmem_cache_destroy(ppa_cache); } module_init(null_init); diff --git a/drivers/block/rbd.c 
b/drivers/block/rbd.c index 235708c7c46e..81ea69fee7ca 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -3442,6 +3442,7 @@ static void rbd_queue_workfn(struct work_struct *work) goto err_rq; } img_request->rq = rq; + snapc = NULL; /* img_request consumes a ref */ if (op_type == OBJ_OP_DISCARD) result = rbd_img_request_fill(img_request, OBJ_REQUEST_NODATA, diff --git a/drivers/bus/omap-ocp2scp.c b/drivers/bus/omap-ocp2scp.c index 9f1856948758..bf500e0e7362 100644 --- a/drivers/bus/omap-ocp2scp.c +++ b/drivers/bus/omap-ocp2scp.c @@ -117,7 +117,7 @@ static struct platform_driver omap_ocp2scp_driver = { module_platform_driver(omap_ocp2scp_driver); -MODULE_ALIAS("platform: omap-ocp2scp"); +MODULE_ALIAS("platform:omap-ocp2scp"); MODULE_AUTHOR("Texas Instruments Inc."); MODULE_DESCRIPTION("OMAP OCP2SCP driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index 654f6f36a071..55fe9020459f 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c @@ -412,18 +412,42 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info) return rv; } -static void start_check_enables(struct smi_info *smi_info) +static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val) +{ + smi_info->last_timeout_jiffies = jiffies; + mod_timer(&smi_info->si_timer, new_val); + smi_info->timer_running = true; +} + +/* + * Start a new message and (re)start the timer and thread. + */ +static void start_new_msg(struct smi_info *smi_info, unsigned char *msg, + unsigned int size) +{ + smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES); + + if (smi_info->thread) + wake_up_process(smi_info->thread); + + smi_info->handlers->start_transaction(smi_info->si_sm, msg, size); +} + +static void start_check_enables(struct smi_info *smi_info, bool start_timer) { unsigned char msg[2]; msg[0] = (IPMI_NETFN_APP_REQUEST << 2); msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD; - smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2); + if (start_timer) + start_new_msg(smi_info, msg, 2); + else + smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2); smi_info->si_state = SI_CHECKING_ENABLES; } -static void start_clear_flags(struct smi_info *smi_info) +static void start_clear_flags(struct smi_info *smi_info, bool start_timer) { unsigned char msg[3]; @@ -432,7 +456,10 @@ static void start_clear_flags(struct smi_info *smi_info) msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD; msg[2] = WDT_PRE_TIMEOUT_INT; - smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3); + if (start_timer) + start_new_msg(smi_info, msg, 3); + else + smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3); smi_info->si_state = SI_CLEARING_FLAGS; } @@ -442,10 +469,8 @@ static void start_getting_msg_queue(struct smi_info *smi_info) smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD; smi_info->curr_msg->data_size = 2; - smi_info->handlers->start_transaction( - smi_info->si_sm, - smi_info->curr_msg->data, - smi_info->curr_msg->data_size); + start_new_msg(smi_info, smi_info->curr_msg->data, + smi_info->curr_msg->data_size); smi_info->si_state = SI_GETTING_MESSAGES; } @@ -455,20 +480,11 @@ static void start_getting_events(struct smi_info *smi_info) smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD; smi_info->curr_msg->data_size = 2; - smi_info->handlers->start_transaction( - smi_info->si_sm, - smi_info->curr_msg->data, - smi_info->curr_msg->data_size); + start_new_msg(smi_info, smi_info->curr_msg->data, + smi_info->curr_msg->data_size); 
smi_info->si_state = SI_GETTING_EVENTS; } -static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val) -{ - smi_info->last_timeout_jiffies = jiffies; - mod_timer(&smi_info->si_timer, new_val); - smi_info->timer_running = true; -} - /* * When we have a situation where we run out of memory and cannot * allocate messages, we just leave them in the BMC and run the system @@ -478,11 +494,11 @@ static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val) * Note that we cannot just use disable_irq(), since the interrupt may * be shared. */ -static inline bool disable_si_irq(struct smi_info *smi_info) +static inline bool disable_si_irq(struct smi_info *smi_info, bool start_timer) { if ((smi_info->irq) && (!smi_info->interrupt_disabled)) { smi_info->interrupt_disabled = true; - start_check_enables(smi_info); + start_check_enables(smi_info, start_timer); return true; } return false; @@ -492,7 +508,7 @@ static inline bool enable_si_irq(struct smi_info *smi_info) { if ((smi_info->irq) && (smi_info->interrupt_disabled)) { smi_info->interrupt_disabled = false; - start_check_enables(smi_info); + start_check_enables(smi_info, true); return true; } return false; @@ -510,7 +526,7 @@ static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info) msg = ipmi_alloc_smi_msg(); if (!msg) { - if (!disable_si_irq(smi_info)) + if (!disable_si_irq(smi_info, true)) smi_info->si_state = SI_NORMAL; } else if (enable_si_irq(smi_info)) { ipmi_free_smi_msg(msg); @@ -526,7 +542,7 @@ static void handle_flags(struct smi_info *smi_info) /* Watchdog pre-timeout */ smi_inc_stat(smi_info, watchdog_pretimeouts); - start_clear_flags(smi_info); + start_clear_flags(smi_info, true); smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT; if (smi_info->intf) ipmi_smi_watchdog_pretimeout(smi_info->intf); @@ -879,8 +895,7 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info, msg[0] = (IPMI_NETFN_APP_REQUEST << 2); msg[1] = IPMI_GET_MSG_FLAGS_CMD; - smi_info->handlers->start_transaction( - smi_info->si_sm, msg, 2); + start_new_msg(smi_info, msg, 2); smi_info->si_state = SI_GETTING_FLAGS; goto restart; } @@ -910,7 +925,7 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info, * disable and messages disabled. */ if (smi_info->supports_event_msg_buff || smi_info->irq) { - start_check_enables(smi_info); + start_check_enables(smi_info, true); } else { smi_info->curr_msg = alloc_msg_handle_irq(smi_info); if (!smi_info->curr_msg) @@ -920,6 +935,13 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info, } goto restart; } + + if (si_sm_result == SI_SM_IDLE && smi_info->timer_running) { + /* OK if it fails; the timer will just go off. */ + if (del_timer(&smi_info->si_timer)) + smi_info->timer_running = false; + } + out: return si_sm_result; } @@ -2560,6 +2582,7 @@ static const struct of_device_id of_ipmi_match[] = { .data = (void *)(unsigned long) SI_BT }, {}, }; +MODULE_DEVICE_TABLE(of, of_ipmi_match); static int of_ipmi_probe(struct platform_device *dev) { @@ -2646,7 +2669,6 @@ static int of_ipmi_probe(struct platform_device *dev) } return 0; } -MODULE_DEVICE_TABLE(of, of_ipmi_match); #else #define of_ipmi_match NULL static int of_ipmi_probe(struct platform_device *dev) @@ -3613,7 +3635,7 @@ static int try_smi_init(struct smi_info *new_smi) * Start clearing the flags before we enable interrupts or the * timer to avoid racing with the timer. */ - start_clear_flags(new_smi); + start_clear_flags(new_smi, false); /* * IRQ is defined to be set when non-zero. 
req_events will @@ -3908,7 +3930,7 @@ static void cleanup_one_si(struct smi_info *to_clean) poll(to_clean); schedule_timeout_uninterruptible(1); } - disable_si_irq(to_clean); + disable_si_irq(to_clean, false); while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) { poll(to_clean); schedule_timeout_uninterruptible(1); diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c index 0ac3bd1a5497..096f0cef4da1 100644 --- a/drivers/char/ipmi/ipmi_watchdog.c +++ b/drivers/char/ipmi/ipmi_watchdog.c @@ -153,6 +153,9 @@ static int timeout = 10; /* The pre-timeout is disabled by default. */ static int pretimeout; +/* Default timeout to set on panic */ +static int panic_wdt_timeout = 255; + /* Default action is to reset the board on a timeout. */ static unsigned char action_val = WDOG_TIMEOUT_RESET; @@ -293,6 +296,9 @@ MODULE_PARM_DESC(timeout, "Timeout value in seconds."); module_param(pretimeout, timeout, 0644); MODULE_PARM_DESC(pretimeout, "Pretimeout value in seconds."); +module_param(panic_wdt_timeout, timeout, 0644); +MODULE_PARM_DESC(panic_wdt_timeout, "Timeout value on kernel panic in seconds."); + module_param_cb(action, &param_ops_str, action_op, 0644); MODULE_PARM_DESC(action, "Timeout action. One of: " "reset, none, power_cycle, power_off."); @@ -1189,7 +1195,7 @@ static int wdog_panic_handler(struct notifier_block *this, /* Make sure we do this only once. */ panic_event_handled = 1; - timeout = 255; + timeout = panic_wdt_timeout; pretimeout = 0; panic_halt_ipmi_set_timeout(); } diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index 71cfdf7c9708..2eb5f0efae90 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig @@ -1,4 +1,5 @@ menu "Clock Source drivers" + depends on !ARCH_USES_GETTIMEOFFSET config CLKSRC_OF bool diff --git a/drivers/clocksource/fsl_ftm_timer.c b/drivers/clocksource/fsl_ftm_timer.c index 10202f1fdfd7..517e1c7624d4 100644 --- a/drivers/clocksource/fsl_ftm_timer.c +++ b/drivers/clocksource/fsl_ftm_timer.c @@ -203,7 +203,7 @@ static int __init ftm_clockevent_init(unsigned long freq, int irq) int err; ftm_writel(0x00, priv->clkevt_base + FTM_CNTIN); - ftm_writel(~0UL, priv->clkevt_base + FTM_MOD); + ftm_writel(~0u, priv->clkevt_base + FTM_MOD); ftm_reset_counter(priv->clkevt_base); @@ -230,7 +230,7 @@ static int __init ftm_clocksource_init(unsigned long freq) int err; ftm_writel(0x00, priv->clksrc_base + FTM_CNTIN); - ftm_writel(~0UL, priv->clksrc_base + FTM_MOD); + ftm_writel(~0u, priv->clksrc_base + FTM_MOD); ftm_reset_counter(priv->clksrc_base); diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index 1582c1c016b0..235a1ba73d92 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm @@ -84,6 +84,7 @@ config ARM_KIRKWOOD_CPUFREQ config ARM_MT8173_CPUFREQ bool "Mediatek MT8173 CPUFreq support" depends on ARCH_MEDIATEK && REGULATOR + depends on ARM64 || (ARM_CPU_TOPOLOGY && COMPILE_TEST) depends on !CPU_THERMAL || THERMAL=y select PM_OPP help @@ -201,7 +202,7 @@ config ARM_SA1110_CPUFREQ config ARM_SCPI_CPUFREQ tristate "SCPI based CPUfreq driver" - depends on ARM_BIG_LITTLE_CPUFREQ && ARM_SCPI_PROTOCOL + depends on ARM_BIG_LITTLE_CPUFREQ && ARM_SCPI_PROTOCOL && COMMON_CLK_SCPI help This adds the CPUfreq driver support for ARM big.LITTLE platforms using SCPI protocol for CPU power management. 
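One detail worth calling out in the fsl_ftm_timer.c hunks above: FTM_MOD is a 32-bit register, and on a 64-bit build ~0UL is a 64-bit all-ones value, so the old ftm_writel(~0UL, ...) relied on silent truncation; ~0u states the intended 32-bit value. A tiny userspace illustration (standalone, illustrative only):

        #include <stdio.h>

        int main(void)
        {
                printf("~0UL = %lx\n", ~0UL);   /* ffffffffffffffff on LP64 */
                printf("~0u  = %x\n", ~0u);     /* ffffffff */
                return 0;
        }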
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86 index adbd1de1cea5..c59bdcb83217 100644 --- a/drivers/cpufreq/Kconfig.x86 +++ b/drivers/cpufreq/Kconfig.x86 @@ -5,7 +5,6 @@ config X86_INTEL_PSTATE bool "Intel P state control" depends on X86 - select ACPI_PROCESSOR if ACPI help This driver provides a P state for Intel core processors. The driver implements an internal governor and will become diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c index e8cb334094b0..7c0bdfb1a2ca 100644 --- a/drivers/cpufreq/cppc_cpufreq.c +++ b/drivers/cpufreq/cppc_cpufreq.c @@ -98,10 +98,11 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy) policy->max = cpu->perf_caps.highest_perf; policy->cpuinfo.min_freq = policy->min; policy->cpuinfo.max_freq = policy->max; + policy->shared_type = cpu->shared_type; if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) cpumask_copy(policy->cpus, cpu->shared_cpu_map); - else { + else if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL) { /* Support only SW_ANY for now. */ pr_debug("Unsupported CPU co-ord type\n"); return -EFAULT; diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 7c48e7316d91..8412ce5f93a7 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -976,10 +976,14 @@ static int cpufreq_init_policy(struct cpufreq_policy *policy) new_policy.governor = gov; - /* Use the default policy if its valid. */ - if (cpufreq_driver->setpolicy) - cpufreq_parse_governor(gov->name, &new_policy.policy, NULL); - + /* Use the default policy if there is no last_policy. */ + if (cpufreq_driver->setpolicy) { + if (policy->last_policy) + new_policy.policy = policy->last_policy; + else + cpufreq_parse_governor(gov->name, &new_policy.policy, + NULL); + } /* set default policy */ return cpufreq_set_policy(policy, &new_policy); } @@ -1330,6 +1334,8 @@ static void cpufreq_offline_prepare(unsigned int cpu) if (has_target()) strncpy(policy->last_governor, policy->governor->name, CPUFREQ_NAME_LEN); + else + policy->last_policy = policy->policy; } else if (cpu == policy->cpu) { /* Nominate new CPU */ policy->cpu = cpumask_any(policy->cpus); @@ -1401,13 +1407,10 @@ static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif) } cpumask_clear_cpu(cpu, policy->real_cpus); + remove_cpu_dev_symlink(policy, cpu); - if (cpumask_empty(policy->real_cpus)) { + if (cpumask_empty(policy->real_cpus)) cpufreq_policy_free(policy, true); - return; - } - - remove_cpu_dev_symlink(policy, cpu); } static void handle_update(struct work_struct *work) diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 2e31d097def6..4d07cbd2b23c 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -34,14 +34,10 @@ #include <asm/cpu_device_id.h> #include <asm/cpufeature.h> -#if IS_ENABLED(CONFIG_ACPI) -#include <acpi/processor.h> -#endif - -#define BYT_RATIOS 0x66a -#define BYT_VIDS 0x66b -#define BYT_TURBO_RATIOS 0x66c -#define BYT_TURBO_VIDS 0x66d +#define ATOM_RATIOS 0x66a +#define ATOM_VIDS 0x66b +#define ATOM_TURBO_RATIOS 0x66c +#define ATOM_TURBO_VIDS 0x66d #define FRAC_BITS 8 #define int_tofp(X) ((int64_t)(X) << FRAC_BITS) @@ -117,9 +113,6 @@ struct cpudata { u64 prev_mperf; u64 prev_tsc; struct sample sample; -#if IS_ENABLED(CONFIG_ACPI) - struct acpi_processor_performance acpi_perf_data; -#endif }; static struct cpudata **all_cpu_data; @@ -150,7 +143,6 @@ struct cpu_defaults { static struct pstate_adjust_policy pid_params; static struct 
pstate_funcs pstate_funcs; static int hwp_active; -static int no_acpi_perf; struct perf_limits { int no_turbo; @@ -163,8 +155,6 @@ struct perf_limits { int max_sysfs_pct; int min_policy_pct; int min_sysfs_pct; - int max_perf_ctl; - int min_perf_ctl; }; static struct perf_limits performance_limits = { @@ -191,8 +181,6 @@ static struct perf_limits powersave_limits = { .max_sysfs_pct = 100, .min_policy_pct = 0, .min_sysfs_pct = 0, - .max_perf_ctl = 0, - .min_perf_ctl = 0, }; #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE @@ -201,153 +189,6 @@ static struct perf_limits *limits = &performance_limits; static struct perf_limits *limits = &powersave_limits; #endif -#if IS_ENABLED(CONFIG_ACPI) -/* - * The max target pstate ratio is a 8 bit value in both PLATFORM_INFO MSR and - * in TURBO_RATIO_LIMIT MSR, which pstate driver stores in max_pstate and - * max_turbo_pstate fields. The PERF_CTL MSR contains 16 bit value for P state - * ratio, out of it only high 8 bits are used. For example 0x1700 is setting - * target ratio 0x17. The _PSS control value stores in a format which can be - * directly written to PERF_CTL MSR. But in intel_pstate driver this shift - * occurs during write to PERF_CTL (E.g. for cores core_set_pstate()). - * This function converts the _PSS control value to intel pstate driver format - * for comparison and assignment. - */ -static int convert_to_native_pstate_format(struct cpudata *cpu, int index) -{ - return cpu->acpi_perf_data.states[index].control >> 8; -} - -static int intel_pstate_init_perf_limits(struct cpufreq_policy *policy) -{ - struct cpudata *cpu; - int ret; - bool turbo_absent = false; - int max_pstate_index; - int min_pss_ctl, max_pss_ctl, turbo_pss_ctl; - int i; - - cpu = all_cpu_data[policy->cpu]; - - pr_debug("intel_pstate: default limits 0x%x 0x%x 0x%x\n", - cpu->pstate.min_pstate, cpu->pstate.max_pstate, - cpu->pstate.turbo_pstate); - - if (!cpu->acpi_perf_data.shared_cpu_map && - zalloc_cpumask_var_node(&cpu->acpi_perf_data.shared_cpu_map, - GFP_KERNEL, cpu_to_node(policy->cpu))) { - return -ENOMEM; - } - - ret = acpi_processor_register_performance(&cpu->acpi_perf_data, - policy->cpu); - if (ret) - return ret; - - /* - * Check if the control value in _PSS is for PERF_CTL MSR, which should - * guarantee that the states returned by it map to the states in our - * list directly. - */ - if (cpu->acpi_perf_data.control_register.space_id != - ACPI_ADR_SPACE_FIXED_HARDWARE) - return -EIO; - - pr_debug("intel_pstate: CPU%u - ACPI _PSS perf data\n", policy->cpu); - for (i = 0; i < cpu->acpi_perf_data.state_count; i++) - pr_debug(" %cP%d: %u MHz, %u mW, 0x%x\n", - (i == cpu->acpi_perf_data.state ? 
'*' : ' '), i, - (u32) cpu->acpi_perf_data.states[i].core_frequency, - (u32) cpu->acpi_perf_data.states[i].power, - (u32) cpu->acpi_perf_data.states[i].control); - - /* - * If there is only one entry _PSS, simply ignore _PSS and continue as - * usual without taking _PSS into account - */ - if (cpu->acpi_perf_data.state_count < 2) - return 0; - - turbo_pss_ctl = convert_to_native_pstate_format(cpu, 0); - min_pss_ctl = convert_to_native_pstate_format(cpu, - cpu->acpi_perf_data.state_count - 1); - /* Check if there is a turbo freq in _PSS */ - if (turbo_pss_ctl <= cpu->pstate.max_pstate && - turbo_pss_ctl > cpu->pstate.min_pstate) { - pr_debug("intel_pstate: no turbo range exists in _PSS\n"); - limits->no_turbo = limits->turbo_disabled = 1; - cpu->pstate.turbo_pstate = cpu->pstate.max_pstate; - turbo_absent = true; - } - - /* Check if the max non turbo p state < Intel P state max */ - max_pstate_index = turbo_absent ? 0 : 1; - max_pss_ctl = convert_to_native_pstate_format(cpu, max_pstate_index); - if (max_pss_ctl < cpu->pstate.max_pstate && - max_pss_ctl > cpu->pstate.min_pstate) - cpu->pstate.max_pstate = max_pss_ctl; - - /* check If min perf > Intel P State min */ - if (min_pss_ctl > cpu->pstate.min_pstate && - min_pss_ctl < cpu->pstate.max_pstate) { - cpu->pstate.min_pstate = min_pss_ctl; - policy->cpuinfo.min_freq = min_pss_ctl * cpu->pstate.scaling; - } - - if (turbo_absent) - policy->cpuinfo.max_freq = cpu->pstate.max_pstate * - cpu->pstate.scaling; - else { - policy->cpuinfo.max_freq = cpu->pstate.turbo_pstate * - cpu->pstate.scaling; - /* - * The _PSS table doesn't contain whole turbo frequency range. - * This just contains +1 MHZ above the max non turbo frequency, - * with control value corresponding to max turbo ratio. But - * when cpufreq set policy is called, it will call with this - * max frequency, which will cause a reduced performance as - * this driver uses real max turbo frequency as the max - * frequeny. So correct this frequency in _PSS table to - * correct max turbo frequency based on the turbo ratio. - * Also need to convert to MHz as _PSS freq is in MHz. 
- */ - cpu->acpi_perf_data.states[0].core_frequency = - turbo_pss_ctl * 100; - } - - pr_debug("intel_pstate: Updated limits using _PSS 0x%x 0x%x 0x%x\n", - cpu->pstate.min_pstate, cpu->pstate.max_pstate, - cpu->pstate.turbo_pstate); - pr_debug("intel_pstate: policy max_freq=%d Khz min_freq = %d KHz\n", - policy->cpuinfo.max_freq, policy->cpuinfo.min_freq); - - return 0; -} - -static int intel_pstate_exit_perf_limits(struct cpufreq_policy *policy) -{ - struct cpudata *cpu; - - if (!no_acpi_perf) - return 0; - - cpu = all_cpu_data[policy->cpu]; - acpi_processor_unregister_performance(policy->cpu); - return 0; -} - -#else -static int intel_pstate_init_perf_limits(struct cpufreq_policy *policy) -{ - return 0; -} - -static int intel_pstate_exit_perf_limits(struct cpufreq_policy *policy) -{ - return 0; -} -#endif - static inline void pid_reset(struct _pid *pid, int setpoint, int busy, int deadband, int integral) { pid->setpoint = setpoint; @@ -687,31 +528,31 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata) wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1); } -static int byt_get_min_pstate(void) +static int atom_get_min_pstate(void) { u64 value; - rdmsrl(BYT_RATIOS, value); + rdmsrl(ATOM_RATIOS, value); return (value >> 8) & 0x7F; } -static int byt_get_max_pstate(void) +static int atom_get_max_pstate(void) { u64 value; - rdmsrl(BYT_RATIOS, value); + rdmsrl(ATOM_RATIOS, value); return (value >> 16) & 0x7F; } -static int byt_get_turbo_pstate(void) +static int atom_get_turbo_pstate(void) { u64 value; - rdmsrl(BYT_TURBO_RATIOS, value); + rdmsrl(ATOM_TURBO_RATIOS, value); return value & 0x7F; } -static void byt_set_pstate(struct cpudata *cpudata, int pstate) +static void atom_set_pstate(struct cpudata *cpudata, int pstate) { u64 val; int32_t vid_fp; @@ -736,27 +577,42 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate) wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val); } -#define BYT_BCLK_FREQS 5 -static int byt_freq_table[BYT_BCLK_FREQS] = { 833, 1000, 1333, 1167, 800}; - -static int byt_get_scaling(void) +static int silvermont_get_scaling(void) { u64 value; int i; + /* Defined in Table 35-6 from SDM (Sept 2015) */ + static int silvermont_freq_table[] = { + 83300, 100000, 133300, 116700, 80000}; rdmsrl(MSR_FSB_FREQ, value); - i = value & 0x3; + i = value & 0x7; + WARN_ON(i > 4); - BUG_ON(i > BYT_BCLK_FREQS); + return silvermont_freq_table[i]; +} - return byt_freq_table[i] * 100; +static int airmont_get_scaling(void) +{ + u64 value; + int i; + /* Defined in Table 35-10 from SDM (Sept 2015) */ + static int airmont_freq_table[] = { + 83300, 100000, 133300, 116700, 80000, + 93300, 90000, 88900, 87500}; + + rdmsrl(MSR_FSB_FREQ, value); + i = value & 0xF; + WARN_ON(i > 8); + + return airmont_freq_table[i]; } -static void byt_get_vid(struct cpudata *cpudata) +static void atom_get_vid(struct cpudata *cpudata) { u64 value; - rdmsrl(BYT_VIDS, value); + rdmsrl(ATOM_VIDS, value); cpudata->vid.min = int_tofp((value >> 8) & 0x7f); cpudata->vid.max = int_tofp((value >> 16) & 0x7f); cpudata->vid.ratio = div_fp( @@ -764,7 +620,7 @@ static void byt_get_vid(struct cpudata *cpudata) int_tofp(cpudata->pstate.max_pstate - cpudata->pstate.min_pstate)); - rdmsrl(BYT_TURBO_VIDS, value); + rdmsrl(ATOM_TURBO_VIDS, value); cpudata->vid.turbo = value & 0x7f; } @@ -885,7 +741,7 @@ static struct cpu_defaults core_params = { }, }; -static struct cpu_defaults byt_params = { +static struct cpu_defaults silvermont_params = { .pid_policy = { .sample_rate_ms = 10, .deadband = 0, @@ -895,13 +751,33 @@ static 
struct cpu_defaults byt_params = { .i_gain_pct = 4, }, .funcs = { - .get_max = byt_get_max_pstate, - .get_max_physical = byt_get_max_pstate, - .get_min = byt_get_min_pstate, - .get_turbo = byt_get_turbo_pstate, - .set = byt_set_pstate, - .get_scaling = byt_get_scaling, - .get_vid = byt_get_vid, + .get_max = atom_get_max_pstate, + .get_max_physical = atom_get_max_pstate, + .get_min = atom_get_min_pstate, + .get_turbo = atom_get_turbo_pstate, + .set = atom_set_pstate, + .get_scaling = silvermont_get_scaling, + .get_vid = atom_get_vid, + }, +}; + +static struct cpu_defaults airmont_params = { + .pid_policy = { + .sample_rate_ms = 10, + .deadband = 0, + .setpoint = 60, + .p_gain_pct = 14, + .d_gain_pct = 0, + .i_gain_pct = 4, + }, + .funcs = { + .get_max = atom_get_max_pstate, + .get_max_physical = atom_get_max_pstate, + .get_min = atom_get_min_pstate, + .get_turbo = atom_get_turbo_pstate, + .set = atom_set_pstate, + .get_scaling = airmont_get_scaling, + .get_vid = atom_get_vid, }, }; @@ -938,23 +814,12 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max) * policy, or by cpu specific default values determined through * experimentation. */ - if (limits->max_perf_ctl && limits->max_sysfs_pct >= - limits->max_policy_pct) { - *max = limits->max_perf_ctl; - } else { - max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), - limits->max_perf)); - *max = clamp_t(int, max_perf_adj, cpu->pstate.min_pstate, - cpu->pstate.turbo_pstate); - } + max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits->max_perf)); + *max = clamp_t(int, max_perf_adj, + cpu->pstate.min_pstate, cpu->pstate.turbo_pstate); - if (limits->min_perf_ctl) { - *min = limits->min_perf_ctl; - } else { - min_perf = fp_toint(mul_fp(int_tofp(max_perf), - limits->min_perf)); - *min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf); - } + min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits->min_perf)); + *min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf); } static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate, bool force) @@ -1153,7 +1018,7 @@ static void intel_pstate_timer_func(unsigned long __data) static const struct x86_cpu_id intel_pstate_cpu_ids[] = { ICPU(0x2a, core_params), ICPU(0x2d, core_params), - ICPU(0x37, byt_params), + ICPU(0x37, silvermont_params), ICPU(0x3a, core_params), ICPU(0x3c, core_params), ICPU(0x3d, core_params), @@ -1162,7 +1027,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = { ICPU(0x45, core_params), ICPU(0x46, core_params), ICPU(0x47, core_params), - ICPU(0x4c, byt_params), + ICPU(0x4c, airmont_params), ICPU(0x4e, core_params), ICPU(0x4f, core_params), ICPU(0x5e, core_params), @@ -1229,12 +1094,6 @@ static unsigned int intel_pstate_get(unsigned int cpu_num) static int intel_pstate_set_policy(struct cpufreq_policy *policy) { -#if IS_ENABLED(CONFIG_ACPI) - struct cpudata *cpu; - int i; -#endif - pr_debug("intel_pstate: %s max %u policy->max %u\n", __func__, - policy->cpuinfo.max_freq, policy->max); if (!policy->cpuinfo.max_freq) return -ENODEV; @@ -1242,6 +1101,8 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) policy->max >= policy->cpuinfo.max_freq) { pr_debug("intel_pstate: set performance\n"); limits = &performance_limits; + if (hwp_active) + intel_pstate_hwp_set(); return 0; } @@ -1249,7 +1110,8 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) limits = &powersave_limits; limits->min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq; limits->min_policy_pct = clamp_t(int, 
limits->min_policy_pct, 0 , 100); - limits->max_policy_pct = (policy->max * 100) / policy->cpuinfo.max_freq; + limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100, + policy->cpuinfo.max_freq); limits->max_policy_pct = clamp_t(int, limits->max_policy_pct, 0 , 100); /* Normalize user input to [min_policy_pct, max_policy_pct] */ @@ -1261,6 +1123,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) limits->max_sysfs_pct); limits->max_perf_pct = max(limits->min_policy_pct, limits->max_perf_pct); + limits->max_perf = round_up(limits->max_perf, 8); /* Make sure min_perf_pct <= max_perf_pct */ limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct); @@ -1270,23 +1133,6 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) limits->max_perf = div_fp(int_tofp(limits->max_perf_pct), int_tofp(100)); -#if IS_ENABLED(CONFIG_ACPI) - cpu = all_cpu_data[policy->cpu]; - for (i = 0; i < cpu->acpi_perf_data.state_count; i++) { - int control; - - control = convert_to_native_pstate_format(cpu, i); - if (control * cpu->pstate.scaling == policy->max) - limits->max_perf_ctl = control; - if (control * cpu->pstate.scaling == policy->min) - limits->min_perf_ctl = control; - } - - pr_debug("intel_pstate: max %u policy_max %u perf_ctl [0x%x-0x%x]\n", - policy->cpuinfo.max_freq, policy->max, limits->min_perf_ctl, - limits->max_perf_ctl); -#endif - if (hwp_active) intel_pstate_hwp_set(); @@ -1341,30 +1187,18 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy) policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling; policy->cpuinfo.max_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling; - if (!no_acpi_perf) - intel_pstate_init_perf_limits(policy); - /* - * If there is no acpi perf data or error, we ignore and use Intel P - * state calculated limits, So this is not fatal error. - */ policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; cpumask_set_cpu(policy->cpu, policy->cpus); return 0; } -static int intel_pstate_cpu_exit(struct cpufreq_policy *policy) -{ - return intel_pstate_exit_perf_limits(policy); -} - static struct cpufreq_driver intel_pstate_driver = { .flags = CPUFREQ_CONST_LOOPS, .verify = intel_pstate_verify_policy, .setpolicy = intel_pstate_set_policy, .get = intel_pstate_get, .init = intel_pstate_cpu_init, - .exit = intel_pstate_cpu_exit, .stop_cpu = intel_pstate_stop_cpu, .name = "intel_pstate", }; @@ -1406,6 +1240,7 @@ static void copy_cpu_funcs(struct pstate_funcs *funcs) } #if IS_ENABLED(CONFIG_ACPI) +#include <acpi/processor.h> static bool intel_pstate_no_acpi_pss(void) { @@ -1601,9 +1436,6 @@ static int __init intel_pstate_setup(char *str) force_load = 1; if (!strcmp(str, "hwp_only")) hwp_only = 1; - if (!strcmp(str, "no_acpi")) - no_acpi_perf = 1; - return 0; } early_param("intel_pstate", intel_pstate_setup); diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c index 73ef49922788..7038f364acb5 100644 --- a/drivers/crypto/nx/nx-aes-ccm.c +++ b/drivers/crypto/nx/nx-aes-ccm.c @@ -409,7 +409,7 @@ static int ccm_nx_decrypt(struct aead_request *req, processed += to_process; } while (processed < nbytes); - rc = memcmp(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag, + rc = crypto_memneq(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag, authsize) ? 
-EBADMSG : 0; out: spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c index eee624f589b6..abd465f479c4 100644 --- a/drivers/crypto/nx/nx-aes-gcm.c +++ b/drivers/crypto/nx/nx-aes-gcm.c @@ -21,6 +21,7 @@ #include <crypto/internal/aead.h> #include <crypto/aes.h> +#include <crypto/algapi.h> #include <crypto/scatterwalk.h> #include <linux/module.h> #include <linux/types.h> @@ -418,7 +419,7 @@ mac: itag, req->src, req->assoclen + nbytes, crypto_aead_authsize(crypto_aead_reqtfm(req)), SCATTERWALK_FROM_SG); - rc = memcmp(itag, otag, + rc = crypto_memneq(itag, otag, crypto_aead_authsize(crypto_aead_reqtfm(req))) ? -EBADMSG : 0; } diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c index 03856ad280b9..473d36d91644 100644 --- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c +++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c @@ -198,7 +198,7 @@ static int adf_copy_key_value_data(struct adf_accel_dev *accel_dev, goto out_err; } - params_head = section_head->params; + params_head = section.params; while (params_head) { if (copy_from_user(&key_val, (void __user *)params_head, diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 46f531e19ccf..b6f9f42e2985 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -977,7 +977,7 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev, } else oicv = (char *)&edesc->link_tbl[0]; - err = memcmp(oicv, icv, authsize) ? -EBADMSG : 0; + err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0; } kfree(edesc); diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 4e55239c7a30..53d22eb73b56 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -729,8 +729,8 @@ atc_prep_dma_interleaved(struct dma_chan *chan, return NULL; dev_info(chan2dev(chan), - "%s: src=0x%08x, dest=0x%08x, numf=%d, frame_size=%d, flags=0x%lx\n", - __func__, xt->src_start, xt->dst_start, xt->numf, + "%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n", + __func__, &xt->src_start, &xt->dst_start, xt->numf, xt->frame_size, flags); /* @@ -824,8 +824,8 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, u32 ctrla; u32 ctrlb; - dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n", - dest, src, len, flags); + dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d%pad s%pad l0x%zx f0x%lx\n", + &dest, &src, len, flags); if (unlikely(!len)) { dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n"); @@ -938,8 +938,8 @@ atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value, void __iomem *vaddr; dma_addr_t paddr; - dev_vdbg(chan2dev(chan), "%s: d0x%x v0x%x l0x%zx f0x%lx\n", __func__, - dest, value, len, flags); + dev_vdbg(chan2dev(chan), "%s: d%pad v0x%x l0x%zx f0x%lx\n", __func__, + &dest, value, len, flags); if (unlikely(!len)) { dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__); @@ -1022,8 +1022,8 @@ atc_prep_dma_memset_sg(struct dma_chan *chan, dma_addr_t dest = sg_dma_address(sg); size_t len = sg_dma_len(sg); - dev_vdbg(chan2dev(chan), "%s: d0x%08x, l0x%zx\n", - __func__, dest, len); + dev_vdbg(chan2dev(chan), "%s: d%pad, l0x%zx\n", + __func__, &dest, len); if (!is_dma_fill_aligned(chan->device, dest, 0, len)) { dev_err(chan2dev(chan), "%s: buffer is not aligned\n", @@ -1439,9 +1439,9 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, unsigned int periods = buf_len / period_len; unsigned int i; - 
dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n", + dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@%pad - %d (%d/%d)\n", direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE", - buf_addr, + &buf_addr, periods, buf_len, period_len); if (unlikely(!atslave || !buf_len || !period_len)) { diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h index d1cfc8c876f9..7f58f06157f6 100644 --- a/drivers/dma/at_hdmac_regs.h +++ b/drivers/dma/at_hdmac_regs.h @@ -385,9 +385,9 @@ static void vdbg_dump_regs(struct at_dma_chan *atchan) {} static void atc_dump_lli(struct at_dma_chan *atchan, struct at_lli *lli) { dev_crit(chan2dev(&atchan->chan_common), - " desc: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n", - lli->saddr, lli->daddr, - lli->ctrla, lli->ctrlb, lli->dscr); + " desc: s%pad d%pad ctrl0x%x:0x%x l0x%pad\n", + &lli->saddr, &lli->daddr, + lli->ctrla, lli->ctrlb, &lli->dscr); } diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index b5e132d4bae5..7f039de143f0 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -920,8 +920,8 @@ at_xdmac_interleaved_queue_desc(struct dma_chan *chan, desc->lld.mbr_cfg = chan_cc; dev_dbg(chan2dev(chan), - "%s: lld: mbr_sa=0x%08x, mbr_da=0x%08x, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n", - __func__, desc->lld.mbr_sa, desc->lld.mbr_da, + "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n", + __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc, desc->lld.mbr_cfg); /* Chain lld. */ @@ -953,8 +953,8 @@ at_xdmac_prep_interleaved(struct dma_chan *chan, if ((xt->numf > 1) && (xt->frame_size > 1)) return NULL; - dev_dbg(chan2dev(chan), "%s: src=0x%08x, dest=0x%08x, numf=%d, frame_size=%d, flags=0x%lx\n", - __func__, xt->src_start, xt->dst_start, xt->numf, + dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n", + __func__, &xt->src_start, &xt->dst_start, xt->numf, xt->frame_size, flags); src_addr = xt->src_start; @@ -1179,8 +1179,8 @@ static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan, desc->lld.mbr_cfg = chan_cc; dev_dbg(chan2dev(chan), - "%s: lld: mbr_da=0x%08x, mbr_ds=0x%08x, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n", - __func__, desc->lld.mbr_da, desc->lld.mbr_ds, desc->lld.mbr_ubc, + "%s: lld: mbr_da=%pad, mbr_ds=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n", + __func__, &desc->lld.mbr_da, &desc->lld.mbr_ds, desc->lld.mbr_ubc, desc->lld.mbr_cfg); return desc; @@ -1193,8 +1193,8 @@ at_xdmac_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value, struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); struct at_xdmac_desc *desc; - dev_dbg(chan2dev(chan), "%s: dest=0x%08x, len=%d, pattern=0x%x, flags=0x%lx\n", - __func__, dest, len, value, flags); + dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%d, pattern=0x%x, flags=0x%lx\n", + __func__, &dest, len, value, flags); if (unlikely(!len)) return NULL; @@ -1229,8 +1229,8 @@ at_xdmac_prep_dma_memset_sg(struct dma_chan *chan, struct scatterlist *sgl, /* Prepare descriptors. 
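 *
 * [editor's note] The at_hdmac/at_xdmac hunks above all move from
 * "0x%08x" to the "%pad" printk specifier. dma_addr_t is 32 or 64 bits
 * wide depending on CONFIG_ARCH_DMA_ADDR_T_64BIT, so a fixed-width
 * format is wrong on half the configurations; "%pad" takes a pointer
 * to the dma_addr_t and prints it at its native width, e.g.:
 *
 *     dma_addr_t buf;
 *     dev_dbg(dev, "mapped at %pad\n", &buf);
 *
 * Note the "&": %pad dereferences its argument.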
*/ for_each_sg(sgl, sg, sg_len, i) { - dev_dbg(chan2dev(chan), "%s: dest=0x%08x, len=%d, pattern=0x%x, flags=0x%lx\n", - __func__, sg_dma_address(sg), sg_dma_len(sg), + dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%d, pattern=0x%x, flags=0x%lx\n", + __func__, &sg_dma_address(sg), sg_dma_len(sg), value, flags); desc = at_xdmac_memset_create_desc(chan, atchan, sg_dma_address(sg), diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index 6b03e4e84e6b..0675e268d577 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c @@ -107,7 +107,7 @@ /* CCCFG register */ #define GET_NUM_DMACH(x) (x & 0x7) /* bits 0-2 */ -#define GET_NUM_QDMACH(x) (x & 0x70 >> 4) /* bits 4-6 */ +#define GET_NUM_QDMACH(x) ((x & 0x70) >> 4) /* bits 4-6 */ #define GET_NUM_PAENTRY(x) ((x & 0x7000) >> 12) /* bits 12-14 */ #define GET_NUM_EVQUE(x) ((x & 0x70000) >> 16) /* bits 16-18 */ #define GET_NUM_REGN(x) ((x & 0x300000) >> 20) /* bits 20-21 */ @@ -1565,7 +1565,7 @@ static void edma_tc_set_pm_state(struct edma_tc *tc, bool enable) struct platform_device *tc_pdev; int ret; - if (!tc) + if (!IS_ENABLED(CONFIG_OF) || !tc) return; tc_pdev = of_find_device_by_node(tc->node); diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 7058d58ba588..0f6fd42f55ca 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -1462,7 +1462,7 @@ err_firmware: #define EVENT_REMAP_CELLS 3 -static int __init sdma_event_remap(struct sdma_engine *sdma) +static int sdma_event_remap(struct sdma_engine *sdma) { struct device_node *np = sdma->dev->of_node; struct device_node *gpr_np = of_parse_phandle(np, "gpr", 0); diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c index ebd8a5f398b0..f1bcc2a163b3 100644 --- a/drivers/dma/sh/usb-dmac.c +++ b/drivers/dma/sh/usb-dmac.c @@ -679,8 +679,11 @@ static int usb_dmac_runtime_suspend(struct device *dev) struct usb_dmac *dmac = dev_get_drvdata(dev); int i; - for (i = 0; i < dmac->n_channels; ++i) + for (i = 0; i < dmac->n_channels; ++i) { + if (!dmac->channels[i].iomem) + break; usb_dmac_chan_halt(&dmac->channels[i]); + } return 0; } @@ -799,11 +802,10 @@ static int usb_dmac_probe(struct platform_device *pdev) ret = pm_runtime_get_sync(&pdev->dev); if (ret < 0) { dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret); - return ret; + goto error_pm; } ret = usb_dmac_init(dmac); - pm_runtime_put(&pdev->dev); if (ret) { dev_err(&pdev->dev, "failed to reset device\n"); @@ -851,10 +853,13 @@ static int usb_dmac_probe(struct platform_device *pdev) if (ret < 0) goto error; + pm_runtime_put(&pdev->dev); return 0; error: of_dma_controller_free(pdev->dev.of_node); + pm_runtime_put(&pdev->dev); +error_pm: pm_runtime_disable(&pdev->dev); return ret; } diff --git a/drivers/gpio/gpio-74xx-mmio.c b/drivers/gpio/gpio-74xx-mmio.c index 6ed7c0fb3378..6b186829087c 100644 --- a/drivers/gpio/gpio-74xx-mmio.c +++ b/drivers/gpio/gpio-74xx-mmio.c @@ -113,13 +113,16 @@ static int mmio_74xx_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) static int mmio_74xx_gpio_probe(struct platform_device *pdev) { - const struct of_device_id *of_id = - of_match_device(mmio_74xx_gpio_ids, &pdev->dev); + const struct of_device_id *of_id; struct mmio_74xx_gpio_priv *priv; struct resource *res; void __iomem *dat; int err; + of_id = of_match_device(mmio_74xx_gpio_ids, &pdev->dev); + if (!of_id) + return -ENODEV; + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c index 56d2d026e62e..f7fbb46d5d79 100644 
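[editor's note] The gpio-74xx-mmio hunk above, and the gpio-palmas and
gpio-syscon hunks below, all add the same defensive check:
of_match_device() returns NULL when the device was bound without a
matching OF node, so its result must be tested before dereferencing
->data. A minimal sketch of the pattern (foo_of_match and foo_setup are
hypothetical names, not from this patch):

	static int foo_probe(struct platform_device *pdev)
	{
		const struct of_device_id *of_id;

		of_id = of_match_device(foo_of_match, &pdev->dev);
		if (!of_id)
			return -ENODEV;

		/* of_id->data is only safe to use after the NULL check */
		return foo_setup(&pdev->dev, of_id->data);
	}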
--- a/drivers/gpio/gpio-omap.c +++ b/drivers/gpio/gpio-omap.c @@ -1122,8 +1122,6 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc) /* MPUIO is a bit different, reading IRQ status clears it */ if (bank->is_mpuio) { irqc->irq_ack = dummy_irq_chip.irq_ack; - irqc->irq_mask = irq_gc_mask_set_bit; - irqc->irq_unmask = irq_gc_mask_clr_bit; if (!bank->regs->wkup_en) irqc->irq_set_wake = NULL; } diff --git a/drivers/gpio/gpio-palmas.c b/drivers/gpio/gpio-palmas.c index 171a6389f9ce..52b447c071cb 100644 --- a/drivers/gpio/gpio-palmas.c +++ b/drivers/gpio/gpio-palmas.c @@ -167,6 +167,8 @@ static int palmas_gpio_probe(struct platform_device *pdev) const struct palmas_device_data *dev_data; match = of_match_device(of_palmas_gpio_match, &pdev->dev); + if (!match) + return -ENODEV; dev_data = match->data; if (!dev_data) dev_data = &palmas_dev_data; diff --git a/drivers/gpio/gpio-syscon.c b/drivers/gpio/gpio-syscon.c index 045a952576c7..7b25fdf64802 100644 --- a/drivers/gpio/gpio-syscon.c +++ b/drivers/gpio/gpio-syscon.c @@ -187,11 +187,15 @@ MODULE_DEVICE_TABLE(of, syscon_gpio_ids); static int syscon_gpio_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - const struct of_device_id *of_id = of_match_device(syscon_gpio_ids, dev); + const struct of_device_id *of_id; struct syscon_gpio_priv *priv; struct device_node *np = dev->of_node; int ret; + of_id = of_match_device(syscon_gpio_ids, dev); + if (!of_id) + return -ENODEV; + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c index 027e5f47dd28..896bf29776b0 100644 --- a/drivers/gpio/gpio-tegra.c +++ b/drivers/gpio/gpio-tegra.c @@ -375,6 +375,60 @@ static int tegra_gpio_irq_set_wake(struct irq_data *d, unsigned int enable) } #endif +#ifdef CONFIG_DEBUG_FS + +#include <linux/debugfs.h> +#include <linux/seq_file.h> + +static int dbg_gpio_show(struct seq_file *s, void *unused) +{ + int i; + int j; + + for (i = 0; i < tegra_gpio_bank_count; i++) { + for (j = 0; j < 4; j++) { + int gpio = tegra_gpio_compose(i, j, 0); + seq_printf(s, + "%d:%d %02x %02x %02x %02x %02x %02x %06x\n", + i, j, + tegra_gpio_readl(GPIO_CNF(gpio)), + tegra_gpio_readl(GPIO_OE(gpio)), + tegra_gpio_readl(GPIO_OUT(gpio)), + tegra_gpio_readl(GPIO_IN(gpio)), + tegra_gpio_readl(GPIO_INT_STA(gpio)), + tegra_gpio_readl(GPIO_INT_ENB(gpio)), + tegra_gpio_readl(GPIO_INT_LVL(gpio))); + } + } + return 0; +} + +static int dbg_gpio_open(struct inode *inode, struct file *file) +{ + return single_open(file, dbg_gpio_show, &inode->i_private); +} + +static const struct file_operations debug_fops = { + .open = dbg_gpio_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static void tegra_gpio_debuginit(void) +{ + (void) debugfs_create_file("tegra_gpio", S_IRUGO, + NULL, NULL, &debug_fops); +} + +#else + +static inline void tegra_gpio_debuginit(void) +{ +} + +#endif + static struct irq_chip tegra_gpio_irq_chip = { .name = "GPIO", .irq_ack = tegra_gpio_irq_ack, @@ -519,6 +573,8 @@ static int tegra_gpio_probe(struct platform_device *pdev) spin_lock_init(&bank->lvl_lock[j]); } + tegra_gpio_debuginit(); + return 0; } @@ -536,52 +592,3 @@ static int __init tegra_gpio_init(void) return platform_driver_register(&tegra_gpio_driver); } postcore_initcall(tegra_gpio_init); - -#ifdef CONFIG_DEBUG_FS - -#include <linux/debugfs.h> -#include <linux/seq_file.h> - -static int dbg_gpio_show(struct seq_file *s, void *unused) -{ - int i; - int j; - - 
for (i = 0; i < tegra_gpio_bank_count; i++) { - for (j = 0; j < 4; j++) { - int gpio = tegra_gpio_compose(i, j, 0); - seq_printf(s, - "%d:%d %02x %02x %02x %02x %02x %02x %06x\n", - i, j, - tegra_gpio_readl(GPIO_CNF(gpio)), - tegra_gpio_readl(GPIO_OE(gpio)), - tegra_gpio_readl(GPIO_OUT(gpio)), - tegra_gpio_readl(GPIO_IN(gpio)), - tegra_gpio_readl(GPIO_INT_STA(gpio)), - tegra_gpio_readl(GPIO_INT_ENB(gpio)), - tegra_gpio_readl(GPIO_INT_LVL(gpio))); - } - } - return 0; -} - -static int dbg_gpio_open(struct inode *inode, struct file *file) -{ - return single_open(file, dbg_gpio_show, &inode->i_private); -} - -static const struct file_operations debug_fops = { - .open = dbg_gpio_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - -static int __init tegra_gpio_debuginit(void) -{ - (void) debugfs_create_file("tegra_gpio", S_IRUGO, - NULL, NULL, &debug_fops); - return 0; -} -late_initcall(tegra_gpio_debuginit); -#endif diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index a18f00fc1bb8..2a91f3287e3b 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -233,7 +233,7 @@ static struct gpio_desc *gpio_name_to_desc(const char * const name) for (i = 0; i != chip->ngpio; ++i) { struct gpio_desc *gpio = &chip->desc[i]; - if (!gpio->name) + if (!gpio->name || !name) continue; if (!strcmp(gpio->name, name)) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 615ce6d464fb..5a5f04d0902d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -389,7 +389,6 @@ struct amdgpu_clock { * Fences. */ struct amdgpu_fence_driver { - struct amdgpu_ring *ring; uint64_t gpu_addr; volatile uint32_t *cpu_addr; /* sync_seq is protected by ring emission lock */ @@ -398,7 +397,7 @@ struct amdgpu_fence_driver { bool initialized; struct amdgpu_irq_src *irq_src; unsigned irq_type; - struct delayed_work lockup_work; + struct timer_list fallback_timer; wait_queue_head_t fence_queue; }; @@ -497,6 +496,7 @@ struct amdgpu_bo_va_mapping { /* bo virtual addresses in a specific vm */ struct amdgpu_bo_va { + struct mutex mutex; /* protected by bo being reserved */ struct list_head bo_list; struct fence *last_pt_update; @@ -539,6 +539,7 @@ struct amdgpu_bo { /* Constant after initialization */ struct amdgpu_device *adev; struct drm_gem_object gem_base; + struct amdgpu_bo *parent; struct ttm_bo_kmap_obj dma_buf_vmap; pid_t pid; @@ -917,8 +918,8 @@ struct amdgpu_ring { #define AMDGPU_VM_FAULT_STOP_ALWAYS 2 struct amdgpu_vm_pt { - struct amdgpu_bo *bo; - uint64_t addr; + struct amdgpu_bo *bo; + uint64_t addr; }; struct amdgpu_vm_id { @@ -926,13 +927,9 @@ struct amdgpu_vm_id { uint64_t pd_gpu_addr; /* last flushed PD/PT update */ struct fence *flushed_updates; - /* last use of vmid */ - struct fence *last_id_use; }; struct amdgpu_vm { - struct mutex mutex; - struct rb_root va; /* protecting invalidated */ @@ -957,24 +954,72 @@ struct amdgpu_vm { /* for id and flush management per ring */ struct amdgpu_vm_id ids[AMDGPU_MAX_RINGS]; + /* for interval tree */ + spinlock_t it_lock; + /* protecting freed */ + spinlock_t freed_lock; }; struct amdgpu_vm_manager { - struct fence *active[AMDGPU_NUM_VM]; - uint32_t max_pfn; + struct { + struct fence *active; + atomic_long_t owner; + } ids[AMDGPU_NUM_VM]; + + uint32_t max_pfn; /* number of VMIDs */ - unsigned nvm; + unsigned nvm; /* vram base address for page table entry */ - u64 vram_base_offset; + u64 vram_base_offset; /* is vm enabled? 
*/ - bool enabled; - /* for hw to save the PD addr on suspend/resume */ - uint32_t saved_table_addr[AMDGPU_NUM_VM]; + bool enabled; /* vm pte handling */ const struct amdgpu_vm_pte_funcs *vm_pte_funcs; struct amdgpu_ring *vm_pte_funcs_ring; }; +void amdgpu_vm_manager_fini(struct amdgpu_device *adev); +int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm); +void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm); +struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev, + struct amdgpu_vm *vm, + struct list_head *head); +int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, + struct amdgpu_sync *sync); +void amdgpu_vm_flush(struct amdgpu_ring *ring, + struct amdgpu_vm *vm, + struct fence *updates); +void amdgpu_vm_fence(struct amdgpu_device *adev, + struct amdgpu_vm *vm, + struct fence *fence); +uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr); +int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, + struct amdgpu_vm *vm); +int amdgpu_vm_clear_freed(struct amdgpu_device *adev, + struct amdgpu_vm *vm); +int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm, + struct amdgpu_sync *sync); +int amdgpu_vm_bo_update(struct amdgpu_device *adev, + struct amdgpu_bo_va *bo_va, + struct ttm_mem_reg *mem); +void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, + struct amdgpu_bo *bo); +struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, + struct amdgpu_bo *bo); +struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, + struct amdgpu_vm *vm, + struct amdgpu_bo *bo); +int amdgpu_vm_bo_map(struct amdgpu_device *adev, + struct amdgpu_bo_va *bo_va, + uint64_t addr, uint64_t offset, + uint64_t size, uint32_t flags); +int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, + struct amdgpu_bo_va *bo_va, + uint64_t addr); +void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, + struct amdgpu_bo_va *bo_va); +int amdgpu_vm_free_job(struct amdgpu_job *job); + /* * context related structures */ @@ -1211,6 +1256,7 @@ struct amdgpu_cs_parser { /* relocations */ struct amdgpu_bo_list_entry *vm_bos; struct list_head validated; + struct fence *fence; struct amdgpu_ib *ibs; uint32_t num_ibs; @@ -1226,7 +1272,7 @@ struct amdgpu_job { struct amdgpu_device *adev; struct amdgpu_ib *ibs; uint32_t num_ibs; - struct mutex job_lock; + void *owner; struct amdgpu_user_fence uf; int (*free_job)(struct amdgpu_job *job); }; @@ -2257,11 +2303,6 @@ void amdgpu_pci_config_reset(struct amdgpu_device *adev); bool amdgpu_card_posted(struct amdgpu_device *adev); void amdgpu_update_display_priority(struct amdgpu_device *adev); bool amdgpu_boot_test_post_card(struct amdgpu_device *adev); -struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev, - struct drm_file *filp, - struct amdgpu_ctx *ctx, - struct amdgpu_ib *ibs, - uint32_t num_ibs); int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data); int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type, @@ -2319,49 +2360,6 @@ long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); /* - * vm - */ -int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm); -void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm); -struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev, - struct amdgpu_vm *vm, - struct list_head *head); -int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, - struct amdgpu_sync *sync); -void amdgpu_vm_flush(struct amdgpu_ring 
*ring, - struct amdgpu_vm *vm, - struct fence *updates); -void amdgpu_vm_fence(struct amdgpu_device *adev, - struct amdgpu_vm *vm, - struct amdgpu_fence *fence); -uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr); -int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, - struct amdgpu_vm *vm); -int amdgpu_vm_clear_freed(struct amdgpu_device *adev, - struct amdgpu_vm *vm); -int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, - struct amdgpu_vm *vm, struct amdgpu_sync *sync); -int amdgpu_vm_bo_update(struct amdgpu_device *adev, - struct amdgpu_bo_va *bo_va, - struct ttm_mem_reg *mem); -void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, - struct amdgpu_bo *bo); -struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, - struct amdgpu_bo *bo); -struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, - struct amdgpu_vm *vm, - struct amdgpu_bo *bo); -int amdgpu_vm_bo_map(struct amdgpu_device *adev, - struct amdgpu_bo_va *bo_va, - uint64_t addr, uint64_t offset, - uint64_t size, uint32_t flags); -int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, - struct amdgpu_bo_va *bo_va, - uint64_t addr); -void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, - struct amdgpu_bo_va *bo_va); -int amdgpu_vm_free_job(struct amdgpu_job *job); -/* * functions used by amdgpu_encoder.c */ struct amdgpu_afmt_acr { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index dfc4d02c7a38..4f352ec9dec4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -127,30 +127,6 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type, return 0; } -struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev, - struct drm_file *filp, - struct amdgpu_ctx *ctx, - struct amdgpu_ib *ibs, - uint32_t num_ibs) -{ - struct amdgpu_cs_parser *parser; - int i; - - parser = kzalloc(sizeof(struct amdgpu_cs_parser), GFP_KERNEL); - if (!parser) - return NULL; - - parser->adev = adev; - parser->filp = filp; - parser->ctx = ctx; - parser->ibs = ibs; - parser->num_ibs = num_ibs; - for (i = 0; i < num_ibs; i++) - ibs[i].ctx = ctx; - - return parser; -} - int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) { union drm_amdgpu_cs *cs = data; @@ -246,6 +222,8 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) } p->uf.bo = gem_to_amdgpu_bo(gobj); + amdgpu_bo_ref(p->uf.bo); + drm_gem_object_unreference_unlocked(gobj); p->uf.offset = fence_data->offset; } else { ret = -EINVAL; @@ -463,8 +441,18 @@ static int cmp_size_smaller_first(void *priv, struct list_head *a, return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages; } -static void amdgpu_cs_parser_fini_early(struct amdgpu_cs_parser *parser, int error, bool backoff) +/** + * cs_parser_fini() - clean parser states + * @parser: parser structure holding parsing context. + * @error: error number + * + * If error is set than unvalidate buffer, otherwise just free memory + * used by parsing context. + **/ +static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff) { + unsigned i; + if (!error) { /* Sort the buffer list from the smallest to largest buffer, * which affects the order of buffers in the LRU list. 
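[editor's note] The consolidated cleanup path keeps the
cmp_size_smaller_first() sort referenced above: before fencing, the
validated list is reordered smallest-to-largest, biasing TTM's LRU so
that small buffers are preferred for eviction. list_sort() from
<linux/list_sort.h> is a stable merge sort over a plain list_head; a
self-contained sketch with a hypothetical element type:

	#include <linux/list.h>
	#include <linux/list_sort.h>

	struct item {
		struct list_head head;
		unsigned long num_pages;
	};

	/* return <0 when a should sort before b */
	static int cmp_smaller_first(void *priv, struct list_head *a,
				     struct list_head *b)
	{
		struct item *ia = list_entry(a, struct item, head);
		struct item *ib = list_entry(b, struct item, head);

		return (int)ia->num_pages - (int)ib->num_pages;
	}

	/* usage: list_sort(NULL, &bo_list, cmp_smaller_first); */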
@@ -479,17 +467,14 @@ static void amdgpu_cs_parser_fini_early(struct amdgpu_cs_parser *parser, int err list_sort(NULL, &parser->validated, cmp_size_smaller_first); ttm_eu_fence_buffer_objects(&parser->ticket, - &parser->validated, - &parser->ibs[parser->num_ibs-1].fence->base); + &parser->validated, + parser->fence); } else if (backoff) { ttm_eu_backoff_reservation(&parser->ticket, &parser->validated); } -} + fence_put(parser->fence); -static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser) -{ - unsigned i; if (parser->ctx) amdgpu_ctx_put(parser->ctx); if (parser->bo_list) @@ -499,31 +484,12 @@ static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser) for (i = 0; i < parser->nchunks; i++) drm_free_large(parser->chunks[i].kdata); kfree(parser->chunks); - if (!amdgpu_enable_scheduler) - { - if (parser->ibs) - for (i = 0; i < parser->num_ibs; i++) - amdgpu_ib_free(parser->adev, &parser->ibs[i]); - kfree(parser->ibs); - if (parser->uf.bo) - drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base); - } - - kfree(parser); -} - -/** - * cs_parser_fini() - clean parser states - * @parser: parser structure holding parsing context. - * @error: error number - * - * If error is set than unvalidate buffer, otherwise just free memory - * used by parsing context. - **/ -static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff) -{ - amdgpu_cs_parser_fini_early(parser, error, backoff); - amdgpu_cs_parser_fini_late(parser); + if (parser->ibs) + for (i = 0; i < parser->num_ibs; i++) + amdgpu_ib_free(parser->adev, &parser->ibs[i]); + kfree(parser->ibs); + if (parser->uf.bo) + amdgpu_bo_unref(&parser->uf.bo); } static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p, @@ -610,15 +576,9 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev, } r = amdgpu_bo_vm_update_pte(parser, vm); - if (r) { - goto out; - } - amdgpu_cs_sync_rings(parser); - if (!amdgpu_enable_scheduler) - r = amdgpu_ib_schedule(adev, parser->num_ibs, parser->ibs, - parser->filp); + if (!r) + amdgpu_cs_sync_rings(parser); -out: return r; } @@ -818,7 +778,7 @@ static int amdgpu_cs_free_job(struct amdgpu_job *job) amdgpu_ib_free(job->adev, &job->ibs[i]); kfree(job->ibs); if (job->uf.bo) - drm_gem_object_unreference_unlocked(&job->uf.bo->gem_base); + amdgpu_bo_unref(&job->uf.bo); return 0; } @@ -826,38 +786,35 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { struct amdgpu_device *adev = dev->dev_private; union drm_amdgpu_cs *cs = data; - struct amdgpu_fpriv *fpriv = filp->driver_priv; - struct amdgpu_vm *vm = &fpriv->vm; - struct amdgpu_cs_parser *parser; + struct amdgpu_cs_parser parser = {}; bool reserved_buffers = false; int i, r; if (!adev->accel_working) return -EBUSY; - parser = amdgpu_cs_parser_create(adev, filp, NULL, NULL, 0); - if (!parser) - return -ENOMEM; - r = amdgpu_cs_parser_init(parser, data); + parser.adev = adev; + parser.filp = filp; + + r = amdgpu_cs_parser_init(&parser, data); if (r) { DRM_ERROR("Failed to initialize parser !\n"); - amdgpu_cs_parser_fini(parser, r, false); + amdgpu_cs_parser_fini(&parser, r, false); r = amdgpu_cs_handle_lockup(adev, r); return r; } - mutex_lock(&vm->mutex); - r = amdgpu_cs_parser_relocs(parser); + r = amdgpu_cs_parser_relocs(&parser); if (r == -ENOMEM) DRM_ERROR("Not enough memory for command submission!\n"); else if (r && r != -ERESTARTSYS) DRM_ERROR("Failed to process the buffer list %d!\n", r); else if (!r) { reserved_buffers = true; - r = amdgpu_cs_ib_fill(adev, parser); 
+ r = amdgpu_cs_ib_fill(adev, &parser); } if (!r) { - r = amdgpu_cs_dependencies(adev, parser); + r = amdgpu_cs_dependencies(adev, &parser); if (r) DRM_ERROR("Failed in the dependencies handling %d!\n", r); } @@ -865,63 +822,71 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) if (r) goto out; - for (i = 0; i < parser->num_ibs; i++) - trace_amdgpu_cs(parser, i); + for (i = 0; i < parser.num_ibs; i++) + trace_amdgpu_cs(&parser, i); - r = amdgpu_cs_ib_vm_chunk(adev, parser); + r = amdgpu_cs_ib_vm_chunk(adev, &parser); if (r) goto out; - if (amdgpu_enable_scheduler && parser->num_ibs) { + if (amdgpu_enable_scheduler && parser.num_ibs) { + struct amdgpu_ring * ring = parser.ibs->ring; + struct amd_sched_fence *fence; struct amdgpu_job *job; - struct amdgpu_ring * ring = parser->ibs->ring; + job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL); if (!job) { r = -ENOMEM; goto out; } + job->base.sched = &ring->sched; - job->base.s_entity = &parser->ctx->rings[ring->idx].entity; - job->adev = parser->adev; - job->ibs = parser->ibs; - job->num_ibs = parser->num_ibs; - job->base.owner = parser->filp; - mutex_init(&job->job_lock); + job->base.s_entity = &parser.ctx->rings[ring->idx].entity; + job->adev = parser.adev; + job->owner = parser.filp; + job->free_job = amdgpu_cs_free_job; + + job->ibs = parser.ibs; + job->num_ibs = parser.num_ibs; + parser.ibs = NULL; + parser.num_ibs = 0; + if (job->ibs[job->num_ibs - 1].user) { - memcpy(&job->uf, &parser->uf, - sizeof(struct amdgpu_user_fence)); + job->uf = parser.uf; job->ibs[job->num_ibs - 1].user = &job->uf; + parser.uf.bo = NULL; } - job->free_job = amdgpu_cs_free_job; - mutex_lock(&job->job_lock); - r = amd_sched_entity_push_job(&job->base); - if (r) { - mutex_unlock(&job->job_lock); + fence = amd_sched_fence_create(job->base.s_entity, + parser.filp); + if (!fence) { + r = -ENOMEM; amdgpu_cs_free_job(job); kfree(job); goto out; } - cs->out.handle = - amdgpu_ctx_add_fence(parser->ctx, ring, - &job->base.s_fence->base); - parser->ibs[parser->num_ibs - 1].sequence = cs->out.handle; + job->base.s_fence = fence; + parser.fence = fence_get(&fence->base); - list_sort(NULL, &parser->validated, cmp_size_smaller_first); - ttm_eu_fence_buffer_objects(&parser->ticket, - &parser->validated, - &job->base.s_fence->base); + cs->out.handle = amdgpu_ctx_add_fence(parser.ctx, ring, + &fence->base); + job->ibs[job->num_ibs - 1].sequence = cs->out.handle; - mutex_unlock(&job->job_lock); - amdgpu_cs_parser_fini_late(parser); - mutex_unlock(&vm->mutex); - return 0; + trace_amdgpu_cs_ioctl(job); + amd_sched_entity_push_job(&job->base); + + } else { + struct amdgpu_fence *fence; + + r = amdgpu_ib_schedule(adev, parser.num_ibs, parser.ibs, + parser.filp); + fence = parser.ibs[parser.num_ibs - 1].fence; + parser.fence = fence_get(&fence->base); + cs->out.handle = parser.ibs[parser.num_ibs - 1].sequence; } - cs->out.handle = parser->ibs[parser->num_ibs - 1].sequence; out: - amdgpu_cs_parser_fini(parser, r, reserved_buffers); - mutex_unlock(&vm->mutex); + amdgpu_cs_parser_fini(&parser, r, reserved_buffers); r = amdgpu_cs_handle_lockup(adev, r); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index e173a5a02f0d..5580d3420c3a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -73,6 +73,8 @@ static void amdgpu_flip_work_func(struct work_struct *__work) struct drm_crtc *crtc = &amdgpuCrtc->base; unsigned long flags; unsigned i; 
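/* [editor's note] The declarations and loop added below wait, with short
 * sleeps, until the CRTC has left the "virtually extended" vblank window
 * before programming the flip. The sleep length converts the remaining
 * scanline distance (-hpos) into microseconds using the per-line
 * duration the DRM core keeps in struct drm_vblank_crtc:
 *
 *     min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
 *     usleep_range(min_udelay, 2 * min_udelay);
 *
 * i.e. at least one line more than the estimated distance, with a
 * 5 us/line floor in case linedur_ns is not (yet) known.
 */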
+ int vpos, hpos, stat, min_udelay; + struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id]; amdgpu_flip_wait_fence(adev, &work->excl); for (i = 0; i < work->shared_count; ++i) @@ -81,6 +83,41 @@ static void amdgpu_flip_work_func(struct work_struct *__work) /* We borrow the event spin lock for protecting flip_status */ spin_lock_irqsave(&crtc->dev->event_lock, flags); + /* If this happens to execute within the "virtually extended" vblank + * interval before the start of the real vblank interval then it needs + * to delay programming the mmio flip until the real vblank is entered. + * This prevents completing a flip too early due to the way we fudge + * our vblank counter and vblank timestamps in order to work around the + * problem that the hw fires vblank interrupts before actual start of + * vblank (when line buffer refilling is done for a frame). It + * complements the fudging logic in amdgpu_get_crtc_scanoutpos() for + * timestamping and amdgpu_get_vblank_counter_kms() for vblank counts. + * + * In practice this won't execute very often unless on very fast + * machines because the time window for this to happen is very small. + */ + for (;;) { + /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank + * start in hpos, and to the "fudged earlier" vblank start in + * vpos. + */ + stat = amdgpu_get_crtc_scanoutpos(adev->ddev, work->crtc_id, + GET_DISTANCE_TO_VBLANKSTART, + &vpos, &hpos, NULL, NULL, + &crtc->hwmode); + + if ((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) != + (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE) || + !(vpos >= 0 && hpos <= 0)) + break; + + /* Sleep at least until estimated real start of hw vblank */ + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); + min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5); + usleep_range(min_udelay, 2 * min_udelay); + spin_lock_irqsave(&crtc->dev->event_lock, flags); + }; + /* do the flip (mmio) */ adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base); /* set the flip status */ @@ -109,7 +146,7 @@ static void amdgpu_unpin_work_func(struct work_struct *__work) } else DRM_ERROR("failed to reserve buffer after flip\n"); - drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base); + amdgpu_bo_unref(&work->old_rbo); kfree(work->shared); kfree(work); } @@ -148,8 +185,8 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc, obj = old_amdgpu_fb->obj; /* take a reference to the old object */ - drm_gem_object_reference(obj); work->old_rbo = gem_to_amdgpu_bo(obj); + amdgpu_bo_ref(work->old_rbo); new_amdgpu_fb = to_amdgpu_framebuffer(fb); obj = new_amdgpu_fb->obj; @@ -222,7 +259,7 @@ pflip_cleanup: amdgpu_bo_unreserve(new_rbo); cleanup: - drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base); + amdgpu_bo_unref(&work->old_rbo); fence_put(work->excl); for (i = 0; i < work->shared_count; ++i) fence_put(work->shared[i]); @@ -712,6 +749,15 @@ bool amdgpu_crtc_scaling_mode_fixup(struct drm_crtc *crtc, * \param dev Device to query. * \param pipe Crtc to query. * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0). + * For driver internal use only also supports these flags: + * + * USE_REAL_VBLANKSTART to use the real start of vblank instead + * of a fudged earlier start of vblank. + * + * GET_DISTANCE_TO_VBLANKSTART to return distance to the + * fudged earlier start of vblank in *vpos and the distance + * to true start of vblank in *hpos. + * * \param *vpos Location where vertical scanout position should be stored. 
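 *
 * [editor's note] USE_REAL_VBLANKSTART and GET_DISTANCE_TO_VBLANKSTART
 * are defined in amdgpu_mode.h (further down in this patch) as bits 30
 * and 31, well above the DRM core's public caller flags, so the two
 * namespaces cannot collide. A typical internal query, as used by the
 * reworked vblank counter code, looks like:
 *
 *     stat = amdgpu_get_crtc_scanoutpos(dev, pipe,
 *                    GET_DISTANCE_TO_VBLANKSTART,
 *                    &vpos, &hpos, NULL, NULL, &crtc->hwmode);
 *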
* \param *hpos Location where horizontal scanout position should go. * \param *stime Target location for timestamp taken immediately before @@ -776,10 +822,40 @@ int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, vbl_end = 0; } + /* Called from driver internal vblank counter query code? */ + if (flags & GET_DISTANCE_TO_VBLANKSTART) { + /* Caller wants distance from real vbl_start in *hpos */ + *hpos = *vpos - vbl_start; + } + + /* Fudge vblank to start a few scanlines earlier to handle the + * problem that vblank irqs fire a few scanlines before start + * of vblank. Some driver internal callers need the true vblank + * start to be used and signal this via the USE_REAL_VBLANKSTART flag. + * + * The cause of the "early" vblank irq is that the irq is triggered + * by the line buffer logic when the line buffer read position enters + * the vblank, whereas our crtc scanout position naturally lags the + * line buffer read position. + */ + if (!(flags & USE_REAL_VBLANKSTART)) + vbl_start -= adev->mode_info.crtcs[pipe]->lb_vblank_lead_lines; + /* Test scanout position against vblank region. */ if ((*vpos < vbl_start) && (*vpos >= vbl_end)) in_vbl = false; + /* In vblank? */ + if (in_vbl) + ret |= DRM_SCANOUTPOS_IN_VBLANK; + + /* Called from driver internal vblank counter query code? */ + if (flags & GET_DISTANCE_TO_VBLANKSTART) { + /* Caller wants distance from fudged earlier vbl_start */ + *vpos -= vbl_start; + return ret; + } + /* Check if inside vblank area and apply corrective offsets: * vpos will then be >=0 in video scanout area, but negative * within vblank area, counting down the number of lines until @@ -795,32 +871,6 @@ int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, /* Correct for shifted end of vbl at vbl_end. */ *vpos = *vpos - vbl_end; - /* In vblank? */ - if (in_vbl) - ret |= DRM_SCANOUTPOS_IN_VBLANK; - - /* Is vpos outside nominal vblank area, but less than - * 1/100 of a frame height away from start of vblank? - * If so, assume this isn't a massively delayed vblank - * interrupt, but a vblank interrupt that fired a few - * microseconds before true start of vblank. Compensate - * by adding a full frame duration to the final timestamp. - * Happens, e.g., on ATI R500, R600. - * - * We only do this if DRM_CALLED_FROM_VBLIRQ. - */ - if ((flags & DRM_CALLED_FROM_VBLIRQ) && !in_vbl) { - vbl_start = mode->crtc_vdisplay; - vtotal = mode->crtc_vtotal; - - if (vbl_start - *vpos < vtotal / 100) { - *vpos -= vtotal; - - /* Signal this correction as "applied". */ - ret |= 0x8; - } - } - return ret; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 257d72205bb5..3671f9f220bd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -47,6 +47,9 @@ * that the the relevant GPU caches have been flushed. */ +static struct kmem_cache *amdgpu_fence_slab; +static atomic_t amdgpu_fence_slab_ref = ATOMIC_INIT(0); + /** * amdgpu_fence_write - write a fence value * @@ -85,24 +88,6 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring) } /** - * amdgpu_fence_schedule_check - schedule lockup check - * - * @ring: pointer to struct amdgpu_ring - * - * Queues a delayed work item to check for lockups. - */ -static void amdgpu_fence_schedule_check(struct amdgpu_ring *ring) -{ - /* - * Do not reset the timer here with mod_delayed_work, - * this can livelock in an interaction with TTM delayed destroy. 
- */ - queue_delayed_work(system_power_efficient_wq, - &ring->fence_drv.lockup_work, - AMDGPU_FENCE_JIFFIES_TIMEOUT); -} - -/** * amdgpu_fence_emit - emit a fence on the requested ring * * @ring: ring the fence is associated with @@ -118,7 +103,7 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner, struct amdgpu_device *adev = ring->adev; /* we are protected by the ring emission mutex */ - *fence = kmalloc(sizeof(struct amdgpu_fence), GFP_KERNEL); + *fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL); if ((*fence) == NULL) { return -ENOMEM; } @@ -132,11 +117,23 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner, amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr, (*fence)->seq, AMDGPU_FENCE_FLAG_INT); - trace_amdgpu_fence_emit(ring->adev->ddev, ring->idx, (*fence)->seq); return 0; } /** + * amdgpu_fence_schedule_fallback - schedule fallback check + * + * @ring: pointer to struct amdgpu_ring + * + * Start a timer as fallback to our interrupts. + */ +static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring) +{ + mod_timer(&ring->fence_drv.fallback_timer, + jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT); +} + +/** * amdgpu_fence_activity - check for fence activity * * @ring: pointer to struct amdgpu_ring @@ -202,45 +199,38 @@ static bool amdgpu_fence_activity(struct amdgpu_ring *ring) } while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq); if (seq < last_emitted) - amdgpu_fence_schedule_check(ring); + amdgpu_fence_schedule_fallback(ring); return wake; } /** - * amdgpu_fence_check_lockup - check for hardware lockup + * amdgpu_fence_process - process a fence * - * @work: delayed work item + * @adev: amdgpu_device pointer + * @ring: ring index the fence is associated with * - * Checks for fence activity and if there is none probe - * the hardware if a lockup occured. + * Checks the current fence value and wakes the fence queue + * if the sequence number has increased (all asics). */ -static void amdgpu_fence_check_lockup(struct work_struct *work) +void amdgpu_fence_process(struct amdgpu_ring *ring) { - struct amdgpu_fence_driver *fence_drv; - struct amdgpu_ring *ring; - - fence_drv = container_of(work, struct amdgpu_fence_driver, - lockup_work.work); - ring = fence_drv->ring; - if (amdgpu_fence_activity(ring)) wake_up_all(&ring->fence_drv.fence_queue); } /** - * amdgpu_fence_process - process a fence + * amdgpu_fence_fallback - fallback for hardware interrupts * - * @adev: amdgpu_device pointer - * @ring: ring index the fence is associated with + * @work: delayed work item * - * Checks the current fence value and wakes the fence queue - * if the sequence number has increased (all asics). + * Checks for fence activity. 
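 *
 * [editor's note] The lockup-check delayed work is replaced by a plain
 * timer_list acting as an interrupt fallback. With the pre-4.15 timer
 * API used here, the callback and its unsigned long cookie are bound at
 * init time and the timer is (re)armed on demand:
 *
 *     setup_timer(&ring->fence_drv.fallback_timer,
 *                 amdgpu_fence_fallback, (unsigned long)ring);
 *     mod_timer(&ring->fence_drv.fallback_timer,
 *               jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
 *
 * mod_timer() may safely re-arm a pending timer, which avoids the
 * mod_delayed_work()/TTM livelock the removed comment warned against;
 * the callback recovers the ring by casting the cookie back.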
*/ -void amdgpu_fence_process(struct amdgpu_ring *ring) +static void amdgpu_fence_fallback(unsigned long arg) { - if (amdgpu_fence_activity(ring)) - wake_up_all(&ring->fence_drv.fence_queue); + struct amdgpu_ring *ring = (void *)arg; + + amdgpu_fence_process(ring); } /** @@ -290,7 +280,7 @@ static int amdgpu_fence_ring_wait_seq(struct amdgpu_ring *ring, uint64_t seq) if (atomic64_read(&ring->fence_drv.last_seq) >= seq) return 0; - amdgpu_fence_schedule_check(ring); + amdgpu_fence_schedule_fallback(ring); wait_event(ring->fence_drv.fence_queue, ( (signaled = amdgpu_fence_seq_signaled(ring, seq)))); @@ -491,9 +481,8 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring) atomic64_set(&ring->fence_drv.last_seq, 0); ring->fence_drv.initialized = false; - INIT_DELAYED_WORK(&ring->fence_drv.lockup_work, - amdgpu_fence_check_lockup); - ring->fence_drv.ring = ring; + setup_timer(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, + (unsigned long)ring); init_waitqueue_head(&ring->fence_drv.fence_queue); @@ -536,6 +525,13 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring) */ int amdgpu_fence_driver_init(struct amdgpu_device *adev) { + if (atomic_inc_return(&amdgpu_fence_slab_ref) == 1) { + amdgpu_fence_slab = kmem_cache_create( + "amdgpu_fence", sizeof(struct amdgpu_fence), 0, + SLAB_HWCACHE_ALIGN, NULL); + if (!amdgpu_fence_slab) + return -ENOMEM; + } if (amdgpu_debugfs_fence_init(adev)) dev_err(adev->dev, "fence debugfs file creation failed\n"); @@ -554,9 +550,12 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev) { int i, r; + if (atomic_dec_and_test(&amdgpu_fence_slab_ref)) + kmem_cache_destroy(amdgpu_fence_slab); mutex_lock(&adev->ring_lock); for (i = 0; i < AMDGPU_MAX_RINGS; i++) { struct amdgpu_ring *ring = adev->rings[i]; + if (!ring || !ring->fence_drv.initialized) continue; r = amdgpu_fence_wait_empty(ring); @@ -568,6 +567,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev) amdgpu_irq_put(adev, ring->fence_drv.irq_src, ring->fence_drv.irq_type); amd_sched_fini(&ring->sched); + del_timer_sync(&ring->fence_drv.fallback_timer); ring->fence_drv.initialized = false; } mutex_unlock(&adev->ring_lock); @@ -751,18 +751,25 @@ static bool amdgpu_fence_enable_signaling(struct fence *f) fence->fence_wake.func = amdgpu_fence_check_signaled; __add_wait_queue(&ring->fence_drv.fence_queue, &fence->fence_wake); fence_get(f); - amdgpu_fence_schedule_check(ring); + if (!timer_pending(&ring->fence_drv.fallback_timer)) + amdgpu_fence_schedule_fallback(ring); FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx); return true; } +static void amdgpu_fence_release(struct fence *f) +{ + struct amdgpu_fence *fence = to_amdgpu_fence(f); + kmem_cache_free(amdgpu_fence_slab, fence); +} + const struct fence_ops amdgpu_fence_ops = { .get_driver_name = amdgpu_fence_get_driver_name, .get_timeline_name = amdgpu_fence_get_timeline_name, .enable_signaling = amdgpu_fence_enable_signaling, .signaled = amdgpu_fence_is_signaled, .wait = fence_default_wait, - .release = NULL, + .release = amdgpu_fence_release, }; /* diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 087332858853..f6ea4b43a60c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -115,12 +115,9 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri struct amdgpu_vm *vm = &fpriv->vm; struct amdgpu_bo_va *bo_va; int r; - mutex_lock(&vm->mutex); r = amdgpu_bo_reserve(rbo, false); - if (r) { - 
mutex_unlock(&vm->mutex); + if (r) return r; - } bo_va = amdgpu_vm_bo_find(vm, rbo); if (!bo_va) { @@ -129,7 +126,6 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri ++bo_va->ref_count; } amdgpu_bo_unreserve(rbo); - mutex_unlock(&vm->mutex); return 0; } @@ -142,10 +138,8 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj, struct amdgpu_vm *vm = &fpriv->vm; struct amdgpu_bo_va *bo_va; int r; - mutex_lock(&vm->mutex); r = amdgpu_bo_reserve(rbo, true); if (r) { - mutex_unlock(&vm->mutex); dev_err(adev->dev, "leaking bo va because " "we fail to reserve bo (%d)\n", r); return; @@ -157,7 +151,6 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj, } } amdgpu_bo_unreserve(rbo); - mutex_unlock(&vm->mutex); } static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r) @@ -242,8 +235,9 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, AMDGPU_GEM_USERPTR_REGISTER)) return -EINVAL; - if (!(args->flags & AMDGPU_GEM_USERPTR_ANONONLY) || - !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) { + if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) && ( + !(args->flags & AMDGPU_GEM_USERPTR_ANONONLY) || + !(args->flags & AMDGPU_GEM_USERPTR_REGISTER))) { /* if we want to write to it we must require anonymous memory and install a MMU notifier */ @@ -483,6 +477,9 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, if (domain == AMDGPU_GEM_DOMAIN_CPU) goto error_unreserve; } + r = amdgpu_vm_update_page_directory(adev, bo_va->vm); + if (r) + goto error_unreserve; r = amdgpu_vm_clear_freed(adev, bo_va->vm); if (r) @@ -512,6 +509,9 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, struct amdgpu_fpriv *fpriv = filp->driver_priv; struct amdgpu_bo *rbo; struct amdgpu_bo_va *bo_va; + struct ttm_validate_buffer tv, tv_pd; + struct ww_acquire_ctx ticket; + struct list_head list, duplicates; uint32_t invalid_flags, va_flags = 0; int r = 0; @@ -547,19 +547,28 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, gobj = drm_gem_object_lookup(dev, filp, args->handle); if (gobj == NULL) return -ENOENT; - mutex_lock(&fpriv->vm.mutex); rbo = gem_to_amdgpu_bo(gobj); - r = amdgpu_bo_reserve(rbo, false); + INIT_LIST_HEAD(&list); + INIT_LIST_HEAD(&duplicates); + tv.bo = &rbo->tbo; + tv.shared = true; + list_add(&tv.head, &list); + + if (args->operation == AMDGPU_VA_OP_MAP) { + tv_pd.bo = &fpriv->vm.page_directory->tbo; + tv_pd.shared = true; + list_add(&tv_pd.head, &list); + } + r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates); if (r) { - mutex_unlock(&fpriv->vm.mutex); drm_gem_object_unreference_unlocked(gobj); return r; } bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo); if (!bo_va) { - amdgpu_bo_unreserve(rbo); - mutex_unlock(&fpriv->vm.mutex); + ttm_eu_backoff_reservation(&ticket, &list); + drm_gem_object_unreference_unlocked(gobj); return -ENOENT; } @@ -581,10 +590,10 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, default: break; } - + ttm_eu_backoff_reservation(&ticket, &list); if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE)) amdgpu_gem_va_update_vm(adev, bo_va, args->operation); - mutex_unlock(&fpriv->vm.mutex); + drm_gem_object_unreference_unlocked(gobj); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index e65987743871..9e25edafa721 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -62,7 +62,7 @@ int amdgpu_ib_get(struct amdgpu_ring *ring, struct amdgpu_vm *vm, int r; if (size) { - r = 
amdgpu_sa_bo_new(adev, &adev->ring_tmp_bo, + r = amdgpu_sa_bo_new(&adev->ring_tmp_bo, &ib->sa_bo, size, 256); if (r) { dev_err(adev->dev, "failed to get a new IB (%d)\n", r); @@ -216,7 +216,7 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs, } if (ib->vm) - amdgpu_vm_fence(adev, ib->vm, ib->fence); + amdgpu_vm_fence(adev, ib->vm, &ib->fence->base); amdgpu_ring_unlock_commit(ring); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 1618e2294a16..e23843f4d877 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -611,13 +611,59 @@ void amdgpu_driver_preclose_kms(struct drm_device *dev, u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe) { struct amdgpu_device *adev = dev->dev_private; + int vpos, hpos, stat; + u32 count; if (pipe >= adev->mode_info.num_crtc) { DRM_ERROR("Invalid crtc %u\n", pipe); return -EINVAL; } - return amdgpu_display_vblank_get_counter(adev, pipe); + /* The hw increments its frame counter at start of vsync, not at start + * of vblank, as is required by DRM core vblank counter handling. + * Cook the hw count here to make it appear to the caller as if it + * incremented at start of vblank. We measure distance to start of + * vblank in vpos. vpos therefore will be >= 0 between start of vblank + * and start of vsync, so vpos >= 0 means to bump the hw frame counter + * result by 1 to give the proper appearance to caller. + */ + if (adev->mode_info.crtcs[pipe]) { + /* Repeat readout if needed to provide stable result if + * we cross start of vsync during the queries. + */ + do { + count = amdgpu_display_vblank_get_counter(adev, pipe); + /* Ask amdgpu_get_crtc_scanoutpos to return vpos as + * distance to start of vblank, instead of regular + * vertical scanout pos. + */ + stat = amdgpu_get_crtc_scanoutpos( + dev, pipe, GET_DISTANCE_TO_VBLANKSTART, + &vpos, &hpos, NULL, NULL, + &adev->mode_info.crtcs[pipe]->base.hwmode); + } while (count != amdgpu_display_vblank_get_counter(adev, pipe)); + + if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) != + (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) { + DRM_DEBUG_VBL("Query failed! stat %d\n", stat); + } else { + DRM_DEBUG_VBL("crtc %d: dist from vblank start %d\n", + pipe, vpos); + + /* Bump counter if we are at >= leading edge of vblank, + * but before vsync where vpos would turn negative and + * the hw counter really increments. + */ + if (vpos >= 0) + count++; + } + } else { + /* Fallback to use value as is. */ + count = amdgpu_display_vblank_get_counter(adev, pipe); + DRM_DEBUG_VBL("NULL mode info! 
Returned count may be wrong.\n"); + } + + return count; } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h index b62c1710cab6..064ebb347074 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h @@ -407,6 +407,7 @@ struct amdgpu_crtc { u32 line_time; u32 wm_low; u32 wm_high; + u32 lb_vblank_lead_lines; struct drm_display_mode hw_mode; }; @@ -528,6 +529,10 @@ struct amdgpu_framebuffer { #define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \ ((em) == ATOM_ENCODER_MODE_DP_MST)) +/* Driver internal use only flags of amdgpu_get_crtc_scanoutpos() */ +#define USE_REAL_VBLANKSTART (1 << 30) +#define GET_DISTANCE_TO_VBLANKSTART (1 << 31) + void amdgpu_link_encoder_connector(struct drm_device *dev); struct drm_connector * diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 0d524384ff79..c3ce103b6a33 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -100,6 +100,7 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo) list_del_init(&bo->list); mutex_unlock(&bo->adev->gem.mutex); drm_gem_object_release(&bo->gem_base); + amdgpu_bo_unref(&bo->parent); kfree(bo->metadata); kfree(bo); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 3c2ff4567798..ea756e77b023 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -189,10 +189,9 @@ int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev, struct amdgpu_sa_manager *sa_manager); int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev, struct amdgpu_sa_manager *sa_manager); -int amdgpu_sa_bo_new(struct amdgpu_device *adev, - struct amdgpu_sa_manager *sa_manager, - struct amdgpu_sa_bo **sa_bo, - unsigned size, unsigned align); +int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager, + struct amdgpu_sa_bo **sa_bo, + unsigned size, unsigned align); void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo, struct fence *fence); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c index 0212b31dc194..8b88edb0434b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c @@ -311,8 +311,7 @@ static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager, return false; } -int amdgpu_sa_bo_new(struct amdgpu_device *adev, - struct amdgpu_sa_manager *sa_manager, +int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager, struct amdgpu_sa_bo **sa_bo, unsigned size, unsigned align) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c index dcf4a8aca680..438c05254695 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c @@ -26,6 +26,7 @@ #include <linux/sched.h> #include <drm/drmP.h> #include "amdgpu.h" +#include "amdgpu_trace.h" static struct fence *amdgpu_sched_dependency(struct amd_sched_job *sched_job) { @@ -44,11 +45,8 @@ static struct fence *amdgpu_sched_run_job(struct amd_sched_job *sched_job) return NULL; } job = to_amdgpu_job(sched_job); - mutex_lock(&job->job_lock); - r = amdgpu_ib_schedule(job->adev, - job->num_ibs, - job->ibs, - job->base.owner); + trace_amdgpu_sched_run_job(job); + r = amdgpu_ib_schedule(job->adev, job->num_ibs, job->ibs, job->owner); if (r) { DRM_ERROR("Error scheduling IBs (%d)\n", r); 
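/* [editor's note] "goto err" follows the usual kernel unwind idiom: a
 * single exit path releases everything acquired so far, so each new
 * failure site costs one line. The usb-dmac probe fix earlier in this
 * diff is the same idea with staged labels, so that a failed
 * pm_runtime_get_sync() still reaches pm_runtime_disable():
 *
 *     error:
 *             of_dma_controller_free(pdev->dev.of_node);
 *             pm_runtime_put(&pdev->dev);
 *     error_pm:
 *             pm_runtime_disable(&pdev->dev);
 *             return ret;
 */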
goto err; @@ -61,8 +59,6 @@ err: if (job->free_job) job->free_job(job); - mutex_unlock(&job->job_lock); - fence_put(&job->base.s_fence->base); kfree(job); return fence ? &fence->base : NULL; } @@ -88,21 +84,19 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev, return -ENOMEM; job->base.sched = &ring->sched; job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity; + job->base.s_fence = amd_sched_fence_create(job->base.s_entity, owner); + if (!job->base.s_fence) { + kfree(job); + return -ENOMEM; + } + *f = fence_get(&job->base.s_fence->base); + job->adev = adev; job->ibs = ibs; job->num_ibs = num_ibs; - job->base.owner = owner; - mutex_init(&job->job_lock); + job->owner = owner; job->free_job = free_job; - mutex_lock(&job->job_lock); - r = amd_sched_entity_push_job(&job->base); - if (r) { - mutex_unlock(&job->job_lock); - kfree(job); - return r; - } - *f = fence_get(&job->base.s_fence->base); - mutex_unlock(&job->job_lock); + amd_sched_entity_push_job(&job->base); } else { r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c index ff3ca52ec6fe..1caaf201b708 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c @@ -40,7 +40,7 @@ int amdgpu_semaphore_create(struct amdgpu_device *adev, if (*semaphore == NULL) { return -ENOMEM; } - r = amdgpu_sa_bo_new(adev, &adev->ring_tmp_bo, + r = amdgpu_sa_bo_new(&adev->ring_tmp_bo, &(*semaphore)->sa_bo, 8, 8); if (r) { kfree(*semaphore); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c index a6697fd05217..dd005c336c97 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c @@ -302,8 +302,14 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync, return -EINVAL; } - if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores || - (count >= AMDGPU_NUM_SYNCS)) { + if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores) { + r = fence_wait(&fence->base, true); + if (r) + return r; + continue; + } + + if (count >= AMDGPU_NUM_SYNCS) { /* not enough room, wait manually */ r = fence_wait(&fence->base, false); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index 76ecbaf72a2e..8f9834ab1bd5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h @@ -48,6 +48,57 @@ TRACE_EVENT(amdgpu_cs, __entry->fences) ); +TRACE_EVENT(amdgpu_cs_ioctl, + TP_PROTO(struct amdgpu_job *job), + TP_ARGS(job), + TP_STRUCT__entry( + __field(struct amdgpu_device *, adev) + __field(struct amd_sched_job *, sched_job) + __field(struct amdgpu_ib *, ib) + __field(struct fence *, fence) + __field(char *, ring_name) + __field(u32, num_ibs) + ), + + TP_fast_assign( + __entry->adev = job->adev; + __entry->sched_job = &job->base; + __entry->ib = job->ibs; + __entry->fence = &job->base.s_fence->base; + __entry->ring_name = job->ibs[0].ring->name; + __entry->num_ibs = job->num_ibs; + ), + TP_printk("adev=%p, sched_job=%p, first ib=%p, sched fence=%p, ring name:%s, num_ibs:%u", + __entry->adev, __entry->sched_job, __entry->ib, + __entry->fence, __entry->ring_name, __entry->num_ibs) +); + +TRACE_EVENT(amdgpu_sched_run_job, + TP_PROTO(struct amdgpu_job *job), + TP_ARGS(job), + TP_STRUCT__entry( + __field(struct amdgpu_device *, adev) + __field(struct amd_sched_job *, sched_job) + __field(struct amdgpu_ib *, ib) + 
__field(struct fence *, fence) + __field(char *, ring_name) + __field(u32, num_ibs) + ), + + TP_fast_assign( + __entry->adev = job->adev; + __entry->sched_job = &job->base; + __entry->ib = job->ibs; + __entry->fence = &job->base.s_fence->base; + __entry->ring_name = job->ibs[0].ring->name; + __entry->num_ibs = job->num_ibs; + ), + TP_printk("adev=%p, sched_job=%p, first ib=%p, sched fence=%p, ring name:%s, num_ibs:%u", + __entry->adev, __entry->sched_job, __entry->ib, + __entry->fence, __entry->ring_name, __entry->num_ibs) +); + + TRACE_EVENT(amdgpu_vm_grab_id, TP_PROTO(unsigned vmid, int ring), TP_ARGS(vmid, ring), @@ -196,49 +247,6 @@ TRACE_EVENT(amdgpu_bo_list_set, TP_printk("list=%p, bo=%p", __entry->list, __entry->bo) ); -DECLARE_EVENT_CLASS(amdgpu_fence_request, - - TP_PROTO(struct drm_device *dev, int ring, u32 seqno), - - TP_ARGS(dev, ring, seqno), - - TP_STRUCT__entry( - __field(u32, dev) - __field(int, ring) - __field(u32, seqno) - ), - - TP_fast_assign( - __entry->dev = dev->primary->index; - __entry->ring = ring; - __entry->seqno = seqno; - ), - - TP_printk("dev=%u, ring=%d, seqno=%u", - __entry->dev, __entry->ring, __entry->seqno) -); - -DEFINE_EVENT(amdgpu_fence_request, amdgpu_fence_emit, - - TP_PROTO(struct drm_device *dev, int ring, u32 seqno), - - TP_ARGS(dev, ring, seqno) -); - -DEFINE_EVENT(amdgpu_fence_request, amdgpu_fence_wait_begin, - - TP_PROTO(struct drm_device *dev, int ring, u32 seqno), - - TP_ARGS(dev, ring, seqno) -); - -DEFINE_EVENT(amdgpu_fence_request, amdgpu_fence_wait_end, - - TP_PROTO(struct drm_device *dev, int ring, u32 seqno), - - TP_ARGS(dev, ring, seqno) -); - DECLARE_EVENT_CLASS(amdgpu_semaphore_request, TP_PROTO(int ring, struct amdgpu_semaphore *sem), diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 81bb8e9fc26d..8a1752ff3d8e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -587,9 +587,13 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm, uint32_t flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem); int r; - if (gtt->userptr) - amdgpu_ttm_tt_pin_userptr(ttm); - + if (gtt->userptr) { + r = amdgpu_ttm_tt_pin_userptr(ttm); + if (r) { + DRM_ERROR("failed to pin userptr\n"); + return r; + } + } gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT); if (!ttm->num_pages) { WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", @@ -797,11 +801,12 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, if (mem && mem->mem_type != TTM_PL_SYSTEM) flags |= AMDGPU_PTE_VALID; - if (mem && mem->mem_type == TTM_PL_TT) + if (mem && mem->mem_type == TTM_PL_TT) { flags |= AMDGPU_PTE_SYSTEM; - if (!ttm || ttm->caching_state == tt_cached) - flags |= AMDGPU_PTE_SNOOPED; + if (ttm->caching_state == tt_cached) + flags |= AMDGPU_PTE_SNOOPED; + } if (adev->asic_type >= CHIP_TOPAZ) flags |= AMDGPU_PTE_EXECUTABLE; @@ -1073,10 +1078,10 @@ static int amdgpu_mm_dump_table(struct seq_file *m, void *data) ret = drm_mm_dump_table(m, mm); spin_unlock(&glob->lru_lock); if (ttm_pl == TTM_PL_VRAM) - seq_printf(m, "man size:%llu pages, ram usage:%luMB, vis usage:%luMB\n", + seq_printf(m, "man size:%llu pages, ram usage:%lluMB, vis usage:%lluMB\n", adev->mman.bdev.man[ttm_pl].size, - atomic64_read(&adev->vram_usage) >> 20, - atomic64_read(&adev->vram_vis_usage) >> 20); + (u64)atomic64_read(&adev->vram_usage) >> 20, + (u64)atomic64_read(&adev->vram_vis_usage) >> 20); return ret; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 03f0c3bae516..a745eeeb5d82 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -392,7 +392,10 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */ ib->ptr[ib->length_dw++] = handle; - ib->ptr[ib->length_dw++] = 0x00000030; /* len */ + if ((ring->adev->vce.fw_version >> 24) >= 52) + ib->ptr[ib->length_dw++] = 0x00000040; /* len */ + else + ib->ptr[ib->length_dw++] = 0x00000030; /* len */ ib->ptr[ib->length_dw++] = 0x01000001; /* create cmd */ ib->ptr[ib->length_dw++] = 0x00000000; ib->ptr[ib->length_dw++] = 0x00000042; @@ -404,6 +407,12 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, ib->ptr[ib->length_dw++] = 0x00000100; ib->ptr[ib->length_dw++] = 0x0000000c; ib->ptr[ib->length_dw++] = 0x00000000; + if ((ring->adev->vce.fw_version >> 24) >= 52) { + ib->ptr[ib->length_dw++] = 0x00000000; + ib->ptr[ib->length_dw++] = 0x00000000; + ib->ptr[ib->length_dw++] = 0x00000000; + ib->ptr[ib->length_dw++] = 0x00000000; + } ib->ptr[ib->length_dw++] = 0x00000014; /* len */ ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 633a32a48560..b53d273eb7a1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -143,10 +143,15 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, unsigned i; /* check if the id is still valid */ - if (vm_id->id && vm_id->last_id_use && - vm_id->last_id_use == adev->vm_manager.active[vm_id->id]) { - trace_amdgpu_vm_grab_id(vm_id->id, ring->idx); - return 0; + if (vm_id->id) { + unsigned id = vm_id->id; + long owner; + + owner = atomic_long_read(&adev->vm_manager.ids[id].owner); + if (owner == (long)vm) { + trace_amdgpu_vm_grab_id(vm_id->id, ring->idx); + return 0; + } } /* we definately need to flush */ @@ -154,7 +159,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, /* skip over VMID 0, since it is the system VM */ for (i = 1; i < adev->vm_manager.nvm; ++i) { - struct fence *fence = adev->vm_manager.active[i]; + struct fence *fence = adev->vm_manager.ids[i].active; struct amdgpu_ring *fring; if (fence == NULL) { @@ -176,7 +181,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, if (choices[i]) { struct fence *fence; - fence = adev->vm_manager.active[choices[i]]; + fence = adev->vm_manager.ids[choices[i]].active; vm_id->id = choices[i]; trace_amdgpu_vm_grab_id(choices[i], ring->idx); @@ -207,24 +212,21 @@ void amdgpu_vm_flush(struct amdgpu_ring *ring, uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory); struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx]; struct fence *flushed_updates = vm_id->flushed_updates; - bool is_earlier = false; + bool is_later; - if (flushed_updates && updates) { - BUG_ON(flushed_updates->context != updates->context); - is_earlier = (updates->seqno - flushed_updates->seqno <= - INT_MAX) ? 
true : false; - } - - if (pd_addr != vm_id->pd_gpu_addr || !flushed_updates || - is_earlier) { + if (!flushed_updates) + is_later = true; + else if (!updates) + is_later = false; + else + is_later = fence_is_later(updates, flushed_updates); + if (pd_addr != vm_id->pd_gpu_addr || is_later) { trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id->id); - if (is_earlier) { + if (is_later) { vm_id->flushed_updates = fence_get(updates); fence_put(flushed_updates); } - if (!flushed_updates) - vm_id->flushed_updates = fence_get(updates); vm_id->pd_gpu_addr = pd_addr; amdgpu_ring_emit_vm_flush(ring, vm_id->id, vm_id->pd_gpu_addr); } @@ -244,16 +246,14 @@ void amdgpu_vm_flush(struct amdgpu_ring *ring, */ void amdgpu_vm_fence(struct amdgpu_device *adev, struct amdgpu_vm *vm, - struct amdgpu_fence *fence) + struct fence *fence) { - unsigned ridx = fence->ring->idx; - unsigned vm_id = vm->ids[ridx].id; - - fence_put(adev->vm_manager.active[vm_id]); - adev->vm_manager.active[vm_id] = fence_get(&fence->base); + struct amdgpu_ring *ring = amdgpu_ring_from_fence(fence); + unsigned vm_id = vm->ids[ring->idx].id; - fence_put(vm->ids[ridx].last_id_use); - vm->ids[ridx].last_id_use = fence_get(&fence->base); + fence_put(adev->vm_manager.ids[vm_id].active); + adev->vm_manager.ids[vm_id].active = fence_get(fence); + atomic_long_set(&adev->vm_manager.ids[vm_id].owner, (long)vm); } /** @@ -332,6 +332,8 @@ int amdgpu_vm_free_job(struct amdgpu_job *job) * * @adev: amdgpu_device pointer * @bo: bo to clear + * + * need to reserve bo first before calling it. */ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, struct amdgpu_bo *bo) @@ -343,24 +345,20 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, uint64_t addr; int r; - r = amdgpu_bo_reserve(bo, false); - if (r) - return r; - r = reservation_object_reserve_shared(bo->tbo.resv); if (r) return r; r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); if (r) - goto error_unreserve; + goto error; addr = amdgpu_bo_gpu_offset(bo); entries = amdgpu_bo_size(bo) / 8; ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL); if (!ib) - goto error_unreserve; + goto error; r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, ib); if (r) @@ -378,16 +376,14 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, if (!r) amdgpu_bo_fence(bo, fence, true); fence_put(fence); - if (amdgpu_enable_scheduler) { - amdgpu_bo_unreserve(bo); + if (amdgpu_enable_scheduler) return 0; - } + error_free: amdgpu_ib_free(adev, ib); kfree(ib); -error_unreserve: - amdgpu_bo_unreserve(bo); +error: return r; } @@ -889,17 +885,21 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev, struct amdgpu_bo_va_mapping *mapping; int r; + spin_lock(&vm->freed_lock); while (!list_empty(&vm->freed)) { mapping = list_first_entry(&vm->freed, struct amdgpu_bo_va_mapping, list); list_del(&mapping->list); - + spin_unlock(&vm->freed_lock); r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, 0, 0, NULL); kfree(mapping); if (r) return r; + spin_lock(&vm->freed_lock); } + spin_unlock(&vm->freed_lock); + return 0; } @@ -926,8 +926,9 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va, vm_status); spin_unlock(&vm->status_lock); - + mutex_lock(&bo_va->mutex); r = amdgpu_vm_bo_update(adev, bo_va, NULL); + mutex_unlock(&bo_va->mutex); if (r) return r; @@ -971,7 +972,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, INIT_LIST_HEAD(&bo_va->valids); INIT_LIST_HEAD(&bo_va->invalids); INIT_LIST_HEAD(&bo_va->vm_status); - + 
mutex_init(&bo_va->mutex); list_add_tail(&bo_va->bo_list, &bo->va); return bo_va; @@ -989,7 +990,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, * Add a mapping of the BO at the specefied addr into the VM. * Returns 0 for success, error for failure. * - * Object has to be reserved and gets unreserved by this function! + * Object has to be reserved and unreserved outside! */ int amdgpu_vm_bo_map(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, @@ -1005,30 +1006,27 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, /* validate the parameters */ if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK || - size == 0 || size & AMDGPU_GPU_PAGE_MASK) { - amdgpu_bo_unreserve(bo_va->bo); + size == 0 || size & AMDGPU_GPU_PAGE_MASK) return -EINVAL; - } /* make sure object fit at this offset */ eaddr = saddr + size; - if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo))) { - amdgpu_bo_unreserve(bo_va->bo); + if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo))) return -EINVAL; - } last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE; if (last_pfn > adev->vm_manager.max_pfn) { dev_err(adev->dev, "va above limit (0x%08X > 0x%08X)\n", last_pfn, adev->vm_manager.max_pfn); - amdgpu_bo_unreserve(bo_va->bo); return -EINVAL; } saddr /= AMDGPU_GPU_PAGE_SIZE; eaddr /= AMDGPU_GPU_PAGE_SIZE; + spin_lock(&vm->it_lock); it = interval_tree_iter_first(&vm->va, saddr, eaddr - 1); + spin_unlock(&vm->it_lock); if (it) { struct amdgpu_bo_va_mapping *tmp; tmp = container_of(it, struct amdgpu_bo_va_mapping, it); @@ -1036,14 +1034,12 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with " "0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr, tmp->it.start, tmp->it.last + 1); - amdgpu_bo_unreserve(bo_va->bo); r = -EINVAL; goto error; } mapping = kmalloc(sizeof(*mapping), GFP_KERNEL); if (!mapping) { - amdgpu_bo_unreserve(bo_va->bo); r = -ENOMEM; goto error; } @@ -1054,8 +1050,12 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, mapping->offset = offset; mapping->flags = flags; + mutex_lock(&bo_va->mutex); list_add(&mapping->list, &bo_va->invalids); + mutex_unlock(&bo_va->mutex); + spin_lock(&vm->it_lock); interval_tree_insert(&mapping->it, &vm->va); + spin_unlock(&vm->it_lock); trace_amdgpu_vm_bo_map(bo_va, mapping); /* Make sure the page tables are allocated */ @@ -1067,8 +1067,6 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, if (eaddr > vm->max_pde_used) vm->max_pde_used = eaddr; - amdgpu_bo_unreserve(bo_va->bo); - /* walk over the address space and allocate the page tables */ for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) { struct reservation_object *resv = vm->page_directory->tbo.resv; @@ -1077,16 +1075,19 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, if (vm->page_tables[pt_idx].bo) continue; - ww_mutex_lock(&resv->lock, NULL); r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8, AMDGPU_GPU_PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_CREATE_NO_CPU_ACCESS, NULL, resv, &pt); - ww_mutex_unlock(&resv->lock); if (r) goto error_free; + /* Keep a reference to the page table to avoid freeing + * them up in the wrong order. 
+ */ + pt->parent = amdgpu_bo_ref(vm->page_directory); + r = amdgpu_vm_clear_bo(adev, pt); if (r) { amdgpu_bo_unref(&pt); @@ -1101,7 +1102,9 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, error_free: list_del(&mapping->list); + spin_lock(&vm->it_lock); interval_tree_remove(&mapping->it, &vm->va); + spin_unlock(&vm->it_lock); trace_amdgpu_vm_bo_unmap(bo_va, mapping); kfree(mapping); @@ -1119,7 +1122,7 @@ error: * Remove a mapping of the BO at the specefied addr from the VM. * Returns 0 for success, error for failure. * - * Object has to be reserved and gets unreserved by this function! + * Object has to be reserved and unreserved outside! */ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, @@ -1130,7 +1133,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, bool valid = true; saddr /= AMDGPU_GPU_PAGE_SIZE; - + mutex_lock(&bo_va->mutex); list_for_each_entry(mapping, &bo_va->valids, list) { if (mapping->it.start == saddr) break; @@ -1145,20 +1148,24 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, } if (&mapping->list == &bo_va->invalids) { - amdgpu_bo_unreserve(bo_va->bo); + mutex_unlock(&bo_va->mutex); return -ENOENT; } } - + mutex_unlock(&bo_va->mutex); list_del(&mapping->list); + spin_lock(&vm->it_lock); interval_tree_remove(&mapping->it, &vm->va); + spin_unlock(&vm->it_lock); trace_amdgpu_vm_bo_unmap(bo_va, mapping); - if (valid) + if (valid) { + spin_lock(&vm->freed_lock); list_add(&mapping->list, &vm->freed); - else + spin_unlock(&vm->freed_lock); + } else { kfree(mapping); - amdgpu_bo_unreserve(bo_va->bo); + } return 0; } @@ -1187,17 +1194,23 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, list_for_each_entry_safe(mapping, next, &bo_va->valids, list) { list_del(&mapping->list); + spin_lock(&vm->it_lock); interval_tree_remove(&mapping->it, &vm->va); + spin_unlock(&vm->it_lock); trace_amdgpu_vm_bo_unmap(bo_va, mapping); + spin_lock(&vm->freed_lock); list_add(&mapping->list, &vm->freed); + spin_unlock(&vm->freed_lock); } list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) { list_del(&mapping->list); + spin_lock(&vm->it_lock); interval_tree_remove(&mapping->it, &vm->va); + spin_unlock(&vm->it_lock); kfree(mapping); } - fence_put(bo_va->last_pt_update); + mutex_destroy(&bo_va->mutex); kfree(bo_va); } @@ -1241,15 +1254,14 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { vm->ids[i].id = 0; vm->ids[i].flushed_updates = NULL; - vm->ids[i].last_id_use = NULL; } - mutex_init(&vm->mutex); vm->va = RB_ROOT; spin_lock_init(&vm->status_lock); INIT_LIST_HEAD(&vm->invalidated); INIT_LIST_HEAD(&vm->cleared); INIT_LIST_HEAD(&vm->freed); - + spin_lock_init(&vm->it_lock); + spin_lock_init(&vm->freed_lock); pd_size = amdgpu_vm_directory_size(adev); pd_entries = amdgpu_vm_num_pdes(adev); @@ -1269,8 +1281,14 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) NULL, NULL, &vm->page_directory); if (r) return r; - + r = amdgpu_bo_reserve(vm->page_directory, false); + if (r) { + amdgpu_bo_unref(&vm->page_directory); + vm->page_directory = NULL; + return r; + } r = amdgpu_vm_clear_bo(adev, vm->page_directory); + amdgpu_bo_unreserve(vm->page_directory); if (r) { amdgpu_bo_unref(&vm->page_directory); vm->page_directory = NULL; @@ -1313,11 +1331,27 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) amdgpu_bo_unref(&vm->page_directory); fence_put(vm->page_directory_fence); - for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { + unsigned id = vm->ids[i].id; + + 
atomic_long_cmpxchg(&adev->vm_manager.ids[id].owner, + (long)vm, 0); fence_put(vm->ids[i].flushed_updates); - fence_put(vm->ids[i].last_id_use); } - mutex_destroy(&vm->mutex); +} + +/** + * amdgpu_vm_manager_fini - cleanup VM manager + * + * @adev: amdgpu_device pointer + * + * Cleanup the VM manager and free resources. + */ +void amdgpu_vm_manager_fini(struct amdgpu_device *adev) +{ + unsigned i; + + for (i = 0; i < AMDGPU_NUM_VM; ++i) + fence_put(adev->vm_manager.ids[i].active); } diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c index a1a35a5df8e7..57a2e347f04d 100644 --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c @@ -6569,12 +6569,12 @@ static int ci_dpm_set_interrupt_state(struct amdgpu_device *adev, switch (state) { case AMDGPU_IRQ_STATE_DISABLE: cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT); - cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; + cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int); break; case AMDGPU_IRQ_STATE_ENABLE: cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT); - cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; + cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int); break; default: @@ -6586,12 +6586,12 @@ static int ci_dpm_set_interrupt_state(struct amdgpu_device *adev, switch (state) { case AMDGPU_IRQ_STATE_DISABLE: cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT); - cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; + cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int); break; case AMDGPU_IRQ_STATE_ENABLE: cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT); - cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; + cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int); break; default: diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index cb0f7747e3dc..4dcc8fba5792 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -1250,7 +1250,7 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev, u32 pixel_period; u32 line_time = 0; u32 latency_watermark_a = 0, latency_watermark_b = 0; - u32 tmp, wm_mask; + u32 tmp, wm_mask, lb_vblank_lead_lines = 0; if (amdgpu_crtc->base.enabled && num_heads && mode) { pixel_period = 1000000 / (u32)mode->clock; @@ -1333,6 +1333,7 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev, (adev->mode_info.disp_priority == 2)) { DRM_DEBUG_KMS("force priority to high\n"); } + lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay); } /* select wm A */ @@ -1357,6 +1358,8 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev, amdgpu_crtc->line_time = line_time; amdgpu_crtc->wm_high = latency_watermark_a; amdgpu_crtc->wm_low = latency_watermark_b; + /* Save number of lines the linebuffer leads before the scanout */ + amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines; } /** diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 5af3721851d6..8f1e51128b33 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -1238,7 +1238,7 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev, u32 pixel_period; u32 line_time = 0; u32 latency_watermark_a = 0, latency_watermark_b = 0; - u32 
tmp, wm_mask; + u32 tmp, wm_mask, lb_vblank_lead_lines = 0; if (amdgpu_crtc->base.enabled && num_heads && mode) { pixel_period = 1000000 / (u32)mode->clock; @@ -1321,6 +1321,7 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev, (adev->mode_info.disp_priority == 2)) { DRM_DEBUG_KMS("force priority to high\n"); } + lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay); } /* select wm A */ @@ -1345,6 +1346,8 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev, amdgpu_crtc->line_time = line_time; amdgpu_crtc->wm_high = latency_watermark_a; amdgpu_crtc->wm_low = latency_watermark_b; + /* Save number of lines the linebuffer leads before the scanout */ + amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines; } /** diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 4f7b49a6dc50..42d954dc436d 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -1193,7 +1193,7 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev, u32 pixel_period; u32 line_time = 0; u32 latency_watermark_a = 0, latency_watermark_b = 0; - u32 tmp, wm_mask; + u32 tmp, wm_mask, lb_vblank_lead_lines = 0; if (amdgpu_crtc->base.enabled && num_heads && mode) { pixel_period = 1000000 / (u32)mode->clock; @@ -1276,6 +1276,7 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev, (adev->mode_info.disp_priority == 2)) { DRM_DEBUG_KMS("force priority to high\n"); } + lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay); } /* select wm A */ @@ -1302,6 +1303,8 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev, amdgpu_crtc->line_time = line_time; amdgpu_crtc->wm_high = latency_watermark_a; amdgpu_crtc->wm_low = latency_watermark_b; + /* Save number of lines the linebuffer leads before the scanout */ + amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines; } /** diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 6776cf756d40..e1dcab98e249 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -268,7 +268,6 @@ static const u32 fiji_mgcg_cgcg_init[] = mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100, mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100, mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100, - mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100, mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100, mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100, mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100, @@ -296,10 +295,6 @@ static const u32 fiji_mgcg_cgcg_init[] = mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200, mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100, mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c, - mmPCIE_INDEX, 0xffffffff, 0x0140001c, - mmPCIE_DATA, 0x000f0000, 0x00000000, - mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100, - mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104, mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001, }; @@ -1000,7 +995,7 @@ static void gfx_v8_0_gpu_early_init(struct amdgpu_device *adev) adev->gfx.config.max_cu_per_sh = 16; adev->gfx.config.max_sh_per_se = 1; adev->gfx.config.max_backends_per_se = 4; - adev->gfx.config.max_texture_channel_caches = 8; + adev->gfx.config.max_texture_channel_caches = 16; adev->gfx.config.max_gprs = 256; adev->gfx.config.max_gs_threads = 32; adev->gfx.config.max_hw_contexts = 8; @@ -1613,6 +1608,296 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev) WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden); } case CHIP_FIJI: + for 
(reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { + switch (reg_offset) { + case 0: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + break; + case 1: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + break; + case 2: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + break; + case 3: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + break; + case 4: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + break; + case 5: + gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + break; + case 6: + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + break; + case 7: + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + break; + case 8: + gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16)); + break; + case 9: + gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + break; + case 10: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + break; + case 11: + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + break; + case 12: + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + break; + case 13: + gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + break; + case 14: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + break; + case 15: + gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + break; + case 16: + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + 
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + break; + case 17: + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + break; + case 18: + gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 19: + gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 20: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 21: + gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 22: + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 23: + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 24: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 25: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 26: + gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 27: + gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + break; + case 28: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + break; + case 29: + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + break; + case 30: + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + break; + default: + gb_tile_moden = 0; + break; + } + adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden; + WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden); + } + for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) { + switch (reg_offset) { + case 0: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + break; + case 1: + gb_tile_moden = 
(BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + break; + case 2: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + break; + case 3: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + break; + case 4: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_8_BANK)); + break; + case 5: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_8_BANK)); + break; + case 6: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_8_BANK)); + break; + case 8: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + break; + case 9: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + break; + case 10: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_8_BANK)); + break; + case 11: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_8_BANK)); + break; + case 12: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + break; + case 13: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + break; + case 14: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_4_BANK)); + break; + case 7: + /* unused idx */ + continue; + default: + gb_tile_moden = 0; + break; + } + adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden; + WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden); + } + break; case CHIP_TONGA: for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { switch (reg_offset) { @@ -2971,10 +3256,13 @@ static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev) amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START); switch (adev->asic_type) { case CHIP_TONGA: - case CHIP_FIJI: amdgpu_ring_write(ring, 0x16000012); amdgpu_ring_write(ring, 0x0000002A); break; + case CHIP_FIJI: + amdgpu_ring_write(ring, 0x3a00161a); + amdgpu_ring_write(ring, 0x0000002e); + break; case CHIP_TOPAZ: case CHIP_CARRIZO: amdgpu_ring_write(ring, 0x00000002); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 85bbcdc73fff..ed8abb58a785 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ 
b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -40,7 +40,7 @@ static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev); static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev); -MODULE_FIRMWARE("radeon/boniare_mc.bin"); +MODULE_FIRMWARE("radeon/bonaire_mc.bin"); MODULE_FIRMWARE("radeon/hawaii_mc.bin"); /** @@ -501,6 +501,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1); WREG32(mmVM_L2_CNTL, tmp); tmp = REG_SET_FIELD(0, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1); @@ -512,7 +513,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev) WREG32(mmVM_L2_CNTL3, tmp); /* setup context0 */ WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12); - WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, (adev->mc.gtt_end >> 12) - 1); + WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12); WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12); WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, (u32)(adev->dummy_page.addr >> 12)); @@ -960,12 +961,10 @@ static int gmc_v7_0_sw_init(void *handle) static int gmc_v7_0_sw_fini(void *handle) { - int i; struct amdgpu_device *adev = (struct amdgpu_device *)handle; if (adev->vm_manager.enabled) { - for (i = 0; i < AMDGPU_NUM_VM; ++i) - fence_put(adev->vm_manager.active[i]); + amdgpu_vm_manager_fini(adev); gmc_v7_0_vm_fini(adev); adev->vm_manager.enabled = false; } @@ -1010,12 +1009,10 @@ static int gmc_v7_0_hw_fini(void *handle) static int gmc_v7_0_suspend(void *handle) { - int i; struct amdgpu_device *adev = (struct amdgpu_device *)handle; if (adev->vm_manager.enabled) { - for (i = 0; i < AMDGPU_NUM_VM; ++i) - fence_put(adev->vm_manager.active[i]); + amdgpu_vm_manager_fini(adev); gmc_v7_0_vm_fini(adev); adev->vm_manager.enabled = false; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 1bcc4e74e3b4..d39028440814 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -629,6 +629,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1); WREG32(mmVM_L2_CNTL, tmp); tmp = RREG32(mmVM_L2_CNTL2); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1); @@ -656,7 +657,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev) WREG32(mmVM_L2_CNTL4, tmp); /* setup context0 */ WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12); - WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, (adev->mc.gtt_end >> 12) - 1); + WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12); WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12); WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, (u32)(adev->dummy_page.addr >> 12)); @@ -979,12 +980,10 @@ static int gmc_v8_0_sw_init(void *handle) static int gmc_v8_0_sw_fini(void *handle) { - int i; struct amdgpu_device *adev = (struct amdgpu_device *)handle; if 
(adev->vm_manager.enabled) { - for (i = 0; i < AMDGPU_NUM_VM; ++i) - fence_put(adev->vm_manager.active[i]); + amdgpu_vm_manager_fini(adev); gmc_v8_0_vm_fini(adev); adev->vm_manager.enabled = false; } @@ -1031,12 +1030,10 @@ static int gmc_v8_0_hw_fini(void *handle) static int gmc_v8_0_suspend(void *handle) { - int i; struct amdgpu_device *adev = (struct amdgpu_device *)handle; if (adev->vm_manager.enabled) { - for (i = 0; i < AMDGPU_NUM_VM; ++i) - fence_put(adev->vm_manager.active[i]); + amdgpu_vm_manager_fini(adev); gmc_v8_0_vm_fini(adev); adev->vm_manager.enabled = false; } diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index 6a52db6ad8d7..370c6c9d81c2 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -40,6 +40,9 @@ #define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04 #define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10 +#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0 0x8616 +#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1 0x8617 +#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2 0x8618 #define VCE_V3_0_FW_SIZE (384 * 1024) #define VCE_V3_0_STACK_SIZE (64 * 1024) @@ -130,9 +133,11 @@ static int vce_v3_0_start(struct amdgpu_device *adev) /* set BUSY flag */ WREG32_P(mmVCE_STATUS, 1, ~1); - - WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, - ~VCE_VCPU_CNTL__CLK_EN_MASK); + if (adev->asic_type >= CHIP_STONEY) + WREG32_P(mmVCE_VCPU_CNTL, 1, ~0x200001); + else + WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, + ~VCE_VCPU_CNTL__CLK_EN_MASK); WREG32_P(mmVCE_SOFT_RESET, VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, @@ -391,8 +396,12 @@ static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx) WREG32(mmVCE_LMI_SWAP_CNTL, 0); WREG32(mmVCE_LMI_SWAP_CNTL1, 0); WREG32(mmVCE_LMI_VM_CTRL, 0); - - WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR, (adev->vce.gpu_addr >> 8)); + if (adev->asic_type >= CHIP_STONEY) { + WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR0, (adev->vce.gpu_addr >> 8)); + WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR1, (adev->vce.gpu_addr >> 8)); + WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR2, (adev->vce.gpu_addr >> 8)); + } else + WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR, (adev->vce.gpu_addr >> 8)); offset = AMDGPU_VCE_FIRMWARE_OFFSET; size = VCE_V3_0_FW_SIZE; WREG32(mmVCE_VCPU_CACHE_OFFSET0, offset & 0x7fffffff); @@ -576,6 +585,11 @@ static int vce_v3_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry) { DRM_DEBUG("IH: VCE\n"); + + WREG32_P(mmVCE_SYS_INT_STATUS, + VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK, + ~VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK); + switch (entry->src_data) { case 0: amdgpu_fence_process(&adev->vce.ring[0]); diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h index 144f50acc971..c89dc777768f 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h +++ b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h @@ -16,6 +16,8 @@ TRACE_EVENT(amd_sched_job, TP_ARGS(sched_job), TP_STRUCT__entry( __field(struct amd_sched_entity *, entity) + __field(struct amd_sched_job *, sched_job) + __field(struct fence *, fence) __field(const char *, name) __field(u32, job_count) __field(int, hw_job_count) @@ -23,16 +25,32 @@ TRACE_EVENT(amd_sched_job, TP_fast_assign( __entry->entity = sched_job->s_entity; + __entry->sched_job = sched_job; + __entry->fence = &sched_job->s_fence->base; __entry->name = sched_job->sched->name; __entry->job_count = kfifo_len( &sched_job->s_entity->job_queue) / sizeof(sched_job); __entry->hw_job_count = 
atomic_read( &sched_job->sched->hw_rq_count); ), - TP_printk("entity=%p, ring=%s, job count:%u, hw job count:%d", - __entry->entity, __entry->name, __entry->job_count, - __entry->hw_job_count) + TP_printk("entity=%p, sched job=%p, fence=%p, ring=%s, job count:%u, hw job count:%d", + __entry->entity, __entry->sched_job, __entry->fence, __entry->name, + __entry->job_count, __entry->hw_job_count) ); + +TRACE_EVENT(amd_sched_process_job, + TP_PROTO(struct amd_sched_fence *fence), + TP_ARGS(fence), + TP_STRUCT__entry( + __field(struct fence *, fence) + ), + + TP_fast_assign( + __entry->fence = &fence->base; + ), + TP_printk("fence=%p signaled", __entry->fence) +); + #endif /* This part must be outside protection */ diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index 89619a5a4289..3a4820e863ec 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c @@ -30,10 +30,12 @@ #define CREATE_TRACE_POINTS #include "gpu_sched_trace.h" -static struct amd_sched_job * -amd_sched_entity_pop_job(struct amd_sched_entity *entity); +static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity); static void amd_sched_wakeup(struct amd_gpu_scheduler *sched); +struct kmem_cache *sched_fence_slab; +atomic_t sched_fence_slab_ref = ATOMIC_INIT(0); + /* Initialize a given run queue struct */ static void amd_sched_rq_init(struct amd_sched_rq *rq) { @@ -61,36 +63,36 @@ static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq, } /** - * Select next job from a specified run queue with round robin policy. - * Return NULL if nothing available. + * Select an entity which could provide a job to run + * + * @rq The run queue to check. + * + * Try to find a ready entity, returns NULL if none found. */ -static struct amd_sched_job * -amd_sched_rq_select_job(struct amd_sched_rq *rq) +static struct amd_sched_entity * +amd_sched_rq_select_entity(struct amd_sched_rq *rq) { struct amd_sched_entity *entity; - struct amd_sched_job *sched_job; spin_lock(&rq->lock); entity = rq->current_entity; if (entity) { list_for_each_entry_continue(entity, &rq->entities, list) { - sched_job = amd_sched_entity_pop_job(entity); - if (sched_job) { + if (amd_sched_entity_is_ready(entity)) { rq->current_entity = entity; spin_unlock(&rq->lock); - return sched_job; + return entity; } } } list_for_each_entry(entity, &rq->entities, list) { - sched_job = amd_sched_entity_pop_job(entity); - if (sched_job) { + if (amd_sched_entity_is_ready(entity)) { rq->current_entity = entity; spin_unlock(&rq->lock); - return sched_job; + return entity; } if (entity == rq->current_entity) @@ -174,6 +176,24 @@ static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity) } /** + * Check if entity is ready + * + * @entity The pointer to a valid scheduler entity + * + * Return true if entity could provide a job. 
+ */ +static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity) +{ + if (kfifo_is_empty(&entity->job_queue)) + return false; + + if (ACCESS_ONCE(entity->dependency)) + return false; + + return true; +} + +/** * Destroy a context entity * * @sched Pointer to scheduler instance @@ -208,32 +228,53 @@ static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb) amd_sched_wakeup(entity->sched); } +static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity) +{ + struct amd_gpu_scheduler *sched = entity->sched; + struct fence * fence = entity->dependency; + struct amd_sched_fence *s_fence; + + if (fence->context == entity->fence_context) { + /* We can ignore fences from ourself */ + fence_put(entity->dependency); + return false; + } + + s_fence = to_amd_sched_fence(fence); + if (s_fence && s_fence->sched == sched) { + /* Fence is from the same scheduler */ + if (test_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &fence->flags)) { + /* Ignore it when it is already scheduled */ + fence_put(entity->dependency); + return false; + } + + /* Wait for fence to be scheduled */ + entity->cb.func = amd_sched_entity_wakeup; + list_add_tail(&entity->cb.node, &s_fence->scheduled_cb); + return true; + } + + if (!fence_add_callback(entity->dependency, &entity->cb, + amd_sched_entity_wakeup)) + return true; + + fence_put(entity->dependency); + return false; +} + static struct amd_sched_job * amd_sched_entity_pop_job(struct amd_sched_entity *entity) { struct amd_gpu_scheduler *sched = entity->sched; struct amd_sched_job *sched_job; - if (ACCESS_ONCE(entity->dependency)) - return NULL; - if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job))) return NULL; - while ((entity->dependency = sched->ops->dependency(sched_job))) { - - if (entity->dependency->context == entity->fence_context) { - /* We can ignore fences from ourself */ - fence_put(entity->dependency); - continue; - } - - if (fence_add_callback(entity->dependency, &entity->cb, - amd_sched_entity_wakeup)) - fence_put(entity->dependency); - else + while ((entity->dependency = sched->ops->dependency(sched_job))) + if (amd_sched_entity_add_dependency_cb(entity)) return NULL; - } return sched_job; } @@ -247,6 +288,7 @@ amd_sched_entity_pop_job(struct amd_sched_entity *entity) */ static bool amd_sched_entity_in(struct amd_sched_job *sched_job) { + struct amd_gpu_scheduler *sched = sched_job->sched; struct amd_sched_entity *entity = sched_job->s_entity; bool added, first = false; @@ -261,7 +303,7 @@ static bool amd_sched_entity_in(struct amd_sched_job *sched_job) /* first job wakes up scheduler */ if (first) - amd_sched_wakeup(sched_job->sched); + amd_sched_wakeup(sched); return added; } @@ -273,22 +315,13 @@ static bool amd_sched_entity_in(struct amd_sched_job *sched_job) * * Returns 0 for success, negative error code otherwise. 
*/ -int amd_sched_entity_push_job(struct amd_sched_job *sched_job) +void amd_sched_entity_push_job(struct amd_sched_job *sched_job) { struct amd_sched_entity *entity = sched_job->s_entity; - struct amd_sched_fence *fence = amd_sched_fence_create( - entity, sched_job->owner); - - if (!fence) - return -ENOMEM; - - fence_get(&fence->base); - sched_job->s_fence = fence; + trace_amd_sched_job(sched_job); wait_event(entity->sched->job_scheduled, amd_sched_entity_in(sched_job)); - trace_amd_sched_job(sched_job); - return 0; } /** @@ -310,22 +343,22 @@ static void amd_sched_wakeup(struct amd_gpu_scheduler *sched) } /** - * Select next to run + * Select next entity to process */ -static struct amd_sched_job * -amd_sched_select_job(struct amd_gpu_scheduler *sched) +static struct amd_sched_entity * +amd_sched_select_entity(struct amd_gpu_scheduler *sched) { - struct amd_sched_job *sched_job; + struct amd_sched_entity *entity; if (!amd_sched_ready(sched)) return NULL; /* Kernel run queue has higher priority than normal run queue*/ - sched_job = amd_sched_rq_select_job(&sched->kernel_rq); - if (sched_job == NULL) - sched_job = amd_sched_rq_select_job(&sched->sched_rq); + entity = amd_sched_rq_select_entity(&sched->kernel_rq); + if (entity == NULL) + entity = amd_sched_rq_select_entity(&sched->sched_rq); - return sched_job; + return entity; } static void amd_sched_process_job(struct fence *f, struct fence_cb *cb) @@ -343,6 +376,7 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb) list_del_init(&s_fence->list); spin_unlock_irqrestore(&sched->fence_list_lock, flags); } + trace_amd_sched_process_job(s_fence); fence_put(&s_fence->base); wake_up_interruptible(&sched->wake_up_worker); } @@ -386,13 +420,16 @@ static int amd_sched_main(void *param) unsigned long flags; wait_event_interruptible(sched->wake_up_worker, - kthread_should_stop() || - (sched_job = amd_sched_select_job(sched))); + (entity = amd_sched_select_entity(sched)) || + kthread_should_stop()); + if (!entity) + continue; + + sched_job = amd_sched_entity_pop_job(entity); if (!sched_job) continue; - entity = sched_job->s_entity; s_fence = sched_job->s_fence; if (sched->timeout != MAX_SCHEDULE_TIMEOUT) { @@ -405,6 +442,7 @@ static int amd_sched_main(void *param) atomic_inc(&sched->hw_rq_count); fence = sched->ops->run_job(sched_job); + amd_sched_fence_scheduled(s_fence); if (fence) { r = fence_add_callback(fence, &s_fence->cb, amd_sched_process_job); @@ -450,6 +488,13 @@ int amd_sched_init(struct amd_gpu_scheduler *sched, init_waitqueue_head(&sched->wake_up_worker); init_waitqueue_head(&sched->job_scheduled); atomic_set(&sched->hw_rq_count, 0); + if (atomic_inc_return(&sched_fence_slab_ref) == 1) { + sched_fence_slab = kmem_cache_create( + "amd_sched_fence", sizeof(struct amd_sched_fence), 0, + SLAB_HWCACHE_ALIGN, NULL); + if (!sched_fence_slab) + return -ENOMEM; + } /* Each scheduler will run on a seperate kernel thread */ sched->thread = kthread_run(amd_sched_main, sched, sched->name); @@ -470,4 +515,6 @@ void amd_sched_fini(struct amd_gpu_scheduler *sched) { if (sched->thread) kthread_stop(sched->thread); + if (atomic_dec_and_test(&sched_fence_slab_ref)) + kmem_cache_destroy(sched_fence_slab); } diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h index 929e9aced041..a0f0ae53aacd 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h @@ -27,9 +27,14 @@ #include <linux/kfifo.h> #include <linux/fence.h> +#define 
AMD_SCHED_FENCE_SCHEDULED_BIT FENCE_FLAG_USER_BITS + struct amd_gpu_scheduler; struct amd_sched_rq; +extern struct kmem_cache *sched_fence_slab; +extern atomic_t sched_fence_slab_ref; + /** * A scheduler entity is a wrapper around a job queue or a group * of other entities. Entities take turns emitting jobs from their @@ -65,6 +70,7 @@ struct amd_sched_rq { struct amd_sched_fence { struct fence base; struct fence_cb cb; + struct list_head scheduled_cb; struct amd_gpu_scheduler *sched; spinlock_t lock; void *owner; @@ -76,7 +82,6 @@ struct amd_sched_job { struct amd_gpu_scheduler *sched; struct amd_sched_entity *s_entity; struct amd_sched_fence *s_fence; - void *owner; }; extern const struct fence_ops amd_sched_fence_ops; @@ -128,11 +133,11 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched, uint32_t jobs); void amd_sched_entity_fini(struct amd_gpu_scheduler *sched, struct amd_sched_entity *entity); -int amd_sched_entity_push_job(struct amd_sched_job *sched_job); +void amd_sched_entity_push_job(struct amd_sched_job *sched_job); struct amd_sched_fence *amd_sched_fence_create( struct amd_sched_entity *s_entity, void *owner); +void amd_sched_fence_scheduled(struct amd_sched_fence *fence); void amd_sched_fence_signal(struct amd_sched_fence *fence); - #endif diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c index d802638094f4..87c78eecea64 100644 --- a/drivers/gpu/drm/amd/scheduler/sched_fence.c +++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c @@ -32,9 +32,11 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity struct amd_sched_fence *fence = NULL; unsigned seq; - fence = kzalloc(sizeof(struct amd_sched_fence), GFP_KERNEL); + fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL); if (fence == NULL) return NULL; + + INIT_LIST_HEAD(&fence->scheduled_cb); fence->owner = owner; fence->sched = s_entity->sched; spin_lock_init(&fence->lock); @@ -55,6 +57,17 @@ void amd_sched_fence_signal(struct amd_sched_fence *fence) FENCE_TRACE(&fence->base, "was already signaled\n"); } +void amd_sched_fence_scheduled(struct amd_sched_fence *s_fence) +{ + struct fence_cb *cur, *tmp; + + set_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &s_fence->base.flags); + list_for_each_entry_safe(cur, tmp, &s_fence->scheduled_cb, node) { + list_del_init(&cur->node); + cur->func(&s_fence->base, cur); + } +} + static const char *amd_sched_fence_get_driver_name(struct fence *fence) { return "amd_sched"; @@ -71,11 +84,17 @@ static bool amd_sched_fence_enable_signaling(struct fence *f) return true; } +static void amd_sched_fence_release(struct fence *f) +{ + struct amd_sched_fence *fence = to_amd_sched_fence(f); + kmem_cache_free(sched_fence_slab, fence); +} + const struct fence_ops amd_sched_fence_ops = { .get_driver_name = amd_sched_fence_get_driver_name, .get_timeline_name = amd_sched_fence_get_timeline_name, .enable_signaling = amd_sched_fence_enable_signaling, .signaled = NULL, .wait = fence_default_wait, - .release = NULL, + .release = amd_sched_fence_release, }; diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 7bb3845d9974..aeee083c7f95 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -1432,6 +1432,45 @@ static int atomic_set_prop(struct drm_atomic_state *state, return ret; } +/** + * drm_atomic_update_old_fb -- Unset old_fb pointers and set plane->fb pointers. + * + * @dev: drm device to check. + * @plane_mask: plane mask for planes that were updated. 
+ * @ret: return value, can be -EDEADLK for a retry. + * + * Before doing an update plane->old_fb is set to plane->fb, + * but before dropping the locks old_fb needs to be set to NULL + * and plane->fb updated. This is a common operation for each + * atomic update, so this call is split off as a helper. + */ +void drm_atomic_clean_old_fb(struct drm_device *dev, + unsigned plane_mask, + int ret) +{ + struct drm_plane *plane; + + /* if succeeded, fixup legacy plane crtc/fb ptrs before dropping + * locks (ie. while it is still safe to deref plane->state). We + * need to do this here because the driver entry points cannot + * distinguish between legacy and atomic ioctls. + */ + drm_for_each_plane_mask(plane, dev, plane_mask) { + if (ret == 0) { + struct drm_framebuffer *new_fb = plane->state->fb; + if (new_fb) + drm_framebuffer_reference(new_fb); + plane->fb = new_fb; + plane->crtc = plane->state->crtc; + + if (plane->old_fb) + drm_framebuffer_unreference(plane->old_fb); + } + plane->old_fb = NULL; + } +} +EXPORT_SYMBOL(drm_atomic_clean_old_fb); + int drm_mode_atomic_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { @@ -1446,7 +1485,7 @@ int drm_mode_atomic_ioctl(struct drm_device *dev, struct drm_plane *plane; struct drm_crtc *crtc; struct drm_crtc_state *crtc_state; - unsigned plane_mask = 0; + unsigned plane_mask; int ret = 0; unsigned int i, j; @@ -1486,6 +1525,7 @@ int drm_mode_atomic_ioctl(struct drm_device *dev, state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET); retry: + plane_mask = 0; copied_objs = 0; copied_props = 0; @@ -1576,24 +1616,7 @@ retry: } out: - /* if succeeded, fixup legacy plane crtc/fb ptrs before dropping - * locks (ie. while it is still safe to deref plane->state). We - * need to do this here because the driver entry points cannot - * distinguish between legacy and atomic ioctls. 
- */ - drm_for_each_plane_mask(plane, dev, plane_mask) { - if (ret == 0) { - struct drm_framebuffer *new_fb = plane->state->fb; - if (new_fb) - drm_framebuffer_reference(new_fb); - plane->fb = new_fb; - plane->crtc = plane->state->crtc; - - if (plane->old_fb) - drm_framebuffer_unreference(plane->old_fb); - } - plane->old_fb = NULL; - } + drm_atomic_clean_old_fb(dev, plane_mask, ret); if (ret && arg->flags & DRM_MODE_PAGE_FLIP_EVENT) { /* diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 0c6f62168776..e5aec45bf985 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -210,6 +210,14 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx) return -EINVAL; } + if (!drm_encoder_crtc_ok(new_encoder, connector_state->crtc)) { + DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] incompatible with [CRTC:%d]\n", + new_encoder->base.id, + new_encoder->name, + connector_state->crtc->base.id); + return -EINVAL; + } + if (new_encoder == connector_state->best_encoder) { DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d]\n", connector->base.id, @@ -1553,6 +1561,9 @@ retry: goto fail; } + if (plane_state->crtc && (plane == plane->crtc->cursor)) + plane_state->state->legacy_cursor_update = true; + ret = __drm_atomic_helper_disable_plane(plane, plane_state); if (ret != 0) goto fail; @@ -1605,9 +1616,6 @@ int __drm_atomic_helper_disable_plane(struct drm_plane *plane, plane_state->src_h = 0; plane_state->src_w = 0; - if (plane->crtc && (plane == plane->crtc->cursor)) - plane_state->state->legacy_cursor_update = true; - return 0; } @@ -1741,6 +1749,7 @@ int __drm_atomic_helper_set_config(struct drm_mode_set *set, struct drm_crtc_state *crtc_state; struct drm_plane_state *primary_state; struct drm_crtc *crtc = set->crtc; + int hdisplay, vdisplay; int ret; crtc_state = drm_atomic_get_crtc_state(state, crtc); @@ -1783,19 +1792,21 @@ int __drm_atomic_helper_set_config(struct drm_mode_set *set, if (ret != 0) return ret; + drm_crtc_get_hv_timing(set->mode, &hdisplay, &vdisplay); + drm_atomic_set_fb_for_plane(primary_state, set->fb); primary_state->crtc_x = 0; primary_state->crtc_y = 0; - primary_state->crtc_h = set->mode->vdisplay; - primary_state->crtc_w = set->mode->hdisplay; + primary_state->crtc_h = vdisplay; + primary_state->crtc_w = hdisplay; primary_state->src_x = set->x << 16; primary_state->src_y = set->y << 16; if (primary_state->rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270))) { - primary_state->src_h = set->mode->hdisplay << 16; - primary_state->src_w = set->mode->vdisplay << 16; + primary_state->src_h = hdisplay << 16; + primary_state->src_w = vdisplay << 16; } else { - primary_state->src_h = set->mode->vdisplay << 16; - primary_state->src_w = set->mode->hdisplay << 16; + primary_state->src_h = vdisplay << 16; + primary_state->src_w = hdisplay << 16; } commit: diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 9362609df38a..7dd6728dd092 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -160,6 +160,11 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data, goto out_unlock; } + if (!file_priv->allowed_master) { + ret = drm_new_set_master(dev, file_priv); + goto out_unlock; + } + file_priv->minor->master = drm_master_get(file_priv->master); file_priv->is_master = 1; if (dev->driver->master_set) { diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index e673c13c7391..69cbab5e5c81 100644 --- 
a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -342,6 +342,7 @@ static int restore_fbdev_mode_atomic(struct drm_fb_helper *fb_helper) struct drm_plane *plane; struct drm_atomic_state *state; int i, ret; + unsigned plane_mask; state = drm_atomic_state_alloc(dev); if (!state) @@ -349,11 +350,10 @@ static int restore_fbdev_mode_atomic(struct drm_fb_helper *fb_helper) state->acquire_ctx = dev->mode_config.acquire_ctx; retry: + plane_mask = 0; drm_for_each_plane(plane, dev) { struct drm_plane_state *plane_state; - plane->old_fb = plane->fb; - plane_state = drm_atomic_get_plane_state(state, plane); if (IS_ERR(plane_state)) { ret = PTR_ERR(plane_state); @@ -362,6 +362,9 @@ retry: plane_state->rotation = BIT(DRM_ROTATE_0); + plane->old_fb = plane->fb; + plane_mask |= 1 << drm_plane_index(plane); + /* disable non-primary: */ if (plane->type == DRM_PLANE_TYPE_PRIMARY) continue; @@ -382,19 +385,7 @@ retry: ret = drm_atomic_commit(state); fail: - drm_for_each_plane(plane, dev) { - if (ret == 0) { - struct drm_framebuffer *new_fb = plane->state->fb; - if (new_fb) - drm_framebuffer_reference(new_fb); - plane->fb = new_fb; - plane->crtc = plane->state->crtc; - - if (plane->old_fb) - drm_framebuffer_unreference(plane->old_fb); - } - plane->old_fb = NULL; - } + drm_atomic_clean_old_fb(dev, plane_mask, ret); if (ret == -EDEADLK) goto backoff; @@ -1236,7 +1227,9 @@ static int pan_display_atomic(struct fb_var_screeninfo *var, struct drm_fb_helper *fb_helper = info->par; struct drm_device *dev = fb_helper->dev; struct drm_atomic_state *state; + struct drm_plane *plane; int i, ret; + unsigned plane_mask; state = drm_atomic_state_alloc(dev); if (!state) @@ -1244,19 +1237,22 @@ static int pan_display_atomic(struct fb_var_screeninfo *var, state->acquire_ctx = dev->mode_config.acquire_ctx; retry: + plane_mask = 0; for(i = 0; i < fb_helper->crtc_count; i++) { struct drm_mode_set *mode_set; mode_set = &fb_helper->crtc_info[i].mode_set; - mode_set->crtc->primary->old_fb = mode_set->crtc->primary->fb; - mode_set->x = var->xoffset; mode_set->y = var->yoffset; ret = __drm_atomic_helper_set_config(mode_set, state); if (ret != 0) goto fail; + + plane = mode_set->crtc->primary; + plane_mask |= drm_plane_index(plane); + plane->old_fb = plane->fb; } ret = drm_atomic_commit(state); @@ -1268,26 +1264,7 @@ retry: fail: - for(i = 0; i < fb_helper->crtc_count; i++) { - struct drm_mode_set *mode_set; - struct drm_plane *plane; - - mode_set = &fb_helper->crtc_info[i].mode_set; - plane = mode_set->crtc->primary; - - if (ret == 0) { - struct drm_framebuffer *new_fb = plane->state->fb; - - if (new_fb) - drm_framebuffer_reference(new_fb); - plane->fb = new_fb; - plane->crtc = plane->state->crtc; - - if (plane->old_fb) - drm_framebuffer_unreference(plane->old_fb); - } - plane->old_fb = NULL; - } + drm_atomic_clean_old_fb(dev, plane_mask, ret); if (ret == -EDEADLK) goto backoff; diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index c59ce4d0ef75..6b5625e66119 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c @@ -126,6 +126,60 @@ static int drm_cpu_valid(void) } /** + * drm_new_set_master - Allocate a new master object and become master for the + * associated master realm. + * + * @dev: The associated device. + * @fpriv: File private identifying the client. + * + * This function must be called with dev::struct_mutex held. + * Returns negative error code on failure. Zero on success. 
+ */ +int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv) +{ + struct drm_master *old_master; + int ret; + + lockdep_assert_held_once(&dev->master_mutex); + + /* create a new master */ + fpriv->minor->master = drm_master_create(fpriv->minor); + if (!fpriv->minor->master) + return -ENOMEM; + + /* take another reference for the copy in the local file priv */ + old_master = fpriv->master; + fpriv->master = drm_master_get(fpriv->minor->master); + + if (dev->driver->master_create) { + ret = dev->driver->master_create(dev, fpriv->master); + if (ret) + goto out_err; + } + if (dev->driver->master_set) { + ret = dev->driver->master_set(dev, fpriv, true); + if (ret) + goto out_err; + } + + fpriv->is_master = 1; + fpriv->allowed_master = 1; + fpriv->authenticated = 1; + if (old_master) + drm_master_put(&old_master); + + return 0; + +out_err: + /* drop both references and restore old master on failure */ + drm_master_put(&fpriv->minor->master); + drm_master_put(&fpriv->master); + fpriv->master = old_master; + + return ret; +} + +/** * Called whenever a process opens /dev/drm. * * \param filp file pointer. @@ -189,35 +243,9 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor) mutex_lock(&dev->master_mutex); if (drm_is_primary_client(priv) && !priv->minor->master) { /* create a new master */ - priv->minor->master = drm_master_create(priv->minor); - if (!priv->minor->master) { - ret = -ENOMEM; + ret = drm_new_set_master(dev, priv); + if (ret) goto out_close; - } - - priv->is_master = 1; - /* take another reference for the copy in the local file priv */ - priv->master = drm_master_get(priv->minor->master); - priv->authenticated = 1; - - if (dev->driver->master_create) { - ret = dev->driver->master_create(dev, priv->master); - if (ret) { - /* drop both references if this fails */ - drm_master_put(&priv->minor->master); - drm_master_put(&priv->master); - goto out_close; - } - } - if (dev->driver->master_set) { - ret = dev->driver->master_set(dev, priv, true); - if (ret) { - /* drop both references if this fails */ - drm_master_put(&priv->minor->master); - drm_master_put(&priv->master); - goto out_close; - } - } } else if (drm_is_primary_client(priv)) { /* get a reference to the master */ priv->master = drm_master_get(priv->minor->master); diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index 2151ea551d3b..607f493ae801 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -980,7 +980,8 @@ static void send_vblank_event(struct drm_device *dev, struct drm_pending_vblank_event *e, unsigned long seq, struct timeval *now) { - WARN_ON_SMP(!spin_is_locked(&dev->event_lock)); + assert_spin_locked(&dev->event_lock); + e->event.sequence = seq; e->event.tv_sec = now->tv_sec; e->event.tv_usec = now->tv_usec; @@ -993,6 +994,57 @@ static void send_vblank_event(struct drm_device *dev, } /** + * drm_arm_vblank_event - arm vblank event after pageflip + * @dev: DRM device + * @pipe: CRTC index + * @e: the event to prepare to send + * + * A lot of drivers need to generate vblank events for the very next vblank + * interrupt. For example when the page flip interrupt happens when the page + * flip gets armed, but not when it actually executes within the next vblank + * period. This helper function implements exactly the required vblank arming + * behaviour. + * + * Caller must hold event lock. Caller must also hold a vblank reference for + * the event @e, which will be dropped when the next vblank arrives. 
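[Editor's sketch, not part of the patch: a hypothetical flip-completion interrupt handler using the new arming helper. Only the drm_* calls are real; crtc and e stand for the driver's CRTC and its pending event, per the constraints stated in the kernel-doc above.]

	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	if (drm_crtc_vblank_get(crtc) == 0)
		/* reference is consumed when the event fires at the next vblank */
		drm_crtc_arm_vblank_event(crtc, e);
	else
		/* vblank unavailable: deliver the event immediately instead */
		drm_crtc_send_vblank_event(crtc, e);
	spin_unlock_irqrestore(&dev->event_lock, flags);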
+ * + * This is the legacy version of drm_crtc_arm_vblank_event(). + */ +void drm_arm_vblank_event(struct drm_device *dev, unsigned int pipe, + struct drm_pending_vblank_event *e) +{ + assert_spin_locked(&dev->event_lock); + + e->pipe = pipe; + e->event.sequence = drm_vblank_count(dev, pipe); + list_add_tail(&e->base.link, &dev->vblank_event_list); +} +EXPORT_SYMBOL(drm_arm_vblank_event); + +/** + * drm_crtc_arm_vblank_event - arm vblank event after pageflip + * @crtc: the source CRTC of the vblank event + * @e: the event to send + * + * A lot of drivers need to generate vblank events for the very next vblank + * interrupt. For example when the page flip interrupt happens when the page + * flip gets armed, but not when it actually executes within the next vblank + * period. This helper function implements exactly the required vblank arming + * behaviour. + * + * Caller must hold event lock. Caller must also hold a vblank reference for + * the event @e, which will be dropped when the next vblank arrives. + * + * This is the native KMS version of drm_arm_vblank_event(). + */ +void drm_crtc_arm_vblank_event(struct drm_crtc *crtc, + struct drm_pending_vblank_event *e) +{ + drm_arm_vblank_event(crtc->dev, drm_crtc_index(crtc), e); +} +EXPORT_SYMBOL(drm_crtc_arm_vblank_event); + +/** * drm_send_vblank_event - helper to send vblank event after pageflip * @dev: DRM device * @pipe: CRTC index diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index a3b22bdacd44..8aab974b0564 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -2734,6 +2734,8 @@ static const char *power_domain_str(enum intel_display_power_domain domain) return "AUX_C"; case POWER_DOMAIN_AUX_D: return "AUX_D"; + case POWER_DOMAIN_GMBUS: + return "GMBUS"; case POWER_DOMAIN_INIT: return "INIT"; default: diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 8afda459a26e..a01e51581c4c 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -199,6 +199,7 @@ enum intel_display_power_domain { POWER_DOMAIN_AUX_B, POWER_DOMAIN_AUX_C, POWER_DOMAIN_AUX_D, + POWER_DOMAIN_GMBUS, POWER_DOMAIN_INIT, POWER_DOMAIN_NUM, @@ -351,6 +352,8 @@ enum intel_dpll_id { /* hsw/bdw */ DPLL_ID_WRPLL1 = 0, DPLL_ID_WRPLL2 = 1, + DPLL_ID_SPLL = 2, + /* skl */ DPLL_ID_SKL_DPLL1 = 0, DPLL_ID_SKL_DPLL2 = 1, @@ -367,6 +370,7 @@ struct intel_dpll_hw_state { /* hsw, bdw */ uint32_t wrpll; + uint32_t spll; /* skl */ /* @@ -2648,6 +2652,7 @@ struct i915_params { int enable_cmd_parser; /* leave bools at the end to not create holes */ bool enable_hangcheck; + bool fastboot; bool prefault_disable; bool load_detect_test; bool reset; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 5cf4a1998273..32e6aade6223 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1210,8 +1210,16 @@ int __i915_wait_request(struct drm_i915_gem_request *req, if (i915_gem_request_completed(req, true)) return 0; - timeout_expire = timeout ? 
- jiffies + nsecs_to_jiffies_timeout((u64)*timeout) : 0; + timeout_expire = 0; + if (timeout) { + if (WARN_ON(*timeout < 0)) + return -EINVAL; + + if (*timeout == 0) + return -ETIME; + + timeout_expire = jiffies + nsecs_to_jiffies_timeout(*timeout); + } if (INTEL_INFO(dev_priv)->gen >= 6) gen6_rps_boost(dev_priv, rps, req->emitted_jiffies); @@ -3809,6 +3817,7 @@ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_caching *args = data; struct drm_i915_gem_object *obj; enum i915_cache_level level; @@ -3837,9 +3846,11 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, return -EINVAL; } + intel_runtime_pm_get(dev_priv); + ret = i915_mutex_lock_interruptible(dev); if (ret) - return ret; + goto rpm_put; obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); if (&obj->base == NULL) { @@ -3852,6 +3863,9 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, drm_gem_object_unreference(&obj->base); unlock: mutex_unlock(&dev->struct_mutex); +rpm_put: + intel_runtime_pm_put(dev_priv); + return ret; } diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c index 40a10b25956c..f010391b87f5 100644 --- a/drivers/gpu/drm/i915/i915_gem_fence.c +++ b/drivers/gpu/drm/i915/i915_gem_fence.c @@ -642,11 +642,10 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) } /* check for L-shaped memory aka modified enhanced addressing */ - if (IS_GEN4(dev)) { - uint32_t ddc2 = I915_READ(DCC2); - - if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE)) - dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES; + if (IS_GEN4(dev) && + !(I915_READ(DCC2) & DCC2_MODIFIED_ENHANCED_DISABLE)) { + swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; + swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; } if (dcc == 0xffffffff) { @@ -675,16 +674,35 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) * matching, which was the case for the swizzling required in * the table above, or from the 1-ch value being less than * the minimum size of a rank. + * + * Reports indicate that the swizzling actually + * varies depending upon page placement inside the + * channels, i.e. we see swizzled pages where the + * banks of memory are paired and unswizzled on the + * uneven portion, so leave that as unknown. */ - if (I915_READ16(C0DRB3) != I915_READ16(C1DRB3)) { - swizzle_x = I915_BIT_6_SWIZZLE_NONE; - swizzle_y = I915_BIT_6_SWIZZLE_NONE; - } else { + if (I915_READ16(C0DRB3) == I915_READ16(C1DRB3)) { swizzle_x = I915_BIT_6_SWIZZLE_9_10; swizzle_y = I915_BIT_6_SWIZZLE_9; } } + if (swizzle_x == I915_BIT_6_SWIZZLE_UNKNOWN || + swizzle_y == I915_BIT_6_SWIZZLE_UNKNOWN) { + /* Userspace likes to explode if it sees unknown swizzling, + * so lie. We will finish the lie when reporting through + * the get-tiling-ioctl by reporting the physical swizzle + * mode as unknown instead. + * + * As we don't strictly know what the swizzling is, it may be + * bit17 dependent, and so we need to also prevent the pages + * from being moved. 
+ */ + dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES; + swizzle_x = I915_BIT_6_SWIZZLE_NONE; + swizzle_y = I915_BIT_6_SWIZZLE_NONE; + } + dev_priv->mm.bit_6_swizzle_x = swizzle_x; dev_priv->mm.bit_6_swizzle_y = swizzle_y; } diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c index 96bb23865eac..4be13a5eb932 100644 --- a/drivers/gpu/drm/i915/i915_params.c +++ b/drivers/gpu/drm/i915/i915_params.c @@ -40,6 +40,7 @@ struct i915_params i915 __read_mostly = { .preliminary_hw_support = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT), .disable_power_well = -1, .enable_ips = 1, + .fastboot = 0, .prefault_disable = 0, .load_detect_test = 0, .reset = true, @@ -133,6 +134,10 @@ MODULE_PARM_DESC(disable_power_well, module_param_named_unsafe(enable_ips, i915.enable_ips, int, 0600); MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)"); +module_param_named(fastboot, i915.fastboot, bool, 0600); +MODULE_PARM_DESC(fastboot, + "Try to skip unnecessary mode sets at boot time (default: false)"); + module_param_named_unsafe(prefault_disable, i915.prefault_disable, bool, 0600); MODULE_PARM_DESC(prefault_disable, "Disable page prefaulting for pread/pwrite/reloc (default:false). " diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index b84aaa0bb48a..6a2c76e367a5 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -138,18 +138,6 @@ static void hsw_crt_get_config(struct intel_encoder *encoder, pipe_config->base.adjusted_mode.flags |= intel_crt_get_flags(encoder); } -static void hsw_crt_pre_enable(struct intel_encoder *encoder) -{ - struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - - WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL already enabled\n"); - I915_WRITE(SPLL_CTL, - SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC); - POSTING_READ(SPLL_CTL); - udelay(20); -} - /* Note: The caller is required to filter out dpms modes not supported by the * platform. 
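[Editor's note: the hsw_crt_pre_enable/post_disable hooks removed above are superseded by the shared-DPLL handling added in the intel_ddi.c hunks below. Consolidated from those hunks, the CRT encoder now only declares the desired PLL state instead of programming SPLL_CTL itself:]

	/* From the new intel_crt_compute_config() path in this diff. */
	pipe_config->ddi_pll_sel = PORT_CLK_SEL_SPLL;
	pipe_config->port_clock = 135000 * 2;
	pipe_config->dpll_hw_state.wrpll = 0;
	pipe_config->dpll_hw_state.spll =
		SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC;
	/* hsw_ddi_spll_enable() later writes this value into SPLL_CTL. */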
*/ static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode) @@ -216,19 +204,6 @@ static void pch_post_disable_crt(struct intel_encoder *encoder) intel_disable_crt(encoder); } -static void hsw_crt_post_disable(struct intel_encoder *encoder) -{ - struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t val; - - DRM_DEBUG_KMS("Disabling SPLL\n"); - val = I915_READ(SPLL_CTL); - WARN_ON(!(val & SPLL_PLL_ENABLE)); - I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE); - POSTING_READ(SPLL_CTL); -} - static void intel_enable_crt(struct intel_encoder *encoder) { struct intel_crt *crt = intel_encoder_to_crt(encoder); @@ -280,6 +255,10 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder, if (HAS_DDI(dev)) { pipe_config->ddi_pll_sel = PORT_CLK_SEL_SPLL; pipe_config->port_clock = 135000 * 2; + + pipe_config->dpll_hw_state.wrpll = 0; + pipe_config->dpll_hw_state.spll = + SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC; } return true; @@ -860,8 +839,6 @@ void intel_crt_init(struct drm_device *dev) if (HAS_DDI(dev)) { crt->base.get_config = hsw_crt_get_config; crt->base.get_hw_state = intel_ddi_get_hw_state; - crt->base.pre_enable = hsw_crt_pre_enable; - crt->base.post_disable = hsw_crt_post_disable; } else { crt->base.get_config = intel_crt_get_config; crt->base.get_hw_state = intel_crt_get_hw_state; diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index b25e99a432fb..a6752a61d99f 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -1286,6 +1286,18 @@ hsw_ddi_pll_select(struct intel_crtc *intel_crtc, } crtc_state->ddi_pll_sel = PORT_CLK_SEL_WRPLL(pll->id); + } else if (crtc_state->ddi_pll_sel == PORT_CLK_SEL_SPLL) { + struct drm_atomic_state *state = crtc_state->base.state; + struct intel_shared_dpll_config *spll = + &intel_atomic_get_shared_dpll_state(state)[DPLL_ID_SPLL]; + + if (spll->crtc_mask && + WARN_ON(spll->hw_state.spll != crtc_state->dpll_hw_state.spll)) + return false; + + crtc_state->shared_dpll = DPLL_ID_SPLL; + spll->hw_state.spll = crtc_state->dpll_hw_state.spll; + spll->crtc_mask |= 1 << intel_crtc->pipe; } return true; @@ -2437,7 +2449,7 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder) } } -static void hsw_ddi_pll_enable(struct drm_i915_private *dev_priv, +static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { I915_WRITE(WRPLL_CTL(pll->id), pll->config.hw_state.wrpll); @@ -2445,9 +2457,17 @@ static void hsw_ddi_pll_enable(struct drm_i915_private *dev_priv, udelay(20); } -static void hsw_ddi_pll_disable(struct drm_i915_private *dev_priv, +static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { + I915_WRITE(SPLL_CTL, pll->config.hw_state.spll); + POSTING_READ(SPLL_CTL); + udelay(20); +} + +static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll) +{ uint32_t val; val = I915_READ(WRPLL_CTL(pll->id)); @@ -2455,9 +2475,19 @@ static void hsw_ddi_pll_disable(struct drm_i915_private *dev_priv, POSTING_READ(WRPLL_CTL(pll->id)); } -static bool hsw_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, - struct intel_shared_dpll *pll, - struct intel_dpll_hw_state *hw_state) +static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll) +{ + uint32_t val; + + val = I915_READ(SPLL_CTL); + I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE); + 
POSTING_READ(SPLL_CTL); +} + +static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll, + struct intel_dpll_hw_state *hw_state) { uint32_t val; @@ -2470,25 +2500,50 @@ static bool hsw_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, return val & WRPLL_PLL_ENABLE; } +static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll, + struct intel_dpll_hw_state *hw_state) +{ + uint32_t val; + + if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS)) + return false; + + val = I915_READ(SPLL_CTL); + hw_state->spll = val; + + return val & SPLL_PLL_ENABLE; +} + + static const char * const hsw_ddi_pll_names[] = { "WRPLL 1", "WRPLL 2", + "SPLL" }; static void hsw_shared_dplls_init(struct drm_i915_private *dev_priv) { int i; - dev_priv->num_shared_dpll = 2; + dev_priv->num_shared_dpll = 3; - for (i = 0; i < dev_priv->num_shared_dpll; i++) { + for (i = 0; i < 2; i++) { dev_priv->shared_dplls[i].id = i; dev_priv->shared_dplls[i].name = hsw_ddi_pll_names[i]; - dev_priv->shared_dplls[i].disable = hsw_ddi_pll_disable; - dev_priv->shared_dplls[i].enable = hsw_ddi_pll_enable; + dev_priv->shared_dplls[i].disable = hsw_ddi_wrpll_disable; + dev_priv->shared_dplls[i].enable = hsw_ddi_wrpll_enable; dev_priv->shared_dplls[i].get_hw_state = - hsw_ddi_pll_get_hw_state; + hsw_ddi_wrpll_get_hw_state; } + + /* SPLL is special, but needs to be initialized anyway.. */ + dev_priv->shared_dplls[i].id = i; + dev_priv->shared_dplls[i].name = hsw_ddi_pll_names[i]; + dev_priv->shared_dplls[i].disable = hsw_ddi_spll_disable; + dev_priv->shared_dplls[i].enable = hsw_ddi_spll_enable; + dev_priv->shared_dplls[i].get_hw_state = hsw_ddi_spll_get_hw_state; + } static const char * const skl_ddi_pll_names[] = { diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index f62ffc04c21d..22e86d2e408d 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2646,11 +2646,13 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, return; valid_fb: - plane_state->src_x = plane_state->src_y = 0; + plane_state->src_x = 0; + plane_state->src_y = 0; plane_state->src_w = fb->width << 16; plane_state->src_h = fb->height << 16; - plane_state->crtc_x = plane_state->src_y = 0; + plane_state->crtc_x = 0; + plane_state->crtc_y = 0; plane_state->crtc_w = fb->width; plane_state->crtc_h = fb->height; @@ -4237,6 +4239,7 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc, struct intel_shared_dpll *pll; struct intel_shared_dpll_config *shared_dpll; enum intel_dpll_id i; + int max = dev_priv->num_shared_dpll; shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state); @@ -4271,9 +4274,11 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc, WARN_ON(shared_dpll[i].crtc_mask); goto found; - } + } else if (INTEL_INFO(dev_priv)->gen < 9 && HAS_DDI(dev_priv)) + /* Do not consider SPLL */ + max = 2; - for (i = 0; i < dev_priv->num_shared_dpll; i++) { + for (i = 0; i < max; i++) { pll = &dev_priv->shared_dplls[i]; /* Only want to check enabled timings first */ @@ -5189,11 +5194,31 @@ static enum intel_display_power_domain port_to_power_domain(enum port port) case PORT_E: return POWER_DOMAIN_PORT_DDI_E_2_LANES; default: - WARN_ON_ONCE(1); + MISSING_CASE(port); return POWER_DOMAIN_PORT_OTHER; } } +static enum intel_display_power_domain port_to_aux_power_domain(enum port port) +{ + switch (port) { + case PORT_A: + return 
POWER_DOMAIN_AUX_A; + case PORT_B: + return POWER_DOMAIN_AUX_B; + case PORT_C: + return POWER_DOMAIN_AUX_C; + case PORT_D: + return POWER_DOMAIN_AUX_D; + case PORT_E: + /* FIXME: Check VBT for actual wiring of PORT E */ + return POWER_DOMAIN_AUX_D; + default: + MISSING_CASE(port); + return POWER_DOMAIN_AUX_A; + } +} + #define for_each_power_domain(domain, mask) \ for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \ if ((1 << (domain)) & (mask)) @@ -5225,6 +5250,36 @@ intel_display_port_power_domain(struct intel_encoder *intel_encoder) } } +enum intel_display_power_domain +intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder) +{ + struct drm_device *dev = intel_encoder->base.dev; + struct intel_digital_port *intel_dig_port; + + switch (intel_encoder->type) { + case INTEL_OUTPUT_UNKNOWN: + case INTEL_OUTPUT_HDMI: + /* + * Only DDI platforms should ever use these output types. + * We can get here after the HDMI detect code has already set + * the type of the shared encoder. Since we can't be sure + * what's the status of the given connectors, play safe and + * run the DP detection too. + */ + WARN_ON_ONCE(!HAS_DDI(dev)); + case INTEL_OUTPUT_DISPLAYPORT: + case INTEL_OUTPUT_EDP: + intel_dig_port = enc_to_dig_port(&intel_encoder->base); + return port_to_aux_power_domain(intel_dig_port->port); + case INTEL_OUTPUT_DP_MST: + intel_dig_port = enc_to_mst(&intel_encoder->base)->primary; + return port_to_aux_power_domain(intel_dig_port->port); + default: + MISSING_CASE(intel_encoder->type); + return POWER_DOMAIN_AUX_A; + } +} + static unsigned long get_crtc_power_domains(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; @@ -9723,6 +9778,8 @@ static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv, case PORT_CLK_SEL_WRPLL2: pipe_config->shared_dpll = DPLL_ID_WRPLL2; break; + case PORT_CLK_SEL_SPLL: + pipe_config->shared_dpll = DPLL_ID_SPLL; } } @@ -12003,9 +12060,10 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc, pipe_config->dpll_hw_state.cfgcr1, pipe_config->dpll_hw_state.cfgcr2); } else if (HAS_DDI(dev)) { - DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: wrpll: 0x%x\n", + DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: wrpll: 0x%x spll: 0x%x\n", pipe_config->ddi_pll_sel, - pipe_config->dpll_hw_state.wrpll); + pipe_config->dpll_hw_state.wrpll, + pipe_config->dpll_hw_state.spll); } else { DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, " "fp0: 0x%x, fp1: 0x%x\n", @@ -12452,7 +12510,6 @@ intel_pipe_config_compare(struct drm_device *dev, if (INTEL_INFO(dev)->gen < 8) { PIPE_CONF_CHECK_M_N(dp_m_n); - PIPE_CONF_CHECK_I(has_drrs); if (current_config->has_drrs) PIPE_CONF_CHECK_M_N(dp_m2_n2); } else @@ -12528,6 +12585,7 @@ intel_pipe_config_compare(struct drm_device *dev, PIPE_CONF_CHECK_X(dpll_hw_state.fp0); PIPE_CONF_CHECK_X(dpll_hw_state.fp1); PIPE_CONF_CHECK_X(dpll_hw_state.wrpll); + PIPE_CONF_CHECK_X(dpll_hw_state.spll); PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1); PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1); PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2); @@ -13032,6 +13090,9 @@ static int intel_atomic_check(struct drm_device *dev, struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state); + memset(&to_intel_crtc(crtc)->atomic, 0, + sizeof(struct intel_crtc_atomic_commit)); + /* Catch I915_MODE_FLAG_INHERITED */ if (crtc_state->mode.private_flags != crtc->state->mode.private_flags) crtc_state->mode_changed = true; @@ -13056,7 +13117,8 @@ static int intel_atomic_check(struct drm_device *dev, if (ret) return ret; - if 
(intel_pipe_config_compare(state->dev, + if (i915.fastboot && + intel_pipe_config_compare(state->dev, to_intel_crtc_state(crtc->state), pipe_config, true)) { crtc_state->mode_changed = false; @@ -14364,16 +14426,17 @@ static int intel_framebuffer_init(struct drm_device *dev, static struct drm_framebuffer * intel_user_framebuffer_create(struct drm_device *dev, struct drm_file *filp, - struct drm_mode_fb_cmd2 *mode_cmd) + struct drm_mode_fb_cmd2 *user_mode_cmd) { struct drm_i915_gem_object *obj; + struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd; obj = to_intel_bo(drm_gem_object_lookup(dev, filp, - mode_cmd->handles[0])); + mode_cmd.handles[0])); if (&obj->base == NULL) return ERR_PTR(-ENOENT); - return intel_framebuffer_create(dev, mode_cmd, obj); + return intel_framebuffer_create(dev, &mode_cmd, obj); } #ifndef CONFIG_DRM_FBDEV_EMULATION @@ -14705,6 +14768,9 @@ static struct intel_quirk intel_quirks[] = { /* Apple Macbook 2,1 (Core 2 T7400) */ { 0x27a2, 0x8086, 0x7270, quirk_backlight_present }, + /* Apple Macbook 4,1 */ + { 0x2a02, 0x106b, 0x00a1, quirk_backlight_present }, + /* Toshiba CB35 Chromebook (Celeron 2955U) */ { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present }, diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 09bdd94ca3ba..78b8ec84d576 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -277,7 +277,7 @@ static void pps_lock(struct intel_dp *intel_dp) * See vlv_power_sequencer_reset() why we need * a power domain reference here. */ - power_domain = intel_display_port_power_domain(encoder); + power_domain = intel_display_port_aux_power_domain(encoder); intel_display_power_get(dev_priv, power_domain); mutex_lock(&dev_priv->pps_mutex); @@ -293,7 +293,7 @@ static void pps_unlock(struct intel_dp *intel_dp) mutex_unlock(&dev_priv->pps_mutex); - power_domain = intel_display_port_power_domain(encoder); + power_domain = intel_display_port_aux_power_domain(encoder); intel_display_power_put(dev_priv, power_domain); } @@ -816,8 +816,6 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, intel_dp_check_edp(intel_dp); - intel_aux_display_runtime_get(dev_priv); - /* Try to wait for any previous AUX channel activity */ for (try = 0; try < 3; try++) { status = I915_READ_NOTRACE(ch_ctl); @@ -926,7 +924,6 @@ done: ret = recv_bytes; out: pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE); - intel_aux_display_runtime_put(dev_priv); if (vdd) edp_panel_vdd_off(intel_dp, false); @@ -1784,7 +1781,7 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp) if (edp_have_panel_vdd(intel_dp)) return need_to_disable; - power_domain = intel_display_port_power_domain(intel_encoder); + power_domain = intel_display_port_aux_power_domain(intel_encoder); intel_display_power_get(dev_priv, power_domain); DRM_DEBUG_KMS("Turning eDP port %c VDD on\n", @@ -1874,7 +1871,7 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp) if ((pp & POWER_TARGET_ON) == 0) intel_dp->last_power_cycle = jiffies; - power_domain = intel_display_port_power_domain(intel_encoder); + power_domain = intel_display_port_aux_power_domain(intel_encoder); intel_display_power_put(dev_priv, power_domain); } @@ -2025,7 +2022,7 @@ static void edp_panel_off(struct intel_dp *intel_dp) wait_panel_off(intel_dp); /* We got a reference when we enabled the VDD. 
*/ - power_domain = intel_display_port_power_domain(intel_encoder); + power_domain = intel_display_port_aux_power_domain(intel_encoder); intel_display_power_put(dev_priv, power_domain); } @@ -4765,26 +4762,6 @@ intel_dp_unset_edid(struct intel_dp *intel_dp) intel_dp->has_audio = false; } -static enum intel_display_power_domain -intel_dp_power_get(struct intel_dp *dp) -{ - struct intel_encoder *encoder = &dp_to_dig_port(dp)->base; - enum intel_display_power_domain power_domain; - - power_domain = intel_display_port_power_domain(encoder); - intel_display_power_get(to_i915(encoder->base.dev), power_domain); - - return power_domain; -} - -static void -intel_dp_power_put(struct intel_dp *dp, - enum intel_display_power_domain power_domain) -{ - struct intel_encoder *encoder = &dp_to_dig_port(dp)->base; - intel_display_power_put(to_i915(encoder->base.dev), power_domain); -} - static enum drm_connector_status intel_dp_detect(struct drm_connector *connector, bool force) { @@ -4808,7 +4785,8 @@ intel_dp_detect(struct drm_connector *connector, bool force) return connector_status_disconnected; } - power_domain = intel_dp_power_get(intel_dp); + power_domain = intel_display_port_aux_power_domain(intel_encoder); + intel_display_power_get(to_i915(dev), power_domain); /* Can't disconnect eDP, but you can close the lid... */ if (is_edp(intel_dp)) @@ -4853,7 +4831,7 @@ intel_dp_detect(struct drm_connector *connector, bool force) } out: - intel_dp_power_put(intel_dp, power_domain); + intel_display_power_put(to_i915(dev), power_domain); return status; } @@ -4862,6 +4840,7 @@ intel_dp_force(struct drm_connector *connector) { struct intel_dp *intel_dp = intel_attached_dp(connector); struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base; + struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev); enum intel_display_power_domain power_domain; DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", @@ -4871,11 +4850,12 @@ intel_dp_force(struct drm_connector *connector) if (connector->status != connector_status_connected) return; - power_domain = intel_dp_power_get(intel_dp); + power_domain = intel_display_port_aux_power_domain(intel_encoder); + intel_display_power_get(dev_priv, power_domain); intel_dp_set_edid(intel_dp); - intel_dp_power_put(intel_dp, power_domain); + intel_display_power_put(dev_priv, power_domain); if (intel_encoder->type != INTEL_OUTPUT_EDP) intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; @@ -5091,7 +5071,7 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp) * indefinitely. */ DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n"); - power_domain = intel_display_port_power_domain(&intel_dig_port->base); + power_domain = intel_display_port_aux_power_domain(&intel_dig_port->base); intel_display_power_get(dev_priv, power_domain); edp_panel_vdd_schedule_off(intel_dp); @@ -5153,7 +5133,8 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) enum intel_display_power_domain power_domain; enum irqreturn ret = IRQ_NONE; - if (intel_dig_port->base.type != INTEL_OUTPUT_EDP) + if (intel_dig_port->base.type != INTEL_OUTPUT_EDP && + intel_dig_port->base.type != INTEL_OUTPUT_HDMI) intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT; if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) { @@ -5172,7 +5153,7 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) port_name(intel_dig_port->port), long_hpd ? 
"long" : "short"); - power_domain = intel_display_port_power_domain(intel_encoder); + power_domain = intel_display_port_aux_power_domain(intel_encoder); intel_display_power_get(dev_priv, power_domain); if (long_hpd) { diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 0598932ce623..f2a1142bff34 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -1169,6 +1169,8 @@ void hsw_enable_ips(struct intel_crtc *crtc); void hsw_disable_ips(struct intel_crtc *crtc); enum intel_display_power_domain intel_display_port_power_domain(struct intel_encoder *intel_encoder); +enum intel_display_power_domain +intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder); void intel_mode_from_pipe_config(struct drm_display_mode *mode, struct intel_crtc_state *pipe_config); void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc); @@ -1377,8 +1379,6 @@ void intel_display_power_get(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain); void intel_display_power_put(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain); -void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv); -void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv); void intel_runtime_pm_get(struct drm_i915_private *dev_priv); void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv); void intel_runtime_pm_put(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 9eafa191cee2..81cdd9ff3892 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -1335,21 +1335,17 @@ intel_hdmi_set_edid(struct drm_connector *connector, bool force) { struct drm_i915_private *dev_priv = to_i915(connector->dev); struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); - struct intel_encoder *intel_encoder = - &hdmi_to_dig_port(intel_hdmi)->base; - enum intel_display_power_domain power_domain; struct edid *edid = NULL; bool connected = false; - power_domain = intel_display_port_power_domain(intel_encoder); - intel_display_power_get(dev_priv, power_domain); + intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); if (force) edid = drm_get_edid(connector, intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus)); - intel_display_power_put(dev_priv, power_domain); + intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS); to_intel_connector(connector)->detect_edid = edid; if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) { @@ -1383,6 +1379,8 @@ intel_hdmi_detect(struct drm_connector *connector, bool force) DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); + intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); + while (!live_status && --retry) { live_status = intel_digital_port_connected(dev_priv, hdmi_to_dig_port(intel_hdmi)); @@ -1402,6 +1400,8 @@ intel_hdmi_detect(struct drm_connector *connector, bool force) } else status = connector_status_disconnected; + intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS); + return status; } diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index 1369fc41d039..8324654037b6 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -483,7 +483,7 @@ gmbus_xfer(struct i2c_adapter *adapter, int i = 0, inc, try = 0; int ret = 0; - intel_aux_display_runtime_get(dev_priv); + intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); mutex_lock(&dev_priv->gmbus_mutex); if 
(bus->force_bit) { @@ -595,7 +595,9 @@ timeout: out: mutex_unlock(&dev_priv->gmbus_mutex); - intel_aux_display_runtime_put(dev_priv); + + intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS); + return ret; } diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index d52a15df6917..071a76b9ac52 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -4449,7 +4449,7 @@ static void gen6_set_rps(struct drm_device *dev, u8 val) POSTING_READ(GEN6_RPNSWREQ); dev_priv->rps.cur_freq = val; - trace_intel_gpu_freq_change(val * 50); + trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val)); } static void valleyview_set_rps(struct drm_device *dev, u8 val) @@ -7255,7 +7255,8 @@ static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val) int intel_gpu_freq(struct drm_i915_private *dev_priv, int val) { if (IS_GEN9(dev_priv->dev)) - return (val * GT_FREQUENCY_MULTIPLIER) / GEN9_FREQ_SCALER; + return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER, + GEN9_FREQ_SCALER); else if (IS_CHERRYVIEW(dev_priv->dev)) return chv_gpu_freq(dev_priv, val); else if (IS_VALLEYVIEW(dev_priv->dev)) @@ -7267,13 +7268,14 @@ int intel_gpu_freq(struct drm_i915_private *dev_priv, int val) int intel_freq_opcode(struct drm_i915_private *dev_priv, int val) { if (IS_GEN9(dev_priv->dev)) - return (val * GEN9_FREQ_SCALER) / GT_FREQUENCY_MULTIPLIER; + return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER, + GT_FREQUENCY_MULTIPLIER); else if (IS_CHERRYVIEW(dev_priv->dev)) return chv_freq_opcode(dev_priv, val); else if (IS_VALLEYVIEW(dev_priv->dev)) return byt_freq_opcode(dev_priv, val); else - return val / GT_FREQUENCY_MULTIPLIER; + return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER); } struct request_boost { diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index d89c1d0aa1b7..7e23d65c9b24 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -362,6 +362,7 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv, BIT(POWER_DOMAIN_AUX_C) | \ BIT(POWER_DOMAIN_AUDIO) | \ BIT(POWER_DOMAIN_VGA) | \ + BIT(POWER_DOMAIN_GMBUS) | \ BIT(POWER_DOMAIN_INIT)) #define BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS ( \ BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ @@ -1483,6 +1484,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, BIT(POWER_DOMAIN_AUX_B) | \ BIT(POWER_DOMAIN_AUX_C) | \ BIT(POWER_DOMAIN_AUX_D) | \ + BIT(POWER_DOMAIN_GMBUS) | \ BIT(POWER_DOMAIN_INIT)) #define HSW_DISPLAY_POWER_DOMAINS ( \ (POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) | \ @@ -1845,6 +1847,8 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv) i915.disable_power_well = sanitize_disable_power_well_option(dev_priv, i915.disable_power_well); + BUILD_BUG_ON(POWER_DOMAIN_NUM > 31); + mutex_init(&power_domains->lock); /* @@ -2064,36 +2068,6 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv) } /** - * intel_aux_display_runtime_get - grab an auxiliary power domain reference - * @dev_priv: i915 device instance - * - * This function grabs a power domain reference for the auxiliary power domain - * (for access to the GMBUS and DP AUX blocks) and ensures that it and all its - * parents are powered up. Therefore users should only grab a reference to the - * innermost power domain they need. - * - * Any power domain reference obtained by this function must have a symmetric - * call to intel_aux_display_runtime_put() to release the reference again. 
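[Editor's note: the thin runtime-pm wrappers deleted here are replaced by an explicit display power-domain reference. Every GMBUS user touched in this diff (gmbus_xfer, intel_hdmi_set_edid, intel_hdmi_detect) now follows the same bracketed pattern:]

	/* take a GMBUS power-domain reference around the bus access */
	intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
	/* ... GMBUS register access / I2C transfer ... */
	intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);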
- */ -void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv) -{ - intel_runtime_pm_get(dev_priv); -} - -/** - * intel_aux_display_runtime_put - release an auxiliary power domain reference - * @dev_priv: i915 device instance - * - * This function drops the auxiliary power domain reference obtained by - * intel_aux_display_runtime_get() and might power down the corresponding - * hardware block right away if this is the last reference. - */ -void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv) -{ - intel_runtime_pm_put(dev_priv); -} - -/** * intel_runtime_pm_get - grab a runtime pm reference * @dev_priv: i915 device instance * diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c index 64f16ea779ef..7b990b4e96d2 100644 --- a/drivers/gpu/drm/imx/imx-drm-core.c +++ b/drivers/gpu/drm/imx/imx-drm-core.c @@ -63,8 +63,7 @@ static void imx_drm_driver_lastclose(struct drm_device *drm) #if IS_ENABLED(CONFIG_DRM_IMX_FB_HELPER) struct imx_drm_device *imxdrm = drm->dev_private; - if (imxdrm->fbhelper) - drm_fbdev_cma_restore_mode(imxdrm->fbhelper); + drm_fbdev_cma_restore_mode(imxdrm->fbhelper); #endif } @@ -340,7 +339,7 @@ err_kms: * imx_drm_add_crtc - add a new crtc */ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc, - struct imx_drm_crtc **new_crtc, + struct imx_drm_crtc **new_crtc, struct drm_plane *primary_plane, const struct imx_drm_crtc_helper_funcs *imx_drm_helper_funcs, struct device_node *port) { @@ -379,7 +378,7 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc, drm_crtc_helper_add(crtc, imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs); - drm_crtc_init(drm, crtc, + drm_crtc_init_with_planes(drm, crtc, primary_plane, NULL, imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs); return 0; diff --git a/drivers/gpu/drm/imx/imx-drm.h b/drivers/gpu/drm/imx/imx-drm.h index 28e776d8d9d2..83284b4d4be1 100644 --- a/drivers/gpu/drm/imx/imx-drm.h +++ b/drivers/gpu/drm/imx/imx-drm.h @@ -9,6 +9,7 @@ struct drm_display_mode; struct drm_encoder; struct drm_fbdev_cma; struct drm_framebuffer; +struct drm_plane; struct imx_drm_crtc; struct platform_device; @@ -24,7 +25,7 @@ struct imx_drm_crtc_helper_funcs { }; int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc, - struct imx_drm_crtc **new_crtc, + struct imx_drm_crtc **new_crtc, struct drm_plane *primary_plane, const struct imx_drm_crtc_helper_funcs *imx_helper_funcs, struct device_node *port); int imx_drm_remove_crtc(struct imx_drm_crtc *); diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c index e671ad369416..f9597146dc67 100644 --- a/drivers/gpu/drm/imx/imx-tve.c +++ b/drivers/gpu/drm/imx/imx-tve.c @@ -721,6 +721,7 @@ static const struct of_device_id imx_tve_dt_ids[] = { { .compatible = "fsl,imx53-tve", }, { /* sentinel */ } }; +MODULE_DEVICE_TABLE(of, imx_tve_dt_ids); static struct platform_driver imx_tve_driver = { .probe = imx_tve_probe, diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c index 7bc8301fafff..4ab841eebee1 100644 --- a/drivers/gpu/drm/imx/ipuv3-crtc.c +++ b/drivers/gpu/drm/imx/ipuv3-crtc.c @@ -212,7 +212,8 @@ static void ipu_crtc_handle_pageflip(struct ipu_crtc *ipu_crtc) spin_lock_irqsave(&drm->event_lock, flags); if (ipu_crtc->page_flip_event) - drm_send_vblank_event(drm, -1, ipu_crtc->page_flip_event); + drm_crtc_send_vblank_event(&ipu_crtc->base, + ipu_crtc->page_flip_event); ipu_crtc->page_flip_event = NULL; imx_drm_crtc_vblank_put(ipu_crtc->imx_crtc); 
spin_unlock_irqrestore(&drm->event_lock, flags); @@ -349,7 +350,6 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc, struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent); int dp = -EINVAL; int ret; - int id; ret = ipu_get_resources(ipu_crtc, pdata); if (ret) { @@ -358,18 +358,23 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc, return ret; } + if (pdata->dp >= 0) + dp = IPU_DP_FLOW_SYNC_BG; + ipu_crtc->plane[0] = ipu_plane_init(drm, ipu, pdata->dma[0], dp, 0, + DRM_PLANE_TYPE_PRIMARY); + if (IS_ERR(ipu_crtc->plane[0])) { + ret = PTR_ERR(ipu_crtc->plane[0]); + goto err_put_resources; + } + ret = imx_drm_add_crtc(drm, &ipu_crtc->base, &ipu_crtc->imx_crtc, - &ipu_crtc_helper_funcs, ipu_crtc->dev->of_node); + &ipu_crtc->plane[0]->base, &ipu_crtc_helper_funcs, + ipu_crtc->dev->of_node); if (ret) { dev_err(ipu_crtc->dev, "adding crtc failed with %d.\n", ret); goto err_put_resources; } - if (pdata->dp >= 0) - dp = IPU_DP_FLOW_SYNC_BG; - id = imx_drm_crtc_id(ipu_crtc->imx_crtc); - ipu_crtc->plane[0] = ipu_plane_init(ipu_crtc->base.dev, ipu, - pdata->dma[0], dp, BIT(id), true); ret = ipu_plane_get_resources(ipu_crtc->plane[0]); if (ret) { dev_err(ipu_crtc->dev, "getting plane 0 resources failed with %d.\n", @@ -379,10 +384,10 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc, /* If this crtc is using the DP, add an overlay plane */ if (pdata->dp >= 0 && pdata->dma[1] > 0) { - ipu_crtc->plane[1] = ipu_plane_init(ipu_crtc->base.dev, ipu, - pdata->dma[1], - IPU_DP_FLOW_SYNC_FG, - BIT(id), false); + ipu_crtc->plane[1] = ipu_plane_init(drm, ipu, pdata->dma[1], + IPU_DP_FLOW_SYNC_FG, + drm_crtc_mask(&ipu_crtc->base), + DRM_PLANE_TYPE_OVERLAY); if (IS_ERR(ipu_crtc->plane[1])) ipu_crtc->plane[1] = NULL; } @@ -407,28 +412,6 @@ err_put_resources: return ret; } -static struct device_node *ipu_drm_get_port_by_id(struct device_node *parent, - int port_id) -{ - struct device_node *port; - int id, ret; - - port = of_get_child_by_name(parent, "port"); - while (port) { - ret = of_property_read_u32(port, "reg", &id); - if (!ret && id == port_id) - return port; - - do { - port = of_get_next_child(parent, port); - if (!port) - return NULL; - } while (of_node_cmp(port->name, "port")); - } - - return NULL; -} - static int ipu_drm_bind(struct device *dev, struct device *master, void *data) { struct ipu_client_platformdata *pdata = dev->platform_data; @@ -470,23 +453,11 @@ static const struct component_ops ipu_crtc_ops = { static int ipu_drm_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct ipu_client_platformdata *pdata = dev->platform_data; int ret; if (!dev->platform_data) return -EINVAL; - if (!dev->of_node) { - /* Associate crtc device with the corresponding DI port node */ - dev->of_node = ipu_drm_get_port_by_id(dev->parent->of_node, - pdata->di + 2); - if (!dev->of_node) { - dev_err(dev, "missing port@%d node in %s\n", - pdata->di + 2, dev->parent->of_node->full_name); - return -ENODEV; - } - } - ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32)); if (ret) return ret; diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c index 575f4c84388f..e2ff410bab74 100644 --- a/drivers/gpu/drm/imx/ipuv3-plane.c +++ b/drivers/gpu/drm/imx/ipuv3-plane.c @@ -381,7 +381,7 @@ static struct drm_plane_funcs ipu_plane_funcs = { struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu, int dma, int dp, unsigned int possible_crtcs, - bool priv) + enum drm_plane_type type) { struct ipu_plane *ipu_plane; int ret; @@ -399,10 +399,9 @@ struct 
ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu, ipu_plane->dma = dma; ipu_plane->dp_flow = dp; - ret = drm_plane_init(dev, &ipu_plane->base, possible_crtcs, - &ipu_plane_funcs, ipu_plane_formats, - ARRAY_SIZE(ipu_plane_formats), - priv); + ret = drm_universal_plane_init(dev, &ipu_plane->base, possible_crtcs, + &ipu_plane_funcs, ipu_plane_formats, + ARRAY_SIZE(ipu_plane_formats), type); if (ret) { DRM_ERROR("failed to initialize plane\n"); kfree(ipu_plane); diff --git a/drivers/gpu/drm/imx/ipuv3-plane.h b/drivers/gpu/drm/imx/ipuv3-plane.h index 9b5eff18f5b8..3a443b413c60 100644 --- a/drivers/gpu/drm/imx/ipuv3-plane.h +++ b/drivers/gpu/drm/imx/ipuv3-plane.h @@ -34,7 +34,7 @@ struct ipu_plane { struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu, int dma, int dp, unsigned int possible_crtcs, - bool priv); + enum drm_plane_type type); /* Init IDMAC, DMFC, DP */ int ipu_plane_mode_set(struct ipu_plane *plane, struct drm_crtc *crtc, diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c index b4deb9cf9d71..2e9b9f1b5cd2 100644 --- a/drivers/gpu/drm/imx/parallel-display.c +++ b/drivers/gpu/drm/imx/parallel-display.c @@ -54,7 +54,11 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector) if (imxpd->panel && imxpd->panel->funcs && imxpd->panel->funcs->get_modes) { + struct drm_display_info *di = &connector->display_info; + num_modes = imxpd->panel->funcs->get_modes(imxpd->panel); + if (!imxpd->bus_format && di->num_bus_formats) + imxpd->bus_format = di->bus_formats[0]; if (num_modes > 0) return num_modes; } diff --git a/drivers/gpu/drm/mgag200/mgag200_cursor.c b/drivers/gpu/drm/mgag200/mgag200_cursor.c index 4f2068fe5d88..a7bf6a90eae5 100644 --- a/drivers/gpu/drm/mgag200/mgag200_cursor.c +++ b/drivers/gpu/drm/mgag200/mgag200_cursor.c @@ -70,6 +70,11 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc, BUG_ON(pixels_2 != pixels_current && pixels_2 != pixels_prev); BUG_ON(pixels_current == pixels_prev); + if (!handle || !file_priv) { + mga_hide_cursor(mdev); + return 0; + } + obj = drm_gem_object_lookup(dev, file_priv, handle); if (!obj) return -ENOENT; @@ -88,12 +93,6 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc, goto out_unreserve1; } - if (!handle) { - mga_hide_cursor(mdev); - ret = 0; - goto out1; - } - /* Move cursor buffers into VRAM if they aren't already */ if (!pixels_1->pin_count) { ret = mgag200_bo_pin(pixels_1, TTM_PL_FLAG_VRAM, diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h index 28bc202f9753..40f845e31272 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h @@ -7,6 +7,7 @@ struct nvkm_instmem { const struct nvkm_instmem_func *func; struct nvkm_subdev subdev; + spinlock_t lock; struct list_head list; u32 reserved; diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c index 8b8332e46f24..d5e6938cc6bc 100644 --- a/drivers/gpu/drm/nouveau/nouveau_acpi.c +++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c @@ -367,6 +367,7 @@ static int nouveau_rom_call(acpi_handle rom_handle, uint8_t *bios, return -ENODEV; } obj = (union acpi_object *)buffer.pointer; + len = min(len, (int)obj->buffer.length); memcpy(bios+offset, obj->buffer.pointer, len); kfree(buffer.pointer); return len; diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index db6bc6760545..64c8d932d5f1 100644 
--- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -829,7 +829,6 @@ nouveau_finish_page_flip(struct nouveau_channel *chan, struct drm_device *dev = drm->dev; struct nouveau_page_flip_state *s; unsigned long flags; - int crtcid = -1; spin_lock_irqsave(&dev->event_lock, flags); @@ -841,15 +840,19 @@ nouveau_finish_page_flip(struct nouveau_channel *chan, s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head); if (s->event) { - /* Vblank timestamps/counts are only correct on >= NV-50 */ - if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) - crtcid = s->crtc; + if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) { + drm_arm_vblank_event(dev, s->crtc, s->event); + } else { + drm_send_vblank_event(dev, s->crtc, s->event); - drm_send_vblank_event(dev, crtcid, s->event); + /* Give up ownership of vblank for page-flipped crtc */ + drm_vblank_put(dev, s->crtc); + } + } + else { + /* Give up ownership of vblank for page-flipped crtc */ + drm_vblank_put(dev, s->crtc); } - - /* Give up ownership of vblank for page-flipped crtc */ - drm_vblank_put(dev, s->crtc); list_del(&s->head); if (ps) diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h index 3050042e6c6d..a02813e994ec 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.h +++ b/drivers/gpu/drm/nouveau/nouveau_drm.h @@ -39,6 +39,7 @@ #include <nvif/client.h> #include <nvif/device.h> +#include <nvif/ioctl.h> #include <drmP.h> @@ -65,9 +66,10 @@ struct nouveau_drm_tile { }; enum nouveau_drm_object_route { - NVDRM_OBJECT_NVIF = 0, + NVDRM_OBJECT_NVIF = NVIF_IOCTL_V0_OWNER_NVIF, NVDRM_OBJECT_USIF, NVDRM_OBJECT_ABI16, + NVDRM_OBJECT_ANY = NVIF_IOCTL_V0_OWNER_ANY, }; enum nouveau_drm_notify_route { diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.c b/drivers/gpu/drm/nouveau/nouveau_usif.c index 89dc4ce63490..6ae1b3494bcd 100644 --- a/drivers/gpu/drm/nouveau/nouveau_usif.c +++ b/drivers/gpu/drm/nouveau/nouveau_usif.c @@ -313,7 +313,10 @@ usif_ioctl(struct drm_file *filp, void __user *user, u32 argc) if (nvif_unpack(argv->v0, 0, 0, true)) { /* block access to objects not created via this interface */ owner = argv->v0.owner; - argv->v0.owner = NVDRM_OBJECT_USIF; + if (argv->v0.object == 0ULL) + argv->v0.owner = NVDRM_OBJECT_ANY; /* except client */ + else + argv->v0.owner = NVDRM_OBJECT_USIF; } else goto done; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c index e3c783d0e2ab..caf22b589edc 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c @@ -279,6 +279,12 @@ nvkm_device_pci_10de_0fe3[] = { }; static const struct nvkm_device_pci_vendor +nvkm_device_pci_10de_0fe4[] = { + { 0x144d, 0xc740, NULL, { .War00C800_0 = true } }, + {} +}; + +static const struct nvkm_device_pci_vendor nvkm_device_pci_10de_104b[] = { { 0x1043, 0x844c, "GeForce GT 625" }, { 0x1043, 0x846b, "GeForce GT 625" }, @@ -689,6 +695,12 @@ nvkm_device_pci_10de_1199[] = { }; static const struct nvkm_device_pci_vendor +nvkm_device_pci_10de_11e0[] = { + { 0x1558, 0x5106, NULL, { .War00C800_0 = true } }, + {} +}; + +static const struct nvkm_device_pci_vendor nvkm_device_pci_10de_11e3[] = { { 0x17aa, 0x3683, "GeForce GTX 760A" }, {} @@ -1370,7 +1382,7 @@ nvkm_device_pci_10de[] = { { 0x0fe1, "GeForce GT 730M" }, { 0x0fe2, "GeForce GT 745M" }, { 0x0fe3, "GeForce GT 745M", nvkm_device_pci_10de_0fe3 }, - { 0x0fe4, "GeForce GT 750M" }, + { 0x0fe4, "GeForce GT 750M", 
nvkm_device_pci_10de_0fe4 }, { 0x0fe9, "GeForce GT 750M" }, { 0x0fea, "GeForce GT 755M" }, { 0x0fec, "GeForce 710A" }, @@ -1485,7 +1497,7 @@ nvkm_device_pci_10de[] = { { 0x11c6, "GeForce GTX 650 Ti" }, { 0x11c8, "GeForce GTX 650" }, { 0x11cb, "GeForce GT 740" }, - { 0x11e0, "GeForce GTX 770M" }, + { 0x11e0, "GeForce GTX 770M", nvkm_device_pci_10de_11e0 }, { 0x11e1, "GeForce GTX 765M" }, { 0x11e2, "GeForce GTX 765M" }, { 0x11e3, "GeForce GTX 760M", nvkm_device_pci_10de_11e3 }, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c index b5b875928aba..74de7a96c22a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c @@ -207,6 +207,8 @@ gf117_grctx_generate_attrib(struct gf100_grctx *info) const u32 b = beta * gr->ppc_tpc_nr[gpc][ppc]; const u32 t = timeslice_mode; const u32 o = PPC_UNIT(gpc, ppc, 0); + if (!(gr->ppc_mask[gpc] & (1 << ppc))) + continue; mmio_skip(info, o + 0xc0, (t << 28) | (b << 16) | ++bo); mmio_wr32(info, o + 0xc0, (t << 28) | (b << 16) | --bo); bo += grctx->attrib_nr_max * gr->ppc_tpc_nr[gpc][ppc]; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc index 194afe910d21..7dacb3cc0668 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc @@ -52,10 +52,12 @@ mmio_list_base: #endif #ifdef INCLUDE_CODE +#define gpc_addr(reg,addr) /* +*/ imm32(reg,addr) /* +*/ or reg NV_PGRAPH_GPCX_GPCCS_MMIO_CTRL_BASE_ENABLE #define gpc_wr32(addr,reg) /* +*/ gpc_addr($r14,addr) /* */ mov b32 $r15 reg /* -*/ imm32($r14, addr) /* -*/ or $r14 NV_PGRAPH_GPCX_GPCCS_MMIO_CTRL_BASE_ENABLE /* */ call(nv_wr32) // reports an exception to the host @@ -161,7 +163,7 @@ init: #if NV_PGRAPH_GPCX_UNK__SIZE > 0 // figure out which, and how many, UNKs are actually present - imm32($r14, 0x500c30) + gpc_addr($r14, 0x500c30) clear b32 $r2 clear b32 $r3 clear b32 $r4 diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h index 64d07df4b8b1..bb820ff28621 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h @@ -314,7 +314,7 @@ uint32_t gf117_grgpc_code[] = { 0x03f01200, 0x0002d000, 0x17f104bd, - 0x10fe0542, + 0x10fe0545, 0x0007f100, 0x0003f007, 0xbd0000d0, @@ -338,184 +338,184 @@ uint32_t gf117_grgpc_code[] = { 0x02d00103, 0xf104bd00, 0xf00c30e7, - 0x24bd50e3, - 0x44bd34bd, -/* 0x0430: init_unk_loop */ - 0xb06821f4, - 0x0bf400f6, - 0x01f7f00f, - 0xfd04f2bb, - 0x30b6054f, -/* 0x0445: init_unk_next */ - 0x0120b601, - 0xb004e0b6, - 0x1bf40126, -/* 0x0451: init_unk_done */ - 0x070380e2, - 0xf1080480, - 0xf0010027, - 0x22cf0223, - 0x9534bd00, - 0x07f10825, - 0x03f0c000, - 0x0005d001, - 0x07f104bd, - 0x03f0c100, - 0x0005d001, - 0x0e9804bd, - 0x010f9800, - 0x015021f5, - 0xbb002fbb, - 0x0e98003f, - 0x020f9801, - 0x015021f5, - 0xfd050e98, - 0x2ebb00ef, - 0x003ebb00, - 0x98020e98, - 0x21f5030f, - 0x0e980150, - 0x00effd07, - 0xbb002ebb, - 0x35b6003e, - 0x0007f102, - 0x0103f0d3, - 0xbd0003d0, - 0x0825b604, - 0xb60635b6, - 0x30b60120, - 0x0824b601, - 0xb90834b6, - 0x21f5022f, - 0x2fbb02d3, - 0x003fbb00, - 0x010007f1, - 0xd00203f0, + 0xe5f050e3, + 0xbd24bd01, +/* 0x0433: init_unk_loop */ + 0xf444bd34, + 0xf6b06821, + 0x0f0bf400, + 0xbb01f7f0, + 0x4ffd04f2, + 0x0130b605, +/* 0x0448: init_unk_next */ + 0xb60120b6, + 
0x26b004e0, + 0xe21bf401, +/* 0x0454: init_unk_done */ + 0x80070380, + 0x27f10804, + 0x23f00100, + 0x0022cf02, + 0x259534bd, + 0x0007f108, + 0x0103f0c0, + 0xbd0005d0, + 0x0007f104, + 0x0103f0c1, + 0xbd0005d0, + 0x000e9804, + 0xf5010f98, + 0xbb015021, + 0x3fbb002f, + 0x010e9800, + 0xf5020f98, + 0x98015021, + 0xeffd050e, + 0x002ebb00, + 0x98003ebb, + 0x0f98020e, + 0x5021f503, + 0x070e9801, + 0xbb00effd, + 0x3ebb002e, + 0x0235b600, + 0xd30007f1, + 0xd00103f0, 0x04bd0003, - 0x29f024bd, - 0x0007f11f, - 0x0203f008, - 0xbd0002d0, -/* 0x0505: main */ - 0x0031f404, - 0xf00028f4, - 0x21f424d7, - 0xf401f439, - 0xf404e4b0, - 0x81fe1e18, - 0x0627f001, - 0x12fd20bd, - 0x01e4b604, - 0xfe051efd, - 0x21f50018, - 0x0ef405fa, -/* 0x0535: main_not_ctx_xfer */ - 0x10ef94d3, - 0xf501f5f0, - 0xf4037e21, -/* 0x0542: ih */ - 0x80f9c60e, - 0xf90188fe, - 0xf990f980, - 0xf9b0f9a0, - 0xf9e0f9d0, - 0xf104bdf0, - 0xf00200a7, - 0xaacf00a3, - 0x04abc400, - 0xf02c0bf4, - 0xe7f124d7, - 0xe3f01a00, - 0x00eecf00, - 0x1900f7f1, - 0xcf00f3f0, - 0x21f400ff, - 0x01e7f004, - 0x1d0007f1, - 0xd00003f0, - 0x04bd000e, -/* 0x0590: ih_no_fifo */ - 0x010007f1, - 0xd00003f0, - 0x04bd000a, - 0xe0fcf0fc, - 0xb0fcd0fc, - 0x90fca0fc, - 0x88fe80fc, - 0xf480fc00, - 0x01f80032, -/* 0x05b4: hub_barrier_done */ - 0x9801f7f0, - 0xfebb040e, - 0x02ffb904, - 0x9418e7f1, - 0xf440e3f0, - 0x00f89d21, -/* 0x05cc: ctx_redswitch */ - 0xf120f7f0, + 0xb60825b6, + 0x20b60635, + 0x0130b601, + 0xb60824b6, + 0x2fb90834, + 0xd321f502, + 0x002fbb02, + 0xf1003fbb, + 0xf0010007, + 0x03d00203, + 0xbd04bd00, + 0x1f29f024, + 0x080007f1, + 0xd00203f0, + 0x04bd0002, +/* 0x0508: main */ + 0xf40031f4, + 0xd7f00028, + 0x3921f424, + 0xb0f401f4, + 0x18f404e4, + 0x0181fe1e, + 0xbd0627f0, + 0x0412fd20, + 0xfd01e4b6, + 0x18fe051e, + 0xfd21f500, + 0xd30ef405, +/* 0x0538: main_not_ctx_xfer */ + 0xf010ef94, + 0x21f501f5, + 0x0ef4037e, +/* 0x0545: ih */ + 0xfe80f9c6, + 0x80f90188, + 0xa0f990f9, + 0xd0f9b0f9, + 0xf0f9e0f9, + 0xa7f104bd, + 0xa3f00200, + 0x00aacf00, + 0xf404abc4, + 0xd7f02c0b, + 0x00e7f124, + 0x00e3f01a, + 0xf100eecf, + 0xf01900f7, + 0xffcf00f3, + 0x0421f400, + 0xf101e7f0, + 0xf01d0007, + 0x0ed00003, +/* 0x0593: ih_no_fifo */ + 0xf104bd00, + 0xf0010007, + 0x0ad00003, + 0xfc04bd00, + 0xfce0fcf0, + 0xfcb0fcd0, + 0xfc90fca0, + 0x0088fe80, + 0x32f480fc, +/* 0x05b7: hub_barrier_done */ + 0xf001f800, + 0x0e9801f7, + 0x04febb04, + 0xf102ffb9, + 0xf09418e7, + 0x21f440e3, +/* 0x05cf: ctx_redswitch */ + 0xf000f89d, + 0x07f120f7, + 0x03f08500, + 0x000fd001, + 0xe7f004bd, +/* 0x05e1: ctx_redswitch_delay */ + 0x01e2b608, + 0xf1fd1bf4, + 0xf10800f5, + 0xf10200f5, 0xf0850007, 0x0fd00103, - 0xf004bd00, -/* 0x05de: ctx_redswitch_delay */ - 0xe2b608e7, - 0xfd1bf401, - 0x0800f5f1, - 0x0200f5f1, - 0x850007f1, - 0xd00103f0, - 0x04bd000f, -/* 0x05fa: ctx_xfer */ - 0x07f100f8, - 0x03f08100, - 0x000fd002, - 0x11f404bd, - 0xcc21f507, -/* 0x060d: ctx_xfer_not_load */ - 0x6a21f505, - 0xf124bd02, - 0xf047fc07, - 0x02d00203, - 0xf004bd00, - 0x20b6012c, - 0xfc07f103, - 0x0203f04a, - 0xbd0002d0, - 0x01acf004, - 0xf102a5f0, - 0xf00000b7, - 0x0c9850b3, - 0x0fc4b604, - 0x9800bcbb, - 0x0d98000c, - 0x00e7f001, - 0x016f21f5, - 0xf101acf0, - 0xf04000b7, - 0x0c9850b3, - 0x0fc4b604, - 0x9800bcbb, - 0x0d98010c, - 0x060f9802, - 0x0800e7f1, - 0x016f21f5, + 0xf804bd00, +/* 0x05fd: ctx_xfer */ + 0x0007f100, + 0x0203f081, + 0xbd000fd0, + 0x0711f404, + 0x05cf21f5, +/* 0x0610: ctx_xfer_not_load */ + 0x026a21f5, + 0x07f124bd, + 0x03f047fc, + 0x0002d002, + 0x2cf004bd, + 0x0320b601, + 0x4afc07f1, + 0xd00203f0, + 
0x04bd0002, 0xf001acf0, - 0xb7f104a5, - 0xb3f03000, + 0xb7f102a5, + 0xb3f00000, 0x040c9850, 0xbb0fc4b6, 0x0c9800bc, - 0x030d9802, - 0xf1080f98, - 0xf50200e7, - 0xf5016f21, - 0xf4025e21, - 0x12f40601, -/* 0x06a9: ctx_xfer_post */ - 0x7f21f507, -/* 0x06ad: ctx_xfer_done */ - 0xb421f502, - 0x0000f805, - 0x00000000, + 0x010d9800, + 0xf500e7f0, + 0xf0016f21, + 0xb7f101ac, + 0xb3f04000, + 0x040c9850, + 0xbb0fc4b6, + 0x0c9800bc, + 0x020d9801, + 0xf1060f98, + 0xf50800e7, + 0xf0016f21, + 0xa5f001ac, + 0x00b7f104, + 0x50b3f030, + 0xb6040c98, + 0xbcbb0fc4, + 0x020c9800, + 0x98030d98, + 0xe7f1080f, + 0x21f50200, + 0x21f5016f, + 0x01f4025e, + 0x0712f406, +/* 0x06ac: ctx_xfer_post */ + 0x027f21f5, +/* 0x06b0: ctx_xfer_done */ + 0x05b721f5, + 0x000000f8, 0x00000000, 0x00000000, 0x00000000, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h index 2f596433c222..911976d20940 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h @@ -314,7 +314,7 @@ uint32_t gk104_grgpc_code[] = { 0x03f01200, 0x0002d000, 0x17f104bd, - 0x10fe0542, + 0x10fe0545, 0x0007f100, 0x0003f007, 0xbd0000d0, @@ -338,184 +338,184 @@ uint32_t gk104_grgpc_code[] = { 0x02d00103, 0xf104bd00, 0xf00c30e7, - 0x24bd50e3, - 0x44bd34bd, -/* 0x0430: init_unk_loop */ - 0xb06821f4, - 0x0bf400f6, - 0x01f7f00f, - 0xfd04f2bb, - 0x30b6054f, -/* 0x0445: init_unk_next */ - 0x0120b601, - 0xb004e0b6, - 0x1bf40126, -/* 0x0451: init_unk_done */ - 0x070380e2, - 0xf1080480, - 0xf0010027, - 0x22cf0223, - 0x9534bd00, - 0x07f10825, - 0x03f0c000, - 0x0005d001, - 0x07f104bd, - 0x03f0c100, - 0x0005d001, - 0x0e9804bd, - 0x010f9800, - 0x015021f5, - 0xbb002fbb, - 0x0e98003f, - 0x020f9801, - 0x015021f5, - 0xfd050e98, - 0x2ebb00ef, - 0x003ebb00, - 0x98020e98, - 0x21f5030f, - 0x0e980150, - 0x00effd07, - 0xbb002ebb, - 0x35b6003e, - 0x0007f102, - 0x0103f0d3, - 0xbd0003d0, - 0x0825b604, - 0xb60635b6, - 0x30b60120, - 0x0824b601, - 0xb90834b6, - 0x21f5022f, - 0x2fbb02d3, - 0x003fbb00, - 0x010007f1, - 0xd00203f0, + 0xe5f050e3, + 0xbd24bd01, +/* 0x0433: init_unk_loop */ + 0xf444bd34, + 0xf6b06821, + 0x0f0bf400, + 0xbb01f7f0, + 0x4ffd04f2, + 0x0130b605, +/* 0x0448: init_unk_next */ + 0xb60120b6, + 0x26b004e0, + 0xe21bf401, +/* 0x0454: init_unk_done */ + 0x80070380, + 0x27f10804, + 0x23f00100, + 0x0022cf02, + 0x259534bd, + 0x0007f108, + 0x0103f0c0, + 0xbd0005d0, + 0x0007f104, + 0x0103f0c1, + 0xbd0005d0, + 0x000e9804, + 0xf5010f98, + 0xbb015021, + 0x3fbb002f, + 0x010e9800, + 0xf5020f98, + 0x98015021, + 0xeffd050e, + 0x002ebb00, + 0x98003ebb, + 0x0f98020e, + 0x5021f503, + 0x070e9801, + 0xbb00effd, + 0x3ebb002e, + 0x0235b600, + 0xd30007f1, + 0xd00103f0, 0x04bd0003, - 0x29f024bd, - 0x0007f11f, - 0x0203f008, - 0xbd0002d0, -/* 0x0505: main */ - 0x0031f404, - 0xf00028f4, - 0x21f424d7, - 0xf401f439, - 0xf404e4b0, - 0x81fe1e18, - 0x0627f001, - 0x12fd20bd, - 0x01e4b604, - 0xfe051efd, - 0x21f50018, - 0x0ef405fa, -/* 0x0535: main_not_ctx_xfer */ - 0x10ef94d3, - 0xf501f5f0, - 0xf4037e21, -/* 0x0542: ih */ - 0x80f9c60e, - 0xf90188fe, - 0xf990f980, - 0xf9b0f9a0, - 0xf9e0f9d0, - 0xf104bdf0, - 0xf00200a7, - 0xaacf00a3, - 0x04abc400, - 0xf02c0bf4, - 0xe7f124d7, - 0xe3f01a00, - 0x00eecf00, - 0x1900f7f1, - 0xcf00f3f0, - 0x21f400ff, - 0x01e7f004, - 0x1d0007f1, - 0xd00003f0, - 0x04bd000e, -/* 0x0590: ih_no_fifo */ - 0x010007f1, - 0xd00003f0, - 0x04bd000a, - 0xe0fcf0fc, - 0xb0fcd0fc, - 0x90fca0fc, - 0x88fe80fc, - 0xf480fc00, - 0x01f80032, -/* 
0x05b4: hub_barrier_done */ - 0x9801f7f0, - 0xfebb040e, - 0x02ffb904, - 0x9418e7f1, - 0xf440e3f0, - 0x00f89d21, -/* 0x05cc: ctx_redswitch */ - 0xf120f7f0, + 0xb60825b6, + 0x20b60635, + 0x0130b601, + 0xb60824b6, + 0x2fb90834, + 0xd321f502, + 0x002fbb02, + 0xf1003fbb, + 0xf0010007, + 0x03d00203, + 0xbd04bd00, + 0x1f29f024, + 0x080007f1, + 0xd00203f0, + 0x04bd0002, +/* 0x0508: main */ + 0xf40031f4, + 0xd7f00028, + 0x3921f424, + 0xb0f401f4, + 0x18f404e4, + 0x0181fe1e, + 0xbd0627f0, + 0x0412fd20, + 0xfd01e4b6, + 0x18fe051e, + 0xfd21f500, + 0xd30ef405, +/* 0x0538: main_not_ctx_xfer */ + 0xf010ef94, + 0x21f501f5, + 0x0ef4037e, +/* 0x0545: ih */ + 0xfe80f9c6, + 0x80f90188, + 0xa0f990f9, + 0xd0f9b0f9, + 0xf0f9e0f9, + 0xa7f104bd, + 0xa3f00200, + 0x00aacf00, + 0xf404abc4, + 0xd7f02c0b, + 0x00e7f124, + 0x00e3f01a, + 0xf100eecf, + 0xf01900f7, + 0xffcf00f3, + 0x0421f400, + 0xf101e7f0, + 0xf01d0007, + 0x0ed00003, +/* 0x0593: ih_no_fifo */ + 0xf104bd00, + 0xf0010007, + 0x0ad00003, + 0xfc04bd00, + 0xfce0fcf0, + 0xfcb0fcd0, + 0xfc90fca0, + 0x0088fe80, + 0x32f480fc, +/* 0x05b7: hub_barrier_done */ + 0xf001f800, + 0x0e9801f7, + 0x04febb04, + 0xf102ffb9, + 0xf09418e7, + 0x21f440e3, +/* 0x05cf: ctx_redswitch */ + 0xf000f89d, + 0x07f120f7, + 0x03f08500, + 0x000fd001, + 0xe7f004bd, +/* 0x05e1: ctx_redswitch_delay */ + 0x01e2b608, + 0xf1fd1bf4, + 0xf10800f5, + 0xf10200f5, 0xf0850007, 0x0fd00103, - 0xf004bd00, -/* 0x05de: ctx_redswitch_delay */ - 0xe2b608e7, - 0xfd1bf401, - 0x0800f5f1, - 0x0200f5f1, - 0x850007f1, - 0xd00103f0, - 0x04bd000f, -/* 0x05fa: ctx_xfer */ - 0x07f100f8, - 0x03f08100, - 0x000fd002, - 0x11f404bd, - 0xcc21f507, -/* 0x060d: ctx_xfer_not_load */ - 0x6a21f505, - 0xf124bd02, - 0xf047fc07, - 0x02d00203, - 0xf004bd00, - 0x20b6012c, - 0xfc07f103, - 0x0203f04a, - 0xbd0002d0, - 0x01acf004, - 0xf102a5f0, - 0xf00000b7, - 0x0c9850b3, - 0x0fc4b604, - 0x9800bcbb, - 0x0d98000c, - 0x00e7f001, - 0x016f21f5, - 0xf101acf0, - 0xf04000b7, - 0x0c9850b3, - 0x0fc4b604, - 0x9800bcbb, - 0x0d98010c, - 0x060f9802, - 0x0800e7f1, - 0x016f21f5, + 0xf804bd00, +/* 0x05fd: ctx_xfer */ + 0x0007f100, + 0x0203f081, + 0xbd000fd0, + 0x0711f404, + 0x05cf21f5, +/* 0x0610: ctx_xfer_not_load */ + 0x026a21f5, + 0x07f124bd, + 0x03f047fc, + 0x0002d002, + 0x2cf004bd, + 0x0320b601, + 0x4afc07f1, + 0xd00203f0, + 0x04bd0002, 0xf001acf0, - 0xb7f104a5, - 0xb3f03000, + 0xb7f102a5, + 0xb3f00000, 0x040c9850, 0xbb0fc4b6, 0x0c9800bc, - 0x030d9802, - 0xf1080f98, - 0xf50200e7, - 0xf5016f21, - 0xf4025e21, - 0x12f40601, -/* 0x06a9: ctx_xfer_post */ - 0x7f21f507, -/* 0x06ad: ctx_xfer_done */ - 0xb421f502, - 0x0000f805, - 0x00000000, + 0x010d9800, + 0xf500e7f0, + 0xf0016f21, + 0xb7f101ac, + 0xb3f04000, + 0x040c9850, + 0xbb0fc4b6, + 0x0c9800bc, + 0x020d9801, + 0xf1060f98, + 0xf50800e7, + 0xf0016f21, + 0xa5f001ac, + 0x00b7f104, + 0x50b3f030, + 0xb6040c98, + 0xbcbb0fc4, + 0x020c9800, + 0x98030d98, + 0xe7f1080f, + 0x21f50200, + 0x21f5016f, + 0x01f4025e, + 0x0712f406, +/* 0x06ac: ctx_xfer_post */ + 0x027f21f5, +/* 0x06b0: ctx_xfer_done */ + 0x05b721f5, + 0x000000f8, 0x00000000, 0x00000000, 0x00000000, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h index ee8e54db8fc9..1c6e11b05df2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h @@ -314,7 +314,7 @@ uint32_t gk110_grgpc_code[] = { 0x03f01200, 0x0002d000, 0x17f104bd, - 0x10fe0542, + 0x10fe0545, 0x0007f100, 0x0003f007, 0xbd0000d0, @@ -338,184 
+338,184 @@ uint32_t gk110_grgpc_code[] = { 0x02d00103, 0xf104bd00, 0xf00c30e7, - 0x24bd50e3, - 0x44bd34bd, -/* 0x0430: init_unk_loop */ - 0xb06821f4, - 0x0bf400f6, - 0x01f7f00f, - 0xfd04f2bb, - 0x30b6054f, -/* 0x0445: init_unk_next */ - 0x0120b601, - 0xb004e0b6, - 0x1bf40226, -/* 0x0451: init_unk_done */ - 0x070380e2, - 0xf1080480, - 0xf0010027, - 0x22cf0223, - 0x9534bd00, - 0x07f10825, - 0x03f0c000, - 0x0005d001, - 0x07f104bd, - 0x03f0c100, - 0x0005d001, - 0x0e9804bd, - 0x010f9800, - 0x015021f5, - 0xbb002fbb, - 0x0e98003f, - 0x020f9801, - 0x015021f5, - 0xfd050e98, - 0x2ebb00ef, - 0x003ebb00, - 0x98020e98, - 0x21f5030f, - 0x0e980150, - 0x00effd07, - 0xbb002ebb, - 0x35b6003e, - 0x0007f102, - 0x0103f0d3, - 0xbd0003d0, - 0x0825b604, - 0xb60635b6, - 0x30b60120, - 0x0824b601, - 0xb90834b6, - 0x21f5022f, - 0x2fbb02d3, - 0x003fbb00, - 0x010007f1, - 0xd00203f0, + 0xe5f050e3, + 0xbd24bd01, +/* 0x0433: init_unk_loop */ + 0xf444bd34, + 0xf6b06821, + 0x0f0bf400, + 0xbb01f7f0, + 0x4ffd04f2, + 0x0130b605, +/* 0x0448: init_unk_next */ + 0xb60120b6, + 0x26b004e0, + 0xe21bf402, +/* 0x0454: init_unk_done */ + 0x80070380, + 0x27f10804, + 0x23f00100, + 0x0022cf02, + 0x259534bd, + 0x0007f108, + 0x0103f0c0, + 0xbd0005d0, + 0x0007f104, + 0x0103f0c1, + 0xbd0005d0, + 0x000e9804, + 0xf5010f98, + 0xbb015021, + 0x3fbb002f, + 0x010e9800, + 0xf5020f98, + 0x98015021, + 0xeffd050e, + 0x002ebb00, + 0x98003ebb, + 0x0f98020e, + 0x5021f503, + 0x070e9801, + 0xbb00effd, + 0x3ebb002e, + 0x0235b600, + 0xd30007f1, + 0xd00103f0, 0x04bd0003, - 0x29f024bd, - 0x0007f11f, - 0x0203f030, - 0xbd0002d0, -/* 0x0505: main */ - 0x0031f404, - 0xf00028f4, - 0x21f424d7, - 0xf401f439, - 0xf404e4b0, - 0x81fe1e18, - 0x0627f001, - 0x12fd20bd, - 0x01e4b604, - 0xfe051efd, - 0x21f50018, - 0x0ef405fa, -/* 0x0535: main_not_ctx_xfer */ - 0x10ef94d3, - 0xf501f5f0, - 0xf4037e21, -/* 0x0542: ih */ - 0x80f9c60e, - 0xf90188fe, - 0xf990f980, - 0xf9b0f9a0, - 0xf9e0f9d0, - 0xf104bdf0, - 0xf00200a7, - 0xaacf00a3, - 0x04abc400, - 0xf02c0bf4, - 0xe7f124d7, - 0xe3f01a00, - 0x00eecf00, - 0x1900f7f1, - 0xcf00f3f0, - 0x21f400ff, - 0x01e7f004, - 0x1d0007f1, - 0xd00003f0, - 0x04bd000e, -/* 0x0590: ih_no_fifo */ - 0x010007f1, - 0xd00003f0, - 0x04bd000a, - 0xe0fcf0fc, - 0xb0fcd0fc, - 0x90fca0fc, - 0x88fe80fc, - 0xf480fc00, - 0x01f80032, -/* 0x05b4: hub_barrier_done */ - 0x9801f7f0, - 0xfebb040e, - 0x02ffb904, - 0x9418e7f1, - 0xf440e3f0, - 0x00f89d21, -/* 0x05cc: ctx_redswitch */ - 0xf120f7f0, + 0xb60825b6, + 0x20b60635, + 0x0130b601, + 0xb60824b6, + 0x2fb90834, + 0xd321f502, + 0x002fbb02, + 0xf1003fbb, + 0xf0010007, + 0x03d00203, + 0xbd04bd00, + 0x1f29f024, + 0x300007f1, + 0xd00203f0, + 0x04bd0002, +/* 0x0508: main */ + 0xf40031f4, + 0xd7f00028, + 0x3921f424, + 0xb0f401f4, + 0x18f404e4, + 0x0181fe1e, + 0xbd0627f0, + 0x0412fd20, + 0xfd01e4b6, + 0x18fe051e, + 0xfd21f500, + 0xd30ef405, +/* 0x0538: main_not_ctx_xfer */ + 0xf010ef94, + 0x21f501f5, + 0x0ef4037e, +/* 0x0545: ih */ + 0xfe80f9c6, + 0x80f90188, + 0xa0f990f9, + 0xd0f9b0f9, + 0xf0f9e0f9, + 0xa7f104bd, + 0xa3f00200, + 0x00aacf00, + 0xf404abc4, + 0xd7f02c0b, + 0x00e7f124, + 0x00e3f01a, + 0xf100eecf, + 0xf01900f7, + 0xffcf00f3, + 0x0421f400, + 0xf101e7f0, + 0xf01d0007, + 0x0ed00003, +/* 0x0593: ih_no_fifo */ + 0xf104bd00, + 0xf0010007, + 0x0ad00003, + 0xfc04bd00, + 0xfce0fcf0, + 0xfcb0fcd0, + 0xfc90fca0, + 0x0088fe80, + 0x32f480fc, +/* 0x05b7: hub_barrier_done */ + 0xf001f800, + 0x0e9801f7, + 0x04febb04, + 0xf102ffb9, + 0xf09418e7, + 0x21f440e3, +/* 0x05cf: ctx_redswitch */ + 0xf000f89d, + 0x07f120f7, + 0x03f08500, + 
0x000fd001, + 0xe7f004bd, +/* 0x05e1: ctx_redswitch_delay */ + 0x01e2b608, + 0xf1fd1bf4, + 0xf10800f5, + 0xf10200f5, 0xf0850007, 0x0fd00103, - 0xf004bd00, -/* 0x05de: ctx_redswitch_delay */ - 0xe2b608e7, - 0xfd1bf401, - 0x0800f5f1, - 0x0200f5f1, - 0x850007f1, - 0xd00103f0, - 0x04bd000f, -/* 0x05fa: ctx_xfer */ - 0x07f100f8, - 0x03f08100, - 0x000fd002, - 0x11f404bd, - 0xcc21f507, -/* 0x060d: ctx_xfer_not_load */ - 0x6a21f505, - 0xf124bd02, - 0xf047fc07, - 0x02d00203, - 0xf004bd00, - 0x20b6012c, - 0xfc07f103, - 0x0203f04a, - 0xbd0002d0, - 0x01acf004, - 0xf102a5f0, - 0xf00000b7, - 0x0c9850b3, - 0x0fc4b604, - 0x9800bcbb, - 0x0d98000c, - 0x00e7f001, - 0x016f21f5, - 0xf101acf0, - 0xf04000b7, - 0x0c9850b3, - 0x0fc4b604, - 0x9800bcbb, - 0x0d98010c, - 0x060f9802, - 0x0800e7f1, - 0x016f21f5, + 0xf804bd00, +/* 0x05fd: ctx_xfer */ + 0x0007f100, + 0x0203f081, + 0xbd000fd0, + 0x0711f404, + 0x05cf21f5, +/* 0x0610: ctx_xfer_not_load */ + 0x026a21f5, + 0x07f124bd, + 0x03f047fc, + 0x0002d002, + 0x2cf004bd, + 0x0320b601, + 0x4afc07f1, + 0xd00203f0, + 0x04bd0002, 0xf001acf0, - 0xb7f104a5, - 0xb3f03000, + 0xb7f102a5, + 0xb3f00000, 0x040c9850, 0xbb0fc4b6, 0x0c9800bc, - 0x030d9802, - 0xf1080f98, - 0xf50200e7, - 0xf5016f21, - 0xf4025e21, - 0x12f40601, -/* 0x06a9: ctx_xfer_post */ - 0x7f21f507, -/* 0x06ad: ctx_xfer_done */ - 0xb421f502, - 0x0000f805, - 0x00000000, + 0x010d9800, + 0xf500e7f0, + 0xf0016f21, + 0xb7f101ac, + 0xb3f04000, + 0x040c9850, + 0xbb0fc4b6, + 0x0c9800bc, + 0x020d9801, + 0xf1060f98, + 0xf50800e7, + 0xf0016f21, + 0xa5f001ac, + 0x00b7f104, + 0x50b3f030, + 0xb6040c98, + 0xbcbb0fc4, + 0x020c9800, + 0x98030d98, + 0xe7f1080f, + 0x21f50200, + 0x21f5016f, + 0x01f4025e, + 0x0712f406, +/* 0x06ac: ctx_xfer_post */ + 0x027f21f5, +/* 0x06b0: ctx_xfer_done */ + 0x05b721f5, + 0x000000f8, 0x00000000, 0x00000000, 0x00000000, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h index fbcc342f896f..84af7ec6a78e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h @@ -276,7 +276,7 @@ uint32_t gk208_grgpc_code[] = { 0x02020014, 0xf6120040, 0x04bd0002, - 0xfe048141, + 0xfe048441, 0x00400010, 0x0000f607, 0x040204bd, @@ -295,165 +295,165 @@ uint32_t gk208_grgpc_code[] = { 0x01c90080, 0xbd0002f6, 0x0c308e04, - 0xbd24bd50, -/* 0x0383: init_unk_loop */ - 0x7e44bd34, - 0xb0000065, - 0x0bf400f6, - 0xbb010f0e, - 0x4ffd04f2, - 0x0130b605, -/* 0x0398: init_unk_next */ - 0xb60120b6, - 0x26b004e0, - 0xe21bf401, -/* 0x03a4: init_unk_done */ - 0xb50703b5, - 0x00820804, - 0x22cf0201, - 0x9534bd00, - 0x00800825, - 0x05f601c0, - 0x8004bd00, - 0xf601c100, - 0x04bd0005, - 0x98000e98, - 0x207e010f, - 0x2fbb0001, - 0x003fbb00, - 0x98010e98, - 0x207e020f, - 0x0e980001, - 0x00effd05, - 0xbb002ebb, - 0x0e98003e, - 0x030f9802, - 0x0001207e, - 0xfd070e98, - 0x2ebb00ef, - 0x003ebb00, - 0x800235b6, - 0xf601d300, - 0x04bd0003, - 0xb60825b6, - 0x20b60635, - 0x0130b601, - 0xb60824b6, - 0x2fb20834, - 0x0002687e, - 0xbb002fbb, - 0x0080003f, - 0x03f60201, - 0xbd04bd00, - 0x1f29f024, - 0x02300080, - 0xbd0002f6, -/* 0x0445: main */ - 0x0031f404, - 0x0d0028f4, - 0x00377e24, - 0xf401f400, - 0xf404e4b0, - 0x81fe1d18, - 0xbd060201, - 0x0412fd20, - 0xfd01e4b6, - 0x18fe051e, - 0x05187e00, - 0xd40ef400, -/* 0x0474: main_not_ctx_xfer */ - 0xf010ef94, - 0xf87e01f5, - 0x0ef40002, -/* 0x0481: ih */ - 0xfe80f9c7, - 0x80f90188, - 0xa0f990f9, - 0xd0f9b0f9, - 0xf0f9e0f9, - 0x004a04bd, - 0x00aacf02, - 
0xf404abc4, - 0x240d1f0b, - 0xcf1a004e, - 0x004f00ee, - 0x00ffcf19, - 0x0000047e, - 0x0040010e, - 0x000ef61d, -/* 0x04be: ih_no_fifo */ - 0x004004bd, - 0x000af601, - 0xf0fc04bd, - 0xd0fce0fc, - 0xa0fcb0fc, - 0x80fc90fc, - 0xfc0088fe, - 0x0032f480, -/* 0x04de: hub_barrier_done */ - 0x010f01f8, - 0xbb040e98, - 0xffb204fe, - 0x4094188e, - 0x00008f7e, -/* 0x04f2: ctx_redswitch */ - 0x200f00f8, + 0x01e5f050, + 0x34bd24bd, +/* 0x0386: init_unk_loop */ + 0x657e44bd, + 0xf6b00000, + 0x0e0bf400, + 0xf2bb010f, + 0x054ffd04, +/* 0x039b: init_unk_next */ + 0xb60130b6, + 0xe0b60120, + 0x0126b004, +/* 0x03a7: init_unk_done */ + 0xb5e21bf4, + 0x04b50703, + 0x01008208, + 0x0022cf02, + 0x259534bd, + 0xc0008008, + 0x0005f601, + 0x008004bd, + 0x05f601c1, + 0x9804bd00, + 0x0f98000e, + 0x01207e01, + 0x002fbb00, + 0x98003fbb, + 0x0f98010e, + 0x01207e02, + 0x050e9800, + 0xbb00effd, + 0x3ebb002e, + 0x020e9800, + 0x7e030f98, + 0x98000120, + 0xeffd070e, + 0x002ebb00, + 0xb6003ebb, + 0x00800235, + 0x03f601d3, + 0xb604bd00, + 0x35b60825, + 0x0120b606, + 0xb60130b6, + 0x34b60824, + 0x7e2fb208, + 0xbb000268, + 0x3fbb002f, + 0x01008000, + 0x0003f602, + 0x24bd04bd, + 0x801f29f0, + 0xf6023000, + 0x04bd0002, +/* 0x0448: main */ + 0xf40031f4, + 0x240d0028, + 0x0000377e, + 0xb0f401f4, + 0x18f404e4, + 0x0181fe1d, + 0x20bd0602, + 0xb60412fd, + 0x1efd01e4, + 0x0018fe05, + 0x00051b7e, +/* 0x0477: main_not_ctx_xfer */ + 0x94d40ef4, + 0xf5f010ef, + 0x02f87e01, + 0xc70ef400, +/* 0x0484: ih */ + 0x88fe80f9, + 0xf980f901, + 0xf9a0f990, + 0xf9d0f9b0, + 0xbdf0f9e0, + 0x02004a04, + 0xc400aacf, + 0x0bf404ab, + 0x4e240d1f, + 0xeecf1a00, + 0x19004f00, + 0x7e00ffcf, + 0x0e000004, + 0x1d004001, + 0xbd000ef6, +/* 0x04c1: ih_no_fifo */ + 0x01004004, + 0xbd000af6, + 0xfcf0fc04, + 0xfcd0fce0, + 0xfca0fcb0, + 0xfe80fc90, + 0x80fc0088, + 0xf80032f4, +/* 0x04e1: hub_barrier_done */ + 0x98010f01, + 0xfebb040e, + 0x8effb204, + 0x7e409418, + 0xf800008f, +/* 0x04f5: ctx_redswitch */ + 0x80200f00, + 0xf6018500, + 0x04bd000f, +/* 0x0502: ctx_redswitch_delay */ + 0xe2b6080e, + 0xfd1bf401, + 0x0800f5f1, + 0x0200f5f1, 0x01850080, 0xbd000ff6, -/* 0x04ff: ctx_redswitch_delay */ - 0xb6080e04, - 0x1bf401e2, - 0x00f5f1fd, - 0x00f5f108, - 0x85008002, - 0x000ff601, - 0x00f804bd, -/* 0x0518: ctx_xfer */ - 0x02810080, - 0xbd000ff6, - 0x0711f404, - 0x0004f27e, -/* 0x0528: ctx_xfer_not_load */ - 0x0002167e, - 0xfc8024bd, - 0x02f60247, - 0xf004bd00, - 0x20b6012c, - 0x4afc8003, +/* 0x051b: ctx_xfer */ + 0x8000f804, + 0xf6028100, + 0x04bd000f, + 0x7e0711f4, +/* 0x052b: ctx_xfer_not_load */ + 0x7e0004f5, + 0xbd000216, + 0x47fc8024, 0x0002f602, - 0xacf004bd, - 0x02a5f001, - 0x5000008b, - 0xb6040c98, - 0xbcbb0fc4, - 0x000c9800, - 0x0e010d98, - 0x013d7e00, - 0x01acf000, - 0x5040008b, - 0xb6040c98, - 0xbcbb0fc4, - 0x010c9800, - 0x98020d98, - 0x004e060f, - 0x013d7e08, - 0x01acf000, - 0x8b04a5f0, - 0x98503000, + 0x2cf004bd, + 0x0320b601, + 0x024afc80, + 0xbd0002f6, + 0x01acf004, + 0x8b02a5f0, + 0x98500000, 0xc4b6040c, 0x00bcbb0f, - 0x98020c98, - 0x0f98030d, - 0x02004e08, + 0x98000c98, + 0x000e010d, 0x00013d7e, - 0x00020a7e, - 0xf40601f4, -/* 0x05b2: ctx_xfer_post */ - 0x277e0712, -/* 0x05b6: ctx_xfer_done */ - 0xde7e0002, - 0x00f80004, - 0x00000000, + 0x8b01acf0, + 0x98504000, + 0xc4b6040c, + 0x00bcbb0f, + 0x98010c98, + 0x0f98020d, + 0x08004e06, + 0x00013d7e, + 0xf001acf0, + 0x008b04a5, + 0x0c985030, + 0x0fc4b604, + 0x9800bcbb, + 0x0d98020c, + 0x080f9803, + 0x7e02004e, + 0x7e00013d, + 0xf400020a, + 0x12f40601, +/* 0x05b5: ctx_xfer_post */ + 0x02277e07, +/* 0x05b9: ctx_xfer_done 
*/ + 0x04e17e00, + 0x0000f800, 0x00000000, 0x00000000, 0x00000000, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h index 51f5c3c6e966..11bf363a6ae9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h @@ -289,7 +289,7 @@ uint32_t gm107_grgpc_code[] = { 0x020014fe, 0x12004002, 0xbd0002f6, - 0x05b04104, + 0x05b34104, 0x400010fe, 0x00f60700, 0x0204bd00, @@ -308,259 +308,259 @@ uint32_t gm107_grgpc_code[] = { 0xc900800f, 0x0002f601, 0x308e04bd, - 0x24bd500c, - 0x44bd34bd, -/* 0x03b0: init_unk_loop */ - 0x0000657e, - 0xf400f6b0, - 0x010f0e0b, - 0xfd04f2bb, - 0x30b6054f, -/* 0x03c5: init_unk_next */ - 0x0120b601, - 0xb004e0b6, - 0x1bf40226, -/* 0x03d1: init_unk_done */ - 0x0703b5e2, - 0x820804b5, - 0xcf020100, - 0x34bd0022, - 0x80082595, - 0xf601c000, + 0xe5f0500c, + 0xbd24bd01, +/* 0x03b3: init_unk_loop */ + 0x7e44bd34, + 0xb0000065, + 0x0bf400f6, + 0xbb010f0e, + 0x4ffd04f2, + 0x0130b605, +/* 0x03c8: init_unk_next */ + 0xb60120b6, + 0x26b004e0, + 0xe21bf402, +/* 0x03d4: init_unk_done */ + 0xb50703b5, + 0x00820804, + 0x22cf0201, + 0x9534bd00, + 0x00800825, + 0x05f601c0, + 0x8004bd00, + 0xf601c100, 0x04bd0005, - 0x01c10080, - 0xbd0005f6, - 0x000e9804, - 0x7e010f98, - 0xbb000120, - 0x3fbb002f, - 0x010e9800, - 0x7e020f98, - 0x98000120, - 0xeffd050e, - 0x002ebb00, - 0x98003ebb, - 0x0f98020e, - 0x01207e03, - 0x070e9800, - 0xbb00effd, - 0x3ebb002e, - 0x0235b600, - 0x01d30080, - 0xbd0003f6, - 0x0825b604, - 0xb60635b6, - 0x30b60120, - 0x0824b601, - 0xb20834b6, - 0x02687e2f, - 0x002fbb00, - 0x0f003fbb, - 0x8effb23f, - 0xf0501d60, - 0x8f7e01e5, - 0x0c0f0000, - 0xa88effb2, - 0xe5f0501d, - 0x008f7e01, - 0x03147e00, - 0xb23f0f00, - 0x1d608eff, - 0x01e5f050, - 0x00008f7e, - 0xffb2000f, - 0x501d9c8e, - 0x7e01e5f0, - 0x0f00008f, - 0x03147e01, - 0x8effb200, + 0x98000e98, + 0x207e010f, + 0x2fbb0001, + 0x003fbb00, + 0x98010e98, + 0x207e020f, + 0x0e980001, + 0x00effd05, + 0xbb002ebb, + 0x0e98003e, + 0x030f9802, + 0x0001207e, + 0xfd070e98, + 0x2ebb00ef, + 0x003ebb00, + 0x800235b6, + 0xf601d300, + 0x04bd0003, + 0xb60825b6, + 0x20b60635, + 0x0130b601, + 0xb60824b6, + 0x2fb20834, + 0x0002687e, + 0xbb002fbb, + 0x3f0f003f, + 0x501d608e, + 0xb201e5f0, + 0x008f7eff, + 0x8e0c0f00, 0xf0501da8, - 0x8f7e01e5, - 0xff0f0000, - 0x988effb2, + 0xffb201e5, + 0x00008f7e, + 0x0003147e, + 0x608e3f0f, 0xe5f0501d, - 0x008f7e01, - 0xb2020f00, - 0x1da88eff, + 0x7effb201, + 0x0f00008f, + 0x1d9c8e00, 0x01e5f050, - 0x00008f7e, + 0x8f7effb2, + 0x010f0000, 0x0003147e, - 0x85050498, - 0x98504000, - 0x64b60406, - 0x0056bb0f, -/* 0x04e0: tpc_strand_init_tpc_loop */ - 0x05705eb8, - 0x00657e00, - 0xbdf6b200, -/* 0x04ed: tpc_strand_init_idx_loop */ - 0x605eb874, - 0x7fb20005, - 0x00008f7e, - 0x05885eb8, - 0x082f9500, - 0x00008f7e, - 0x058c5eb8, - 0x082f9500, + 0x501da88e, + 0xb201e5f0, + 0x008f7eff, + 0x8eff0f00, + 0xf0501d98, + 0xffb201e5, 0x00008f7e, - 0x05905eb8, - 0x00657e00, - 0x06f5b600, - 0xb601f0b6, - 0x2fbb08f4, - 0x003fbb00, - 0xb60170b6, - 0x1bf40162, - 0x0050b7bf, - 0x0142b608, - 0x0fa81bf4, - 0x8effb23f, - 0xf0501d60, - 0x8f7e01e5, - 0x0d0f0000, - 0xa88effb2, + 0xa88e020f, 0xe5f0501d, - 0x008f7e01, - 0x03147e00, - 0x01008000, - 0x0003f602, - 0x24bd04bd, - 0x801f29f0, - 0xf6023000, - 0x04bd0002, -/* 0x0574: main */ - 0xf40031f4, - 0x240d0028, - 0x0000377e, - 0xb0f401f4, - 0x18f404e4, - 0x0181fe1d, - 0x20bd0602, - 0xb60412fd, - 0x1efd01e4, - 0x0018fe05, - 0x0006477e, -/* 
0x05a3: main_not_ctx_xfer */ - 0x94d40ef4, - 0xf5f010ef, - 0x02f87e01, - 0xc70ef400, -/* 0x05b0: ih */ - 0x88fe80f9, - 0xf980f901, - 0xf9a0f990, - 0xf9d0f9b0, - 0xbdf0f9e0, - 0x02004a04, - 0xc400aacf, - 0x0bf404ab, - 0x4e240d1f, - 0xeecf1a00, - 0x19004f00, - 0x7e00ffcf, - 0x0e000004, - 0x1d004001, - 0xbd000ef6, -/* 0x05ed: ih_no_fifo */ - 0x01004004, - 0xbd000af6, - 0xfcf0fc04, - 0xfcd0fce0, - 0xfca0fcb0, - 0xfe80fc90, - 0x80fc0088, - 0xf80032f4, -/* 0x060d: hub_barrier_done */ - 0x98010f01, - 0xfebb040e, - 0x8effb204, - 0x7e409418, - 0xf800008f, -/* 0x0621: ctx_redswitch */ - 0x80200f00, + 0x7effb201, + 0x7e00008f, + 0x98000314, + 0x00850504, + 0x06985040, + 0x0f64b604, +/* 0x04e3: tpc_strand_init_tpc_loop */ + 0xb80056bb, + 0x0005705e, + 0x0000657e, + 0x74bdf6b2, +/* 0x04f0: tpc_strand_init_idx_loop */ + 0x05605eb8, + 0x7e7fb200, + 0xb800008f, + 0x0005885e, + 0x7e082f95, + 0xb800008f, + 0x00058c5e, + 0x7e082f95, + 0xb800008f, + 0x0005905e, + 0x0000657e, + 0xb606f5b6, + 0xf4b601f0, + 0x002fbb08, + 0xb6003fbb, + 0x62b60170, + 0xbf1bf401, + 0x080050b7, + 0xf40142b6, + 0x3f0fa81b, + 0x501d608e, + 0xb201e5f0, + 0x008f7eff, + 0x8e0d0f00, + 0xf0501da8, + 0xffb201e5, + 0x00008f7e, + 0x0003147e, + 0x02010080, + 0xbd0003f6, + 0xf024bd04, + 0x00801f29, + 0x02f60230, +/* 0x0577: main */ + 0xf404bd00, + 0x28f40031, + 0x7e240d00, + 0xf4000037, + 0xe4b0f401, + 0x1d18f404, + 0x020181fe, + 0xfd20bd06, + 0xe4b60412, + 0x051efd01, + 0x7e0018fe, + 0xf400064a, +/* 0x05a6: main_not_ctx_xfer */ + 0xef94d40e, + 0x01f5f010, + 0x0002f87e, +/* 0x05b3: ih */ + 0xf9c70ef4, + 0x0188fe80, + 0x90f980f9, + 0xb0f9a0f9, + 0xe0f9d0f9, + 0x04bdf0f9, + 0xcf02004a, + 0xabc400aa, + 0x1f0bf404, + 0x004e240d, + 0x00eecf1a, + 0xcf19004f, + 0x047e00ff, + 0x010e0000, + 0xf61d0040, + 0x04bd000e, +/* 0x05f0: ih_no_fifo */ + 0xf6010040, + 0x04bd000a, + 0xe0fcf0fc, + 0xb0fcd0fc, + 0x90fca0fc, + 0x88fe80fc, + 0xf480fc00, + 0x01f80032, +/* 0x0610: hub_barrier_done */ + 0x0e98010f, + 0x04febb04, + 0x188effb2, + 0x8f7e4094, + 0x00f80000, +/* 0x0624: ctx_redswitch */ + 0x0080200f, + 0x0ff60185, + 0x0e04bd00, +/* 0x0631: ctx_redswitch_delay */ + 0x01e2b608, + 0xf1fd1bf4, + 0xf10800f5, + 0x800200f5, 0xf6018500, 0x04bd000f, -/* 0x062e: ctx_redswitch_delay */ - 0xe2b6080e, - 0xfd1bf401, - 0x0800f5f1, - 0x0200f5f1, - 0x01850080, - 0xbd000ff6, -/* 0x0647: ctx_xfer */ - 0x8000f804, - 0xf6028100, - 0x04bd000f, - 0xc48effb2, - 0xe5f0501d, - 0x008f7e01, - 0x0711f400, - 0x0006217e, -/* 0x0664: ctx_xfer_not_load */ - 0x0002167e, - 0xfc8024bd, - 0x02f60247, - 0xf004bd00, - 0x20b6012c, - 0x4afc8003, +/* 0x064a: ctx_xfer */ + 0x008000f8, + 0x0ff60281, + 0x8e04bd00, + 0xf0501dc4, + 0xffb201e5, + 0x00008f7e, + 0x7e0711f4, +/* 0x0667: ctx_xfer_not_load */ + 0x7e000624, + 0xbd000216, + 0x47fc8024, 0x0002f602, - 0x0c0f04bd, - 0xa88effb2, - 0xe5f0501d, - 0x008f7e01, - 0x03147e00, - 0xb23f0f00, - 0x1d608eff, - 0x01e5f050, + 0x2cf004bd, + 0x0320b601, + 0x024afc80, + 0xbd0002f6, + 0x8e0c0f04, + 0xf0501da8, + 0xffb201e5, 0x00008f7e, - 0xffb2000f, - 0x501d9c8e, - 0x7e01e5f0, + 0x0003147e, + 0x608e3f0f, + 0xe5f0501d, + 0x7effb201, 0x0f00008f, - 0x03147e01, - 0x01fcf000, - 0xb203f0b6, - 0x1da88eff, + 0x1d9c8e00, 0x01e5f050, - 0x00008f7e, - 0xf001acf0, - 0x008b02a5, - 0x0c985000, - 0x0fc4b604, - 0x9800bcbb, - 0x0d98000c, - 0x7e000e01, - 0xf000013d, - 0x008b01ac, - 0x0c985040, - 0x0fc4b604, - 0x9800bcbb, - 0x0d98010c, - 0x060f9802, - 0x7e08004e, - 0xf000013d, + 0x8f7effb2, + 0x010f0000, + 0x0003147e, + 0xb601fcf0, + 0xa88e03f0, + 0xe5f0501d, + 0x7effb201, + 0xf000008f, 
0xa5f001ac, - 0x30008b04, + 0x00008b02, 0x040c9850, 0xbb0fc4b6, 0x0c9800bc, - 0x030d9802, - 0x4e080f98, - 0x3d7e0200, - 0x0a7e0001, - 0x147e0002, - 0x01f40003, - 0x1a12f406, -/* 0x073c: ctx_xfer_post */ - 0x0002277e, - 0xffb20d0f, - 0x501da88e, - 0x7e01e5f0, - 0x7e00008f, -/* 0x0753: ctx_xfer_done */ - 0x7e000314, - 0xf800060d, - 0x00000000, + 0x010d9800, + 0x3d7e000e, + 0xacf00001, + 0x40008b01, + 0x040c9850, + 0xbb0fc4b6, + 0x0c9800bc, + 0x020d9801, + 0x4e060f98, + 0x3d7e0800, + 0xacf00001, + 0x04a5f001, + 0x5030008b, + 0xb6040c98, + 0xbcbb0fc4, + 0x020c9800, + 0x98030d98, + 0x004e080f, + 0x013d7e02, + 0x020a7e00, + 0x03147e00, + 0x0601f400, +/* 0x073f: ctx_xfer_post */ + 0x7e1a12f4, + 0x0f000227, + 0x1da88e0d, + 0x01e5f050, + 0x8f7effb2, + 0x147e0000, +/* 0x0756: ctx_xfer_done */ + 0x107e0003, + 0x00f80006, 0x00000000, 0x00000000, 0x00000000, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c index dda7a7d224c9..9f5dfc85147a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c @@ -143,7 +143,7 @@ gf100_gr_zbc_depth_get(struct gf100_gr *gr, int format, static int gf100_fermi_mthd_zbc_color(struct nvkm_object *object, void *data, u32 size) { - struct gf100_gr *gr = (void *)object->engine; + struct gf100_gr *gr = gf100_gr(nvkm_gr(object->engine)); union { struct fermi_a_zbc_color_v0 v0; } *args = data; @@ -189,7 +189,7 @@ gf100_fermi_mthd_zbc_color(struct nvkm_object *object, void *data, u32 size) static int gf100_fermi_mthd_zbc_depth(struct nvkm_object *object, void *data, u32 size) { - struct gf100_gr *gr = (void *)object->engine; + struct gf100_gr *gr = gf100_gr(nvkm_gr(object->engine)); union { struct fermi_a_zbc_depth_v0 v0; } *args = data; @@ -1530,6 +1530,8 @@ gf100_gr_oneinit(struct nvkm_gr *base) gr->ppc_nr[i] = gr->func->ppc_nr; for (j = 0; j < gr->ppc_nr[i]; j++) { u8 mask = nvkm_rd32(device, GPC_UNIT(i, 0x0c30 + (j * 4))); + if (mask) + gr->ppc_mask[i] |= (1 << j); gr->ppc_tpc_nr[i][j] = hweight8(mask); } } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h index 4611961b1187..02e78b8d93f6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h @@ -97,6 +97,7 @@ struct gf100_gr { u8 tpc_nr[GPC_MAX]; u8 tpc_total; u8 ppc_nr[GPC_MAX]; + u8 ppc_mask[GPC_MAX]; u8 ppc_tpc_nr[GPC_MAX][4]; struct nvkm_memory *unk4188b4; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c index 895ba74057d4..1d7dd38292b3 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c @@ -97,7 +97,9 @@ static void * nvkm_instobj_dtor(struct nvkm_memory *memory) { struct nvkm_instobj *iobj = nvkm_instobj(memory); + spin_lock(&iobj->imem->lock); list_del(&iobj->head); + spin_unlock(&iobj->imem->lock); nvkm_memory_del(&iobj->parent); return iobj; } @@ -190,7 +192,9 @@ nvkm_instobj_new(struct nvkm_instmem *imem, u32 size, u32 align, bool zero, nvkm_memory_ctor(&nvkm_instobj_func_slow, &iobj->memory); iobj->parent = memory; iobj->imem = imem; + spin_lock(&iobj->imem->lock); list_add_tail(&iobj->head, &imem->list); + spin_unlock(&iobj->imem->lock); memory = &iobj->memory; } @@ -309,5 +313,6 @@ nvkm_instmem_ctor(const struct nvkm_instmem_func *func, { nvkm_subdev_ctor(&nvkm_instmem, device, index, 0, &imem->subdev); imem->func = func; + 
spin_lock_init(&imem->lock); INIT_LIST_HEAD(&imem->list); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c index b61509e26ec9..b735173a18ff 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c @@ -59,7 +59,7 @@ gk104_volt_set(struct nvkm_volt *base, u32 uv) duty = (uv - bios->base) * div / bios->pwm_range; nvkm_wr32(device, 0x20340, div); - nvkm_wr32(device, 0x20344, 0x8000000 | duty); + nvkm_wr32(device, 0x20344, 0x80000000 | duty); return 0; } diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 248953d2fdb7..0154db43860c 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -8472,7 +8472,7 @@ restart_ih: if (queue_dp) schedule_work(&rdev->dp_work); if (queue_hotplug) - schedule_work(&rdev->hotplug_work); + schedule_delayed_work(&rdev->hotplug_work, 0); if (queue_reset) { rdev->needs_reset = true; wake_up_all(&rdev->fence_queue); @@ -9630,6 +9630,9 @@ static void dce8_program_watermarks(struct radeon_device *rdev, (rdev->disp_priority == 2)) { DRM_DEBUG_KMS("force priority to high\n"); } + + /* Save number of lines the linebuffer leads before the scanout */ + radeon_crtc->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay); } /* select wm A */ diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 7f33767d7ed6..2ad462896896 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -2372,6 +2372,9 @@ static void evergreen_program_watermarks(struct radeon_device *rdev, c.full = dfixed_div(c, a); priority_b_mark = dfixed_trunc(c); priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK; + + /* Save number of lines the linebuffer leads before the scanout */ + radeon_crtc->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay); } /* select wm A */ @@ -5344,7 +5347,7 @@ restart_ih: if (queue_dp) schedule_work(&rdev->dp_work); if (queue_hotplug) - schedule_work(&rdev->hotplug_work); + schedule_delayed_work(&rdev->hotplug_work, 0); if (queue_hdmi) schedule_work(&rdev->audio_work); if (queue_thermal && rdev->pm.dpm_enabled) diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 238b13f045c1..9e7e2bf03b81 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -806,7 +806,7 @@ int r100_irq_process(struct radeon_device *rdev) status = r100_irq_ack(rdev); } if (queue_hotplug) - schedule_work(&rdev->hotplug_work); + schedule_delayed_work(&rdev->hotplug_work, 0); if (rdev->msi_enabled) { switch (rdev->family) { case CHIP_RS400: @@ -3217,6 +3217,9 @@ void r100_bandwidth_update(struct radeon_device *rdev) uint32_t pixel_bytes1 = 0; uint32_t pixel_bytes2 = 0; + /* Guess line buffer size to be 8192 pixels */ + u32 lb_size = 8192; + if (!rdev->mode_info.mode_config_initialized) return; @@ -3631,6 +3634,13 @@ void r100_bandwidth_update(struct radeon_device *rdev) DRM_DEBUG_KMS("GRPH2_BUFFER_CNTL from to %x\n", (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL)); } + + /* Save number of lines the linebuffer leads before the scanout */ + if (mode1) + rdev->mode_info.crtcs[0]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode1->crtc_hdisplay); + + if (mode2) + rdev->mode_info.crtcs[1]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode2->crtc_hdisplay); } int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring) diff --git a/drivers/gpu/drm/radeon/r600.c 
b/drivers/gpu/drm/radeon/r600.c index 4ea5b10ff5f4..cc2fdf0be37a 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -4276,7 +4276,7 @@ restart_ih: WREG32(IH_RB_RPTR, rptr); } if (queue_hotplug) - schedule_work(&rdev->hotplug_work); + schedule_delayed_work(&rdev->hotplug_work, 0); if (queue_hdmi) schedule_work(&rdev->audio_work); if (queue_thermal && rdev->pm.dpm_enabled) diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index b6cbd816537e..87db64983ea8 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -2414,7 +2414,7 @@ struct radeon_device { struct r600_ih ih; /* r6/700 interrupt ring */ struct radeon_rlc rlc; struct radeon_mec mec; - struct work_struct hotplug_work; + struct delayed_work hotplug_work; struct work_struct dp_work; struct work_struct audio_work; int num_crtc; /* number of crtcs */ diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c index fe994aac3b04..c77d349c561c 100644 --- a/drivers/gpu/drm/radeon/radeon_agp.c +++ b/drivers/gpu/drm/radeon/radeon_agp.c @@ -54,6 +54,9 @@ static struct radeon_agpmode_quirk radeon_agpmode_quirk_list[] = { /* Intel 82855PM host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (lp #195051) */ { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4e50, PCI_VENDOR_ID_IBM, 0x0550, 1}, + /* Intel 82855PM host bridge / RV250/M9 GL [Mobility FireGL 9000/Radeon 9000] needs AGPMode 1 (Thinkpad T40p) */ + { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c66, + PCI_VENDOR_ID_IBM, 0x054d, 1}, /* Intel 82855PM host bridge / Mobility M7 needs AGPMode 1 */ { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c57, PCI_VENDOR_ID_IBM, 0x0530, 1}, diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 5a2cafb4f1bc..340f3f549f29 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -1234,13 +1234,32 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) if (r < 0) return connector_status_disconnected; + if (radeon_connector->detected_hpd_without_ddc) { + force = true; + radeon_connector->detected_hpd_without_ddc = false; + } + if (!force && radeon_check_hpd_status_unchanged(connector)) { ret = connector->status; goto exit; } - if (radeon_connector->ddc_bus) + if (radeon_connector->ddc_bus) { dret = radeon_ddc_probe(radeon_connector, false); + + /* Sometimes the pins required for the DDC probe on DVI + * connectors don't make contact at the same time that the ones + * for HPD do. If the DDC probe fails even though we had an HPD + * signal, try again later */ + if (!dret && !force && + connector->status != connector_status_connected) { + DRM_DEBUG_KMS("hpd detected without ddc, retrying in 1 second\n"); + radeon_connector->detected_hpd_without_ddc = true; + schedule_delayed_work(&rdev->hotplug_work, + msecs_to_jiffies(1000)); + goto exit; + } + } if (dret) { radeon_connector->detected_by_load = false; radeon_connector_free_edid(connector); diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index a8d9927ed9eb..1eca0acac016 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -322,7 +322,9 @@ void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id) * to complete in this vblank? 
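* Note: USE_REAL_VBLANKSTART is passed here so the completion test is
* made against the true hardware start of vblank rather than the
* fudged-earlier start used for vblank timestamps (see the flag
* documentation added to radeon_get_crtc_scanoutpos() below).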
*/ if (update_pending && - (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id, 0, + (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, + crtc_id, + USE_REAL_VBLANKSTART, &vpos, &hpos, NULL, NULL, &rdev->mode_info.crtcs[crtc_id]->base.hwmode)) && ((vpos >= (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100) || @@ -401,6 +403,8 @@ static void radeon_flip_work_func(struct work_struct *__work) struct drm_crtc *crtc = &radeon_crtc->base; unsigned long flags; int r; + int vpos, hpos, stat, min_udelay; + struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id]; down_read(&rdev->exclusive_lock); if (work->fence) { @@ -437,6 +441,41 @@ static void radeon_flip_work_func(struct work_struct *__work) /* set the proper interrupt */ radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id); + /* If this happens to execute within the "virtually extended" vblank + * interval before the start of the real vblank interval then it needs + * to delay programming the mmio flip until the real vblank is entered. + * This prevents completing a flip too early due to the way we fudge + * our vblank counter and vblank timestamps in order to work around the + * problem that the hw fires vblank interrupts before actual start of + * vblank (when line buffer refilling is done for a frame). It + * complements the fudging logic in radeon_get_crtc_scanoutpos() for + * timestamping and radeon_get_vblank_counter_kms() for vblank counts. + * + * In practice this won't execute very often unless on very fast + * machines because the time window for this to happen is very small. + */ + for (;;) { + /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank + * start in hpos, and to the "fudged earlier" vblank start in + * vpos. + */ + stat = radeon_get_crtc_scanoutpos(rdev->ddev, work->crtc_id, + GET_DISTANCE_TO_VBLANKSTART, + &vpos, &hpos, NULL, NULL, + &crtc->hwmode); + + if ((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) != + (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE) || + !(vpos >= 0 && hpos <= 0)) + break; + + /* Sleep at least until estimated real start of hw vblank */ + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); + min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5); + usleep_range(min_udelay, 2 * min_udelay); + spin_lock_irqsave(&crtc->dev->event_lock, flags); + }; + /* do the flip (mmio) */ radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base); @@ -1768,6 +1807,15 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, * \param dev Device to query. * \param crtc Crtc to query. * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0). + * For driver internal use only also supports these flags: + * + * USE_REAL_VBLANKSTART to use the real start of vblank instead + * of a fudged earlier start of vblank. + * + * GET_DISTANCE_TO_VBLANKSTART to return distance to the + * fudged earlier start of vblank in *vpos and the distance + * to true start of vblank in *hpos. + * * \param *vpos Location where vertical scanout position should be stored. * \param *hpos Location where horizontal scanout position should go. * \param *stime Target location for timestamp taken immediately before @@ -1911,10 +1959,40 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, vbl_end = 0; } + /* Called from driver internal vblank counter query code? 
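+ * If so, *hpos is repurposed below to carry the signed distance to the
+ * real vblank start; e.g. with vbl_start == 1080 and a raw *vpos of
+ * 1077, the caller gets *hpos == -3, i.e. three scanlines before true
+ * vblank start.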
*/ + if (flags & GET_DISTANCE_TO_VBLANKSTART) { + /* Caller wants distance from real vbl_start in *hpos */ + *hpos = *vpos - vbl_start; + } + + /* Fudge vblank to start a few scanlines earlier to handle the + * problem that vblank irqs fire a few scanlines before start + * of vblank. Some driver internal callers need the true vblank + * start to be used and signal this via the USE_REAL_VBLANKSTART flag. + * + * The cause of the "early" vblank irq is that the irq is triggered + * by the line buffer logic when the line buffer read position enters + * the vblank, whereas our crtc scanout position naturally lags the + * line buffer read position. + */ + if (!(flags & USE_REAL_VBLANKSTART)) + vbl_start -= rdev->mode_info.crtcs[pipe]->lb_vblank_lead_lines; + /* Test scanout position against vblank region. */ if ((*vpos < vbl_start) && (*vpos >= vbl_end)) in_vbl = false; + /* In vblank? */ + if (in_vbl) + ret |= DRM_SCANOUTPOS_IN_VBLANK; + + /* Called from driver internal vblank counter query code? */ + if (flags & GET_DISTANCE_TO_VBLANKSTART) { + /* Caller wants distance from fudged earlier vbl_start */ + *vpos -= vbl_start; + return ret; + } + /* Check if inside vblank area and apply corrective offsets: * vpos will then be >=0 in video scanout area, but negative * within vblank area, counting down the number of lines until @@ -1930,31 +2008,5 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, /* Correct for shifted end of vbl at vbl_end. */ *vpos = *vpos - vbl_end; - /* In vblank? */ - if (in_vbl) - ret |= DRM_SCANOUTPOS_IN_VBLANK; - - /* Is vpos outside nominal vblank area, but less than - * 1/100 of a frame height away from start of vblank? - * If so, assume this isn't a massively delayed vblank - * interrupt, but a vblank interrupt that fired a few - * microseconds before true start of vblank. Compensate - * by adding a full frame duration to the final timestamp. - * Happens, e.g., on ATI R500, R600. - * - * We only do this if DRM_CALLED_FROM_VBLIRQ. - */ - if ((flags & DRM_CALLED_FROM_VBLIRQ) && !in_vbl) { - vbl_start = mode->crtc_vdisplay; - vtotal = mode->crtc_vtotal; - - if (vbl_start - *vpos < vtotal / 100) { - *vpos -= vtotal; - - /* Signal this correction as "applied". 
*/ - ret |= 0x8; - } - } - return ret; } diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index 171d3e43c30c..979f3bf65f2c 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c @@ -74,7 +74,7 @@ irqreturn_t radeon_driver_irq_handler_kms(int irq, void *arg) static void radeon_hotplug_work_func(struct work_struct *work) { struct radeon_device *rdev = container_of(work, struct radeon_device, - hotplug_work); + hotplug_work.work); struct drm_device *dev = rdev->ddev; struct drm_mode_config *mode_config = &dev->mode_config; struct drm_connector *connector; @@ -302,7 +302,7 @@ int radeon_irq_kms_init(struct radeon_device *rdev) } } - INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func); + INIT_DELAYED_WORK(&rdev->hotplug_work, radeon_hotplug_work_func); INIT_WORK(&rdev->dp_work, radeon_dp_work_func); INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi); @@ -310,7 +310,7 @@ int radeon_irq_kms_init(struct radeon_device *rdev) r = drm_irq_install(rdev->ddev, rdev->ddev->pdev->irq); if (r) { rdev->irq.installed = false; - flush_work(&rdev->hotplug_work); + flush_delayed_work(&rdev->hotplug_work); return r; } @@ -333,7 +333,7 @@ void radeon_irq_kms_fini(struct radeon_device *rdev) rdev->irq.installed = false; if (rdev->msi_enabled) pci_disable_msi(rdev->pdev); - flush_work(&rdev->hotplug_work); + flush_delayed_work(&rdev->hotplug_work); } } diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 0ec6fcca16d3..d290a8a09036 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -755,6 +755,8 @@ void radeon_driver_preclose_kms(struct drm_device *dev, */ u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc) { + int vpos, hpos, stat; + u32 count; struct radeon_device *rdev = dev->dev_private; if (crtc < 0 || crtc >= rdev->num_crtc) { @@ -762,7 +764,53 @@ u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc) return -EINVAL; } - return radeon_get_vblank_counter(rdev, crtc); + /* The hw increments its frame counter at start of vsync, not at start + * of vblank, as is required by DRM core vblank counter handling. + * Cook the hw count here to make it appear to the caller as if it + * incremented at start of vblank. We measure distance to start of + * vblank in vpos. vpos therefore will be >= 0 between start of vblank + * and start of vsync, so vpos >= 0 means to bump the hw frame counter + * result by 1 to give the proper appearance to caller. + */ + if (rdev->mode_info.crtcs[crtc]) { + /* Repeat readout if needed to provide stable result if + * we cross start of vsync during the queries. + */ + do { + count = radeon_get_vblank_counter(rdev, crtc); + /* Ask radeon_get_crtc_scanoutpos to return vpos as + * distance to start of vblank, instead of regular + * vertical scanout pos. + */ + stat = radeon_get_crtc_scanoutpos( + dev, crtc, GET_DISTANCE_TO_VBLANKSTART, + &vpos, &hpos, NULL, NULL, + &rdev->mode_info.crtcs[crtc]->base.hwmode); + } while (count != radeon_get_vblank_counter(rdev, crtc)); + + if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) != + (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) { + DRM_DEBUG_VBL("Query failed! stat %d\n", stat); + } + else { + DRM_DEBUG_VBL("crtc %d: dist from vblank start %d\n", + crtc, vpos); + + /* Bump counter if we are at >= leading edge of vblank, + * but before vsync where vpos would turn negative and + * the hw counter really increments. 
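+ * Worked example: if the raw hw counter reads 100 while the scanout
+ * position is a few lines into vblank (vpos >= 0) but vsync has not
+ * started yet, the value reported to DRM core is 101, as if the
+ * counter had already flipped at the start of vblank.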
+ */ + if (vpos >= 0) + count++; + } + } + else { + /* Fallback to use value as is. */ + count = radeon_get_vblank_counter(rdev, crtc); + DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n"); + } + + return count; } /** diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 830e171c3a9e..bba112628b47 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -367,6 +367,7 @@ struct radeon_crtc { u32 line_time; u32 wm_low; u32 wm_high; + u32 lb_vblank_lead_lines; struct drm_display_mode hw_mode; enum radeon_output_csc output_csc; }; @@ -553,6 +554,7 @@ struct radeon_connector { void *con_priv; bool dac_load_detect; bool detected_by_load; /* if the connection status was determined by load */ + bool detected_hpd_without_ddc; /* if an HPD signal was detected on DVI, but ddc probing failed */ uint16_t connector_object_id; struct radeon_hpd hpd; struct radeon_router router; @@ -686,6 +688,9 @@ struct atom_voltage_table struct atom_voltage_table_entry entries[MAX_VOLTAGE_ENTRIES]; }; +/* Driver internal use only flags of radeon_get_crtc_scanoutpos() */ +#define USE_REAL_VBLANKSTART (1 << 30) +#define GET_DISTANCE_TO_VBLANKSTART (1 << 31) extern void radeon_add_atom_connector(struct drm_device *dev, diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index d3024883b844..84d45633d28c 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -221,11 +221,17 @@ int radeon_bo_create(struct radeon_device *rdev, if (!(rdev->flags & RADEON_IS_PCIE)) bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC); + /* Write-combined CPU mappings of GTT cause GPU hangs with RV6xx + * See https://bugs.freedesktop.org/show_bug.cgi?id=91268 + */ + if (rdev->family >= CHIP_RV610 && rdev->family <= CHIP_RV635) + bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC); + #ifdef CONFIG_X86_32 /* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit * See https://bugs.freedesktop.org/show_bug.cgi?id=84627 */ - bo->flags &= ~RADEON_GEM_GTT_WC; + bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC); #elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT) /* Don't try to enable write-combining when it can't work, or things * may be slow @@ -235,9 +241,10 @@ int radeon_bo_create(struct radeon_device *rdev, #warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \ thanks to write-combining - DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for " - "better performance thanks to write-combining\n"); - bo->flags &= ~RADEON_GEM_GTT_WC; + if (bo->flags & RADEON_GEM_GTT_WC) + DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for " + "better performance thanks to write-combining\n"); + bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC); #endif radeon_ttm_placement_from_domain(bo, domain); diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 6d80dde23400..59abebd6b5dc 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -1542,8 +1542,7 @@ int radeon_pm_late_init(struct radeon_device *rdev) ret = device_create_file(rdev->dev, &dev_attr_power_method); if (ret) DRM_ERROR("failed to create device file for power method\n"); - if (!ret) - rdev->pm.sysfs_initialized = true; + rdev->pm.sysfs_initialized = true; } mutex_lock(&rdev->pm.mutex); @@ -1757,7 +1756,9 @@ static bool radeon_pm_in_vbl(struct radeon_device *rdev) */ for (crtc = 0; (crtc < 
rdev->num_crtc) && in_vbl; crtc++) { if (rdev->pm.active_crtcs & (1 << crtc)) { - vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, 0, + vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, + crtc, + USE_REAL_VBLANKSTART, &vpos, &hpos, NULL, NULL, &rdev->mode_info.crtcs[crtc]->base.hwmode); if ((vbl_status & DRM_SCANOUTPOS_VALID) && diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 97a904835759..6244f4e44e9a 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c @@ -813,7 +813,7 @@ int rs600_irq_process(struct radeon_device *rdev) status = rs600_irq_ack(rdev); } if (queue_hotplug) - schedule_work(&rdev->hotplug_work); + schedule_delayed_work(&rdev->hotplug_work, 0); if (queue_hdmi) schedule_work(&rdev->audio_work); if (rdev->msi_enabled) { diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index 516ca27cfa12..6bc44c24e837 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c @@ -207,6 +207,9 @@ void rs690_line_buffer_adjust(struct radeon_device *rdev, { u32 tmp; + /* Guess line buffer size to be 8192 pixels */ + u32 lb_size = 8192; + /* * Line Buffer Setup * There is a single line buffer shared by both display controllers. @@ -243,6 +246,13 @@ void rs690_line_buffer_adjust(struct radeon_device *rdev, tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q; } WREG32(R_006520_DC_LB_MEMORY_SPLIT, tmp); + + /* Save number of lines the linebuffer leads before the scanout */ + if (mode1) + rdev->mode_info.crtcs[0]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode1->crtc_hdisplay); + + if (mode2) + rdev->mode_info.crtcs[1]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode2->crtc_hdisplay); } struct rs690_watermark { diff --git a/drivers/gpu/drm/radeon/rv730_dpm.c b/drivers/gpu/drm/radeon/rv730_dpm.c index 3f5e1cf138ba..d37ba2cb886e 100644 --- a/drivers/gpu/drm/radeon/rv730_dpm.c +++ b/drivers/gpu/drm/radeon/rv730_dpm.c @@ -464,7 +464,7 @@ void rv730_stop_dpm(struct radeon_device *rdev) result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_TwoLevelsDisabled); if (result != PPSMC_Result_OK) - DRM_ERROR("Could not force DPM to low\n"); + DRM_DEBUG("Could not force DPM to low\n"); WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN); diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c index b9c770745a7a..e830c8935db0 100644 --- a/drivers/gpu/drm/radeon/rv770_dpm.c +++ b/drivers/gpu/drm/radeon/rv770_dpm.c @@ -193,7 +193,7 @@ void rv770_stop_dpm(struct radeon_device *rdev) result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_TwoLevelsDisabled); if (result != PPSMC_Result_OK) - DRM_ERROR("Could not force DPM to low.\n"); + DRM_DEBUG("Could not force DPM to low.\n"); WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN); @@ -1418,7 +1418,7 @@ int rv770_resume_smc(struct radeon_device *rdev) int rv770_set_sw_state(struct radeon_device *rdev) { if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_SwitchToSwState) != PPSMC_Result_OK) - return -EINVAL; + DRM_DEBUG("rv770_set_sw_state failed\n"); return 0; } diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 07037e32dea3..f878d6962da5 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -2376,6 +2376,9 @@ static void dce6_program_watermarks(struct radeon_device *rdev, c.full = dfixed_div(c, a); priority_b_mark = dfixed_trunc(c); priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK; + + /* Save number of lines the linebuffer leads before the scanout */ + radeon_crtc->lb_vblank_lead_lines = 
DIV_ROUND_UP(lb_size, mode->crtc_hdisplay); } /* select wm A */ @@ -6848,7 +6851,7 @@ restart_ih: if (queue_dp) schedule_work(&rdev->dp_work); if (queue_hotplug) - schedule_work(&rdev->hotplug_work); + schedule_delayed_work(&rdev->hotplug_work, 0); if (queue_thermal && rdev->pm.dpm_enabled) schedule_work(&rdev->pm.dpm.thermal.work); rdev->ih.rptr = rptr; diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index e72bf46042e0..a82b891ae1fe 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c @@ -2927,7 +2927,7 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = { { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 }, { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 }, { PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 }, - { PCI_VENDOR_ID_ATI, 0x6811, 0x1762, 0x2015, 0, 120000 }, + { PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 }, { PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 }, { 0, 0, 0, 0 }, }; diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c index 8caea0a33dd8..d908321b94ce 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c @@ -67,6 +67,7 @@ static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj, * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap(). */ vma->vm_flags &= ~VM_PFNMAP; + vma->vm_pgoff = 0; ret = dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr, obj->size, &rk_obj->dma_attrs); diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index 5d8ae5e49c44..03c47eeadc81 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c @@ -374,6 +374,7 @@ static const struct of_device_id vop_driver_dt_match[] = { .data = &rk3288_vop }, {}, }; +MODULE_DEVICE_TABLE(of, vop_driver_dt_match); static inline void vop_writel(struct vop *vop, uint32_t offset, uint32_t v) { @@ -959,8 +960,8 @@ static int vop_update_plane_event(struct drm_plane *plane, val = (dest.y2 - dest.y1 - 1) << 16; val |= (dest.x2 - dest.x1 - 1) & 0xffff; VOP_WIN_SET(vop, win, dsp_info, val); - val = (dsp_sty - 1) << 16; - val |= (dsp_stx - 1) & 0xffff; + val = dsp_sty << 16; + val |= dsp_stx & 0xffff; VOP_WIN_SET(vop, win, dsp_st, val); VOP_WIN_SET(vop, win, rb_swap, rb_swap); @@ -1289,7 +1290,7 @@ static void vop_win_state_complete(struct vop_win *vop_win, if (state->event) { spin_lock_irqsave(&drm->event_lock, flags); - drm_send_vblank_event(drm, -1, state->event); + drm_crtc_send_vblank_event(crtc, state->event); spin_unlock_irqrestore(&drm->event_lock, flags); } @@ -1575,32 +1576,25 @@ static int vop_initial(struct vop *vop) return PTR_ERR(vop->dclk); } - ret = clk_prepare(vop->hclk); - if (ret < 0) { - dev_err(vop->dev, "failed to prepare hclk\n"); - return ret; - } - ret = clk_prepare(vop->dclk); if (ret < 0) { dev_err(vop->dev, "failed to prepare dclk\n"); - goto err_unprepare_hclk; + return ret; } - ret = clk_prepare(vop->aclk); + /* Enable both the hclk and aclk to setup the vop */ + ret = clk_prepare_enable(vop->hclk); if (ret < 0) { - dev_err(vop->dev, "failed to prepare aclk\n"); + dev_err(vop->dev, "failed to prepare/enable hclk\n"); goto err_unprepare_dclk; } - /* - * enable hclk, so that we can config vop register. 
- */ - ret = clk_enable(vop->hclk); + ret = clk_prepare_enable(vop->aclk); if (ret < 0) { - dev_err(vop->dev, "failed to prepare aclk\n"); - goto err_unprepare_aclk; + dev_err(vop->dev, "failed to prepare/enable aclk\n"); + goto err_disable_hclk; } + /* * do hclk_reset, reset all vop registers. */ @@ -1608,7 +1602,7 @@ static int vop_initial(struct vop *vop) if (IS_ERR(ahb_rst)) { dev_err(vop->dev, "failed to get ahb reset\n"); ret = PTR_ERR(ahb_rst); - goto err_disable_hclk; + goto err_disable_aclk; } reset_control_assert(ahb_rst); usleep_range(10, 20); @@ -1634,26 +1628,25 @@ static int vop_initial(struct vop *vop) if (IS_ERR(vop->dclk_rst)) { dev_err(vop->dev, "failed to get dclk reset\n"); ret = PTR_ERR(vop->dclk_rst); - goto err_unprepare_aclk; + goto err_disable_aclk; } reset_control_assert(vop->dclk_rst); usleep_range(10, 20); reset_control_deassert(vop->dclk_rst); clk_disable(vop->hclk); + clk_disable(vop->aclk); vop->is_enabled = false; return 0; +err_disable_aclk: + clk_disable_unprepare(vop->aclk); err_disable_hclk: - clk_disable(vop->hclk); -err_unprepare_aclk: - clk_unprepare(vop->aclk); + clk_disable_unprepare(vop->hclk); err_unprepare_dclk: clk_unprepare(vop->dclk); -err_unprepare_hclk: - clk_unprepare(vop->hclk); return ret; } diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c index 7a9f4768591e..265064c62d49 100644 --- a/drivers/gpu/drm/vc4/vc4_crtc.c +++ b/drivers/gpu/drm/vc4/vc4_crtc.c @@ -168,7 +168,7 @@ static int vc4_get_clock_select(struct drm_crtc *crtc) struct drm_connector *connector; drm_for_each_connector(connector, crtc->dev) { - if (connector && connector->state->crtc == crtc) { + if (connector->state->crtc == crtc) { struct drm_encoder *encoder = connector->encoder; struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder); @@ -401,7 +401,8 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc, dlist_next++; HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel), - (u32 *)vc4_crtc->dlist - (u32 *)vc4->hvs->dlist); + (u32 __iomem *)vc4_crtc->dlist - + (u32 __iomem *)vc4->hvs->dlist); /* Make the next display list start after ours. */ vc4_crtc->dlist_size -= (dlist_next - vc4_crtc->dlist); @@ -591,14 +592,14 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data) * that will take too much. */ primary_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_PRIMARY); - if (!primary_plane) { + if (IS_ERR(primary_plane)) { dev_err(dev, "failed to construct primary plane\n"); ret = PTR_ERR(primary_plane); goto err; } cursor_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_CURSOR); - if (!cursor_plane) { + if (IS_ERR(cursor_plane)) { dev_err(dev, "failed to construct cursor plane\n"); ret = PTR_ERR(cursor_plane); goto err_primary; diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c index 6e730605edcc..d5db9e0f3b73 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.c +++ b/drivers/gpu/drm/vc4/vc4_drv.c @@ -259,7 +259,6 @@ static struct platform_driver vc4_platform_driver = { .remove = vc4_platform_drm_remove, .driver = { .name = "vc4-drm", - .owner = THIS_MODULE, .of_match_table = vc4_of_match, }, }; diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c index ab1673f672a4..8098c5b21ba4 100644 --- a/drivers/gpu/drm/vc4/vc4_hvs.c +++ b/drivers/gpu/drm/vc4/vc4_hvs.c @@ -75,10 +75,10 @@ void vc4_hvs_dump_state(struct drm_device *dev) for (i = 0; i < 64; i += 4) { DRM_INFO("0x%08x (%s): 0x%08x 0x%08x 0x%08x 0x%08x\n", i * 4, i < HVS_BOOTLOADER_DLIST_END ? 
"B" : "D", - ((uint32_t *)vc4->hvs->dlist)[i + 0], - ((uint32_t *)vc4->hvs->dlist)[i + 1], - ((uint32_t *)vc4->hvs->dlist)[i + 2], - ((uint32_t *)vc4->hvs->dlist)[i + 3]); + readl((u32 __iomem *)vc4->hvs->dlist + i + 0), + readl((u32 __iomem *)vc4->hvs->dlist + i + 1), + readl((u32 __iomem *)vc4->hvs->dlist + i + 2), + readl((u32 __iomem *)vc4->hvs->dlist + i + 3)); } } diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c index cdd8b10c0147..887f3caad0be 100644 --- a/drivers/gpu/drm/vc4/vc4_plane.c +++ b/drivers/gpu/drm/vc4/vc4_plane.c @@ -70,7 +70,7 @@ static bool plane_enabled(struct drm_plane_state *state) return state->fb && state->crtc; } -struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane) +static struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane) { struct vc4_plane_state *vc4_state; @@ -97,8 +97,8 @@ struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane) return &vc4_state->base; } -void vc4_plane_destroy_state(struct drm_plane *plane, - struct drm_plane_state *state) +static void vc4_plane_destroy_state(struct drm_plane *plane, + struct drm_plane_state *state) { struct vc4_plane_state *vc4_state = to_vc4_plane_state(state); @@ -108,7 +108,7 @@ void vc4_plane_destroy_state(struct drm_plane *plane, } /* Called during init to allocate the plane's atomic state. */ -void vc4_plane_reset(struct drm_plane *plane) +static void vc4_plane_reset(struct drm_plane *plane) { struct vc4_plane_state *vc4_state; @@ -157,6 +157,16 @@ static int vc4_plane_mode_set(struct drm_plane *plane, int crtc_w = state->crtc_w; int crtc_h = state->crtc_h; + if (state->crtc_w << 16 != state->src_w || + state->crtc_h << 16 != state->src_h) { + /* We don't support scaling yet, which involves + * allocating the LBM memory for scaling temporary + * storage, and putting filter kernels in the HVS + * context. 
+ */ + return -EINVAL; + } + if (crtc_x < 0) { offset += drm_format_plane_cpp(fb->pixel_format, 0) * -crtc_x; crtc_w += crtc_x; diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c index f545913a56c7..578fe0a9324c 100644 --- a/drivers/gpu/drm/virtio/virtgpu_display.c +++ b/drivers/gpu/drm/virtio/virtgpu_display.c @@ -412,7 +412,7 @@ static const struct drm_connector_funcs virtio_gpu_connector_funcs = { .save = virtio_gpu_conn_save, .restore = virtio_gpu_conn_restore, .detect = virtio_gpu_conn_detect, - .fill_modes = drm_helper_probe_single_connector_modes, + .fill_modes = drm_helper_probe_single_connector_modes_nomerge, .destroy = virtio_gpu_conn_destroy, .reset = drm_atomic_helper_connector_reset, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c index ba47b30d28fa..f2e13eb8339f 100644 --- a/drivers/gpu/ipu-v3/ipu-common.c +++ b/drivers/gpu/ipu-v3/ipu-common.c @@ -28,6 +28,7 @@ #include <linux/irqchip/chained_irq.h> #include <linux/irqdomain.h> #include <linux/of_device.h> +#include <linux/of_graph.h> #include <drm/drm_fourcc.h> @@ -993,12 +994,26 @@ static void platform_device_unregister_children(struct platform_device *pdev) struct ipu_platform_reg { struct ipu_client_platformdata pdata; const char *name; - int reg_offset; }; +/* These must be in the order of the corresponding device tree port nodes */ static const struct ipu_platform_reg client_reg[] = { { .pdata = { + .csi = 0, + .dma[0] = IPUV3_CHANNEL_CSI0, + .dma[1] = -EINVAL, + }, + .name = "imx-ipuv3-camera", + }, { + .pdata = { + .csi = 1, + .dma[0] = IPUV3_CHANNEL_CSI1, + .dma[1] = -EINVAL, + }, + .name = "imx-ipuv3-camera", + }, { + .pdata = { .di = 0, .dc = 5, .dp = IPU_DP_FLOW_SYNC_BG, @@ -1015,22 +1030,6 @@ static const struct ipu_platform_reg client_reg[] = { .dma[1] = -EINVAL, }, .name = "imx-ipuv3-crtc", - }, { - .pdata = { - .csi = 0, - .dma[0] = IPUV3_CHANNEL_CSI0, - .dma[1] = -EINVAL, - }, - .reg_offset = IPU_CM_CSI0_REG_OFS, - .name = "imx-ipuv3-camera", - }, { - .pdata = { - .csi = 1, - .dma[0] = IPUV3_CHANNEL_CSI1, - .dma[1] = -EINVAL, - }, - .reg_offset = IPU_CM_CSI1_REG_OFS, - .name = "imx-ipuv3-camera", }, }; @@ -1051,22 +1050,30 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base) for (i = 0; i < ARRAY_SIZE(client_reg); i++) { const struct ipu_platform_reg *reg = &client_reg[i]; struct platform_device *pdev; - struct resource res; - - if (reg->reg_offset) { - memset(&res, 0, sizeof(res)); - res.flags = IORESOURCE_MEM; - res.start = ipu_base + ipu->devtype->cm_ofs + reg->reg_offset; - res.end = res.start + PAGE_SIZE - 1; - pdev = platform_device_register_resndata(dev, reg->name, - id++, &res, 1, ®->pdata, sizeof(reg->pdata)); - } else { - pdev = platform_device_register_data(dev, reg->name, - id++, ®->pdata, sizeof(reg->pdata)); + + pdev = platform_device_alloc(reg->name, id++); + if (!pdev) { + ret = -ENOMEM; + goto err_register; + } + + pdev->dev.parent = dev; + + /* Associate subdevice with the corresponding port node */ + pdev->dev.of_node = of_graph_get_port_by_id(dev->of_node, i); + if (!pdev->dev.of_node) { + dev_err(dev, "missing port@%d node in %s\n", i, + dev->of_node->full_name); + ret = -ENODEV; + goto err_register; } - if (IS_ERR(pdev)) { - ret = PTR_ERR(pdev); + ret = platform_device_add_data(pdev, ®->pdata, + sizeof(reg->pdata)); + if (!ret) + ret = platform_device_add(pdev); + if (ret) { + platform_device_put(pdev); goto 
err_register; } } diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index ac1feea51be3..9024a3de4032 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -609,6 +609,7 @@ #define USB_DEVICE_ID_LOGITECH_HARMONY_FIRST 0xc110 #define USB_DEVICE_ID_LOGITECH_HARMONY_LAST 0xc14f #define USB_DEVICE_ID_LOGITECH_HARMONY_PS3 0x0306 +#define USB_DEVICE_ID_LOGITECH_KEYBOARD_G710_PLUS 0xc24d #define USB_DEVICE_ID_LOGITECH_MOUSE_C01A 0xc01a #define USB_DEVICE_ID_LOGITECH_MOUSE_C05A 0xc05a #define USB_DEVICE_ID_LOGITECH_MOUSE_C06A 0xc06a diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c index c20ac76c0a8c..c690fae02cf8 100644 --- a/drivers/hid/hid-lg.c +++ b/drivers/hid/hid-lg.c @@ -665,8 +665,9 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id) struct lg_drv_data *drv_data; int ret; - /* Only work with the 1st interface (G29 presents multiple) */ - if (iface_num != 0) { + /* G29 only work with the 1st interface */ + if ((hdev->product == USB_DEVICE_ID_LOGITECH_G29_WHEEL) && + (iface_num != 0)) { dbg_hid("%s: ignoring ifnum %d\n", __func__, iface_num); return -ENODEV; } diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index 94bb137abe32..2324520b006d 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c @@ -84,6 +84,7 @@ static const struct hid_blacklist { { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A, HID_QUIRK_ALWAYS_POLL }, { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL }, { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077, HID_QUIRK_ALWAYS_POLL }, + { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_KEYBOARD_G710_PLUS, HID_QUIRK_NOGET }, { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C01A, HID_QUIRK_ALWAYS_POLL }, { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C05A, HID_QUIRK_ALWAYS_POLL }, { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C06A, HID_QUIRK_ALWAYS_POLL }, diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index 8b29949507d1..01a4f05c1642 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -2481,7 +2481,7 @@ void wacom_setup_device_quirks(struct wacom *wacom) if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) { if (features->touch_max) features->device_type |= WACOM_DEVICETYPE_TOUCH; - if (features->type >= INTUOSHT || features->type <= BAMBOO_PT) + if (features->type >= INTUOSHT && features->type <= BAMBOO_PT) features->device_type |= WACOM_DEVICETYPE_PAD; features->x_max = 4096; @@ -3213,7 +3213,8 @@ static const struct wacom_features wacom_features_0x32F = WACOM_DTU_OFFSET, WACOM_DTU_OFFSET }; static const struct wacom_features wacom_features_0x336 = { "Wacom DTU1141", 23472, 13203, 1023, 0, - DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4 }; + DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4, + WACOM_DTU_OFFSET, WACOM_DTU_OFFSET }; static const struct wacom_features wacom_features_0x57 = { "Wacom DTK2241", 95640, 54060, 2047, 63, DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 6, diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 842b0043ad94..8f59f057cdf4 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -324,6 +324,7 @@ config SENSORS_APPLESMC config SENSORS_ARM_SCPI tristate "ARM SCPI Sensors" depends on ARM_SCPI_PROTOCOL + depends on THERMAL || !THERMAL_OF help This driver provides support for temperature, voltage, current and power sensors available on ARM Ltd's SCP based platforms. 
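The wacom_wac.c change above fixes a classic range-test bug: with ||, the expression type >= INTUOSHT || type <= BAMBOO_PT is true for every type (any value fails at most one of the two comparisons), so the PAD capability was advertised unconditionally; the && form tests the intended closed interval. A toy demonstration with invented enum values:

#include <assert.h>

/* Hypothetical values; only their relative order matters here. */
enum { INTUOSHT = 10, BAMBOO_PT = 15 };

static int in_range_buggy(int type)
{
	return type >= INTUOSHT || type <= BAMBOO_PT; /* always true */
}

static int in_range_fixed(int type)
{
	return type >= INTUOSHT && type <= BAMBOO_PT;
}

int main(void)
{
	assert(in_range_buggy(99));   /* out-of-range type still matched */
	assert(!in_range_fixed(99));  /* correct interval check */
	assert(in_range_fixed(12));
	return 0;
}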
The @@ -1471,6 +1472,7 @@ config SENSORS_INA209 config SENSORS_INA2XX tristate "Texas Instruments INA219 and compatibles" depends on I2C + select REGMAP_I2C help If you say yes here you get support for INA219, INA220, INA226, INA230, and INA231 power monitor chips. diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c index 1f5e956941b1..0af7fd311979 100644 --- a/drivers/hwmon/applesmc.c +++ b/drivers/hwmon/applesmc.c @@ -537,7 +537,7 @@ static int applesmc_init_index(struct applesmc_registers *s) static int applesmc_init_smcreg_try(void) { struct applesmc_registers *s = &smcreg; - bool left_light_sensor, right_light_sensor; + bool left_light_sensor = 0, right_light_sensor = 0; unsigned int count; u8 tmp[1]; int ret; diff --git a/drivers/hwmon/scpi-hwmon.c b/drivers/hwmon/scpi-hwmon.c index 2c1241bbf9af..7e20567bc369 100644 --- a/drivers/hwmon/scpi-hwmon.c +++ b/drivers/hwmon/scpi-hwmon.c @@ -117,7 +117,7 @@ static int scpi_hwmon_probe(struct platform_device *pdev) struct scpi_ops *scpi_ops; struct device *hwdev, *dev = &pdev->dev; struct scpi_sensors *scpi_sensors; - int ret; + int ret, idx; scpi_ops = get_scpi_ops(); if (!scpi_ops) @@ -146,8 +146,8 @@ static int scpi_hwmon_probe(struct platform_device *pdev) scpi_sensors->scpi_ops = scpi_ops; - for (i = 0; i < nr_sensors; i++) { - struct sensor_data *sensor = &scpi_sensors->data[i]; + for (i = 0, idx = 0; i < nr_sensors; i++) { + struct sensor_data *sensor = &scpi_sensors->data[idx]; ret = scpi_ops->sensor_get_info(i, &sensor->info); if (ret) @@ -183,7 +183,7 @@ static int scpi_hwmon_probe(struct platform_device *pdev) num_power++; break; default: - break; + continue; } sensor->dev_attr_input.attr.mode = S_IRUGO; @@ -194,11 +194,12 @@ static int scpi_hwmon_probe(struct platform_device *pdev) sensor->dev_attr_label.show = scpi_show_label; sensor->dev_attr_label.attr.name = sensor->label; - scpi_sensors->attrs[i << 1] = &sensor->dev_attr_input.attr; - scpi_sensors->attrs[(i << 1) + 1] = &sensor->dev_attr_label.attr; + scpi_sensors->attrs[idx << 1] = &sensor->dev_attr_input.attr; + scpi_sensors->attrs[(idx << 1) + 1] = &sensor->dev_attr_label.attr; - sysfs_attr_init(scpi_sensors->attrs[i << 1]); - sysfs_attr_init(scpi_sensors->attrs[(i << 1) + 1]); + sysfs_attr_init(scpi_sensors->attrs[idx << 1]); + sysfs_attr_init(scpi_sensors->attrs[(idx << 1) + 1]); + idx++; } scpi_sensors->group.attrs = scpi_sensors->attrs; @@ -236,8 +237,8 @@ static int scpi_hwmon_probe(struct platform_device *pdev) zone->sensor_id = i; zone->scpi_sensors = scpi_sensors; - zone->tzd = thermal_zone_of_sensor_register(dev, i, zone, - &scpi_sensor_ops); + zone->tzd = thermal_zone_of_sensor_register(dev, + sensor->info.sensor_id, zone, &scpi_sensor_ops); /* * The call to thermal_zone_of_sensor_register returns * an error for sensors that are not associated with diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index e24c2b680b47..7b0aa82ea38b 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -126,6 +126,7 @@ config I2C_I801 Sunrise Point-LP (PCH) DNV (SOC) Broxton (SOC) + Lewisburg (PCH) This driver can also be built as a module. If so, the module will be called i2c-i801. 
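The i2c-i801 hunks that follow show how little is needed to support a new PCH generation in this driver: a device-ID #define plus an entry in the pci_device_id table. A trimmed sketch of that pattern, using the Lewisburg IDs from the diff (the real i801_ids[] lists every supported controller):

#include <linux/module.h>
#include <linux/pci.h>

#define PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS		0xa1a3
#define PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS	0xa223

/* Trimmed ID table for illustration; new platforms are appended the
 * same way and picked up by the PCI core at probe time. */
static const struct pci_device_id example_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, example_ids);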
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index c306751ceadb..f62d69799a9c 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -62,6 +62,8 @@ * Sunrise Point-LP (PCH) 0x9d23 32 hard yes yes yes * DNV (SOC) 0x19df 32 hard yes yes yes * Broxton (SOC) 0x5ad4 32 hard yes yes yes + * Lewisburg (PCH) 0xa1a3 32 hard yes yes yes + * Lewisburg Supersku (PCH) 0xa223 32 hard yes yes yes * * Features supported by this driver: * Software PEC no @@ -206,6 +208,8 @@ #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS 0x9d23 #define PCI_DEVICE_ID_INTEL_DNV_SMBUS 0x19df #define PCI_DEVICE_ID_INTEL_BROXTON_SMBUS 0x5ad4 +#define PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS 0xa1a3 +#define PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS 0xa223 struct i801_mux_config { char *gpio_chip; @@ -869,6 +873,8 @@ static const struct pci_device_id i801_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DNV_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BROXTON_SMBUS) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS) }, { 0, } }; diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index 1e4d99da4164..9bb0b056b25f 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c @@ -50,6 +50,7 @@ #include <linux/of_device.h> #include <linux/of_dma.h> #include <linux/of_gpio.h> +#include <linux/pinctrl/consumer.h> #include <linux/platform_data/i2c-imx.h> #include <linux/platform_device.h> #include <linux/sched.h> diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c index e23a7b068c60..0b20449e48cf 100644 --- a/drivers/i2c/busses/i2c-xiic.c +++ b/drivers/i2c/busses/i2c-xiic.c @@ -662,8 +662,10 @@ static void __xiic_start_xfer(struct xiic_i2c *i2c) static void xiic_start_xfer(struct xiic_i2c *i2c) { - + spin_lock(&i2c->lock); + xiic_reinit(i2c); __xiic_start_xfer(i2c); + spin_unlock(&i2c->lock); } static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 040af5cc8143..ba8eb087f224 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -715,7 +715,7 @@ static int i2c_device_probe(struct device *dev) if (wakeirq > 0 && wakeirq != client->irq) status = dev_pm_set_dedicated_wake_irq(dev, wakeirq); else if (client->irq > 0) - status = dev_pm_set_wake_irq(dev, wakeirq); + status = dev_pm_set_wake_irq(dev, client->irq); else status = 0; diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c index eea0c79111e7..4d960d3b93c0 100644 --- a/drivers/iio/adc/ad7793.c +++ b/drivers/iio/adc/ad7793.c @@ -101,7 +101,7 @@ #define AD7795_CH_AIN1M_AIN1M 8 /* AIN1(-) - AIN1(-) */ /* ID Register Bit Designations (AD7793_REG_ID) */ -#define AD7785_ID 0xB +#define AD7785_ID 0x3 #define AD7792_ID 0xA #define AD7793_ID 0xB #define AD7794_ID 0xF diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c index 599cde3d03a1..b10f629cc44b 100644 --- a/drivers/iio/adc/vf610_adc.c +++ b/drivers/iio/adc/vf610_adc.c @@ -106,6 +106,13 @@ #define DEFAULT_SAMPLE_TIME 1000 +/* V at 25°C of 696 mV */ +#define VF610_VTEMP25_3V0 950 +/* V at 25°C of 699 mV */ +#define VF610_VTEMP25_3V3 867 +/* Typical sensor slope coefficient at all temperatures */ +#define VF610_TEMP_SLOPE_COEFF 1840 + enum clk_sel { VF610_ADCIOC_BUSCLK_SET, 
VF610_ADCIOC_ALTCLK_SET, @@ -197,6 +204,8 @@ static inline void vf610_adc_calculate_rates(struct vf610_adc *info) adc_feature->clk_div = 8; } + adck_rate = ipg_rate / adc_feature->clk_div; + /* * Determine the long sample time adder value to be used based * on the default minimum sample time provided. @@ -221,7 +230,6 @@ static inline void vf610_adc_calculate_rates(struct vf610_adc *info) * BCT (Base Conversion Time): fixed to 25 ADCK cycles for 12 bit mode * LSTAdder(Long Sample Time): 3, 5, 7, 9, 13, 17, 21, 25 ADCK cycles */ - adck_rate = ipg_rate / info->adc_feature.clk_div; for (i = 0; i < ARRAY_SIZE(vf610_hw_avgs); i++) info->sample_freq_avail[i] = adck_rate / (6 + vf610_hw_avgs[i] * @@ -663,11 +671,13 @@ static int vf610_read_raw(struct iio_dev *indio_dev, break; case IIO_TEMP: /* - * Calculate in degree Celsius times 1000 - * Using sensor slope of 1.84 mV/°C and - * V at 25°C of 696 mV - */ - *val = 25000 - ((int)info->value - 864) * 1000000 / 1840; + * Calculate in degree Celsius times 1000 + * Using the typical sensor slope of 1.84 mV/°C + * and VREFH_ADC at 3.3V, V at 25°C of 699 mV + */ + *val = 25000 - ((int)info->value - VF610_VTEMP25_3V3) * + 1000000 / VF610_TEMP_SLOPE_COEFF; + break; default: mutex_unlock(&indio_dev->mlock); diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c index 0370624a35db..02e636a1c49a 100644 --- a/drivers/iio/adc/xilinx-xadc-core.c +++ b/drivers/iio/adc/xilinx-xadc-core.c @@ -841,6 +841,7 @@ static int xadc_read_raw(struct iio_dev *indio_dev, case XADC_REG_VCCINT: case XADC_REG_VCCAUX: case XADC_REG_VREFP: + case XADC_REG_VREFN: case XADC_REG_VCCBRAM: case XADC_REG_VCCPINT: case XADC_REG_VCCPAUX: diff --git a/drivers/iio/dac/ad5064.c b/drivers/iio/dac/ad5064.c index 9e4d2c18b554..81ca0081a019 100644 --- a/drivers/iio/dac/ad5064.c +++ b/drivers/iio/dac/ad5064.c @@ -113,12 +113,16 @@ enum ad5064_type { ID_AD5065, ID_AD5628_1, ID_AD5628_2, + ID_AD5629_1, + ID_AD5629_2, ID_AD5648_1, ID_AD5648_2, ID_AD5666_1, ID_AD5666_2, ID_AD5668_1, ID_AD5668_2, + ID_AD5669_1, + ID_AD5669_2, }; static int ad5064_write(struct ad5064_state *st, unsigned int cmd, @@ -291,7 +295,7 @@ static const struct iio_chan_spec_ext_info ad5064_ext_info[] = { { }, }; -#define AD5064_CHANNEL(chan, addr, bits) { \ +#define AD5064_CHANNEL(chan, addr, bits, _shift) { \ .type = IIO_VOLTAGE, \ .indexed = 1, \ .output = 1, \ @@ -303,36 +307,39 @@ static const struct iio_chan_spec_ext_info ad5064_ext_info[] = { .sign = 'u', \ .realbits = (bits), \ .storagebits = 16, \ - .shift = 20 - bits, \ + .shift = (_shift), \ }, \ .ext_info = ad5064_ext_info, \ } -#define DECLARE_AD5064_CHANNELS(name, bits) \ +#define DECLARE_AD5064_CHANNELS(name, bits, shift) \ const struct iio_chan_spec name[] = { \ - AD5064_CHANNEL(0, 0, bits), \ - AD5064_CHANNEL(1, 1, bits), \ - AD5064_CHANNEL(2, 2, bits), \ - AD5064_CHANNEL(3, 3, bits), \ - AD5064_CHANNEL(4, 4, bits), \ - AD5064_CHANNEL(5, 5, bits), \ - AD5064_CHANNEL(6, 6, bits), \ - AD5064_CHANNEL(7, 7, bits), \ + AD5064_CHANNEL(0, 0, bits, shift), \ + AD5064_CHANNEL(1, 1, bits, shift), \ + AD5064_CHANNEL(2, 2, bits, shift), \ + AD5064_CHANNEL(3, 3, bits, shift), \ + AD5064_CHANNEL(4, 4, bits, shift), \ + AD5064_CHANNEL(5, 5, bits, shift), \ + AD5064_CHANNEL(6, 6, bits, shift), \ + AD5064_CHANNEL(7, 7, bits, shift), \ } -#define DECLARE_AD5065_CHANNELS(name, bits) \ +#define DECLARE_AD5065_CHANNELS(name, bits, shift) \ const struct iio_chan_spec name[] = { \ - AD5064_CHANNEL(0, 0, bits), \ - AD5064_CHANNEL(1, 3, bits), \ + 
AD5064_CHANNEL(0, 0, bits, shift), \ + AD5064_CHANNEL(1, 3, bits, shift), \ } -static DECLARE_AD5064_CHANNELS(ad5024_channels, 12); -static DECLARE_AD5064_CHANNELS(ad5044_channels, 14); -static DECLARE_AD5064_CHANNELS(ad5064_channels, 16); +static DECLARE_AD5064_CHANNELS(ad5024_channels, 12, 8); +static DECLARE_AD5064_CHANNELS(ad5044_channels, 14, 6); +static DECLARE_AD5064_CHANNELS(ad5064_channels, 16, 4); -static DECLARE_AD5065_CHANNELS(ad5025_channels, 12); -static DECLARE_AD5065_CHANNELS(ad5045_channels, 14); -static DECLARE_AD5065_CHANNELS(ad5065_channels, 16); +static DECLARE_AD5065_CHANNELS(ad5025_channels, 12, 8); +static DECLARE_AD5065_CHANNELS(ad5045_channels, 14, 6); +static DECLARE_AD5065_CHANNELS(ad5065_channels, 16, 4); + +static DECLARE_AD5064_CHANNELS(ad5629_channels, 12, 4); +static DECLARE_AD5064_CHANNELS(ad5669_channels, 16, 0); static const struct ad5064_chip_info ad5064_chip_info_tbl[] = { [ID_AD5024] = { @@ -382,6 +389,18 @@ static const struct ad5064_chip_info ad5064_chip_info_tbl[] = { .channels = ad5024_channels, .num_channels = 8, }, + [ID_AD5629_1] = { + .shared_vref = true, + .internal_vref = 2500000, + .channels = ad5629_channels, + .num_channels = 8, + }, + [ID_AD5629_2] = { + .shared_vref = true, + .internal_vref = 5000000, + .channels = ad5629_channels, + .num_channels = 8, + }, [ID_AD5648_1] = { .shared_vref = true, .internal_vref = 2500000, @@ -418,6 +437,18 @@ static const struct ad5064_chip_info ad5064_chip_info_tbl[] = { .channels = ad5064_channels, .num_channels = 8, }, + [ID_AD5669_1] = { + .shared_vref = true, + .internal_vref = 2500000, + .channels = ad5669_channels, + .num_channels = 8, + }, + [ID_AD5669_2] = { + .shared_vref = true, + .internal_vref = 5000000, + .channels = ad5669_channels, + .num_channels = 8, + }, }; static inline unsigned int ad5064_num_vref(struct ad5064_state *st) @@ -597,10 +628,16 @@ static int ad5064_i2c_write(struct ad5064_state *st, unsigned int cmd, unsigned int addr, unsigned int val) { struct i2c_client *i2c = to_i2c_client(st->dev); + int ret; st->data.i2c[0] = (cmd << 4) | addr; put_unaligned_be16(val, &st->data.i2c[1]); - return i2c_master_send(i2c, st->data.i2c, 3); + + ret = i2c_master_send(i2c, st->data.i2c, 3); + if (ret < 0) + return ret; + + return 0; } static int ad5064_i2c_probe(struct i2c_client *i2c, @@ -616,12 +653,12 @@ static int ad5064_i2c_remove(struct i2c_client *i2c) } static const struct i2c_device_id ad5064_i2c_ids[] = { - {"ad5629-1", ID_AD5628_1}, - {"ad5629-2", ID_AD5628_2}, - {"ad5629-3", ID_AD5628_2}, /* similar enough to ad5629-2 */ - {"ad5669-1", ID_AD5668_1}, - {"ad5669-2", ID_AD5668_2}, - {"ad5669-3", ID_AD5668_2}, /* similar enough to ad5669-2 */ + {"ad5629-1", ID_AD5629_1}, + {"ad5629-2", ID_AD5629_2}, + {"ad5629-3", ID_AD5629_2}, /* similar enough to ad5629-2 */ + {"ad5669-1", ID_AD5669_1}, + {"ad5669-2", ID_AD5669_2}, + {"ad5669-3", ID_AD5669_2}, /* similar enough to ad5669-2 */ {} }; MODULE_DEVICE_TABLE(i2c, ad5064_i2c_ids); diff --git a/drivers/iio/humidity/si7020.c b/drivers/iio/humidity/si7020.c index 12128d1ca570..71991b5c0658 100644 --- a/drivers/iio/humidity/si7020.c +++ b/drivers/iio/humidity/si7020.c @@ -50,10 +50,10 @@ static int si7020_read_raw(struct iio_dev *indio_dev, switch (mask) { case IIO_CHAN_INFO_RAW: - ret = i2c_smbus_read_word_data(*client, - chan->type == IIO_TEMP ? - SI7020CMD_TEMP_HOLD : - SI7020CMD_RH_HOLD); + ret = i2c_smbus_read_word_swapped(*client, + chan->type == IIO_TEMP ? 
+ SI7020CMD_TEMP_HOLD : + SI7020CMD_RH_HOLD); if (ret < 0) return ret; *val = ret >> 2; diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c index cbe198cb3699..471ee36b9c6e 100644 --- a/drivers/iommu/s390-iommu.c +++ b/drivers/iommu/s390-iommu.c @@ -216,6 +216,7 @@ static int s390_iommu_update_trans(struct s390_domain *s390_domain, u8 *page_addr = (u8 *) (pa & PAGE_MASK); dma_addr_t start_dma_addr = dma_addr; unsigned long irq_flags, nr_pages, i; + unsigned long *entry; int rc = 0; if (dma_addr < s390_domain->domain.geometry.aperture_start || @@ -228,8 +229,12 @@ static int s390_iommu_update_trans(struct s390_domain *s390_domain, spin_lock_irqsave(&s390_domain->dma_table_lock, irq_flags); for (i = 0; i < nr_pages; i++) { - dma_update_cpu_trans(s390_domain->dma_table, page_addr, - dma_addr, flags); + entry = dma_walk_cpu_trans(s390_domain->dma_table, dma_addr); + if (!entry) { + rc = -ENOMEM; + goto undo_cpu_trans; + } + dma_update_cpu_trans(entry, page_addr, flags); page_addr += PAGE_SIZE; dma_addr += PAGE_SIZE; } @@ -242,6 +247,20 @@ static int s390_iommu_update_trans(struct s390_domain *s390_domain, break; } spin_unlock(&s390_domain->list_lock); + +undo_cpu_trans: + if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)) { + flags = ZPCI_PTE_INVALID; + while (i-- > 0) { + page_addr -= PAGE_SIZE; + dma_addr -= PAGE_SIZE; + entry = dma_walk_cpu_trans(s390_domain->dma_table, + dma_addr); + if (!entry) + break; + dma_update_cpu_trans(entry, page_addr, flags); + } + } spin_unlock_irqrestore(&s390_domain->dma_table_lock, irq_flags); return rc; diff --git a/drivers/irqchip/irq-gic-common.c b/drivers/irqchip/irq-gic-common.c index 44a077f3a4a2..f174ce0ca361 100644 --- a/drivers/irqchip/irq-gic-common.c +++ b/drivers/irqchip/irq-gic-common.c @@ -84,12 +84,15 @@ void __init gic_dist_config(void __iomem *base, int gic_irqs, writel_relaxed(GICD_INT_DEF_PRI_X4, base + GIC_DIST_PRI + i); /* - * Disable all interrupts. Leave the PPI and SGIs alone - * as they are enabled by redistributor registers. + * Deactivate and disable all SPIs. Leave the PPI and SGIs + * alone as they are in the redistributor registers on GICv3. */ - for (i = 32; i < gic_irqs; i += 32) + for (i = 32; i < gic_irqs; i += 32) { writel_relaxed(GICD_INT_EN_CLR_X32, - base + GIC_DIST_ENABLE_CLEAR + i / 8); + base + GIC_DIST_ACTIVE_CLEAR + i / 8); + writel_relaxed(GICD_INT_EN_CLR_X32, + base + GIC_DIST_ENABLE_CLEAR + i / 8); + } if (sync_access) sync_access(); @@ -102,7 +105,9 @@ void gic_cpu_config(void __iomem *base, void (*sync_access)(void)) /* * Deal with the banked PPI and SGI interrupts - disable all * PPI interrupts, ensure all SGI interrupts are enabled. + * Make sure everything is deactivated. 
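The s390-iommu change above makes partial failure safe: if dma_walk_cpu_trans() fails after some pages were already mapped, the new undo_cpu_trans path walks back over exactly those pages and invalidates them. The general shape, with hypothetical map/unmap callbacks standing in for the real dma_*_cpu_trans helpers:

#include <errno.h>
#include <stddef.h>

#define EX_PAGE_SIZE 4096UL

/* Map nr_pages, rolling back the already-mapped prefix on failure.
 * map_one()/unmap_one() are invented stand-ins, not kernel APIs. */
static int map_range(void *tbl, unsigned long dma_addr, size_t nr_pages,
		     int (*map_one)(void *, unsigned long),
		     void (*unmap_one)(void *, unsigned long))
{
	size_t i;

	for (i = 0; i < nr_pages; i++) {
		if (map_one(tbl, dma_addr + i * EX_PAGE_SIZE) < 0)
			goto undo;
	}
	return 0;

undo:
	while (i-- > 0)	/* reverse order, only the pages we mapped */
		unmap_one(tbl, dma_addr + i * EX_PAGE_SIZE);
	return -ENOMEM;
}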
*/ + writel_relaxed(GICD_INT_EN_CLR_X32, base + GIC_DIST_ACTIVE_CLEAR); writel_relaxed(GICD_INT_EN_CLR_PPI, base + GIC_DIST_ENABLE_CLEAR); writel_relaxed(GICD_INT_EN_SET_SGI, base + GIC_DIST_ENABLE_SET); diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index 515c823c1c95..abf2ffaed392 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -73,9 +73,11 @@ struct gic_chip_data { union gic_base cpu_base; #ifdef CONFIG_CPU_PM u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)]; + u32 saved_spi_active[DIV_ROUND_UP(1020, 32)]; u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)]; u32 saved_spi_target[DIV_ROUND_UP(1020, 4)]; u32 __percpu *saved_ppi_enable; + u32 __percpu *saved_ppi_active; u32 __percpu *saved_ppi_conf; #endif struct irq_domain *domain; @@ -566,6 +568,10 @@ static void gic_dist_save(unsigned int gic_nr) for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) gic_data[gic_nr].saved_spi_enable[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4); + + for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) + gic_data[gic_nr].saved_spi_active[i] = + readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4); } /* @@ -604,9 +610,19 @@ static void gic_dist_restore(unsigned int gic_nr) writel_relaxed(gic_data[gic_nr].saved_spi_target[i], dist_base + GIC_DIST_TARGET + i * 4); - for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) + for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) { + writel_relaxed(GICD_INT_EN_CLR_X32, + dist_base + GIC_DIST_ENABLE_CLEAR + i * 4); writel_relaxed(gic_data[gic_nr].saved_spi_enable[i], dist_base + GIC_DIST_ENABLE_SET + i * 4); + } + + for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) { + writel_relaxed(GICD_INT_EN_CLR_X32, + dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4); + writel_relaxed(gic_data[gic_nr].saved_spi_active[i], + dist_base + GIC_DIST_ACTIVE_SET + i * 4); + } writel_relaxed(GICD_ENABLE, dist_base + GIC_DIST_CTRL); } @@ -631,6 +647,10 @@ static void gic_cpu_save(unsigned int gic_nr) for (i = 0; i < DIV_ROUND_UP(32, 32); i++) ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4); + ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_active); + for (i = 0; i < DIV_ROUND_UP(32, 32); i++) + ptr[i] = readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4); + ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf); for (i = 0; i < DIV_ROUND_UP(32, 16); i++) ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4); @@ -654,8 +674,18 @@ static void gic_cpu_restore(unsigned int gic_nr) return; ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_enable); - for (i = 0; i < DIV_ROUND_UP(32, 32); i++) + for (i = 0; i < DIV_ROUND_UP(32, 32); i++) { + writel_relaxed(GICD_INT_EN_CLR_X32, + dist_base + GIC_DIST_ENABLE_CLEAR + i * 4); writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4); + } + + ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_active); + for (i = 0; i < DIV_ROUND_UP(32, 32); i++) { + writel_relaxed(GICD_INT_EN_CLR_X32, + dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4); + writel_relaxed(ptr[i], dist_base + GIC_DIST_ACTIVE_SET + i * 4); + } ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf); for (i = 0; i < DIV_ROUND_UP(32, 16); i++) @@ -710,6 +740,10 @@ static void __init gic_pm_init(struct gic_chip_data *gic) sizeof(u32)); BUG_ON(!gic->saved_ppi_enable); + gic->saved_ppi_active = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4, + sizeof(u32)); + BUG_ON(!gic->saved_ppi_active); + gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4, sizeof(u32)); BUG_ON(!gic->saved_ppi_conf); diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c index 
b33f53b3ca93..bf04d2a3cf4a 100644 --- a/drivers/isdn/hisax/config.c +++ b/drivers/isdn/hisax/config.c @@ -1896,7 +1896,7 @@ static void EChannel_proc_rcv(struct hisax_d_if *d_if) ptr--; *ptr++ = '\n'; *ptr = 0; - HiSax_putstatus(cs, NULL, "%s", cs->dlog); + HiSax_putstatus(cs, NULL, cs->dlog); } else HiSax_putstatus(cs, "LogEcho: ", "warning Frame too big (%d)", diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c index 4a4825528188..90449e1e91e5 100644 --- a/drivers/isdn/hisax/hfc_pci.c +++ b/drivers/isdn/hisax/hfc_pci.c @@ -901,7 +901,7 @@ Begin: ptr--; *ptr++ = '\n'; *ptr = 0; - HiSax_putstatus(cs, NULL, "%s", cs->dlog); + HiSax_putstatus(cs, NULL, cs->dlog); } else HiSax_putstatus(cs, "LogEcho: ", "warning Frame too big (%d)", total - 3); } diff --git a/drivers/isdn/hisax/hfc_sx.c b/drivers/isdn/hisax/hfc_sx.c index b1fad81f0722..13b2151c10f5 100644 --- a/drivers/isdn/hisax/hfc_sx.c +++ b/drivers/isdn/hisax/hfc_sx.c @@ -674,7 +674,7 @@ receive_emsg(struct IsdnCardState *cs) ptr--; *ptr++ = '\n'; *ptr = 0; - HiSax_putstatus(cs, NULL, "%s", cs->dlog); + HiSax_putstatus(cs, NULL, cs->dlog); } else HiSax_putstatus(cs, "LogEcho: ", "warning Frame too big (%d)", skb->len); } diff --git a/drivers/isdn/hisax/q931.c b/drivers/isdn/hisax/q931.c index b420f8bd862e..ba4beb25d872 100644 --- a/drivers/isdn/hisax/q931.c +++ b/drivers/isdn/hisax/q931.c @@ -1179,7 +1179,7 @@ LogFrame(struct IsdnCardState *cs, u_char *buf, int size) dp--; *dp++ = '\n'; *dp = 0; - HiSax_putstatus(cs, NULL, "%s", cs->dlog); + HiSax_putstatus(cs, NULL, cs->dlog); } else HiSax_putstatus(cs, "LogFrame: ", "warning Frame too big (%d)", size); } @@ -1246,7 +1246,7 @@ dlogframe(struct IsdnCardState *cs, struct sk_buff *skb, int dir) } if (finish) { *dp = 0; - HiSax_putstatus(cs, NULL, "%s", cs->dlog); + HiSax_putstatus(cs, NULL, cs->dlog); return; } if ((0xfe & buf[0]) == PROTO_DIS_N0) { /* 1TR6 */ @@ -1509,5 +1509,5 @@ dlogframe(struct IsdnCardState *cs, struct sk_buff *skb, int dir) dp += sprintf(dp, "Unknown protocol %x!", buf[0]); } *dp = 0; - HiSax_putstatus(cs, NULL, "%s", cs->dlog); + HiSax_putstatus(cs, NULL, cs->dlog); } diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c index f659e605a406..86ce887b2ed6 100644 --- a/drivers/lightnvm/core.c +++ b/drivers/lightnvm/core.c @@ -123,6 +123,26 @@ void nvm_unregister_mgr(struct nvmm_type *mt) } EXPORT_SYMBOL(nvm_unregister_mgr); +/* register with device with a supported manager */ +static int register_mgr(struct nvm_dev *dev) +{ + struct nvmm_type *mt; + int ret = 0; + + list_for_each_entry(mt, &nvm_mgrs, list) { + ret = mt->register_mgr(dev); + if (ret > 0) { + dev->mt = mt; + break; /* successfully initialized */ + } + } + + if (!ret) + pr_info("nvm: no compatible nvm manager found.\n"); + + return ret; +} + static struct nvm_dev *nvm_find_nvm_dev(const char *name) { struct nvm_dev *dev; @@ -160,11 +180,6 @@ int nvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk) } EXPORT_SYMBOL(nvm_erase_blk); -static void nvm_core_free(struct nvm_dev *dev) -{ - kfree(dev); -} - static int nvm_core_init(struct nvm_dev *dev) { struct nvm_id *id = &dev->identity; @@ -179,12 +194,21 @@ static int nvm_core_init(struct nvm_dev *dev) dev->sec_size = grp->csecs; dev->oob_size = grp->sos; dev->sec_per_pg = grp->fpg_sz / grp->csecs; - dev->addr_mode = id->ppat; - dev->addr_format = id->ppaf; + memcpy(&dev->ppaf, &id->ppaf, sizeof(struct nvm_addr_format)); dev->plane_mode = NVM_PLANE_SINGLE; dev->max_rq_size = dev->ops->max_phys_sect * dev->sec_size; + if 
(grp->mtype != 0) { + pr_err("nvm: memory type not supported\n"); + return -EINVAL; + } + + if (grp->fmtype != 0 && grp->fmtype != 1) { + pr_err("nvm: flash type not supported\n"); + return -EINVAL; + } + if (grp->mpos & 0x020202) dev->plane_mode = NVM_PLANE_DOUBLE; if (grp->mpos & 0x040404) @@ -213,21 +237,17 @@ static void nvm_free(struct nvm_dev *dev) if (dev->mt) dev->mt->unregister_mgr(dev); - - nvm_core_free(dev); } static int nvm_init(struct nvm_dev *dev) { - struct nvmm_type *mt; - int ret = 0; + int ret = -EINVAL; if (!dev->q || !dev->ops) - return -EINVAL; + return ret; if (dev->ops->identity(dev->q, &dev->identity)) { pr_err("nvm: device could not be identified\n"); - ret = -EINVAL; goto err; } @@ -251,21 +271,13 @@ static int nvm_init(struct nvm_dev *dev) goto err; } - /* register with device with a supported manager */ - list_for_each_entry(mt, &nvm_mgrs, list) { - ret = mt->register_mgr(dev); - if (ret < 0) - goto err; /* initialization failed */ - if (ret > 0) { - dev->mt = mt; - break; /* successfully initialized */ - } - } - - if (!ret) { - pr_info("nvm: no compatible manager found.\n"); + down_write(&nvm_lock); + ret = register_mgr(dev); + up_write(&nvm_lock); + if (ret < 0) + goto err; + if (!ret) return 0; - } pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n", dev->name, dev->sec_per_pg, dev->nr_planes, @@ -273,7 +285,6 @@ static int nvm_init(struct nvm_dev *dev) dev->nr_chnls); return 0; err: - nvm_free(dev); pr_err("nvm: failed to initialize nvm\n"); return ret; } @@ -308,22 +319,26 @@ int nvm_register(struct request_queue *q, char *disk_name, if (ret) goto err_init; - down_write(&nvm_lock); - list_add(&dev->devices, &nvm_devices); - up_write(&nvm_lock); + if (dev->ops->max_phys_sect > 256) { + pr_info("nvm: max sectors supported is 256.\n"); + ret = -EINVAL; + goto err_init; + } if (dev->ops->max_phys_sect > 1) { dev->ppalist_pool = dev->ops->create_dma_pool(dev->q, "ppalist"); if (!dev->ppalist_pool) { pr_err("nvm: could not create ppa pool\n"); - return -ENOMEM; + ret = -ENOMEM; + goto err_init; } - } else if (dev->ops->max_phys_sect > 256) { - pr_info("nvm: max sectors supported is 256.\n"); - return -EINVAL; } + down_write(&nvm_lock); + list_add(&dev->devices, &nvm_devices); + up_write(&nvm_lock); + return 0; err_init: kfree(dev); @@ -333,19 +348,22 @@ EXPORT_SYMBOL(nvm_register); void nvm_unregister(char *disk_name) { - struct nvm_dev *dev = nvm_find_nvm_dev(disk_name); + struct nvm_dev *dev; + down_write(&nvm_lock); + dev = nvm_find_nvm_dev(disk_name); if (!dev) { pr_err("nvm: could not find device %s to unregister\n", disk_name); + up_write(&nvm_lock); return; } - nvm_exit(dev); - - down_write(&nvm_lock); list_del(&dev->devices); up_write(&nvm_lock); + + nvm_exit(dev); + kfree(dev); } EXPORT_SYMBOL(nvm_unregister); @@ -358,38 +376,30 @@ static int nvm_create_target(struct nvm_dev *dev, { struct nvm_ioctl_create_simple *s = &create->conf.s; struct request_queue *tqueue; - struct nvmm_type *mt; struct gendisk *tdisk; struct nvm_tgt_type *tt; struct nvm_target *t; void *targetdata; int ret = 0; + down_write(&nvm_lock); if (!dev->mt) { - /* register with device with a supported NVM manager */ - list_for_each_entry(mt, &nvm_mgrs, list) { - ret = mt->register_mgr(dev); - if (ret < 0) - return ret; /* initialization failed */ - if (ret > 0) { - dev->mt = mt; - break; /* successfully initialized */ - } - } - - if (!ret) { - pr_info("nvm: no compatible nvm manager found.\n"); - return -ENODEV; + ret = register_mgr(dev); + if (!ret) + ret = -ENODEV; + if (ret < 0) { + 
up_write(&nvm_lock); + return ret; } } tt = nvm_find_target_type(create->tgttype); if (!tt) { pr_err("nvm: target type %s not found\n", create->tgttype); + up_write(&nvm_lock); return -EINVAL; } - down_write(&nvm_lock); list_for_each_entry(t, &dev->online_targets, list) { if (!strcmp(create->tgtname, t->disk->disk_name)) { pr_err("nvm: target name already exists.\n"); @@ -457,11 +467,11 @@ static void nvm_remove_target(struct nvm_target *t) lockdep_assert_held(&nvm_lock); del_gendisk(tdisk); + blk_cleanup_queue(q); + if (tt->exit) tt->exit(tdisk->private_data); - blk_cleanup_queue(q); - put_disk(tdisk); list_del(&t->list); @@ -473,7 +483,9 @@ static int __nvm_configure_create(struct nvm_ioctl_create *create) struct nvm_dev *dev; struct nvm_ioctl_create_simple *s; + down_write(&nvm_lock); dev = nvm_find_nvm_dev(create->dev); + up_write(&nvm_lock); if (!dev) { pr_err("nvm: device not found\n"); return -EINVAL; @@ -532,7 +544,9 @@ static int nvm_configure_show(const char *val) return -EINVAL; } + down_write(&nvm_lock); dev = nvm_find_nvm_dev(devname); + up_write(&nvm_lock); if (!dev) { pr_err("nvm: device not found\n"); return -EINVAL; @@ -541,7 +555,7 @@ static int nvm_configure_show(const char *val) if (!dev->mt) return 0; - dev->mt->free_blocks_print(dev); + dev->mt->lun_info_print(dev); return 0; } @@ -677,8 +691,10 @@ static long nvm_ioctl_info(struct file *file, void __user *arg) info->tgtsize = tgt_iter; up_write(&nvm_lock); - if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) + if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) { + kfree(info); return -EFAULT; + } kfree(info); return 0; @@ -721,8 +737,11 @@ static long nvm_ioctl_get_devices(struct file *file, void __user *arg) devices->nr_devices = i; - if (copy_to_user(arg, devices, sizeof(struct nvm_ioctl_get_devices))) + if (copy_to_user(arg, devices, + sizeof(struct nvm_ioctl_get_devices))) { + kfree(devices); return -EFAULT; + } kfree(devices); return 0; diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c index ae1fb2bdc5f4..35dde84b71e9 100644 --- a/drivers/lightnvm/gennvm.c +++ b/drivers/lightnvm/gennvm.c @@ -60,23 +60,27 @@ static int gennvm_luns_init(struct nvm_dev *dev, struct gen_nvm *gn) lun->vlun.lun_id = i % dev->luns_per_chnl; lun->vlun.chnl_id = i / dev->luns_per_chnl; lun->vlun.nr_free_blocks = dev->blks_per_lun; + lun->vlun.nr_inuse_blocks = 0; + lun->vlun.nr_bad_blocks = 0; } return 0; } -static int gennvm_block_bb(u32 lun_id, void *bb_bitmap, unsigned int nr_blocks, +static int gennvm_block_bb(struct ppa_addr ppa, int nr_blocks, u8 *blks, void *private) { struct gen_nvm *gn = private; - struct gen_lun *lun = &gn->luns[lun_id]; + struct nvm_dev *dev = gn->dev; + struct gen_lun *lun; struct nvm_block *blk; int i; - if (unlikely(bitmap_empty(bb_bitmap, nr_blocks))) - return 0; + lun = &gn->luns[(dev->nr_luns * ppa.g.ch) + ppa.g.lun]; + + for (i = 0; i < nr_blocks; i++) { + if (blks[i] == 0) + continue; - i = -1; - while ((i = find_next_bit(bb_bitmap, nr_blocks, i + 1)) < nr_blocks) { blk = &lun->vlun.blocks[i]; if (!blk) { pr_err("gennvm: BB data is out of bounds.\n"); @@ -84,6 +88,7 @@ static int gennvm_block_bb(u32 lun_id, void *bb_bitmap, unsigned int nr_blocks, } list_move_tail(&blk->list, &lun->bb_list); + lun->vlun.nr_bad_blocks++; } return 0; @@ -136,6 +141,7 @@ static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private) list_move_tail(&blk->list, &lun->used_list); blk->type = 1; lun->vlun.nr_free_blocks--; + lun->vlun.nr_inuse_blocks++; } } @@ -164,15 +170,25 
@@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn) block->id = cur_block_id++; /* First block is reserved for device */ - if (unlikely(lun_iter == 0 && blk_iter == 0)) + if (unlikely(lun_iter == 0 && blk_iter == 0)) { + lun->vlun.nr_free_blocks--; continue; + } list_add_tail(&block->list, &lun->free_list); } if (dev->ops->get_bb_tbl) { - ret = dev->ops->get_bb_tbl(dev->q, lun->vlun.id, - dev->blks_per_lun, gennvm_block_bb, gn); + struct ppa_addr ppa; + + ppa.ppa = 0; + ppa.g.ch = lun->vlun.chnl_id; + ppa.g.lun = lun->vlun.id; + ppa = generic_to_dev_addr(dev, ppa); + + ret = dev->ops->get_bb_tbl(dev, ppa, + dev->blks_per_lun, + gennvm_block_bb, gn); if (ret) pr_err("gennvm: could not read BB table\n"); } @@ -190,6 +206,14 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn) return 0; } +static void gennvm_free(struct nvm_dev *dev) +{ + gennvm_blocks_free(dev); + gennvm_luns_free(dev); + kfree(dev->mp); + dev->mp = NULL; +} + static int gennvm_register(struct nvm_dev *dev) { struct gen_nvm *gn; @@ -199,6 +223,7 @@ static int gennvm_register(struct nvm_dev *dev) if (!gn) return -ENOMEM; + gn->dev = dev; gn->nr_luns = dev->nr_luns; dev->mp = gn; @@ -216,16 +241,13 @@ static int gennvm_register(struct nvm_dev *dev) return 1; err: - kfree(gn); + gennvm_free(dev); return ret; } static void gennvm_unregister(struct nvm_dev *dev) { - gennvm_blocks_free(dev); - gennvm_luns_free(dev); - kfree(dev->mp); - dev->mp = NULL; + gennvm_free(dev); } static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev, @@ -254,6 +276,7 @@ static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev, blk->type = 1; lun->vlun.nr_free_blocks--; + lun->vlun.nr_inuse_blocks++; spin_unlock(&vlun->lock); out: @@ -271,16 +294,21 @@ static void gennvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk) case 1: list_move_tail(&blk->list, &lun->free_list); lun->vlun.nr_free_blocks++; + lun->vlun.nr_inuse_blocks--; blk->type = 0; break; case 2: list_move_tail(&blk->list, &lun->bb_list); + lun->vlun.nr_bad_blocks++; + lun->vlun.nr_inuse_blocks--; break; default: WARN_ON_ONCE(1); pr_err("gennvm: erroneous block type (%lu -> %u)\n", blk->id, blk->type); list_move_tail(&blk->list, &lun->bb_list); + lun->vlun.nr_bad_blocks++; + lun->vlun.nr_inuse_blocks--; } spin_unlock(&vlun->lock); @@ -292,10 +320,10 @@ static void gennvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd) if (rqd->nr_pages > 1) { for (i = 0; i < rqd->nr_pages; i++) - rqd->ppa_list[i] = addr_to_generic_mode(dev, + rqd->ppa_list[i] = dev_to_generic_addr(dev, rqd->ppa_list[i]); } else { - rqd->ppa_addr = addr_to_generic_mode(dev, rqd->ppa_addr); + rqd->ppa_addr = dev_to_generic_addr(dev, rqd->ppa_addr); } } @@ -305,10 +333,10 @@ static void gennvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd) if (rqd->nr_pages > 1) { for (i = 0; i < rqd->nr_pages; i++) - rqd->ppa_list[i] = generic_to_addr_mode(dev, + rqd->ppa_list[i] = generic_to_dev_addr(dev, rqd->ppa_list[i]); } else { - rqd->ppa_addr = generic_to_addr_mode(dev, rqd->ppa_addr); + rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr); } } @@ -354,10 +382,10 @@ static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd) { int i; - if (!dev->ops->set_bb) + if (!dev->ops->set_bb_tbl) return; - if (dev->ops->set_bb(dev->q, rqd, 1)) + if (dev->ops->set_bb_tbl(dev->q, rqd, 1)) return; gennvm_addr_to_generic_mode(dev, rqd); @@ -440,15 +468,24 @@ static struct nvm_lun *gennvm_get_lun(struct nvm_dev *dev, int lunid) return 
&gn->luns[lunid].vlun; } -static void gennvm_free_blocks_print(struct nvm_dev *dev) +static void gennvm_lun_info_print(struct nvm_dev *dev) { struct gen_nvm *gn = dev->mp; struct gen_lun *lun; unsigned int i; - gennvm_for_each_lun(gn, lun, i) - pr_info("%s: lun%8u\t%u\n", - dev->name, i, lun->vlun.nr_free_blocks); + + gennvm_for_each_lun(gn, lun, i) { + spin_lock(&lun->vlun.lock); + + pr_info("%s: lun%8u\t%u\t%u\t%u\n", + dev->name, i, + lun->vlun.nr_free_blocks, + lun->vlun.nr_inuse_blocks, + lun->vlun.nr_bad_blocks); + + spin_unlock(&lun->vlun.lock); + } } static struct nvmm_type gennvm = { @@ -466,7 +503,7 @@ static struct nvmm_type gennvm = { .erase_blk = gennvm_erase_blk, .get_lun = gennvm_get_lun, - .free_blocks_print = gennvm_free_blocks_print, + .lun_info_print = gennvm_lun_info_print, }; static int __init gennvm_module_init(void) diff --git a/drivers/lightnvm/gennvm.h b/drivers/lightnvm/gennvm.h index d23bd3501ddc..9c24b5b32dac 100644 --- a/drivers/lightnvm/gennvm.h +++ b/drivers/lightnvm/gennvm.h @@ -35,6 +35,8 @@ struct gen_lun { }; struct gen_nvm { + struct nvm_dev *dev; + int nr_luns; struct gen_lun *luns; }; diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c index 7ba64c87ba1c..75e59c3a3f96 100644 --- a/drivers/lightnvm/rrpc.c +++ b/drivers/lightnvm/rrpc.c @@ -123,12 +123,42 @@ static u64 block_to_addr(struct rrpc *rrpc, struct rrpc_block *rblk) return blk->id * rrpc->dev->pgs_per_blk; } +static struct ppa_addr linear_to_generic_addr(struct nvm_dev *dev, + struct ppa_addr r) +{ + struct ppa_addr l; + int secs, pgs, blks, luns; + sector_t ppa = r.ppa; + + l.ppa = 0; + + div_u64_rem(ppa, dev->sec_per_pg, &secs); + l.g.sec = secs; + + sector_div(ppa, dev->sec_per_pg); + div_u64_rem(ppa, dev->sec_per_blk, &pgs); + l.g.pg = pgs; + + sector_div(ppa, dev->pgs_per_blk); + div_u64_rem(ppa, dev->blks_per_lun, &blks); + l.g.blk = blks; + + sector_div(ppa, dev->blks_per_lun); + div_u64_rem(ppa, dev->luns_per_chnl, &luns); + l.g.lun = luns; + + sector_div(ppa, dev->luns_per_chnl); + l.g.ch = ppa; + + return l; +} + static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_dev *dev, u64 addr) { struct ppa_addr paddr; paddr.ppa = addr; - return __linear_to_generic_addr(dev, paddr); + return linear_to_generic_addr(dev, paddr); } /* requires lun->lock taken */ diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 917d47e290ae..3147c8d09ea8 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -112,7 +112,8 @@ struct iv_tcw_private { * and encrypts / decrypts at the same time. */ enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID, - DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD }; + DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD, + DM_CRYPT_EXIT_THREAD}; /* * The fields in here must be read only after initialization. 
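The linear_to_generic_addr() helper added above is a mixed-radix decomposition: the flat sector number is split into (channel, lun, block, page, sector) digits by repeated remainder-and-divide, least significant field first. A user-space sketch of the same arithmetic with invented geometry:

#include <stdio.h>

int main(void)
{
	/* Invented geometry, not any real device's. */
	const unsigned sec_per_pg = 4, pgs_per_blk = 256,
		       blks_per_lun = 1024, luns_per_chnl = 8;
	unsigned long long ppa = 123456;
	unsigned sec, pg, blk, lun, ch;

	sec = ppa % sec_per_pg;    ppa /= sec_per_pg;
	pg  = ppa % pgs_per_blk;   ppa /= pgs_per_blk;
	blk = ppa % blks_per_lun;  ppa /= blks_per_lun;
	lun = ppa % luns_per_chnl; ppa /= luns_per_chnl;
	ch  = (unsigned)ppa;       /* most significant digit last */

	printf("ch=%u lun=%u blk=%u pg=%u sec=%u\n", ch, lun, blk, pg, sec);
	return 0;
}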
@@ -1203,20 +1204,18 @@ continue_locked: if (!RB_EMPTY_ROOT(&cc->write_tree)) goto pop_from_list; + if (unlikely(test_bit(DM_CRYPT_EXIT_THREAD, &cc->flags))) { + spin_unlock_irq(&cc->write_thread_wait.lock); + break; + } + __set_current_state(TASK_INTERRUPTIBLE); __add_wait_queue(&cc->write_thread_wait, &wait); spin_unlock_irq(&cc->write_thread_wait.lock); - if (unlikely(kthread_should_stop())) { - set_task_state(current, TASK_RUNNING); - remove_wait_queue(&cc->write_thread_wait, &wait); - break; - } - schedule(); - set_task_state(current, TASK_RUNNING); spin_lock_irq(&cc->write_thread_wait.lock); __remove_wait_queue(&cc->write_thread_wait, &wait); goto continue_locked; @@ -1531,8 +1530,13 @@ static void crypt_dtr(struct dm_target *ti) if (!cc) return; - if (cc->write_thread) + if (cc->write_thread) { + spin_lock_irq(&cc->write_thread_wait.lock); + set_bit(DM_CRYPT_EXIT_THREAD, &cc->flags); + wake_up_locked(&cc->write_thread_wait); + spin_unlock_irq(&cc->write_thread_wait.lock); kthread_stop(cc->write_thread); + } if (cc->io_queue) destroy_workqueue(cc->io_queue); diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index aaa6caa46a9f..cfa29f574c2a 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -1537,32 +1537,34 @@ static int multipath_prepare_ioctl(struct dm_target *ti, struct block_device **bdev, fmode_t *mode) { struct multipath *m = ti->private; - struct pgpath *pgpath; unsigned long flags; int r; - r = 0; - spin_lock_irqsave(&m->lock, flags); if (!m->current_pgpath) __choose_pgpath(m, 0); - pgpath = m->current_pgpath; - - if (pgpath) { - *bdev = pgpath->path.dev->bdev; - *mode = pgpath->path.dev->mode; + if (m->current_pgpath) { + if (!m->queue_io) { + *bdev = m->current_pgpath->path.dev->bdev; + *mode = m->current_pgpath->path.dev->mode; + r = 0; + } else { + /* pg_init has not started or completed */ + r = -ENOTCONN; + } + } else { + /* No path is available */ + if (m->queue_if_no_path) + r = -ENOTCONN; + else + r = -EIO; } - if ((pgpath && m->queue_io) || (!pgpath && m->queue_if_no_path)) - r = -ENOTCONN; - else if (!*bdev) - r = -EIO; - spin_unlock_irqrestore(&m->lock, flags); - if (r == -ENOTCONN && !fatal_signal_pending(current)) { + if (r == -ENOTCONN) { spin_lock_irqsave(&m->lock, flags); if (!m->current_pg) { /* Path status changed, redo selection */ diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 3897b90bd462..63903a5a5d9e 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -2432,6 +2432,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode) case PM_WRITE: if (old_mode != new_mode) notify_of_pool_mode_change(pool, "write"); + pool->pf.error_if_no_space = pt->requested_pf.error_if_no_space; dm_pool_metadata_read_write(pool->pmd); pool->process_bio = process_bio; pool->process_discard = process_discard_bio; @@ -4249,10 +4250,9 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits) { struct thin_c *tc = ti->private; struct pool *pool = tc->pool; - struct queue_limits *pool_limits = dm_get_queue_limits(pool->pool_md); - if (!pool_limits->discard_granularity) - return; /* pool's discard support is disabled */ + if (!pool->pf.discard_enabled) + return; limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT; limits->max_discard_sectors = 2048 * 1024 * 16; /* 16G */ diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 6e15f3565892..5df40480228b 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -591,7 +591,7 @@ retry: out: dm_put_live_table(md, *srcu_idx); - if (r == 
-ENOTCONN) { + if (r == -ENOTCONN && !fatal_signal_pending(current)) { msleep(10); goto retry; } @@ -603,9 +603,10 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode, { struct mapped_device *md = bdev->bd_disk->private_data; struct dm_target *tgt; + struct block_device *tgt_bdev = NULL; int srcu_idx, r; - r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx); + r = dm_get_live_table_for_ioctl(md, &tgt, &tgt_bdev, &mode, &srcu_idx); if (r < 0) return r; @@ -620,7 +621,7 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode, goto out; } - r = __blkdev_driver_ioctl(bdev, mode, cmd, arg); + r = __blkdev_driver_ioctl(tgt_bdev, mode, cmd, arg); out: dm_put_live_table(md, srcu_idx); return r; diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c index 35759a91d47d..e8f847226a19 100644 --- a/drivers/media/pci/cx23885/cx23885-core.c +++ b/drivers/media/pci/cx23885/cx23885-core.c @@ -1992,9 +1992,9 @@ static int cx23885_initdev(struct pci_dev *pci_dev, (unsigned long long)pci_resource_start(pci_dev, 0)); pci_set_master(pci_dev); - if (!pci_set_dma_mask(pci_dev, 0xffffffff)) { + err = pci_set_dma_mask(pci_dev, 0xffffffff); + if (err) { printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name); - err = -EIO; goto fail_context; } diff --git a/drivers/media/pci/cx25821/cx25821-core.c b/drivers/media/pci/cx25821/cx25821-core.c index dbc695f32760..0042803a9de7 100644 --- a/drivers/media/pci/cx25821/cx25821-core.c +++ b/drivers/media/pci/cx25821/cx25821-core.c @@ -1319,7 +1319,8 @@ static int cx25821_initdev(struct pci_dev *pci_dev, dev->pci_lat, (unsigned long long)dev->base_io_addr); pci_set_master(pci_dev); - if (!pci_set_dma_mask(pci_dev, 0xffffffff)) { + err = pci_set_dma_mask(pci_dev, 0xffffffff); + if (err) { pr_err("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name); err = -EIO; goto fail_irq; diff --git a/drivers/media/pci/cx88/cx88-alsa.c b/drivers/media/pci/cx88/cx88-alsa.c index 0ed1b6530374..1b5268f9bb24 100644 --- a/drivers/media/pci/cx88/cx88-alsa.c +++ b/drivers/media/pci/cx88/cx88-alsa.c @@ -890,9 +890,9 @@ static int snd_cx88_create(struct snd_card *card, struct pci_dev *pci, return err; } - if (!pci_set_dma_mask(pci,DMA_BIT_MASK(32))) { + err = pci_set_dma_mask(pci,DMA_BIT_MASK(32)); + if (err) { dprintk(0, "%s/1: Oops: no 32bit PCI DMA ???\n",core->name); - err = -EIO; cx88_core_put(core, pci); return err; } diff --git a/drivers/media/pci/cx88/cx88-mpeg.c b/drivers/media/pci/cx88/cx88-mpeg.c index 9db7767d1fe0..f34c229f9b37 100644 --- a/drivers/media/pci/cx88/cx88-mpeg.c +++ b/drivers/media/pci/cx88/cx88-mpeg.c @@ -393,7 +393,8 @@ static int cx8802_init_common(struct cx8802_dev *dev) if (pci_enable_device(dev->pci)) return -EIO; pci_set_master(dev->pci); - if (!pci_set_dma_mask(dev->pci,DMA_BIT_MASK(32))) { + err = pci_set_dma_mask(dev->pci,DMA_BIT_MASK(32)); + if (err) { printk("%s/2: Oops: no 32bit PCI DMA ???\n",dev->core->name); return -EIO; } diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c index 0de1ad5a977d..aef9acf351f6 100644 --- a/drivers/media/pci/cx88/cx88-video.c +++ b/drivers/media/pci/cx88/cx88-video.c @@ -1314,9 +1314,9 @@ static int cx8800_initdev(struct pci_dev *pci_dev, dev->pci_lat,(unsigned long long)pci_resource_start(pci_dev,0)); pci_set_master(pci_dev); - if (!pci_set_dma_mask(pci_dev,DMA_BIT_MASK(32))) { + err = pci_set_dma_mask(pci_dev,DMA_BIT_MASK(32)); + if (err) { printk("%s/0: Oops: no 32bit PCI DMA ???\n",core->name); - err = -EIO; goto 
fail_core; } dev->alloc_ctx = vb2_dma_sg_init_ctx(&pci_dev->dev); diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c index 60b2d462f98d..3fdbd81b5580 100644 --- a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c +++ b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c @@ -810,7 +810,7 @@ static int netup_unidvb_initdev(struct pci_dev *pci_dev, "%s(): board vendor 0x%x, revision 0x%x\n", __func__, board_vendor, board_revision); pci_set_master(pci_dev); - if (!pci_set_dma_mask(pci_dev, 0xffffffff)) { + if (pci_set_dma_mask(pci_dev, 0xffffffff) < 0) { dev_err(&pci_dev->dev, "%s(): 32bit PCI DMA is not supported\n", __func__); goto pci_detect_err; diff --git a/drivers/media/pci/saa7134/saa7134-core.c b/drivers/media/pci/saa7134/saa7134-core.c index e79d63eb774e..f720cea80e28 100644 --- a/drivers/media/pci/saa7134/saa7134-core.c +++ b/drivers/media/pci/saa7134/saa7134-core.c @@ -951,9 +951,9 @@ static int saa7134_initdev(struct pci_dev *pci_dev, pci_name(pci_dev), dev->pci_rev, pci_dev->irq, dev->pci_lat,(unsigned long long)pci_resource_start(pci_dev,0)); pci_set_master(pci_dev); - if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32))) { + err = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32)); + if (err) { pr_warn("%s: Oops: no 32bit PCI DMA ???\n", dev->name); - err = -EIO; goto fail1; } diff --git a/drivers/media/pci/saa7164/saa7164-core.c b/drivers/media/pci/saa7164/saa7164-core.c index 8f36b48ef733..8bbd092fbe1d 100644 --- a/drivers/media/pci/saa7164/saa7164-core.c +++ b/drivers/media/pci/saa7164/saa7164-core.c @@ -1264,9 +1264,9 @@ static int saa7164_initdev(struct pci_dev *pci_dev, pci_set_master(pci_dev); /* TODO */ - if (!pci_set_dma_mask(pci_dev, 0xffffffff)) { + err = pci_set_dma_mask(pci_dev, 0xffffffff); + if (err) { printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name); - err = -EIO; goto fail_irq; } diff --git a/drivers/media/pci/tw68/tw68-core.c b/drivers/media/pci/tw68/tw68-core.c index 8c5655d351d3..4e77618fbb2b 100644 --- a/drivers/media/pci/tw68/tw68-core.c +++ b/drivers/media/pci/tw68/tw68-core.c @@ -257,9 +257,9 @@ static int tw68_initdev(struct pci_dev *pci_dev, dev->name, pci_name(pci_dev), dev->pci_rev, pci_dev->irq, dev->pci_lat, (u64)pci_resource_start(pci_dev, 0)); pci_set_master(pci_dev); - if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32))) { + err = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32)); + if (err) { pr_info("%s: Oops: no 32bit PCI DMA ???\n", dev->name); - err = -EIO; goto fail1; } diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index 23b6c8e8701c..d8486168415a 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c @@ -65,8 +65,7 @@ MODULE_ALIAS("mmc:block"); #define MMC_SANITIZE_REQ_TIMEOUT 240000 #define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16) -#define mmc_req_rel_wr(req) (((req->cmd_flags & REQ_FUA) || \ - (req->cmd_flags & REQ_META)) && \ +#define mmc_req_rel_wr(req) ((req->cmd_flags & REQ_FUA) && \ (rq_data_dir(req) == WRITE)) #define PACKED_CMD_VER 0x01 #define PACKED_CMD_WR 0x02 @@ -1467,13 +1466,9 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, /* * Reliable writes are used to implement Forced Unit Access and - * REQ_META accesses, and are supported only on MMCs. - * - * XXX: this really needs a good explanation of why REQ_META - * is treated special. + * are supported only on MMCs. 
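The media/pci hunks above all fix the same inverted test: pci_set_dma_mask() follows the usual kernel convention of returning 0 on success and a negative errno on failure, so "if (!pci_set_dma_mask(...))" took the error path precisely when the call succeeded. A toy illustration of the convention, with set_mask() as a stand-in:

#include <stdio.h>

/* Stand-in with the kernel convention: 0 = success, <0 = error. */
static int set_mask(unsigned long long mask)
{
	(void)mask;
	return 0;
}

int main(void)
{
	int err;

	if (!set_mask(0xffffffffULL))	/* buggy: true on success */
		printf("inverted check: error path taken on success\n");

	err = set_mask(0xffffffffULL);	/* fixed pattern */
	if (err)
		printf("no 32bit DMA: %d\n", err);
	else
		printf("32bit DMA mask set\n");
	return 0;
}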
*/ - bool do_rel_wr = ((req->cmd_flags & REQ_FUA) || - (req->cmd_flags & REQ_META)) && + bool do_rel_wr = (req->cmd_flags & REQ_FUA) && (rq_data_dir(req) == WRITE) && (md->flags & MMC_BLK_REL_WR); diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index c793fda27321..3a9a79ec4343 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -1040,9 +1040,24 @@ static int mmc_select_hs_ddr(struct mmc_card *card) return err; } +/* Caller must hold re-tuning */ +static int mmc_switch_status(struct mmc_card *card) +{ + u32 status; + int err; + + err = mmc_send_status(card, &status); + if (err) + return err; + + return mmc_switch_status_error(card->host, status); +} + static int mmc_select_hs400(struct mmc_card *card) { struct mmc_host *host = card->host; + bool send_status = true; + unsigned int max_dtr; int err = 0; u8 val; @@ -1053,25 +1068,36 @@ static int mmc_select_hs400(struct mmc_card *card) host->ios.bus_width == MMC_BUS_WIDTH_8)) return 0; - /* - * Before switching to dual data rate operation for HS400, - * it is required to convert from HS200 mode to HS mode. - */ - mmc_set_timing(card->host, MMC_TIMING_MMC_HS); - mmc_set_bus_speed(card); + if (host->caps & MMC_CAP_WAIT_WHILE_BUSY) + send_status = false; + /* Reduce frequency to HS frequency */ + max_dtr = card->ext_csd.hs_max_dtr; + mmc_set_clock(host, max_dtr); + + /* Switch card to HS mode */ val = EXT_CSD_TIMING_HS | card->drive_strength << EXT_CSD_DRV_STR_SHIFT; err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING, val, card->ext_csd.generic_cmd6_time, - true, true, true); + true, send_status, true); if (err) { pr_err("%s: switch to high-speed from hs200 failed, err:%d\n", mmc_hostname(host), err); return err; } + /* Set host controller to HS timing */ + mmc_set_timing(card->host, MMC_TIMING_MMC_HS); + + if (!send_status) { + err = mmc_switch_status(card); + if (err) + goto out_err; + } + + /* Switch card to DDR */ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH, EXT_CSD_DDR_BUS_WIDTH_8, @@ -1082,22 +1108,35 @@ static int mmc_select_hs400(struct mmc_card *card) return err; } + /* Switch card to HS400 */ val = EXT_CSD_TIMING_HS400 | card->drive_strength << EXT_CSD_DRV_STR_SHIFT; err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING, val, card->ext_csd.generic_cmd6_time, - true, true, true); + true, send_status, true); if (err) { pr_err("%s: switch to hs400 failed, err:%d\n", mmc_hostname(host), err); return err; } + /* Set host controller to HS400 timing and frequency */ mmc_set_timing(host, MMC_TIMING_MMC_HS400); mmc_set_bus_speed(card); + if (!send_status) { + err = mmc_switch_status(card); + if (err) + goto out_err; + } + return 0; + +out_err: + pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host), + __func__, err); + return err; } int mmc_hs200_to_hs400(struct mmc_card *card) @@ -1105,19 +1144,6 @@ int mmc_hs200_to_hs400(struct mmc_card *card) return mmc_select_hs400(card); } -/* Caller must hold re-tuning */ -static int mmc_switch_status(struct mmc_card *card) -{ - u32 status; - int err; - - err = mmc_send_status(card, &status); - if (err) - return err; - - return mmc_switch_status_error(card->host, status); -} - int mmc_hs400_to_hs200(struct mmc_card *card) { struct mmc_host *host = card->host; @@ -1219,6 +1245,8 @@ static void mmc_select_driver_type(struct mmc_card *card) static int mmc_select_hs200(struct mmc_card *card) { struct mmc_host *host = card->host; + bool send_status = true; + unsigned int old_timing; int err = -EINVAL; u8 val; @@ -1234,6 
+1262,9 @@ static int mmc_select_hs200(struct mmc_card *card) mmc_select_driver_type(card); + if (host->caps & MMC_CAP_WAIT_WHILE_BUSY) + send_status = false; + /* * Set the bus width(4 or 8) with host's support and * switch to HS200 mode if bus width is set successfully. @@ -1245,11 +1276,25 @@ static int mmc_select_hs200(struct mmc_card *card) err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING, val, card->ext_csd.generic_cmd6_time, - true, true, true); - if (!err) - mmc_set_timing(host, MMC_TIMING_MMC_HS200); + true, send_status, true); + if (err) + goto err; + old_timing = host->ios.timing; + mmc_set_timing(host, MMC_TIMING_MMC_HS200); + if (!send_status) { + err = mmc_switch_status(card); + /* + * mmc_select_timing() assumes timing has not changed if + * it is a switch error. + */ + if (err == -EBADMSG) + mmc_set_timing(host, old_timing); + } } err: + if (err) + pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host), + __func__, err); return err; } diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index af71de5fda3b..1dee533634c9 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -473,6 +473,7 @@ config MMC_DAVINCI config MMC_GOLDFISH tristate "goldfish qemu Multimedia Card Interface support" + depends on HAS_DMA depends on GOLDFISH || COMPILE_TEST help This selects the Goldfish Multimedia card Interface emulation diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c index 39568cc29a2a..33dfd7e72516 100644 --- a/drivers/mmc/host/mtk-sd.c +++ b/drivers/mmc/host/mtk-sd.c @@ -1276,7 +1276,7 @@ static struct msdc_delay_phase get_best_delay(struct msdc_host *host, u32 delay) int start = 0, len = 0; int start_final = 0, len_final = 0; u8 final_phase = 0xff; - struct msdc_delay_phase delay_phase; + struct msdc_delay_phase delay_phase = { 0, }; if (delay == 0) { dev_err(host->dev, "phase error: [map:%x]\n", delay); diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c index 8cadd74e8407..ce08896b9d69 100644 --- a/drivers/mmc/host/pxamci.c +++ b/drivers/mmc/host/pxamci.c @@ -805,7 +805,7 @@ static int pxamci_probe(struct platform_device *pdev) goto out; } else { mmc->caps |= host->pdata->gpio_card_ro_invert ? 
- MMC_CAP2_RO_ACTIVE_HIGH : 0; + 0 : MMC_CAP2_RO_ACTIVE_HIGH; } if (gpio_is_valid(gpio_cd)) diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c index dc4e8446f1ff..5a99a93ed025 100644 --- a/drivers/mtd/nand/jz4740_nand.c +++ b/drivers/mtd/nand/jz4740_nand.c @@ -25,6 +25,7 @@ #include <linux/gpio.h> +#include <asm/mach-jz4740/gpio.h> #include <asm/mach-jz4740/jz4740_nand.h> #define JZ_REG_NAND_CTRL 0x50 diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index cc74142938b0..ece544efccc3 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -3110,7 +3110,7 @@ static void nand_resume(struct mtd_info *mtd) */ static void nand_shutdown(struct mtd_info *mtd) { - nand_get_device(mtd, FL_SHUTDOWN); + nand_get_device(mtd, FL_PM_SUSPENDED); } /* Set default functions */ diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c index 57dadd52b428..1deb8ff90a89 100644 --- a/drivers/net/can/bfin_can.c +++ b/drivers/net/can/bfin_can.c @@ -501,8 +501,6 @@ static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status) cf->data[2] |= CAN_ERR_PROT_FORM; else if (status & SER) cf->data[2] |= CAN_ERR_PROT_STUFF; - else - cf->data[2] |= CAN_ERR_PROT_UNSPEC; } priv->can.state = state; diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c index 5d214d135332..f91b094288da 100644 --- a/drivers/net/can/c_can/c_can.c +++ b/drivers/net/can/c_can/c_can.c @@ -962,7 +962,6 @@ static int c_can_handle_bus_err(struct net_device *dev, * type of the last error to occur on the CAN bus */ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; - cf->data[2] |= CAN_ERR_PROT_UNSPEC; switch (lec_type) { case LEC_STUFF_ERROR: @@ -975,8 +974,7 @@ static int c_can_handle_bus_err(struct net_device *dev, break; case LEC_ACK_ERROR: netdev_dbg(dev, "ack error\n"); - cf->data[3] |= (CAN_ERR_PROT_LOC_ACK | - CAN_ERR_PROT_LOC_ACK_DEL); + cf->data[3] = CAN_ERR_PROT_LOC_ACK; break; case LEC_BIT1_ERROR: netdev_dbg(dev, "bit1 error\n"); @@ -988,8 +986,7 @@ static int c_can_handle_bus_err(struct net_device *dev, break; case LEC_CRC_ERROR: netdev_dbg(dev, "CRC error\n"); - cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ | - CAN_ERR_PROT_LOC_CRC_DEL); + cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; break; default: break; diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c index 70a8cbb29e75..1e37313054f3 100644 --- a/drivers/net/can/cc770/cc770.c +++ b/drivers/net/can/cc770/cc770.c @@ -578,7 +578,7 @@ static int cc770_err(struct net_device *dev, u8 status) cf->data[2] |= CAN_ERR_PROT_BIT0; break; case STAT_LEC_CRC: - cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ; + cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; break; } } diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 868fe945e35a..41c0fc9f3b14 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -535,13 +535,13 @@ static void do_bus_err(struct net_device *dev, if (reg_esr & FLEXCAN_ESR_ACK_ERR) { netdev_dbg(dev, "ACK_ERR irq\n"); cf->can_id |= CAN_ERR_ACK; - cf->data[3] |= CAN_ERR_PROT_LOC_ACK; + cf->data[3] = CAN_ERR_PROT_LOC_ACK; tx_errors = 1; } if (reg_esr & FLEXCAN_ESR_CRC_ERR) { netdev_dbg(dev, "CRC_ERR irq\n"); cf->data[2] |= CAN_ERR_PROT_BIT; - cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ; + cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; rx_errors = 1; } if (reg_esr & FLEXCAN_ESR_FRM_ERR) { diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c index c1e85368a198..5d04f5464faf 100644 --- a/drivers/net/can/janz-ican3.c +++ 
b/drivers/net/can/janz-ican3.c @@ -1096,7 +1096,6 @@ static int ican3_handle_cevtind(struct ican3_dev *mod, struct ican3_msg *msg) cf->data[2] |= CAN_ERR_PROT_STUFF; break; default: - cf->data[2] |= CAN_ERR_PROT_UNSPEC; cf->data[3] = ecc & ECC_SEG; break; } diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index ef655177bb5e..39cf911f7a1e 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -487,7 +487,6 @@ static int m_can_handle_lec_err(struct net_device *dev, * type of the last error to occur on the CAN bus */ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; - cf->data[2] |= CAN_ERR_PROT_UNSPEC; switch (lec_type) { case LEC_STUFF_ERROR: @@ -500,8 +499,7 @@ static int m_can_handle_lec_err(struct net_device *dev, break; case LEC_ACK_ERROR: netdev_dbg(dev, "ack error\n"); - cf->data[3] |= (CAN_ERR_PROT_LOC_ACK | - CAN_ERR_PROT_LOC_ACK_DEL); + cf->data[3] = CAN_ERR_PROT_LOC_ACK; break; case LEC_BIT1_ERROR: netdev_dbg(dev, "bit1 error\n"); @@ -513,8 +511,7 @@ static int m_can_handle_lec_err(struct net_device *dev, break; case LEC_CRC_ERROR: netdev_dbg(dev, "CRC error\n"); - cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ | - CAN_ERR_PROT_LOC_CRC_DEL); + cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; break; default: break; diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c index e187ca783da0..c1317889d3d8 100644 --- a/drivers/net/can/pch_can.c +++ b/drivers/net/can/pch_can.c @@ -559,8 +559,7 @@ static void pch_can_error(struct net_device *ndev, u32 status) stats->rx_errors++; break; case PCH_CRC_ERR: - cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ | - CAN_ERR_PROT_LOC_CRC_DEL; + cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; priv->can.can_stats.bus_error++; stats->rx_errors++; break; diff --git a/drivers/net/can/rcar_can.c b/drivers/net/can/rcar_can.c index 7bd54191f962..bc46be39549d 100644 --- a/drivers/net/can/rcar_can.c +++ b/drivers/net/can/rcar_can.c @@ -241,17 +241,16 @@ static void rcar_can_error(struct net_device *ndev) u8 ecsr; netdev_dbg(priv->ndev, "Bus error interrupt:\n"); - if (skb) { + if (skb) cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT; - cf->data[2] = CAN_ERR_PROT_UNSPEC; - } + ecsr = readb(&priv->regs->ecsr); if (ecsr & RCAR_CAN_ECSR_ADEF) { netdev_dbg(priv->ndev, "ACK Delimiter Error\n"); tx_errors++; writeb(~RCAR_CAN_ECSR_ADEF, &priv->regs->ecsr); if (skb) - cf->data[3] |= CAN_ERR_PROT_LOC_ACK_DEL; + cf->data[3] = CAN_ERR_PROT_LOC_ACK_DEL; } if (ecsr & RCAR_CAN_ECSR_BE0F) { netdev_dbg(priv->ndev, "Bit Error (dominant)\n"); @@ -272,7 +271,7 @@ static void rcar_can_error(struct net_device *ndev) rx_errors++; writeb(~RCAR_CAN_ECSR_CEF, &priv->regs->ecsr); if (skb) - cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ; + cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; } if (ecsr & RCAR_CAN_ECSR_AEF) { netdev_dbg(priv->ndev, "ACK Error\n"); @@ -280,7 +279,7 @@ static void rcar_can_error(struct net_device *ndev) writeb(~RCAR_CAN_ECSR_AEF, &priv->regs->ecsr); if (skb) { cf->can_id |= CAN_ERR_ACK; - cf->data[3] |= CAN_ERR_PROT_LOC_ACK; + cf->data[3] = CAN_ERR_PROT_LOC_ACK; } } if (ecsr & RCAR_CAN_ECSR_FEF) { diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c index 7b92e911a616..8dda3b703d39 100644 --- a/drivers/net/can/sja1000/sja1000.c +++ b/drivers/net/can/sja1000/sja1000.c @@ -218,6 +218,9 @@ static void sja1000_start(struct net_device *dev) priv->write_reg(priv, SJA1000_RXERR, 0x0); priv->read_reg(priv, SJA1000_ECC); + /* clear interrupt flags */ + priv->read_reg(priv, SJA1000_IR); + /* leave reset mode */ 
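[The CAN hunks throughout this series all follow from the error-frame ABI in <linux/can/error.h>: CAN_ERR_PROT_UNSPEC is defined as 0x00, so OR-ing it into data[2] was always a no-op, and data[3] holds a single error-location value rather than a bit mask, so OR-ing two location codes does not mean "both"; it just produces another code. Hence UNSPEC is dropped, only one location is reported, and data[3] is assigned with = instead of |=. A sketch of the resulting convention:

	#include <linux/can.h>
	#include <linux/can/error.h>

	/* Sketch: report a CRC bus error on an allocated error frame. */
	static void demo_report_crc_error(struct can_frame *cf)
	{
		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;

		/* data[2] is a bit mask of protocol-violation types ... */
		cf->data[2] |= CAN_ERR_PROT_BIT;

		/* ... but data[3] is one location value, so assign it */
		cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
	}
]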
set_normal_mode(dev); } @@ -446,7 +449,6 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status) cf->data[2] |= CAN_ERR_PROT_STUFF; break; default: - cf->data[2] |= CAN_ERR_PROT_UNSPEC; cf->data[3] = ecc & ECC_SEG; break; } diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c index d9a42c646783..68ef0a4cd821 100644 --- a/drivers/net/can/sun4i_can.c +++ b/drivers/net/can/sun4i_can.c @@ -575,7 +575,6 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status) cf->data[2] |= CAN_ERR_PROT_STUFF; break; default: - cf->data[2] |= CAN_ERR_PROT_UNSPEC; cf->data[3] = (ecc & SUN4I_STA_ERR_SEG_CODE) >> 16; break; diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c index cf345cbfe819..680d1ff07a55 100644 --- a/drivers/net/can/ti_hecc.c +++ b/drivers/net/can/ti_hecc.c @@ -722,7 +722,6 @@ static int ti_hecc_error(struct net_device *ndev, int int_status, if (err_status & HECC_BUS_ERROR) { ++priv->can.can_stats.bus_error; cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT; - cf->data[2] |= CAN_ERR_PROT_UNSPEC; if (err_status & HECC_CANES_FE) { hecc_set_bit(priv, HECC_CANES, HECC_CANES_FE); cf->data[2] |= CAN_ERR_PROT_FORM; @@ -737,13 +736,11 @@ static int ti_hecc_error(struct net_device *ndev, int int_status, } if (err_status & HECC_CANES_CRCE) { hecc_set_bit(priv, HECC_CANES, HECC_CANES_CRCE); - cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ | - CAN_ERR_PROT_LOC_CRC_DEL; + cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; } if (err_status & HECC_CANES_ACKE) { hecc_set_bit(priv, HECC_CANES, HECC_CANES_ACKE); - cf->data[3] |= CAN_ERR_PROT_LOC_ACK | - CAN_ERR_PROT_LOC_ACK_DEL; + cf->data[3] = CAN_ERR_PROT_LOC_ACK; } } diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c index 2d390384ef3b..fc5b75675cd8 100644 --- a/drivers/net/can/usb/ems_usb.c +++ b/drivers/net/can/usb/ems_usb.c @@ -377,7 +377,6 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg) cf->data[2] |= CAN_ERR_PROT_STUFF; break; default: - cf->data[2] |= CAN_ERR_PROT_UNSPEC; cf->data[3] = ecc & SJA1000_ECC_SEG; break; } diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c index 0e5a4493ba4f..113e64fcd73b 100644 --- a/drivers/net/can/usb/esd_usb2.c +++ b/drivers/net/can/usb/esd_usb2.c @@ -282,7 +282,6 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv, cf->data[2] |= CAN_ERR_PROT_STUFF; break; default: - cf->data[2] |= CAN_ERR_PROT_UNSPEC; cf->data[3] = ecc & SJA1000_ECC_SEG; break; } diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c index 8b17a9065b0b..022bfa13ebfa 100644 --- a/drivers/net/can/usb/kvaser_usb.c +++ b/drivers/net/can/usb/kvaser_usb.c @@ -944,10 +944,9 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev, cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT; if (es->leaf.error_factor & M16C_EF_ACKE) - cf->data[3] |= (CAN_ERR_PROT_LOC_ACK); + cf->data[3] = CAN_ERR_PROT_LOC_ACK; if (es->leaf.error_factor & M16C_EF_CRCE) - cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ | - CAN_ERR_PROT_LOC_CRC_DEL); + cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; if (es->leaf.error_factor & M16C_EF_FORME) cf->data[2] |= CAN_ERR_PROT_FORM; if (es->leaf.error_factor & M16C_EF_STFE) diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c index de95b1ccba3e..a731720f1d13 100644 --- a/drivers/net/can/usb/usb_8dev.c +++ b/drivers/net/can/usb/usb_8dev.c @@ -401,9 +401,7 @@ static void usb_8dev_rx_err_msg(struct usb_8dev_priv *priv, tx_errors = 1; break; case USB_8DEV_STATUSMSG_CRC: - 
cf->data[2] |= CAN_ERR_PROT_UNSPEC; - cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ | - CAN_ERR_PROT_LOC_CRC_DEL; + cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; rx_errors = 1; break; case USB_8DEV_STATUSMSG_BIT0: diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c index fc55e8e0351d..51670b322409 100644 --- a/drivers/net/can/xilinx_can.c +++ b/drivers/net/can/xilinx_can.c @@ -608,17 +608,15 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr) /* Check for error interrupt */ if (isr & XCAN_IXR_ERROR_MASK) { - if (skb) { + if (skb) cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; - cf->data[2] |= CAN_ERR_PROT_UNSPEC; - } /* Check for Ack error interrupt */ if (err_status & XCAN_ESR_ACKER_MASK) { stats->tx_errors++; if (skb) { cf->can_id |= CAN_ERR_ACK; - cf->data[3] |= CAN_ERR_PROT_LOC_ACK; + cf->data[3] = CAN_ERR_PROT_LOC_ACK; } } @@ -654,8 +652,7 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr) stats->rx_errors++; if (skb) { cf->can_id |= CAN_ERR_PROT; - cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ | - CAN_ERR_PROT_LOC_CRC_DEL; + cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; } } priv->can.can_stats.bus_error++; diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c index 9093577755f6..0527f485c3dc 100644 --- a/drivers/net/dsa/mv88e6060.c +++ b/drivers/net/dsa/mv88e6060.c @@ -15,9 +15,7 @@ #include <linux/netdevice.h> #include <linux/phy.h> #include <net/dsa.h> - -#define REG_PORT(p) (8 + (p)) -#define REG_GLOBAL 0x0f +#include "mv88e6060.h" static int reg_read(struct dsa_switch *ds, int addr, int reg) { @@ -67,13 +65,14 @@ static char *mv88e6060_probe(struct device *host_dev, int sw_addr) if (bus == NULL) return NULL; - ret = mdiobus_read(bus, sw_addr + REG_PORT(0), 0x03); + ret = mdiobus_read(bus, sw_addr + REG_PORT(0), PORT_SWITCH_ID); if (ret >= 0) { - if (ret == 0x0600) + if (ret == PORT_SWITCH_ID_6060) return "Marvell 88E6060 (A0)"; - if (ret == 0x0601 || ret == 0x0602) + if (ret == PORT_SWITCH_ID_6060_R1 || + ret == PORT_SWITCH_ID_6060_R2) return "Marvell 88E6060 (B0)"; - if ((ret & 0xfff0) == 0x0600) + if ((ret & PORT_SWITCH_ID_6060_MASK) == PORT_SWITCH_ID_6060) return "Marvell 88E6060"; } @@ -87,22 +86,26 @@ static int mv88e6060_switch_reset(struct dsa_switch *ds) unsigned long timeout; /* Set all ports to the disabled state. */ - for (i = 0; i < 6; i++) { - ret = REG_READ(REG_PORT(i), 0x04); - REG_WRITE(REG_PORT(i), 0x04, ret & 0xfffc); + for (i = 0; i < MV88E6060_PORTS; i++) { + ret = REG_READ(REG_PORT(i), PORT_CONTROL); + REG_WRITE(REG_PORT(i), PORT_CONTROL, + ret & ~PORT_CONTROL_STATE_MASK); } /* Wait for transmit queues to drain. */ usleep_range(2000, 4000); /* Reset the switch. */ - REG_WRITE(REG_GLOBAL, 0x0a, 0xa130); + REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL, + GLOBAL_ATU_CONTROL_SWRESET | + GLOBAL_ATU_CONTROL_ATUSIZE_1024 | + GLOBAL_ATU_CONTROL_ATE_AGE_5MIN); /* Wait up to one second for reset to complete. */ timeout = jiffies + 1 * HZ; while (time_before(jiffies, timeout)) { - ret = REG_READ(REG_GLOBAL, 0x00); - if ((ret & 0x8000) == 0x0000) + ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS); + if (ret & GLOBAL_STATUS_INIT_READY) break; usleep_range(1000, 2000); @@ -119,13 +122,15 @@ static int mv88e6060_setup_global(struct dsa_switch *ds) * set the maximum frame size to 1536 bytes, and mask all * interrupt sources. 
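[The mv88e6060 rework that follows replaces bare register numbers with named constants from the new mv88e6060.h, so each access documents the hardware intent. The reset hunk shows the payoff; side by side, with both forms taken from the diff itself:

	/* before: opaque magic values */
	REG_WRITE(REG_GLOBAL, 0x0a, 0xa130);

	/* after: software reset, 1024-entry ATU, 5-minute ageing */
	REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
		  GLOBAL_ATU_CONTROL_SWRESET |
		  GLOBAL_ATU_CONTROL_ATUSIZE_1024 |
		  GLOBAL_ATU_CONTROL_ATE_AGE_5MIN);

The same mechanical substitution covers PORT_CONTROL, PORT_VLAN_MAP, PORT_ASSOC_VECTOR and the GLOBAL_MAC_* registers below, and the hard-coded port count 6 becomes MV88E6060_PORTS.]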
*/ - REG_WRITE(REG_GLOBAL, 0x04, 0x0800); + REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, GLOBAL_CONTROL_MAX_FRAME_1536); /* Enable automatic address learning, set the address * database size to 1024 entries, and set the default aging * time to 5 minutes. */ - REG_WRITE(REG_GLOBAL, 0x0a, 0x2130); + REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL, + GLOBAL_ATU_CONTROL_ATUSIZE_1024 | + GLOBAL_ATU_CONTROL_ATE_AGE_5MIN); return 0; } @@ -139,25 +144,30 @@ static int mv88e6060_setup_port(struct dsa_switch *ds, int p) * state to Forwarding. Additionally, if this is the CPU * port, enable Ingress and Egress Trailer tagging mode. */ - REG_WRITE(addr, 0x04, dsa_is_cpu_port(ds, p) ? 0x4103 : 0x0003); + REG_WRITE(addr, PORT_CONTROL, + dsa_is_cpu_port(ds, p) ? + PORT_CONTROL_TRAILER | + PORT_CONTROL_INGRESS_MODE | + PORT_CONTROL_STATE_FORWARDING : + PORT_CONTROL_STATE_FORWARDING); /* Port based VLAN map: give each port its own address * database, allow the CPU port to talk to each of the 'real' * ports, and allow each of the 'real' ports to only talk to * the CPU port. */ - REG_WRITE(addr, 0x06, - ((p & 0xf) << 12) | - (dsa_is_cpu_port(ds, p) ? - ds->phys_port_mask : - (1 << ds->dst->cpu_port))); + REG_WRITE(addr, PORT_VLAN_MAP, + ((p & 0xf) << PORT_VLAN_MAP_DBNUM_SHIFT) | + (dsa_is_cpu_port(ds, p) ? + ds->phys_port_mask : + BIT(ds->dst->cpu_port))); /* Port Association Vector: when learning source addresses * of packets, add the address to the address database using * a port bitmap that has only the bit for this port set and * the other bits clear. */ - REG_WRITE(addr, 0x0b, 1 << p); + REG_WRITE(addr, PORT_ASSOC_VECTOR, BIT(p)); return 0; } @@ -177,7 +187,7 @@ static int mv88e6060_setup(struct dsa_switch *ds) if (ret < 0) return ret; - for (i = 0; i < 6; i++) { + for (i = 0; i < MV88E6060_PORTS; i++) { ret = mv88e6060_setup_port(ds, i); if (ret < 0) return ret; @@ -188,16 +198,17 @@ static int mv88e6060_setup(struct dsa_switch *ds) static int mv88e6060_set_addr(struct dsa_switch *ds, u8 *addr) { - REG_WRITE(REG_GLOBAL, 0x01, (addr[0] << 8) | addr[1]); - REG_WRITE(REG_GLOBAL, 0x02, (addr[2] << 8) | addr[3]); - REG_WRITE(REG_GLOBAL, 0x03, (addr[4] << 8) | addr[5]); + /* Use the same MAC Address as FD Pause frames for all ports */ + REG_WRITE(REG_GLOBAL, GLOBAL_MAC_01, (addr[0] << 9) | addr[1]); + REG_WRITE(REG_GLOBAL, GLOBAL_MAC_23, (addr[2] << 8) | addr[3]); + REG_WRITE(REG_GLOBAL, GLOBAL_MAC_45, (addr[4] << 8) | addr[5]); return 0; } static int mv88e6060_port_to_phy_addr(int port) { - if (port >= 0 && port <= 5) + if (port >= 0 && port < MV88E6060_PORTS) return port; return -1; } @@ -225,54 +236,6 @@ mv88e6060_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val) return reg_write(ds, addr, regnum, val); } -static void mv88e6060_poll_link(struct dsa_switch *ds) -{ - int i; - - for (i = 0; i < DSA_MAX_PORTS; i++) { - struct net_device *dev; - int uninitialized_var(port_status); - int link; - int speed; - int duplex; - int fc; - - dev = ds->ports[i]; - if (dev == NULL) - continue; - - link = 0; - if (dev->flags & IFF_UP) { - port_status = reg_read(ds, REG_PORT(i), 0x00); - if (port_status < 0) - continue; - - link = !!(port_status & 0x1000); - } - - if (!link) { - if (netif_carrier_ok(dev)) { - netdev_info(dev, "link down\n"); - netif_carrier_off(dev); - } - continue; - } - - speed = (port_status & 0x0100) ? 100 : 10; - duplex = (port_status & 0x0200) ? 1 : 0; - fc = ((port_status & 0xc000) == 0xc000) ? 
1 : 0; - - if (!netif_carrier_ok(dev)) { - netdev_info(dev, - "link up, %d Mb/s, %s duplex, flow control %sabled\n", - speed, - duplex ? "full" : "half", - fc ? "en" : "dis"); - netif_carrier_on(dev); - } - } -} - static struct dsa_switch_driver mv88e6060_switch_driver = { .tag_protocol = DSA_TAG_PROTO_TRAILER, .probe = mv88e6060_probe, @@ -280,7 +243,6 @@ static struct dsa_switch_driver mv88e6060_switch_driver = { .set_addr = mv88e6060_set_addr, .phy_read = mv88e6060_phy_read, .phy_write = mv88e6060_phy_write, - .poll_link = mv88e6060_poll_link, }; static int __init mv88e6060_init(void) diff --git a/drivers/net/dsa/mv88e6060.h b/drivers/net/dsa/mv88e6060.h new file mode 100644 index 000000000000..cc9b2ed4aff4 --- /dev/null +++ b/drivers/net/dsa/mv88e6060.h @@ -0,0 +1,111 @@ +/* + * drivers/net/dsa/mv88e6060.h - Marvell 88e6060 switch chip support + * Copyright (c) 2015 Neil Armstrong + * + * Based on mv88e6xxx.h + * Copyright (c) 2008 Marvell Semiconductor + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __MV88E6060_H +#define __MV88E6060_H + +#define MV88E6060_PORTS 6 + +#define REG_PORT(p) (0x8 + (p)) +#define PORT_STATUS 0x00 +#define PORT_STATUS_PAUSE_EN BIT(15) +#define PORT_STATUS_MY_PAUSE BIT(14) +#define PORT_STATUS_FC (PORT_STATUS_MY_PAUSE | PORT_STATUS_PAUSE_EN) +#define PORT_STATUS_RESOLVED BIT(13) +#define PORT_STATUS_LINK BIT(12) +#define PORT_STATUS_PORTMODE BIT(11) +#define PORT_STATUS_PHYMODE BIT(10) +#define PORT_STATUS_DUPLEX BIT(9) +#define PORT_STATUS_SPEED BIT(8) +#define PORT_SWITCH_ID 0x03 +#define PORT_SWITCH_ID_6060 0x0600 +#define PORT_SWITCH_ID_6060_MASK 0xfff0 +#define PORT_SWITCH_ID_6060_R1 0x0601 +#define PORT_SWITCH_ID_6060_R2 0x0602 +#define PORT_CONTROL 0x04 +#define PORT_CONTROL_FORCE_FLOW_CTRL BIT(15) +#define PORT_CONTROL_TRAILER BIT(14) +#define PORT_CONTROL_HEADER BIT(11) +#define PORT_CONTROL_INGRESS_MODE BIT(8) +#define PORT_CONTROL_VLAN_TUNNEL BIT(7) +#define PORT_CONTROL_STATE_MASK 0x03 +#define PORT_CONTROL_STATE_DISABLED 0x00 +#define PORT_CONTROL_STATE_BLOCKING 0x01 +#define PORT_CONTROL_STATE_LEARNING 0x02 +#define PORT_CONTROL_STATE_FORWARDING 0x03 +#define PORT_VLAN_MAP 0x06 +#define PORT_VLAN_MAP_DBNUM_SHIFT 12 +#define PORT_VLAN_MAP_TABLE_MASK 0x1f +#define PORT_ASSOC_VECTOR 0x0b +#define PORT_ASSOC_VECTOR_MONITOR BIT(15) +#define PORT_ASSOC_VECTOR_PAV_MASK 0x1f +#define PORT_RX_CNTR 0x10 +#define PORT_TX_CNTR 0x11 + +#define REG_GLOBAL 0x0f +#define GLOBAL_STATUS 0x00 +#define GLOBAL_STATUS_SW_MODE_MASK (0x3 << 12) +#define GLOBAL_STATUS_SW_MODE_0 (0x0 << 12) +#define GLOBAL_STATUS_SW_MODE_1 (0x1 << 12) +#define GLOBAL_STATUS_SW_MODE_2 (0x2 << 12) +#define GLOBAL_STATUS_SW_MODE_3 (0x3 << 12) +#define GLOBAL_STATUS_INIT_READY BIT(11) +#define GLOBAL_STATUS_ATU_FULL BIT(3) +#define GLOBAL_STATUS_ATU_DONE BIT(2) +#define GLOBAL_STATUS_PHY_INT BIT(1) +#define GLOBAL_STATUS_EEINT BIT(0) +#define GLOBAL_MAC_01 0x01 +#define GLOBAL_MAC_01_DIFF_ADDR BIT(8) +#define GLOBAL_MAC_23 0x02 +#define GLOBAL_MAC_45 0x03 +#define GLOBAL_CONTROL 0x04 +#define GLOBAL_CONTROL_DISCARD_EXCESS BIT(13) +#define GLOBAL_CONTROL_MAX_FRAME_1536 BIT(10) +#define GLOBAL_CONTROL_RELOAD_EEPROM BIT(9) +#define GLOBAL_CONTROL_CTRMODE BIT(8) +#define GLOBAL_CONTROL_ATU_FULL_EN BIT(3) +#define GLOBAL_CONTROL_ATU_DONE_EN BIT(2) +#define 
GLOBAL_CONTROL_PHYINT_EN BIT(1) +#define GLOBAL_CONTROL_EEPROM_DONE_EN BIT(0) +#define GLOBAL_ATU_CONTROL 0x0a +#define GLOBAL_ATU_CONTROL_SWRESET BIT(15) +#define GLOBAL_ATU_CONTROL_LEARNDIS BIT(14) +#define GLOBAL_ATU_CONTROL_ATUSIZE_256 (0x0 << 12) +#define GLOBAL_ATU_CONTROL_ATUSIZE_512 (0x1 << 12) +#define GLOBAL_ATU_CONTROL_ATUSIZE_1024 (0x2 << 12) +#define GLOBAL_ATU_CONTROL_ATE_AGE_SHIFT 4 +#define GLOBAL_ATU_CONTROL_ATE_AGE_MASK (0xff << 4) +#define GLOBAL_ATU_CONTROL_ATE_AGE_5MIN (0x13 << 4) +#define GLOBAL_ATU_OP 0x0b +#define GLOBAL_ATU_OP_BUSY BIT(15) +#define GLOBAL_ATU_OP_NOP (0 << 12) +#define GLOBAL_ATU_OP_FLUSH_ALL ((1 << 12) | GLOBAL_ATU_OP_BUSY) +#define GLOBAL_ATU_OP_FLUSH_UNLOCKED ((2 << 12) | GLOBAL_ATU_OP_BUSY) +#define GLOBAL_ATU_OP_LOAD_DB ((3 << 12) | GLOBAL_ATU_OP_BUSY) +#define GLOBAL_ATU_OP_GET_NEXT_DB ((4 << 12) | GLOBAL_ATU_OP_BUSY) +#define GLOBAL_ATU_OP_FLUSH_DB ((5 << 12) | GLOBAL_ATU_OP_BUSY) +#define GLOBAL_ATU_OP_FLUSH_UNLOCKED_DB ((6 << 12) | GLOBAL_ATU_OP_BUSY) +#define GLOBAL_ATU_DATA 0x0c +#define GLOBAL_ATU_DATA_PORT_VECTOR_MASK 0x3f0 +#define GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT 4 +#define GLOBAL_ATU_DATA_STATE_MASK 0x0f +#define GLOBAL_ATU_DATA_STATE_UNUSED 0x00 +#define GLOBAL_ATU_DATA_STATE_UC_STATIC 0x0e +#define GLOBAL_ATU_DATA_STATE_UC_LOCKED 0x0f +#define GLOBAL_ATU_DATA_STATE_MC_STATIC 0x07 +#define GLOBAL_ATU_DATA_STATE_MC_LOCKED 0x0e +#define GLOBAL_ATU_MAC_01 0x0d +#define GLOBAL_ATU_MAC_23 0x0e +#define GLOBAL_ATU_MAC_45 0x0f + +#endif diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 05aa7597dab9..31c5e476fd64 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -29,6 +29,7 @@ source "drivers/net/ethernet/apm/Kconfig" source "drivers/net/ethernet/apple/Kconfig" source "drivers/net/ethernet/arc/Kconfig" source "drivers/net/ethernet/atheros/Kconfig" +source "drivers/net/ethernet/aurora/Kconfig" source "drivers/net/ethernet/cadence/Kconfig" source "drivers/net/ethernet/adi/Kconfig" source "drivers/net/ethernet/broadcom/Kconfig" @@ -78,7 +79,6 @@ source "drivers/net/ethernet/ibm/Kconfig" source "drivers/net/ethernet/intel/Kconfig" source "drivers/net/ethernet/i825xx/Kconfig" source "drivers/net/ethernet/xscale/Kconfig" -source "drivers/net/ethernet/icplus/Kconfig" config JME tristate "JMicron(R) PCI-Express Gigabit Ethernet support" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index ddfc808110a1..071f84eb6f3f 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -15,6 +15,7 @@ obj-$(CONFIG_NET_XGENE) += apm/ obj-$(CONFIG_NET_VENDOR_APPLE) += apple/ obj-$(CONFIG_NET_VENDOR_ARC) += arc/ obj-$(CONFIG_NET_VENDOR_ATHEROS) += atheros/ +obj-$(CONFIG_NET_VENDOR_AURORA) += aurora/ obj-$(CONFIG_NET_CADENCE) += cadence/ obj-$(CONFIG_NET_BFIN) += adi/ obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/ @@ -41,7 +42,6 @@ obj-$(CONFIG_NET_VENDOR_IBM) += ibm/ obj-$(CONFIG_NET_VENDOR_INTEL) += intel/ obj-$(CONFIG_NET_VENDOR_I825XX) += i825xx/ obj-$(CONFIG_NET_VENDOR_XSCALE) += xscale/ -obj-$(CONFIG_IP1000) += icplus/ obj-$(CONFIG_JME) += jme.o obj-$(CONFIG_KORINA) += korina.o obj-$(CONFIG_LANTIQ_ETOP) += lantiq_etop.o diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c index e2afabf3a465..7ccebae9cb48 100644 --- a/drivers/net/ethernet/amd/pcnet32.c +++ b/drivers/net/ethernet/amd/pcnet32.c @@ -1500,10 +1500,11 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent) return -ENODEV; } - if 
(!pci_set_dma_mask(pdev, PCNET32_DMA_MASK)) { + err = pci_set_dma_mask(pdev, PCNET32_DMA_MASK); + if (err) { if (pcnet32_debug & NETIF_MSG_PROBE) pr_err("architecture does not support 32bit PCI busmaster DMA\n"); - return -ENODEV; + return err; } if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) { if (pcnet32_debug & NETIF_MSG_PROBE) diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index 991412ce6f48..9147a0107c44 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -450,12 +450,12 @@ static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb, return NETDEV_TX_OK; } - pdata->ring_ops->wr_cmd(tx_ring, count); skb_tx_timestamp(skb); pdata->stats.tx_packets++; pdata->stats.tx_bytes += skb->len; + pdata->ring_ops->wr_cmd(tx_ring, count); return NETDEV_TX_OK; } @@ -688,10 +688,10 @@ static int xgene_enet_open(struct net_device *ndev) mac_ops->tx_enable(pdata); mac_ops->rx_enable(pdata); + xgene_enet_napi_enable(pdata); ret = xgene_enet_register_irq(ndev); if (ret) return ret; - xgene_enet_napi_enable(pdata); if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) phy_start(pdata->phy_dev); @@ -715,13 +715,13 @@ static int xgene_enet_close(struct net_device *ndev) else cancel_delayed_work_sync(&pdata->link_work); - xgene_enet_napi_disable(pdata); - xgene_enet_free_irq(ndev); - xgene_enet_process_ring(pdata->rx_ring, -1); - mac_ops->tx_disable(pdata); mac_ops->rx_disable(pdata); + xgene_enet_free_irq(ndev); + xgene_enet_napi_disable(pdata); + xgene_enet_process_ring(pdata->rx_ring, -1); + return 0; } @@ -1474,15 +1474,15 @@ static int xgene_enet_probe(struct platform_device *pdev) } ndev->hw_features = ndev->features; - ret = register_netdev(ndev); + ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64)); if (ret) { - netdev_err(ndev, "Failed to register netdev\n"); + netdev_err(ndev, "No usable DMA configuration\n"); goto err; } - ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64)); + ret = register_netdev(ndev); if (ret) { - netdev_err(ndev, "No usable DMA configuration\n"); + netdev_err(ndev, "Failed to register netdev\n"); goto err; } @@ -1490,14 +1490,17 @@ static int xgene_enet_probe(struct platform_device *pdev) if (ret) goto err; - xgene_enet_napi_add(pdata); mac_ops = pdata->mac_ops; - if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) + if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) { ret = xgene_enet_mdio_config(pdata); - else + if (ret) + goto err; + } else { INIT_DELAYED_WORK(&pdata->link_work, mac_ops->link_state); + } - return ret; + xgene_enet_napi_add(pdata); + return 0; err: unregister_netdev(ndev); free_netdev(ndev); diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index c8af3ce3ea38..bd377a6b067d 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -1534,6 +1534,8 @@ static const struct pci_device_id alx_pci_tbl[] = { .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2200), .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, + { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2400), + .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8162), .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8171) }, diff --git a/drivers/net/ethernet/atheros/alx/reg.h b/drivers/net/ethernet/atheros/alx/reg.h index af006b44b2a6..0959e6824cb6 
100644 --- a/drivers/net/ethernet/atheros/alx/reg.h +++ b/drivers/net/ethernet/atheros/alx/reg.h @@ -37,6 +37,7 @@ #define ALX_DEV_ID_AR8161 0x1091 #define ALX_DEV_ID_E2200 0xe091 +#define ALX_DEV_ID_E2400 0xe0a1 #define ALX_DEV_ID_AR8162 0x1090 #define ALX_DEV_ID_AR8171 0x10A1 #define ALX_DEV_ID_AR8172 0x10A0 diff --git a/drivers/net/ethernet/aurora/Kconfig b/drivers/net/ethernet/aurora/Kconfig new file mode 100644 index 000000000000..a3c7106fdf85 --- /dev/null +++ b/drivers/net/ethernet/aurora/Kconfig @@ -0,0 +1,20 @@ +config NET_VENDOR_AURORA + bool "Aurora VLSI devices" + help + If you have a network (Ethernet) device belonging to this class, + say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + questions about Aurora devices. If you say Y, you will be asked + for your specific device in the following questions. + +if NET_VENDOR_AURORA + +config AURORA_NB8800 + tristate "Aurora AU-NB8800 support" + select PHYLIB + help + Support for the AU-NB8800 gigabit Ethernet controller. + +endif diff --git a/drivers/net/ethernet/aurora/Makefile b/drivers/net/ethernet/aurora/Makefile new file mode 100644 index 000000000000..6cb528a2fc26 --- /dev/null +++ b/drivers/net/ethernet/aurora/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_AURORA_NB8800) += nb8800.o diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c new file mode 100644 index 000000000000..ecc4a334c507 --- /dev/null +++ b/drivers/net/ethernet/aurora/nb8800.c @@ -0,0 +1,1552 @@ +/* + * Copyright (C) 2015 Mans Rullgard <mans@mansr.com> + * + * Mostly rewritten, based on driver from Sigma Designs. Original + * copyright notice below. + * + * + * Driver for tangox SMP864x/SMP865x/SMP867x/SMP868x builtin Ethernet Mac. + * + * Copyright (C) 2005 Maxime Bizon <mbizon@freebox.fr> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
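[Stepping back to the xgene_enet hunks above, all three are pure ordering fixes: the TX ring doorbell (wr_cmd) now fires only after the skb has been timestamped and the stats updated, so a completion racing with the submit cannot free the skb while it is still being touched; NAPI is enabled before the IRQ is requested on open (and disabled only after it is freed on close); and the DMA mask is validated before register_netdev() makes the device reachable. A generic sketch of that last rule; demo_probe() is an illustrative stand-in, not the driver's function:

	#include <linux/dma-mapping.h>
	#include <linux/netdevice.h>

	static int demo_probe(struct device *dev, struct net_device *ndev)
	{
		int ret;

		/* configure everything ndo_open may rely on first ... */
		ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
		if (ret)
			return ret;	/* no usable DMA configuration */

		/* ... because the device is live once this returns */
		return register_netdev(ndev);
	}
]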
+ */ + +#include <linux/module.h> +#include <linux/etherdevice.h> +#include <linux/delay.h> +#include <linux/ethtool.h> +#include <linux/interrupt.h> +#include <linux/platform_device.h> +#include <linux/of_device.h> +#include <linux/of_mdio.h> +#include <linux/of_net.h> +#include <linux/dma-mapping.h> +#include <linux/phy.h> +#include <linux/cache.h> +#include <linux/jiffies.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <asm/barrier.h> + +#include "nb8800.h" + +static void nb8800_tx_done(struct net_device *dev); +static int nb8800_dma_stop(struct net_device *dev); + +static inline u8 nb8800_readb(struct nb8800_priv *priv, int reg) +{ + return readb_relaxed(priv->base + reg); +} + +static inline u32 nb8800_readl(struct nb8800_priv *priv, int reg) +{ + return readl_relaxed(priv->base + reg); +} + +static inline void nb8800_writeb(struct nb8800_priv *priv, int reg, u8 val) +{ + writeb_relaxed(val, priv->base + reg); +} + +static inline void nb8800_writew(struct nb8800_priv *priv, int reg, u16 val) +{ + writew_relaxed(val, priv->base + reg); +} + +static inline void nb8800_writel(struct nb8800_priv *priv, int reg, u32 val) +{ + writel_relaxed(val, priv->base + reg); +} + +static inline void nb8800_maskb(struct nb8800_priv *priv, int reg, + u32 mask, u32 val) +{ + u32 old = nb8800_readb(priv, reg); + u32 new = (old & ~mask) | (val & mask); + + if (new != old) + nb8800_writeb(priv, reg, new); +} + +static inline void nb8800_maskl(struct nb8800_priv *priv, int reg, + u32 mask, u32 val) +{ + u32 old = nb8800_readl(priv, reg); + u32 new = (old & ~mask) | (val & mask); + + if (new != old) + nb8800_writel(priv, reg, new); +} + +static inline void nb8800_modb(struct nb8800_priv *priv, int reg, u8 bits, + bool set) +{ + nb8800_maskb(priv, reg, bits, set ? bits : 0); +} + +static inline void nb8800_setb(struct nb8800_priv *priv, int reg, u8 bits) +{ + nb8800_maskb(priv, reg, bits, bits); +} + +static inline void nb8800_clearb(struct nb8800_priv *priv, int reg, u8 bits) +{ + nb8800_maskb(priv, reg, bits, 0); +} + +static inline void nb8800_modl(struct nb8800_priv *priv, int reg, u32 bits, + bool set) +{ + nb8800_maskl(priv, reg, bits, set ? 
bits : 0); +} + +static inline void nb8800_setl(struct nb8800_priv *priv, int reg, u32 bits) +{ + nb8800_maskl(priv, reg, bits, bits); +} + +static inline void nb8800_clearl(struct nb8800_priv *priv, int reg, u32 bits) +{ + nb8800_maskl(priv, reg, bits, 0); +} + +static int nb8800_mdio_wait(struct mii_bus *bus) +{ + struct nb8800_priv *priv = bus->priv; + u32 val; + + return readl_poll_timeout_atomic(priv->base + NB8800_MDIO_CMD, + val, !(val & MDIO_CMD_GO), 1, 1000); +} + +static int nb8800_mdio_cmd(struct mii_bus *bus, u32 cmd) +{ + struct nb8800_priv *priv = bus->priv; + int err; + + err = nb8800_mdio_wait(bus); + if (err) + return err; + + nb8800_writel(priv, NB8800_MDIO_CMD, cmd); + udelay(10); + nb8800_writel(priv, NB8800_MDIO_CMD, cmd | MDIO_CMD_GO); + + return nb8800_mdio_wait(bus); +} + +static int nb8800_mdio_read(struct mii_bus *bus, int phy_id, int reg) +{ + struct nb8800_priv *priv = bus->priv; + u32 val; + int err; + + err = nb8800_mdio_cmd(bus, MDIO_CMD_ADDR(phy_id) | MDIO_CMD_REG(reg)); + if (err) + return err; + + val = nb8800_readl(priv, NB8800_MDIO_STS); + if (val & MDIO_STS_ERR) + return 0xffff; + + return val & 0xffff; +} + +static int nb8800_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val) +{ + u32 cmd = MDIO_CMD_ADDR(phy_id) | MDIO_CMD_REG(reg) | + MDIO_CMD_DATA(val) | MDIO_CMD_WR; + + return nb8800_mdio_cmd(bus, cmd); +} + +static void nb8800_mac_tx(struct net_device *dev, bool enable) +{ + struct nb8800_priv *priv = netdev_priv(dev); + + while (nb8800_readl(priv, NB8800_TXC_CR) & TCR_EN) + cpu_relax(); + + nb8800_modb(priv, NB8800_TX_CTL1, TX_EN, enable); +} + +static void nb8800_mac_rx(struct net_device *dev, bool enable) +{ + nb8800_modb(netdev_priv(dev), NB8800_RX_CTL, RX_EN, enable); +} + +static void nb8800_mac_af(struct net_device *dev, bool enable) +{ + nb8800_modb(netdev_priv(dev), NB8800_RX_CTL, RX_AF_EN, enable); +} + +static void nb8800_start_rx(struct net_device *dev) +{ + nb8800_setl(netdev_priv(dev), NB8800_RXC_CR, RCR_EN); +} + +static int nb8800_alloc_rx(struct net_device *dev, unsigned int i, bool napi) +{ + struct nb8800_priv *priv = netdev_priv(dev); + struct nb8800_rx_desc *rxd = &priv->rx_descs[i]; + struct nb8800_rx_buf *rxb = &priv->rx_bufs[i]; + int size = L1_CACHE_ALIGN(RX_BUF_SIZE); + dma_addr_t dma_addr; + struct page *page; + unsigned long offset; + void *data; + + data = napi ? napi_alloc_frag(size) : netdev_alloc_frag(size); + if (!data) + return -ENOMEM; + + page = virt_to_head_page(data); + offset = data - page_address(page); + + dma_addr = dma_map_page(&dev->dev, page, offset, RX_BUF_SIZE, + DMA_FROM_DEVICE); + + if (dma_mapping_error(&dev->dev, dma_addr)) { + skb_free_frag(data); + return -ENOMEM; + } + + rxb->page = page; + rxb->offset = offset; + rxd->desc.s_addr = dma_addr; + + return 0; +} + +static void nb8800_receive(struct net_device *dev, unsigned int i, + unsigned int len) +{ + struct nb8800_priv *priv = netdev_priv(dev); + struct nb8800_rx_desc *rxd = &priv->rx_descs[i]; + struct page *page = priv->rx_bufs[i].page; + int offset = priv->rx_bufs[i].offset; + void *data = page_address(page) + offset; + dma_addr_t dma = rxd->desc.s_addr; + struct sk_buff *skb; + unsigned int size; + int err; + + size = len <= RX_COPYBREAK ? 
len : RX_COPYHDR; + + skb = napi_alloc_skb(&priv->napi, size); + if (!skb) { + netdev_err(dev, "rx skb allocation failed\n"); + dev->stats.rx_dropped++; + return; + } + + if (len <= RX_COPYBREAK) { + dma_sync_single_for_cpu(&dev->dev, dma, len, DMA_FROM_DEVICE); + memcpy(skb_put(skb, len), data, len); + dma_sync_single_for_device(&dev->dev, dma, len, + DMA_FROM_DEVICE); + } else { + err = nb8800_alloc_rx(dev, i, true); + if (err) { + netdev_err(dev, "rx buffer allocation failed\n"); + dev->stats.rx_dropped++; + return; + } + + dma_unmap_page(&dev->dev, dma, RX_BUF_SIZE, DMA_FROM_DEVICE); + memcpy(skb_put(skb, RX_COPYHDR), data, RX_COPYHDR); + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + offset + RX_COPYHDR, len - RX_COPYHDR, + RX_BUF_SIZE); + } + + skb->protocol = eth_type_trans(skb, dev); + napi_gro_receive(&priv->napi, skb); +} + +static void nb8800_rx_error(struct net_device *dev, u32 report) +{ + if (report & RX_LENGTH_ERR) + dev->stats.rx_length_errors++; + + if (report & RX_FCS_ERR) + dev->stats.rx_crc_errors++; + + if (report & RX_FIFO_OVERRUN) + dev->stats.rx_fifo_errors++; + + if (report & RX_ALIGNMENT_ERROR) + dev->stats.rx_frame_errors++; + + dev->stats.rx_errors++; +} + +static int nb8800_poll(struct napi_struct *napi, int budget) +{ + struct net_device *dev = napi->dev; + struct nb8800_priv *priv = netdev_priv(dev); + struct nb8800_rx_desc *rxd; + unsigned int last = priv->rx_eoc; + unsigned int next; + int work = 0; + + nb8800_tx_done(dev); + +again: + while (work < budget) { + struct nb8800_rx_buf *rxb; + unsigned int len; + + next = (last + 1) % RX_DESC_COUNT; + + rxb = &priv->rx_bufs[next]; + rxd = &priv->rx_descs[next]; + + if (!rxd->report) + break; + + len = RX_BYTES_TRANSFERRED(rxd->report); + + if (IS_RX_ERROR(rxd->report)) + nb8800_rx_error(dev, rxd->report); + else + nb8800_receive(dev, next, len); + + dev->stats.rx_packets++; + dev->stats.rx_bytes += len; + + if (rxd->report & RX_MULTICAST_PKT) + dev->stats.multicast++; + + rxd->report = 0; + last = next; + work++; + } + + if (work) { + priv->rx_descs[last].desc.config |= DESC_EOC; + wmb(); /* ensure new EOC is written before clearing old */ + priv->rx_descs[priv->rx_eoc].desc.config &= ~DESC_EOC; + priv->rx_eoc = last; + nb8800_start_rx(dev); + } + + if (work < budget) { + nb8800_writel(priv, NB8800_RX_ITR, priv->rx_itr_irq); + + /* If a packet arrived after we last checked but + * before writing RX_ITR, the interrupt will be + * delayed, so we retrieve it now. 
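[The poll loop here ends with a careful interrupt re-arm: once work < budget, the driver restores the normal RX interrupt throttle (RX_ITR) and then re-reads the next descriptor's report word before calling napi_complete_done(), because a frame that landed between the last descriptor check and the RX_ITR write would otherwise only be noticed after a delayed interrupt. The shape of that pattern, condensed; the demo_* helpers are hypothetical stand-ins for the driver's descriptor and register accesses:

	#include <linux/netdevice.h>

	static bool demo_rx_pending(void);	/* hypothetical: peek next report */
	static void demo_rearm_rx_irq(void);	/* hypothetical: write RX_ITR */

	static int demo_poll(struct napi_struct *napi, int budget)
	{
		int work = 0;

	again:
		/* ... consume completed RX descriptors, incrementing work ... */

		if (work < budget) {
			demo_rearm_rx_irq();

			/* recheck: a frame may have arrived after the last
			 * descriptor check but before the re-arm above
			 */
			if (demo_rx_pending())
				goto again;

			napi_complete_done(napi, work);
		}

		return work;
	}
]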
+ */ + if (priv->rx_descs[next].report) + goto again; + + napi_complete_done(napi, work); + } + + return work; +} + +static void __nb8800_tx_dma_start(struct net_device *dev) +{ + struct nb8800_priv *priv = netdev_priv(dev); + struct nb8800_tx_buf *txb; + u32 txc_cr; + + txb = &priv->tx_bufs[priv->tx_queue]; + if (!txb->ready) + return; + + txc_cr = nb8800_readl(priv, NB8800_TXC_CR); + if (txc_cr & TCR_EN) + return; + + nb8800_writel(priv, NB8800_TX_DESC_ADDR, txb->dma_desc); + wmb(); /* ensure desc addr is written before starting DMA */ + nb8800_writel(priv, NB8800_TXC_CR, txc_cr | TCR_EN); + + priv->tx_queue = (priv->tx_queue + txb->chain_len) % TX_DESC_COUNT; +} + +static void nb8800_tx_dma_start(struct net_device *dev) +{ + struct nb8800_priv *priv = netdev_priv(dev); + + spin_lock_irq(&priv->tx_lock); + __nb8800_tx_dma_start(dev); + spin_unlock_irq(&priv->tx_lock); +} + +static void nb8800_tx_dma_start_irq(struct net_device *dev) +{ + struct nb8800_priv *priv = netdev_priv(dev); + + spin_lock(&priv->tx_lock); + __nb8800_tx_dma_start(dev); + spin_unlock(&priv->tx_lock); +} + +static int nb8800_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct nb8800_priv *priv = netdev_priv(dev); + struct nb8800_tx_desc *txd; + struct nb8800_tx_buf *txb; + struct nb8800_dma_desc *desc; + dma_addr_t dma_addr; + unsigned int dma_len; + unsigned int align; + unsigned int next; + + if (atomic_read(&priv->tx_free) <= NB8800_DESC_LOW) { + netif_stop_queue(dev); + return NETDEV_TX_BUSY; + } + + align = (8 - (uintptr_t)skb->data) & 7; + + dma_len = skb->len - align; + dma_addr = dma_map_single(&dev->dev, skb->data + align, + dma_len, DMA_TO_DEVICE); + + if (dma_mapping_error(&dev->dev, dma_addr)) { + netdev_err(dev, "tx dma mapping error\n"); + kfree_skb(skb); + dev->stats.tx_dropped++; + return NETDEV_TX_OK; + } + + if (atomic_dec_return(&priv->tx_free) <= NB8800_DESC_LOW) { + netif_stop_queue(dev); + skb->xmit_more = 0; + } + + next = priv->tx_next; + txb = &priv->tx_bufs[next]; + txd = &priv->tx_descs[next]; + desc = &txd->desc[0]; + + next = (next + 1) % TX_DESC_COUNT; + + if (align) { + memcpy(txd->buf, skb->data, align); + + desc->s_addr = + txb->dma_desc + offsetof(struct nb8800_tx_desc, buf); + desc->n_addr = txb->dma_desc + sizeof(txd->desc[0]); + desc->config = DESC_BTS(2) | DESC_DS | align; + + desc++; + } + + desc->s_addr = dma_addr; + desc->n_addr = priv->tx_bufs[next].dma_desc; + desc->config = DESC_BTS(2) | DESC_DS | DESC_EOF | dma_len; + + if (!skb->xmit_more) + desc->config |= DESC_EOC; + + txb->skb = skb; + txb->dma_addr = dma_addr; + txb->dma_len = dma_len; + + if (!priv->tx_chain) { + txb->chain_len = 1; + priv->tx_chain = txb; + } else { + priv->tx_chain->chain_len++; + } + + netdev_sent_queue(dev, skb->len); + + priv->tx_next = next; + + if (!skb->xmit_more) { + smp_wmb(); + priv->tx_chain->ready = true; + priv->tx_chain = NULL; + nb8800_tx_dma_start(dev); + } + + return NETDEV_TX_OK; +} + +static void nb8800_tx_error(struct net_device *dev, u32 report) +{ + if (report & TX_LATE_COLLISION) + dev->stats.collisions++; + + if (report & TX_PACKET_DROPPED) + dev->stats.tx_dropped++; + + if (report & TX_FIFO_UNDERRUN) + dev->stats.tx_fifo_errors++; + + dev->stats.tx_errors++; +} + +static void nb8800_tx_done(struct net_device *dev) +{ + struct nb8800_priv *priv = netdev_priv(dev); + unsigned int limit = priv->tx_next; + unsigned int done = priv->tx_done; + unsigned int packets = 0; + unsigned int len = 0; + + while (done != limit) { + struct nb8800_tx_desc *txd = 
&priv->tx_descs[done]; + struct nb8800_tx_buf *txb = &priv->tx_bufs[done]; + struct sk_buff *skb; + + if (!txd->report) + break; + + skb = txb->skb; + len += skb->len; + + dma_unmap_single(&dev->dev, txb->dma_addr, txb->dma_len, + DMA_TO_DEVICE); + + if (IS_TX_ERROR(txd->report)) { + nb8800_tx_error(dev, txd->report); + kfree_skb(skb); + } else { + consume_skb(skb); + } + + dev->stats.tx_packets++; + dev->stats.tx_bytes += TX_BYTES_TRANSFERRED(txd->report); + dev->stats.collisions += TX_EARLY_COLLISIONS(txd->report); + + txb->skb = NULL; + txb->ready = false; + txd->report = 0; + + done = (done + 1) % TX_DESC_COUNT; + packets++; + } + + if (packets) { + smp_mb__before_atomic(); + atomic_add(packets, &priv->tx_free); + netdev_completed_queue(dev, packets, len); + netif_wake_queue(dev); + priv->tx_done = done; + } +} + +static irqreturn_t nb8800_irq(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct nb8800_priv *priv = netdev_priv(dev); + irqreturn_t ret = IRQ_NONE; + u32 val; + + /* tx interrupt */ + val = nb8800_readl(priv, NB8800_TXC_SR); + if (val) { + nb8800_writel(priv, NB8800_TXC_SR, val); + + if (val & TSR_DI) + nb8800_tx_dma_start_irq(dev); + + if (val & TSR_TI) + napi_schedule_irqoff(&priv->napi); + + if (unlikely(val & TSR_DE)) + netdev_err(dev, "TX DMA error\n"); + + /* should never happen with automatic status retrieval */ + if (unlikely(val & TSR_TO)) + netdev_err(dev, "TX Status FIFO overflow\n"); + + ret = IRQ_HANDLED; + } + + /* rx interrupt */ + val = nb8800_readl(priv, NB8800_RXC_SR); + if (val) { + nb8800_writel(priv, NB8800_RXC_SR, val); + + if (likely(val & (RSR_RI | RSR_DI))) { + nb8800_writel(priv, NB8800_RX_ITR, priv->rx_itr_poll); + napi_schedule_irqoff(&priv->napi); + } + + if (unlikely(val & RSR_DE)) + netdev_err(dev, "RX DMA error\n"); + + /* should never happen with automatic status retrieval */ + if (unlikely(val & RSR_RO)) + netdev_err(dev, "RX Status FIFO overflow\n"); + + ret = IRQ_HANDLED; + } + + return ret; +} + +static void nb8800_mac_config(struct net_device *dev) +{ + struct nb8800_priv *priv = netdev_priv(dev); + bool gigabit = priv->speed == SPEED_1000; + u32 mac_mode_mask = RGMII_MODE | HALF_DUPLEX | GMAC_MODE; + u32 mac_mode = 0; + u32 slot_time; + u32 phy_clk; + u32 ict; + + if (!priv->duplex) + mac_mode |= HALF_DUPLEX; + + if (gigabit) { + if (priv->phy_mode == PHY_INTERFACE_MODE_RGMII) + mac_mode |= RGMII_MODE; + + mac_mode |= GMAC_MODE; + phy_clk = 125000000; + + /* Should be 512 but register is only 8 bits */ + slot_time = 255; + } else { + phy_clk = 25000000; + slot_time = 128; + } + + ict = DIV_ROUND_UP(phy_clk, clk_get_rate(priv->clk)); + + nb8800_writeb(priv, NB8800_IC_THRESHOLD, ict); + nb8800_writeb(priv, NB8800_SLOT_TIME, slot_time); + nb8800_maskb(priv, NB8800_MAC_MODE, mac_mode_mask, mac_mode); +} + +static void nb8800_pause_config(struct net_device *dev) +{ + struct nb8800_priv *priv = netdev_priv(dev); + struct phy_device *phydev = priv->phydev; + u32 rxcr; + + if (priv->pause_aneg) { + if (!phydev || !phydev->link) + return; + + priv->pause_rx = phydev->pause; + priv->pause_tx = phydev->pause ^ phydev->asym_pause; + } + + nb8800_modb(priv, NB8800_RX_CTL, RX_PAUSE_EN, priv->pause_rx); + + rxcr = nb8800_readl(priv, NB8800_RXC_CR); + if (!!(rxcr & RCR_FL) == priv->pause_tx) + return; + + if (netif_running(dev)) { + napi_disable(&priv->napi); + netif_tx_lock_bh(dev); + nb8800_dma_stop(dev); + nb8800_modl(priv, NB8800_RXC_CR, RCR_FL, priv->pause_tx); + nb8800_start_rx(dev); + netif_tx_unlock_bh(dev); + 
napi_enable(&priv->napi); + } else { + nb8800_modl(priv, NB8800_RXC_CR, RCR_FL, priv->pause_tx); + } +} + +static void nb8800_link_reconfigure(struct net_device *dev) +{ + struct nb8800_priv *priv = netdev_priv(dev); + struct phy_device *phydev = priv->phydev; + int change = 0; + + if (phydev->link) { + if (phydev->speed != priv->speed) { + priv->speed = phydev->speed; + change = 1; + } + + if (phydev->duplex != priv->duplex) { + priv->duplex = phydev->duplex; + change = 1; + } + + if (change) + nb8800_mac_config(dev); + + nb8800_pause_config(dev); + } + + if (phydev->link != priv->link) { + priv->link = phydev->link; + change = 1; + } + + if (change) + phy_print_status(priv->phydev); +} + +static void nb8800_update_mac_addr(struct net_device *dev) +{ + struct nb8800_priv *priv = netdev_priv(dev); + int i; + + for (i = 0; i < ETH_ALEN; i++) + nb8800_writeb(priv, NB8800_SRC_ADDR(i), dev->dev_addr[i]); + + for (i = 0; i < ETH_ALEN; i++) + nb8800_writeb(priv, NB8800_UC_ADDR(i), dev->dev_addr[i]); +} + +static int nb8800_set_mac_address(struct net_device *dev, void *addr) +{ + struct sockaddr *sock = addr; + + if (netif_running(dev)) + return -EBUSY; + + ether_addr_copy(dev->dev_addr, sock->sa_data); + nb8800_update_mac_addr(dev); + + return 0; +} + +static void nb8800_mc_init(struct net_device *dev, int val) +{ + struct nb8800_priv *priv = netdev_priv(dev); + + nb8800_writeb(priv, NB8800_MC_INIT, val); + readb_poll_timeout_atomic(priv->base + NB8800_MC_INIT, val, !val, + 1, 1000); +} + +static void nb8800_set_rx_mode(struct net_device *dev) +{ + struct nb8800_priv *priv = netdev_priv(dev); + struct netdev_hw_addr *ha; + int i; + + if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) { + nb8800_mac_af(dev, false); + return; + } + + nb8800_mac_af(dev, true); + nb8800_mc_init(dev, 0); + + netdev_for_each_mc_addr(ha, dev) { + for (i = 0; i < ETH_ALEN; i++) + nb8800_writeb(priv, NB8800_MC_ADDR(i), ha->addr[i]); + + nb8800_mc_init(dev, 0xff); + } +} + +#define RX_DESC_SIZE (RX_DESC_COUNT * sizeof(struct nb8800_rx_desc)) +#define TX_DESC_SIZE (TX_DESC_COUNT * sizeof(struct nb8800_tx_desc)) + +static void nb8800_dma_free(struct net_device *dev) +{ + struct nb8800_priv *priv = netdev_priv(dev); + unsigned int i; + + if (priv->rx_bufs) { + for (i = 0; i < RX_DESC_COUNT; i++) + if (priv->rx_bufs[i].page) + put_page(priv->rx_bufs[i].page); + + kfree(priv->rx_bufs); + priv->rx_bufs = NULL; + } + + if (priv->tx_bufs) { + for (i = 0; i < TX_DESC_COUNT; i++) + kfree_skb(priv->tx_bufs[i].skb); + + kfree(priv->tx_bufs); + priv->tx_bufs = NULL; + } + + if (priv->rx_descs) { + dma_free_coherent(dev->dev.parent, RX_DESC_SIZE, priv->rx_descs, + priv->rx_desc_dma); + priv->rx_descs = NULL; + } + + if (priv->tx_descs) { + dma_free_coherent(dev->dev.parent, TX_DESC_SIZE, priv->tx_descs, + priv->tx_desc_dma); + priv->tx_descs = NULL; + } +} + +static void nb8800_dma_reset(struct net_device *dev) +{ + struct nb8800_priv *priv = netdev_priv(dev); + struct nb8800_rx_desc *rxd; + struct nb8800_tx_desc *txd; + unsigned int i; + + for (i = 0; i < RX_DESC_COUNT; i++) { + dma_addr_t rx_dma = priv->rx_desc_dma + i * sizeof(*rxd); + + rxd = &priv->rx_descs[i]; + rxd->desc.n_addr = rx_dma + sizeof(*rxd); + rxd->desc.r_addr = + rx_dma + offsetof(struct nb8800_rx_desc, report); + rxd->desc.config = priv->rx_dma_config; + rxd->report = 0; + } + + rxd->desc.n_addr = priv->rx_desc_dma; + rxd->desc.config |= DESC_EOC; + + priv->rx_eoc = RX_DESC_COUNT - 1; + + for (i = 0; i < TX_DESC_COUNT; i++) { + struct nb8800_tx_buf *txb = 
&priv->tx_bufs[i]; + dma_addr_t r_dma = txb->dma_desc + + offsetof(struct nb8800_tx_desc, report); + + txd = &priv->tx_descs[i]; + txd->desc[0].r_addr = r_dma; + txd->desc[1].r_addr = r_dma; + txd->report = 0; + } + + priv->tx_next = 0; + priv->tx_queue = 0; + priv->tx_done = 0; + atomic_set(&priv->tx_free, TX_DESC_COUNT); + + nb8800_writel(priv, NB8800_RX_DESC_ADDR, priv->rx_desc_dma); + + wmb(); /* ensure all setup is written before starting */ +} + +static int nb8800_dma_init(struct net_device *dev) +{ + struct nb8800_priv *priv = netdev_priv(dev); + unsigned int n_rx = RX_DESC_COUNT; + unsigned int n_tx = TX_DESC_COUNT; + unsigned int i; + int err; + + priv->rx_descs = dma_alloc_coherent(dev->dev.parent, RX_DESC_SIZE, + &priv->rx_desc_dma, GFP_KERNEL); + if (!priv->rx_descs) + goto err_out; + + priv->rx_bufs = kcalloc(n_rx, sizeof(*priv->rx_bufs), GFP_KERNEL); + if (!priv->rx_bufs) + goto err_out; + + for (i = 0; i < n_rx; i++) { + err = nb8800_alloc_rx(dev, i, false); + if (err) + goto err_out; + } + + priv->tx_descs = dma_alloc_coherent(dev->dev.parent, TX_DESC_SIZE, + &priv->tx_desc_dma, GFP_KERNEL); + if (!priv->tx_descs) + goto err_out; + + priv->tx_bufs = kcalloc(n_tx, sizeof(*priv->tx_bufs), GFP_KERNEL); + if (!priv->tx_bufs) + goto err_out; + + for (i = 0; i < n_tx; i++) + priv->tx_bufs[i].dma_desc = + priv->tx_desc_dma + i * sizeof(struct nb8800_tx_desc); + + nb8800_dma_reset(dev); + + return 0; + +err_out: + nb8800_dma_free(dev); + + return -ENOMEM; +} + +static int nb8800_dma_stop(struct net_device *dev) +{ + struct nb8800_priv *priv = netdev_priv(dev); + struct nb8800_tx_buf *txb = &priv->tx_bufs[0]; + struct nb8800_tx_desc *txd = &priv->tx_descs[0]; + int retry = 5; + u32 txcr; + u32 rxcr; + int err; + unsigned int i; + + /* wait for tx to finish */ + err = readl_poll_timeout_atomic(priv->base + NB8800_TXC_CR, txcr, + !(txcr & TCR_EN) && + priv->tx_done == priv->tx_next, + 1000, 1000000); + if (err) + return err; + + /* The rx DMA only stops if it reaches the end of chain. + * To make this happen, we set the EOC flag on all rx + * descriptors, put the device in loopback mode, and send + * a few dummy frames. The interrupt handler will ignore + * these since NAPI is disabled and no real frames are in + * the tx queue. + */ + + for (i = 0; i < RX_DESC_COUNT; i++) + priv->rx_descs[i].desc.config |= DESC_EOC; + + txd->desc[0].s_addr = + txb->dma_desc + offsetof(struct nb8800_tx_desc, buf); + txd->desc[0].config = DESC_BTS(2) | DESC_DS | DESC_EOF | DESC_EOC | 8; + memset(txd->buf, 0, sizeof(txd->buf)); + + nb8800_mac_af(dev, false); + nb8800_setb(priv, NB8800_MAC_MODE, LOOPBACK_EN); + + do { + nb8800_writel(priv, NB8800_TX_DESC_ADDR, txb->dma_desc); + wmb(); + nb8800_writel(priv, NB8800_TXC_CR, txcr | TCR_EN); + + err = readl_poll_timeout_atomic(priv->base + NB8800_RXC_CR, + rxcr, !(rxcr & RCR_EN), + 1000, 100000); + } while (err && --retry); + + nb8800_mac_af(dev, true); + nb8800_clearb(priv, NB8800_MAC_MODE, LOOPBACK_EN); + nb8800_dma_reset(dev); + + return retry ? 
0 : -ETIMEDOUT; +} + +static void nb8800_pause_adv(struct net_device *dev) +{ + struct nb8800_priv *priv = netdev_priv(dev); + u32 adv = 0; + + if (!priv->phydev) + return; + + if (priv->pause_rx) + adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause; + if (priv->pause_tx) + adv ^= ADVERTISED_Asym_Pause; + + priv->phydev->supported |= adv; + priv->phydev->advertising |= adv; +} + +static int nb8800_open(struct net_device *dev) +{ + struct nb8800_priv *priv = netdev_priv(dev); + int err; + + /* clear any pending interrupts */ + nb8800_writel(priv, NB8800_RXC_SR, 0xf); + nb8800_writel(priv, NB8800_TXC_SR, 0xf); + + err = nb8800_dma_init(dev); + if (err) + return err; + + err = request_irq(dev->irq, nb8800_irq, 0, dev_name(&dev->dev), dev); + if (err) + goto err_free_dma; + + nb8800_mac_rx(dev, true); + nb8800_mac_tx(dev, true); + + priv->phydev = of_phy_connect(dev, priv->phy_node, + nb8800_link_reconfigure, 0, + priv->phy_mode); + if (!priv->phydev) + goto err_free_irq; + + nb8800_pause_adv(dev); + + netdev_reset_queue(dev); + napi_enable(&priv->napi); + netif_start_queue(dev); + + nb8800_start_rx(dev); + phy_start(priv->phydev); + + return 0; + +err_free_irq: + free_irq(dev->irq, dev); +err_free_dma: + nb8800_dma_free(dev); + + return err; +} + +static int nb8800_stop(struct net_device *dev) +{ + struct nb8800_priv *priv = netdev_priv(dev); + + phy_stop(priv->phydev); + + netif_stop_queue(dev); + napi_disable(&priv->napi); + + nb8800_dma_stop(dev); + nb8800_mac_rx(dev, false); + nb8800_mac_tx(dev, false); + + phy_disconnect(priv->phydev); + priv->phydev = NULL; + + free_irq(dev->irq, dev); + + nb8800_dma_free(dev); + + return 0; +} + +static int nb8800_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct nb8800_priv *priv = netdev_priv(dev); + + return phy_mii_ioctl(priv->phydev, rq, cmd); +} + +static const struct net_device_ops nb8800_netdev_ops = { + .ndo_open = nb8800_open, + .ndo_stop = nb8800_stop, + .ndo_start_xmit = nb8800_xmit, + .ndo_set_mac_address = nb8800_set_mac_address, + .ndo_set_rx_mode = nb8800_set_rx_mode, + .ndo_do_ioctl = nb8800_ioctl, + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, +}; + +static int nb8800_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct nb8800_priv *priv = netdev_priv(dev); + + if (!priv->phydev) + return -ENODEV; + + return phy_ethtool_gset(priv->phydev, cmd); +} + +static int nb8800_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct nb8800_priv *priv = netdev_priv(dev); + + if (!priv->phydev) + return -ENODEV; + + return phy_ethtool_sset(priv->phydev, cmd); +} + +static int nb8800_nway_reset(struct net_device *dev) +{ + struct nb8800_priv *priv = netdev_priv(dev); + + if (!priv->phydev) + return -ENODEV; + + return genphy_restart_aneg(priv->phydev); +} + +static void nb8800_get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *pp) +{ + struct nb8800_priv *priv = netdev_priv(dev); + + pp->autoneg = priv->pause_aneg; + pp->rx_pause = priv->pause_rx; + pp->tx_pause = priv->pause_tx; +} + +static int nb8800_set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *pp) +{ + struct nb8800_priv *priv = netdev_priv(dev); + + priv->pause_aneg = pp->autoneg; + priv->pause_rx = pp->rx_pause; + priv->pause_tx = pp->tx_pause; + + nb8800_pause_adv(dev); + + if (!priv->pause_aneg) + nb8800_pause_config(dev); + else if (priv->phydev) + phy_start_aneg(priv->phydev); + + return 0; +} + +static const char nb8800_stats_names[][ETH_GSTRING_LEN] = { + 
"rx_bytes_ok", + "rx_frames_ok", + "rx_undersize_frames", + "rx_fragment_frames", + "rx_64_byte_frames", + "rx_127_byte_frames", + "rx_255_byte_frames", + "rx_511_byte_frames", + "rx_1023_byte_frames", + "rx_max_size_frames", + "rx_oversize_frames", + "rx_bad_fcs_frames", + "rx_broadcast_frames", + "rx_multicast_frames", + "rx_control_frames", + "rx_pause_frames", + "rx_unsup_control_frames", + "rx_align_error_frames", + "rx_overrun_frames", + "rx_jabber_frames", + "rx_bytes", + "rx_frames", + + "tx_bytes_ok", + "tx_frames_ok", + "tx_64_byte_frames", + "tx_127_byte_frames", + "tx_255_byte_frames", + "tx_511_byte_frames", + "tx_1023_byte_frames", + "tx_max_size_frames", + "tx_oversize_frames", + "tx_broadcast_frames", + "tx_multicast_frames", + "tx_control_frames", + "tx_pause_frames", + "tx_underrun_frames", + "tx_single_collision_frames", + "tx_multi_collision_frames", + "tx_deferred_collision_frames", + "tx_late_collision_frames", + "tx_excessive_collision_frames", + "tx_bytes", + "tx_frames", + "tx_collisions", +}; + +#define NB8800_NUM_STATS ARRAY_SIZE(nb8800_stats_names) + +static int nb8800_get_sset_count(struct net_device *dev, int sset) +{ + if (sset == ETH_SS_STATS) + return NB8800_NUM_STATS; + + return -EOPNOTSUPP; +} + +static void nb8800_get_strings(struct net_device *dev, u32 sset, u8 *buf) +{ + if (sset == ETH_SS_STATS) + memcpy(buf, &nb8800_stats_names, sizeof(nb8800_stats_names)); +} + +static u32 nb8800_read_stat(struct net_device *dev, int index) +{ + struct nb8800_priv *priv = netdev_priv(dev); + + nb8800_writeb(priv, NB8800_STAT_INDEX, index); + + return nb8800_readl(priv, NB8800_STAT_DATA); +} + +static void nb8800_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *estats, u64 *st) +{ + unsigned int i; + u32 rx, tx; + + for (i = 0; i < NB8800_NUM_STATS / 2; i++) { + rx = nb8800_read_stat(dev, i); + tx = nb8800_read_stat(dev, i | 0x80); + st[i] = rx; + st[i + NB8800_NUM_STATS / 2] = tx; + } +} + +static const struct ethtool_ops nb8800_ethtool_ops = { + .get_settings = nb8800_get_settings, + .set_settings = nb8800_set_settings, + .nway_reset = nb8800_nway_reset, + .get_link = ethtool_op_get_link, + .get_pauseparam = nb8800_get_pauseparam, + .set_pauseparam = nb8800_set_pauseparam, + .get_sset_count = nb8800_get_sset_count, + .get_strings = nb8800_get_strings, + .get_ethtool_stats = nb8800_get_ethtool_stats, +}; + +static int nb8800_hw_init(struct net_device *dev) +{ + struct nb8800_priv *priv = netdev_priv(dev); + u32 val; + + val = TX_RETRY_EN | TX_PAD_EN | TX_APPEND_FCS; + nb8800_writeb(priv, NB8800_TX_CTL1, val); + + /* Collision retry count */ + nb8800_writeb(priv, NB8800_TX_CTL2, 5); + + val = RX_PAD_STRIP | RX_AF_EN; + nb8800_writeb(priv, NB8800_RX_CTL, val); + + /* Chosen by fair dice roll */ + nb8800_writeb(priv, NB8800_RANDOM_SEED, 4); + + /* TX cycles per deferral period */ + nb8800_writeb(priv, NB8800_TX_SDP, 12); + + /* The following three threshold values have been + * experimentally determined for good results. 
+ */ + + /* RX/TX FIFO threshold for partial empty (64-bit entries) */ + nb8800_writeb(priv, NB8800_PE_THRESHOLD, 0); + + /* RX/TX FIFO threshold for partial full (64-bit entries) */ + nb8800_writeb(priv, NB8800_PF_THRESHOLD, 255); + + /* Buffer size for transmit (64-bit entries) */ + nb8800_writeb(priv, NB8800_TX_BUFSIZE, 64); + + /* Configure tx DMA */ + + val = nb8800_readl(priv, NB8800_TXC_CR); + val &= TCR_LE; /* keep endian setting */ + val |= TCR_DM; /* DMA descriptor mode */ + val |= TCR_RS; /* automatically store tx status */ + val |= TCR_DIE; /* interrupt on DMA chain completion */ + val |= TCR_TFI(7); /* interrupt after 7 frames transmitted */ + val |= TCR_BTS(2); /* 32-byte bus transaction size */ + nb8800_writel(priv, NB8800_TXC_CR, val); + + /* TX complete interrupt after 10 ms or 7 frames (see above) */ + val = clk_get_rate(priv->clk) / 100; + nb8800_writel(priv, NB8800_TX_ITR, val); + + /* Configure rx DMA */ + + val = nb8800_readl(priv, NB8800_RXC_CR); + val &= RCR_LE; /* keep endian setting */ + val |= RCR_DM; /* DMA descriptor mode */ + val |= RCR_RS; /* automatically store rx status */ + val |= RCR_DIE; /* interrupt at end of DMA chain */ + val |= RCR_RFI(7); /* interrupt after 7 frames received */ + val |= RCR_BTS(2); /* 32-byte bus transaction size */ + nb8800_writel(priv, NB8800_RXC_CR, val); + + /* The rx interrupt can fire before the DMA has completed + * unless a small delay is added. 50 us is hopefully enough. + */ + priv->rx_itr_irq = clk_get_rate(priv->clk) / 20000; + + /* In NAPI poll mode we want to disable interrupts, but the + * hardware does not permit this. Delay 10 ms instead. + */ + priv->rx_itr_poll = clk_get_rate(priv->clk) / 100; + + nb8800_writel(priv, NB8800_RX_ITR, priv->rx_itr_irq); + + priv->rx_dma_config = RX_BUF_SIZE | DESC_BTS(2) | DESC_DS | DESC_EOF; + + /* Flow control settings */ + + /* Pause time of 0.1 ms */ + val = 100000 / 512; + nb8800_writeb(priv, NB8800_PQ1, val >> 8); + nb8800_writeb(priv, NB8800_PQ2, val & 0xff); + + /* Auto-negotiate by default */ + priv->pause_aneg = true; + priv->pause_rx = true; + priv->pause_tx = true; + + nb8800_mc_init(dev, 0); + + return 0; +} + +static int nb8800_tangox_init(struct net_device *dev) +{ + struct nb8800_priv *priv = netdev_priv(dev); + u32 pad_mode = PAD_MODE_MII; + + switch (priv->phy_mode) { + case PHY_INTERFACE_MODE_MII: + case PHY_INTERFACE_MODE_GMII: + pad_mode = PAD_MODE_MII; + break; + + case PHY_INTERFACE_MODE_RGMII: + pad_mode = PAD_MODE_RGMII; + break; + + case PHY_INTERFACE_MODE_RGMII_TXID: + pad_mode = PAD_MODE_RGMII | PAD_MODE_GTX_CLK_DELAY; + break; + + default: + dev_err(dev->dev.parent, "unsupported phy mode %s\n", + phy_modes(priv->phy_mode)); + return -EINVAL; + } + + nb8800_writeb(priv, NB8800_TANGOX_PAD_MODE, pad_mode); + + return 0; +} + +static int nb8800_tangox_reset(struct net_device *dev) +{ + struct nb8800_priv *priv = netdev_priv(dev); + int clk_div; + + nb8800_writeb(priv, NB8800_TANGOX_RESET, 0); + usleep_range(1000, 10000); + nb8800_writeb(priv, NB8800_TANGOX_RESET, 1); + + wmb(); /* ensure reset is cleared before proceeding */ + + clk_div = DIV_ROUND_UP(clk_get_rate(priv->clk), 2 * MAX_MDC_CLOCK); + nb8800_writew(priv, NB8800_TANGOX_MDIO_CLKDIV, clk_div); + + return 0; +} + +static const struct nb8800_ops nb8800_tangox_ops = { + .init = nb8800_tangox_init, + .reset = nb8800_tangox_reset, +}; + +static int nb8800_tango4_init(struct net_device *dev) +{ + struct nb8800_priv *priv = netdev_priv(dev); + int err; + + err = nb8800_tangox_init(dev); + if (err) + return 
err; + + /* On tango4 interrupt on DMA completion per frame works and gives + * better performance despite generating more rx interrupts. + */ + + /* Disable unnecessary interrupt on rx completion */ + nb8800_clearl(priv, NB8800_RXC_CR, RCR_RFI(7)); + + /* Request interrupt on descriptor DMA completion */ + priv->rx_dma_config |= DESC_ID; + + return 0; +} + +static const struct nb8800_ops nb8800_tango4_ops = { + .init = nb8800_tango4_init, + .reset = nb8800_tangox_reset, +}; + +static const struct of_device_id nb8800_dt_ids[] = { + { + .compatible = "aurora,nb8800", + }, + { + .compatible = "sigma,smp8642-ethernet", + .data = &nb8800_tangox_ops, + }, + { + .compatible = "sigma,smp8734-ethernet", + .data = &nb8800_tango4_ops, + }, + { } +}; + +static int nb8800_probe(struct platform_device *pdev) +{ + const struct of_device_id *match; + const struct nb8800_ops *ops = NULL; + struct nb8800_priv *priv; + struct resource *res; + struct net_device *dev; + struct mii_bus *bus; + const unsigned char *mac; + void __iomem *base; + int irq; + int ret; + + match = of_match_device(nb8800_dt_ids, &pdev->dev); + if (match) + ops = match->data; + + irq = platform_get_irq(pdev, 0); + if (irq <= 0) { + dev_err(&pdev->dev, "No IRQ\n"); + return -EINVAL; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(base)) + return PTR_ERR(base); + + dev_dbg(&pdev->dev, "AU-NB8800 Ethernet at %pa\n", &res->start); + + dev = alloc_etherdev(sizeof(*priv)); + if (!dev) + return -ENOMEM; + + platform_set_drvdata(pdev, dev); + SET_NETDEV_DEV(dev, &pdev->dev); + + priv = netdev_priv(dev); + priv->base = base; + + priv->phy_mode = of_get_phy_mode(pdev->dev.of_node); + if (priv->phy_mode < 0) + priv->phy_mode = PHY_INTERFACE_MODE_RGMII; + + priv->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(priv->clk)) { + dev_err(&pdev->dev, "failed to get clock\n"); + ret = PTR_ERR(priv->clk); + goto err_free_dev; + } + + ret = clk_prepare_enable(priv->clk); + if (ret) + goto err_free_dev; + + spin_lock_init(&priv->tx_lock); + + if (ops && ops->reset) { + ret = ops->reset(dev); + if (ret) + goto err_free_dev; + } + + bus = devm_mdiobus_alloc(&pdev->dev); + if (!bus) { + ret = -ENOMEM; + goto err_disable_clk; + } + + bus->name = "nb8800-mii"; + bus->read = nb8800_mdio_read; + bus->write = nb8800_mdio_write; + bus->parent = &pdev->dev; + snprintf(bus->id, MII_BUS_ID_SIZE, "%lx.nb8800-mii", + (unsigned long)res->start); + bus->priv = priv; + + ret = of_mdiobus_register(bus, pdev->dev.of_node); + if (ret) { + dev_err(&pdev->dev, "failed to register MII bus\n"); + goto err_disable_clk; + } + + priv->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); + if (!priv->phy_node) { + dev_err(&pdev->dev, "no PHY specified\n"); + ret = -ENODEV; + goto err_free_bus; + } + + priv->mii_bus = bus; + + ret = nb8800_hw_init(dev); + if (ret) + goto err_free_bus; + + if (ops && ops->init) { + ret = ops->init(dev); + if (ret) + goto err_free_bus; + } + + dev->netdev_ops = &nb8800_netdev_ops; + dev->ethtool_ops = &nb8800_ethtool_ops; + dev->flags |= IFF_MULTICAST; + dev->irq = irq; + + mac = of_get_mac_address(pdev->dev.of_node); + if (mac) + ether_addr_copy(dev->dev_addr, mac); + + if (!is_valid_ether_addr(dev->dev_addr)) + eth_hw_addr_random(dev); + + nb8800_update_mac_addr(dev); + + netif_carrier_off(dev); + + ret = register_netdev(dev); + if (ret) { + netdev_err(dev, "failed to register netdev\n"); + goto err_free_dma; + } + + netif_napi_add(dev, &priv->napi, nb8800_poll, 
NAPI_POLL_WEIGHT); + + netdev_info(dev, "MAC address %pM\n", dev->dev_addr); + + return 0; + +err_free_dma: + nb8800_dma_free(dev); +err_free_bus: + mdiobus_unregister(bus); +err_disable_clk: + clk_disable_unprepare(priv->clk); +err_free_dev: + free_netdev(dev); + + return ret; +} + +static int nb8800_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct nb8800_priv *priv = netdev_priv(ndev); + + unregister_netdev(ndev); + + mdiobus_unregister(priv->mii_bus); + + clk_disable_unprepare(priv->clk); + + nb8800_dma_free(ndev); + free_netdev(ndev); + + return 0; +} + +static struct platform_driver nb8800_driver = { + .driver = { + .name = "nb8800", + .of_match_table = nb8800_dt_ids, + }, + .probe = nb8800_probe, + .remove = nb8800_remove, +}; + +module_platform_driver(nb8800_driver); + +MODULE_DESCRIPTION("Aurora AU-NB8800 Ethernet driver"); +MODULE_AUTHOR("Mans Rullgard <mans@mansr.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/aurora/nb8800.h b/drivers/net/ethernet/aurora/nb8800.h new file mode 100644 index 000000000000..e5adbc2aac9f --- /dev/null +++ b/drivers/net/ethernet/aurora/nb8800.h @@ -0,0 +1,316 @@ +#ifndef _NB8800_H_ +#define _NB8800_H_ + +#include <linux/types.h> +#include <linux/skbuff.h> +#include <linux/phy.h> +#include <linux/clk.h> +#include <linux/bitops.h> + +#define RX_DESC_COUNT 256 +#define TX_DESC_COUNT 256 + +#define NB8800_DESC_LOW 4 + +#define RX_BUF_SIZE 1552 + +#define RX_COPYBREAK 256 +#define RX_COPYHDR 128 + +#define MAX_MDC_CLOCK 2500000 + +/* Stargate Solutions SSN8800 core registers */ +#define NB8800_TX_CTL1 0x000 +#define TX_TPD BIT(5) +#define TX_APPEND_FCS BIT(4) +#define TX_PAD_EN BIT(3) +#define TX_RETRY_EN BIT(2) +#define TX_EN BIT(0) + +#define NB8800_TX_CTL2 0x001 + +#define NB8800_RX_CTL 0x004 +#define RX_BC_DISABLE BIT(7) +#define RX_RUNT BIT(6) +#define RX_AF_EN BIT(5) +#define RX_PAUSE_EN BIT(3) +#define RX_SEND_CRC BIT(2) +#define RX_PAD_STRIP BIT(1) +#define RX_EN BIT(0) + +#define NB8800_RANDOM_SEED 0x008 +#define NB8800_TX_SDP 0x14 +#define NB8800_TX_TPDP1 0x18 +#define NB8800_TX_TPDP2 0x19 +#define NB8800_SLOT_TIME 0x1c + +#define NB8800_MDIO_CMD 0x020 +#define MDIO_CMD_GO BIT(31) +#define MDIO_CMD_WR BIT(26) +#define MDIO_CMD_ADDR(x) ((x) << 21) +#define MDIO_CMD_REG(x) ((x) << 16) +#define MDIO_CMD_DATA(x) ((x) << 0) + +#define NB8800_MDIO_STS 0x024 +#define MDIO_STS_ERR BIT(31) + +#define NB8800_MC_ADDR(i) (0x028 + (i)) +#define NB8800_MC_INIT 0x02e +#define NB8800_UC_ADDR(i) (0x03c + (i)) + +#define NB8800_MAC_MODE 0x044 +#define RGMII_MODE BIT(7) +#define HALF_DUPLEX BIT(4) +#define BURST_EN BIT(3) +#define LOOPBACK_EN BIT(2) +#define GMAC_MODE BIT(0) + +#define NB8800_IC_THRESHOLD 0x050 +#define NB8800_PE_THRESHOLD 0x051 +#define NB8800_PF_THRESHOLD 0x052 +#define NB8800_TX_BUFSIZE 0x054 +#define NB8800_FIFO_CTL 0x056 +#define NB8800_PQ1 0x060 +#define NB8800_PQ2 0x061 +#define NB8800_SRC_ADDR(i) (0x06a + (i)) +#define NB8800_STAT_DATA 0x078 +#define NB8800_STAT_INDEX 0x07c +#define NB8800_STAT_CLEAR 0x07d + +#define NB8800_SLEEP_MODE 0x07e +#define SLEEP_MODE BIT(0) + +#define NB8800_WAKEUP 0x07f +#define WAKEUP BIT(0) + +/* Aurora NB8800 host interface registers */ +#define NB8800_TXC_CR 0x100 +#define TCR_LK BIT(12) +#define TCR_DS BIT(11) +#define TCR_BTS(x) (((x) & 0x7) << 8) +#define TCR_DIE BIT(7) +#define TCR_TFI(x) (((x) & 0x7) << 4) +#define TCR_LE BIT(3) +#define TCR_RS BIT(2) +#define TCR_DM BIT(1) +#define TCR_EN BIT(0) + +#define NB8800_TXC_SR 0x104 
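Editor's note — an illustrative sketch, not part of this patch: the TCR_* macros above pack flag bits and small multi-bit fields into the TXC_CR DMA control register. A hypothetical helper (the name and flag selection are the editor's, mirroring what nb8800_hw_init() writes) showing how such a control word is composed:

static inline u32 nb8800_txc_cr_sketch(void)
{
	/* illustration only; flag choice mirrors nb8800_hw_init() */
	return TCR_DM |		/* DMA descriptor mode */
	       TCR_RS |		/* automatically store tx status */
	       TCR_DIE |	/* interrupt on DMA chain completion */
	       TCR_TFI(7) |	/* interrupt after 7 frames sent */
	       TCR_BTS(2);	/* 32-byte bus transaction size */
}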
+#define TSR_DE BIT(3) +#define TSR_DI BIT(2) +#define TSR_TO BIT(1) +#define TSR_TI BIT(0) + +#define NB8800_TX_SAR 0x108 +#define NB8800_TX_DESC_ADDR 0x10c + +#define NB8800_TX_REPORT_ADDR 0x110 +#define TX_BYTES_TRANSFERRED(x) (((x) >> 16) & 0xffff) +#define TX_FIRST_DEFERRAL BIT(7) +#define TX_EARLY_COLLISIONS(x) (((x) >> 3) & 0xf) +#define TX_LATE_COLLISION BIT(2) +#define TX_PACKET_DROPPED BIT(1) +#define TX_FIFO_UNDERRUN BIT(0) +#define IS_TX_ERROR(r) ((r) & 0x07) + +#define NB8800_TX_FIFO_SR 0x114 +#define NB8800_TX_ITR 0x118 + +#define NB8800_RXC_CR 0x200 +#define RCR_FL BIT(13) +#define RCR_LK BIT(12) +#define RCR_DS BIT(11) +#define RCR_BTS(x) (((x) & 7) << 8) +#define RCR_DIE BIT(7) +#define RCR_RFI(x) (((x) & 7) << 4) +#define RCR_LE BIT(3) +#define RCR_RS BIT(2) +#define RCR_DM BIT(1) +#define RCR_EN BIT(0) + +#define NB8800_RXC_SR 0x204 +#define RSR_DE BIT(3) +#define RSR_DI BIT(2) +#define RSR_RO BIT(1) +#define RSR_RI BIT(0) + +#define NB8800_RX_SAR 0x208 +#define NB8800_RX_DESC_ADDR 0x20c + +#define NB8800_RX_REPORT_ADDR 0x210 +#define RX_BYTES_TRANSFERRED(x) (((x) >> 16) & 0xFFFF) +#define RX_MULTICAST_PKT BIT(9) +#define RX_BROADCAST_PKT BIT(8) +#define RX_LENGTH_ERR BIT(7) +#define RX_FCS_ERR BIT(6) +#define RX_RUNT_PKT BIT(5) +#define RX_FIFO_OVERRUN BIT(4) +#define RX_LATE_COLLISION BIT(3) +#define RX_ALIGNMENT_ERROR BIT(2) +#define RX_ERROR_MASK 0xfc +#define IS_RX_ERROR(r) ((r) & RX_ERROR_MASK) + +#define NB8800_RX_FIFO_SR 0x214 +#define NB8800_RX_ITR 0x218 + +/* Sigma Designs SMP86xx additional registers */ +#define NB8800_TANGOX_PAD_MODE 0x400 +#define PAD_MODE_MASK 0x7 +#define PAD_MODE_MII 0x0 +#define PAD_MODE_RGMII 0x1 +#define PAD_MODE_GTX_CLK_INV BIT(3) +#define PAD_MODE_GTX_CLK_DELAY BIT(4) + +#define NB8800_TANGOX_MDIO_CLKDIV 0x420 +#define NB8800_TANGOX_RESET 0x424 + +/* Hardware DMA descriptor */ +struct nb8800_dma_desc { + u32 s_addr; /* start address */ + u32 n_addr; /* next descriptor address */ + u32 r_addr; /* report address */ + u32 config; +} __aligned(8); + +#define DESC_ID BIT(23) +#define DESC_EOC BIT(22) +#define DESC_EOF BIT(21) +#define DESC_LK BIT(20) +#define DESC_DS BIT(19) +#define DESC_BTS(x) (((x) & 0x7) << 16) + +/* DMA descriptor and associated data for rx. + * Allocated from coherent memory. + */ +struct nb8800_rx_desc { + /* DMA descriptor */ + struct nb8800_dma_desc desc; + + /* Status report filled in by hardware */ + u32 report; +}; + +/* Address of buffer on rx ring */ +struct nb8800_rx_buf { + struct page *page; + unsigned long offset; +}; + +/* DMA descriptors and associated data for tx. + * Allocated from coherent memory. + */ +struct nb8800_tx_desc { + /* DMA descriptor. The second descriptor is used if packet + * data is unaligned. + */ + struct nb8800_dma_desc desc[2]; + + /* Status report filled in by hardware */ + u32 report; + + /* Bounce buffer for initial unaligned part of packet */ + u8 buf[8] __aligned(8); +}; + +/* Packet in tx queue */ +struct nb8800_tx_buf { + /* Currently queued skb */ + struct sk_buff *skb; + + /* DMA address of the first descriptor */ + dma_addr_t dma_desc; + + /* DMA address of packet data */ + dma_addr_t dma_addr; + + /* Length of DMA mapping, less than skb->len if alignment + * buffer is used. 
+ */ + unsigned int dma_len; + + /* Number of packets in chain starting here */ + unsigned int chain_len; + + /* Packet chain ready to be submitted to hardware */ + bool ready; +}; + +struct nb8800_priv { + struct napi_struct napi; + + void __iomem *base; + + /* RX DMA descriptors */ + struct nb8800_rx_desc *rx_descs; + + /* RX buffers referenced by DMA descriptors */ + struct nb8800_rx_buf *rx_bufs; + + /* Current end of chain */ + u32 rx_eoc; + + /* Value for rx interrupt time register in NAPI interrupt mode */ + u32 rx_itr_irq; + + /* Value for rx interrupt time register in NAPI poll mode */ + u32 rx_itr_poll; + + /* Value for config field of rx DMA descriptors */ + u32 rx_dma_config; + + /* TX DMA descriptors */ + struct nb8800_tx_desc *tx_descs; + + /* TX packet queue */ + struct nb8800_tx_buf *tx_bufs; + + /* Number of free tx queue entries */ + atomic_t tx_free; + + /* First free tx queue entry */ + u32 tx_next; + + /* Next buffer to transmit */ + u32 tx_queue; + + /* Start of current packet chain */ + struct nb8800_tx_buf *tx_chain; + + /* Next buffer to reclaim */ + u32 tx_done; + + /* Lock for DMA activation */ + spinlock_t tx_lock; + + struct mii_bus *mii_bus; + struct device_node *phy_node; + struct phy_device *phydev; + + /* PHY connection type from DT */ + int phy_mode; + + /* Current link status */ + int speed; + int duplex; + int link; + + /* Pause settings */ + bool pause_aneg; + bool pause_rx; + bool pause_tx; + + /* DMA base address of rx descriptors, see rx_descs above */ + dma_addr_t rx_desc_dma; + + /* DMA base address of tx descriptors, see tx_descs above */ + dma_addr_t tx_desc_dma; + + struct clk *clk; +}; + +struct nb8800_ops { + int (*init)(struct net_device *dev); + int (*reset)(struct net_device *dev); +}; + +#endif /* _NB8800_H_ */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index f1d62d5dbaff..2e611dc5f162 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -10139,8 +10139,8 @@ static void __bnx2x_del_vxlan_port(struct bnx2x *bp, u16 port) DP(BNX2X_MSG_SP, "Invalid vxlan port\n"); return; } - bp->vxlan_dst_port--; - if (bp->vxlan_dst_port) + bp->vxlan_dst_port_count--; + if (bp->vxlan_dst_port_count) return; if (netif_running(bp->dev)) { @@ -13207,7 +13207,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev, /* VF with OLD Hypervisor or old PF do not support filtering */ if (IS_PF(bp)) { - if (CHIP_IS_E1x(bp)) + if (chip_is_e1x) bp->accept_any_vlan = true; else dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index db15c5ee09c5..bdf094fb6ef9 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -3625,6 +3625,7 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp) pf->fw_fid = le16_to_cpu(resp->fid); pf->port_id = le16_to_cpu(resp->port_id); memcpy(pf->mac_addr, resp->perm_mac_address, ETH_ALEN); + memcpy(bp->dev->dev_addr, pf->mac_addr, ETH_ALEN); pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings); @@ -3648,8 +3649,11 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp) vf->fw_fid = le16_to_cpu(resp->fid); memcpy(vf->mac_addr, resp->perm_mac_address, ETH_ALEN); - if (!is_valid_ether_addr(vf->mac_addr)) - random_ether_addr(vf->mac_addr); + if 
(is_valid_ether_addr(vf->mac_addr)) + /* overwrite netdev dev_addr with admin VF MAC */ + memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN); + else + random_ether_addr(bp->dev->dev_addr); vf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); @@ -3880,6 +3884,8 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp) #endif } +static int bnxt_cfg_rx_mode(struct bnxt *); + static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) { int rc = 0; @@ -3946,11 +3952,9 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) bp->vnic_info[0].rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; - rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); - if (rc) { - netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n", rc); + rc = bnxt_cfg_rx_mode(bp); + if (rc) goto err_out; - } rc = bnxt_hwrm_set_coal(bp); if (rc) @@ -4865,7 +4869,7 @@ static void bnxt_set_rx_mode(struct net_device *dev) } } -static void bnxt_cfg_rx_mode(struct bnxt *bp) +static int bnxt_cfg_rx_mode(struct bnxt *bp) { struct net_device *dev = bp->dev; struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; @@ -4914,6 +4918,7 @@ static void bnxt_cfg_rx_mode(struct bnxt *bp) netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc); vnic->uc_filter_count = i; + return rc; } } @@ -4922,6 +4927,8 @@ skip_uc: if (rc) netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n", rc); + + return rc; } static netdev_features_t bnxt_fix_features(struct net_device *dev, @@ -5212,13 +5219,27 @@ init_err: static int bnxt_change_mac_addr(struct net_device *dev, void *p) { struct sockaddr *addr = p; + struct bnxt *bp = netdev_priv(dev); + int rc = 0; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; +#ifdef CONFIG_BNXT_SRIOV + if (BNXT_VF(bp) && is_valid_ether_addr(bp->vf.mac_addr)) + return -EADDRNOTAVAIL; +#endif + + if (ether_addr_equal(addr->sa_data, dev->dev_addr)) + return 0; + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + if (netif_running(dev)) { + bnxt_close_nic(bp, false, false); + rc = bnxt_open_nic(bp, false, false); + } - return 0; + return rc; } /* rtnl_lock held */ @@ -5686,15 +5707,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) bnxt_set_tpa_flags(bp); bnxt_set_ring_params(bp); dflt_rings = netif_get_num_default_rss_queues(); - if (BNXT_PF(bp)) { - memcpy(dev->dev_addr, bp->pf.mac_addr, ETH_ALEN); + if (BNXT_PF(bp)) bp->pf.max_irqs = max_irqs; - } else { #if defined(CONFIG_BNXT_SRIOV) - memcpy(dev->dev_addr, bp->vf.mac_addr, ETH_ALEN); + else bp->vf.max_irqs = max_irqs; #endif - } bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings); bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings); bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index f4cf68861069..7a9af2887d8e 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -804,10 +804,9 @@ void bnxt_update_vf_mac(struct bnxt *bp) if (!is_valid_ether_addr(resp->perm_mac_address)) goto update_vf_mac_exit; - if (ether_addr_equal(resp->perm_mac_address, bp->vf.mac_addr)) - goto update_vf_mac_exit; - - memcpy(bp->vf.mac_addr, resp->perm_mac_address, ETH_ALEN); + if (!ether_addr_equal(resp->perm_mac_address, bp->vf.mac_addr)) + memcpy(bp->vf.mac_addr, resp->perm_mac_address, ETH_ALEN); + /* overwrite netdev dev_addr with admin VF MAC */ memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
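/* Editorial aside, not part of the original patch: the is_valid_ether_addr()
 * checks in these bnxt hunks reduce to "unicast and non-zero". A minimal
 * standalone sketch of that test (hypothetical helper, not driver code):
 *
 *	static bool mac_ok(const u8 mac[ETH_ALEN])
 *	{
 *		int i, nz = 0;
 *
 *		for (i = 0; i < ETH_ALEN; i++)
 *			nz |= mac[i];
 *		return !(mac[0] & 1) && nz;	// not multicast, not all-zero
 *	}
 *
 * Only when the admin-assigned VF MAC passes such a test does the driver
 * adopt it for the netdev; otherwise a random address is used instead.
 */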
update_vf_mac_exit: mutex_unlock(&bp->hwrm_cmd_lock); diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index 88c1e1a834f8..169059c92f80 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -1682,6 +1682,8 @@ static void macb_init_hw(struct macb *bp) macb_set_hwaddr(bp); config = macb_mdc_clk_div(bp); + if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) + config |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL); config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Make eth data aligned */ config |= MACB_BIT(PAE); /* PAuse Enable */ config |= MACB_BIT(DRFCS); /* Discard Rx FCS */ @@ -2416,6 +2418,8 @@ static int macb_init(struct platform_device *pdev) /* Set MII management clock divider */ val = macb_mdc_clk_div(bp); val |= macb_dbw(bp); + if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) + val |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL); macb_writel(bp, NCFGR, val); return 0; diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index 6e1faea00ca8..d83b0db77821 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -215,12 +215,17 @@ /* GEM specific NCFGR bitfields. */ #define GEM_GBE_OFFSET 10 /* Gigabit mode enable */ #define GEM_GBE_SIZE 1 +#define GEM_PCSSEL_OFFSET 11 +#define GEM_PCSSEL_SIZE 1 #define GEM_CLK_OFFSET 18 /* MDC clock division */ #define GEM_CLK_SIZE 3 #define GEM_DBW_OFFSET 21 /* Data bus width */ #define GEM_DBW_SIZE 2 #define GEM_RXCOEN_OFFSET 24 #define GEM_RXCOEN_SIZE 1 +#define GEM_SGMIIEN_OFFSET 27 +#define GEM_SGMIIEN_SIZE 1 + /* Constants for data bus width. */ #define GEM_DBW32 0 /* 32 bit AMBA AHB data bus width */ diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index f683d97d7614..b89504405b72 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -560,7 +560,7 @@ static int liquidio_resume(struct pci_dev *pdev) #endif /* For PCI-E Advanced Error Recovery (AER) Interface */ -static struct pci_error_handlers liquidio_err_handler = { +static const struct pci_error_handlers liquidio_err_handler = { .error_detected = liquidio_pcie_error_detected, .mmio_enabled = liquidio_pcie_mmio_enabled, .slot_reset = liquidio_pcie_slot_reset, diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h index d3950b20feb9..39ca6744a4e6 100644 --- a/drivers/net/ethernet/cavium/thunder/nic.h +++ b/drivers/net/ethernet/cavium/thunder/nic.h @@ -120,10 +120,9 @@ * Calculated for SCLK of 700Mhz * value written should be a 1/16th of what is expected * - * 1 tick per 0.05usec = value of 2.2 - * This 10% would be covered in CQ timer thresh value + * 1 tick per 0.025usec */ -#define NICPF_CLK_PER_INT_TICK 2 +#define NICPF_CLK_PER_INT_TICK 1 /* Time to wait before we decide that a SQ is stuck. 
* diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c index c561fdcb79a7..4b7fd63ae57c 100644 --- a/drivers/net/ethernet/cavium/thunder/nic_main.c +++ b/drivers/net/ethernet/cavium/thunder/nic_main.c @@ -37,6 +37,7 @@ struct nicpf { #define NIC_GET_BGX_FROM_VF_LMAC_MAP(map) ((map >> 4) & 0xF) #define NIC_GET_LMAC_FROM_VF_LMAC_MAP(map) (map & 0xF) u8 vf_lmac_map[MAX_LMAC]; + u8 lmac_cnt; struct delayed_work dwork; struct workqueue_struct *check_link; u8 link[MAX_LMAC]; @@ -279,6 +280,7 @@ static void nic_set_lmac_vf_mapping(struct nicpf *nic) u64 lmac_credit; nic->num_vf_en = 0; + nic->lmac_cnt = 0; for (bgx = 0; bgx < NIC_MAX_BGX; bgx++) { if (!(bgx_map & (1 << bgx))) @@ -288,6 +290,7 @@ static void nic_set_lmac_vf_mapping(struct nicpf *nic) nic->vf_lmac_map[next_bgx_lmac++] = NIC_SET_VF_LMAC_MAP(bgx, lmac); nic->num_vf_en += lmac_cnt; + nic->lmac_cnt += lmac_cnt; /* Program LMAC credits */ lmac_credit = (1ull << 1); /* channel credit enable */ @@ -715,6 +718,13 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) case NIC_MBOX_MSG_CFG_DONE: /* Last message of VF config msg sequence */ nic->vf_enabled[vf] = true; + if (vf >= nic->lmac_cnt) + goto unlock; + + bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); + lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); + + bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, true); goto unlock; case NIC_MBOX_MSG_SHUTDOWN: /* First msg in VF teardown sequence */ @@ -722,6 +732,14 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) if (vf >= nic->num_vf_en) nic->sqs_used[vf - nic->num_vf_en] = false; nic->pqs_vf[vf] = 0; + + if (vf >= nic->lmac_cnt) + break; + + bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); + lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); + + bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, false); break; case NIC_MBOX_MSG_ALLOC_SQS: nic_alloc_sqs(nic, &mbx.sqs_alloc); @@ -940,7 +958,7 @@ static void nic_poll_for_link(struct work_struct *work) mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE; - for (vf = 0; vf < nic->num_vf_en; vf++) { + for (vf = 0; vf < nic->lmac_cnt; vf++) { /* Poll only if VF is UP */ if (!nic->vf_enabled[vf]) continue; @@ -1074,8 +1092,7 @@ static void nic_remove(struct pci_dev *pdev) if (nic->check_link) { /* Destroy work Queue */ - cancel_delayed_work(&nic->dwork); - flush_workqueue(nic->check_link); + cancel_delayed_work_sync(&nic->dwork); destroy_workqueue(nic->check_link); } diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c index af54c10945c2..a12b2e38cf61 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c @@ -112,6 +112,13 @@ static int nicvf_get_settings(struct net_device *netdev, cmd->supported = 0; cmd->transceiver = XCVR_EXTERNAL; + + if (!nic->link_up) { + cmd->duplex = DUPLEX_UNKNOWN; + ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN); + return 0; + } + if (nic->speed <= 1000) { cmd->port = PORT_MII; cmd->autoneg = AUTONEG_ENABLE; @@ -125,6 +132,13 @@ static int nicvf_get_settings(struct net_device *netdev, return 0; } +static u32 nicvf_get_link(struct net_device *netdev) +{ + struct nicvf *nic = netdev_priv(netdev); + + return nic->link_up; +} + static void nicvf_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info) { @@ -660,7 +674,7 @@ static int nicvf_set_channels(struct net_device *dev, static const struct ethtool_ops nicvf_ethtool_ops = { 
.get_settings = nicvf_get_settings, - .get_link = ethtool_op_get_link, + .get_link = nicvf_get_link, .get_drvinfo = nicvf_get_drvinfo, .get_msglevel = nicvf_get_msglevel, .set_msglevel = nicvf_set_msglevel, diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index a9377727c11c..dde8dc720cd3 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -1057,6 +1057,7 @@ int nicvf_stop(struct net_device *netdev) netif_carrier_off(netdev); netif_tx_stop_all_queues(nic->netdev); + nic->link_up = false; /* Teardown secondary qsets first */ if (!nic->sqs_mode) { @@ -1211,9 +1212,6 @@ int nicvf_open(struct net_device *netdev) nic->drv_stats.txq_stop = 0; nic->drv_stats.txq_wake = 0; - netif_carrier_on(netdev); - netif_tx_start_all_queues(netdev); - return 0; cleanup: nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0); @@ -1583,8 +1581,14 @@ err_disable_device: static void nicvf_remove(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); - struct nicvf *nic = netdev_priv(netdev); - struct net_device *pnetdev = nic->pnicvf->netdev; + struct nicvf *nic; + struct net_device *pnetdev; + + if (!netdev) + return; + + nic = netdev_priv(netdev); + pnetdev = nic->pnicvf->netdev; /* Check if this Qset is assigned to different VF. * If yes, clean primary and all secondary Qsets. diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index e404ea837727..206b6a71a545 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c @@ -592,7 +592,7 @@ void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs, /* Set threshold value for interrupt generation */ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh); nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, - qidx, nic->cq_coalesce_usecs); + qidx, CMP_QUEUE_TIMER_THRESH); } /* Configures transmit queue */ diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h index fb4957d09914..033e8306e91c 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h @@ -76,7 +76,7 @@ #define CMP_QSIZE CMP_QUEUE_SIZE2 #define CMP_QUEUE_LEN (1ULL << (CMP_QSIZE + 10)) #define CMP_QUEUE_CQE_THRESH 0 -#define CMP_QUEUE_TIMER_THRESH 220 /* 10usec */ +#define CMP_QUEUE_TIMER_THRESH 80 /* ~2usec */ #define RBDR_SIZE RBDR_SIZE0 #define RCV_BUF_COUNT (1ULL << (RBDR_SIZE + 13)) diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index 180aa9fabf48..9df26c2263bc 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c @@ -186,6 +186,23 @@ void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac) } EXPORT_SYMBOL(bgx_set_lmac_mac); +void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable) +{ + struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; + u64 cfg; + + if (!bgx) + return; + + cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG); + if (enable) + cfg |= CMR_PKT_RX_EN | CMR_PKT_TX_EN; + else + cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN); + bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg); +} +EXPORT_SYMBOL(bgx_lmac_rx_tx_enable); + static void bgx_sgmii_change_link_state(struct lmac *lmac) { struct bgx *bgx = lmac->bgx; @@ -612,6 
+629,8 @@ static void bgx_poll_for_link(struct work_struct *work) lmac->last_duplex = 1; } else { lmac->link_up = 0; + lmac->last_speed = SPEED_UNKNOWN; + lmac->last_duplex = DUPLEX_UNKNOWN; } if (lmac->last_link != lmac->link_up) { @@ -654,8 +673,7 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid) } /* Enable lmac */ - bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, - CMR_EN | CMR_PKT_RX_EN | CMR_PKT_TX_EN); + bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN); /* Restore default cfg, in case low-level firmware changed it */ bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, 0x03); @@ -695,8 +713,7 @@ static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid) lmac = &bgx->lmac[lmacid]; if (lmac->check_link) { /* Destroy work queue */ - cancel_delayed_work(&lmac->dwork); - flush_workqueue(lmac->check_link); + cancel_delayed_work_sync(&lmac->dwork); destroy_workqueue(lmac->check_link); } @@ -1009,6 +1026,9 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct bgx *bgx = NULL; u8 lmac; + /* Load octeon mdio driver */ + octeon_mdiobus_force_mod_depencency(); + bgx = devm_kzalloc(dev, sizeof(*bgx), GFP_KERNEL); if (!bgx) return -ENOMEM; diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h index 07b7ec66c60d..149e179363a1 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h @@ -182,6 +182,8 @@ enum MCAST_MODE { #define BCAST_ACCEPT 1 #define CAM_ACCEPT 1 +void octeon_mdiobus_force_mod_depencency(void); +void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable); void bgx_add_dmac_addr(u64 dmac, int node, int bgx_idx, int lmac); unsigned bgx_get_map(int node); int bgx_get_lmac_count(int node, int bgx); diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c index ed41559bae77..b553409e04ad 100644 --- a/drivers/net/ethernet/dec/tulip/tulip_core.c +++ b/drivers/net/ethernet/dec/tulip/tulip_core.c @@ -98,8 +98,7 @@ static int csr0 = 0x01A00000 | 0x4800; #elif defined(__mips__) static int csr0 = 0x00200000 | 0x4000; #else -#warning Processor architecture undefined! -static int csr0 = 0x00A00000 | 0x4800; +static int csr0; #endif /* Operational parameters that usually are not changed.
*/ @@ -1982,6 +1981,12 @@ static int __init tulip_init (void) pr_info("%s", version); #endif + if (!csr0) { + pr_warn("tulip: unknown CPU architecture, using default csr0\n"); + /* default to 8 longword cache line alignment */ + csr0 = 0x00A00000 | 0x4800; + } + /* copy module parms into globals */ tulip_rx_copybreak = rx_copybreak; tulip_max_interrupt_work = max_interrupt_work; diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c index 9beb3d34d4ba..3c0e4d5c5fef 100644 --- a/drivers/net/ethernet/dec/tulip/winbond-840.c +++ b/drivers/net/ethernet/dec/tulip/winbond-840.c @@ -907,7 +907,7 @@ static void init_registers(struct net_device *dev) #elif defined(CONFIG_SPARC) || defined (CONFIG_PARISC) || defined(CONFIG_ARM) i |= 0x4800; #else -#warning Processor architecture undefined + dev_warn(&dev->dev, "unknown CPU architecture, using default csr0 setting\n"); i |= 0x4800; #endif iowrite32(i, ioaddr + PCIBusCfg); diff --git a/drivers/net/ethernet/dlink/Kconfig b/drivers/net/ethernet/dlink/Kconfig index f6e858d0b9d4..ebdc83247bb6 100644 --- a/drivers/net/ethernet/dlink/Kconfig +++ b/drivers/net/ethernet/dlink/Kconfig @@ -17,15 +17,16 @@ config NET_VENDOR_DLINK if NET_VENDOR_DLINK config DL2K - tristate "DL2000/TC902x-based Gigabit Ethernet support" + tristate "DL2000/TC902x/IP1000A-based Gigabit Ethernet support" depends on PCI select CRC32 ---help--- - This driver supports DL2000/TC902x-based Gigabit ethernet cards, + This driver supports DL2000/TC902x/IP1000A-based Gigabit Ethernet cards, which includes D-Link DGE-550T Gigabit Ethernet Adapter. D-Link DL2000-based Gigabit Ethernet Adapter. Sundance/Tamarack TC902x Gigabit Ethernet Adapter. + ICPlus IP1000A-based cards. To compile this driver as a module, choose M here: the module will be called dl2k. diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c index cf0a5fcdaaaf..ccca4799c27b 100644 --- a/drivers/net/ethernet/dlink/dl2k.c +++ b/drivers/net/ethernet/dlink/dl2k.c @@ -253,6 +253,19 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto err_out_unmap_rx; + if (np->chip_id == CHIP_IP1000A && + (np->pdev->revision == 0x40 || np->pdev->revision == 0x41)) { + /* PHY magic taken from ipg driver, undocumented registers */ + mii_write(dev, np->phy_addr, 31, 0x0001); + mii_write(dev, np->phy_addr, 27, 0x01e0); + mii_write(dev, np->phy_addr, 31, 0x0002); + mii_write(dev, np->phy_addr, 27, 0xeb8e); + mii_write(dev, np->phy_addr, 31, 0x0000); + mii_write(dev, np->phy_addr, 30, 0x005e); + /* advertise 1000BASE-T half & full duplex, prefer MASTER */ + mii_write(dev, np->phy_addr, MII_CTRL1000, 0x0700); + } + /* Fiber device? */ np->phy_media = (dr16(ASICCtrl) & PhyMedia) ?
1 : 0; np->link_status = 0; @@ -361,6 +374,11 @@ parse_eeprom (struct net_device *dev) for (i = 0; i < 6; i++) dev->dev_addr[i] = psrom->mac_addr[i]; + if (np->chip_id == CHIP_IP1000A) { + np->led_mode = psrom->led_mode; + return 0; + } + if (np->pdev->vendor != PCI_VENDOR_ID_DLINK) { return 0; } @@ -406,6 +424,28 @@ parse_eeprom (struct net_device *dev) return 0; } +static void rio_set_led_mode(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->ioaddr; + u32 mode; + + if (np->chip_id != CHIP_IP1000A) + return; + + mode = dr32(ASICCtrl); + mode &= ~(IPG_AC_LED_MODE_BIT_1 | IPG_AC_LED_MODE | IPG_AC_LED_SPEED); + + if (np->led_mode & 0x01) + mode |= IPG_AC_LED_MODE; + if (np->led_mode & 0x02) + mode |= IPG_AC_LED_MODE_BIT_1; + if (np->led_mode & 0x08) + mode |= IPG_AC_LED_SPEED; + + dw32(ASICCtrl, mode); +} + static int rio_open (struct net_device *dev) { @@ -424,6 +464,8 @@ rio_open (struct net_device *dev) GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset); mdelay(10); + rio_set_led_mode(dev); + /* DebugCtrl bit 4, 5, 9 must set */ dw32(DebugCtrl, dr32(DebugCtrl) | 0x0230); @@ -433,9 +475,13 @@ rio_open (struct net_device *dev) alloc_list (dev); - /* Get station address */ - for (i = 0; i < 6; i++) - dw8(StationAddr0 + i, dev->dev_addr[i]); + /* Set station address */ + /* 16 or 32-bit access is required by TC9020 datasheet but 8-bit works + * too. However, it doesn't work on IP1000A so we use 16-bit access. + */ + for (i = 0; i < 3; i++) + dw16(StationAddr0 + 2 * i, + cpu_to_le16(((u16 *)dev->dev_addr)[i])); set_multicast (dev); if (np->coalesce) { @@ -780,6 +826,7 @@ tx_error (struct net_device *dev, int tx_status) break; mdelay (1); } + rio_set_led_mode(dev); rio_free_tx (dev, 1); /* Reset TFDListPtr */ dw32(TFDListPtr0, np->tx_ring_dma + @@ -799,6 +846,7 @@ tx_error (struct net_device *dev, int tx_status) break; mdelay (1); } + rio_set_led_mode(dev); /* Let TxStartThresh stay default value */ } /* Maximum Collisions */ @@ -965,6 +1013,7 @@ rio_error (struct net_device *dev, int int_status) dev->name, int_status); dw16(ASICCtrl + 2, GlobalReset | HostReset); mdelay (500); + rio_set_led_mode(dev); } } diff --git a/drivers/net/ethernet/dlink/dl2k.h b/drivers/net/ethernet/dlink/dl2k.h index 23c07b007069..8f4f61262d5c 100644 --- a/drivers/net/ethernet/dlink/dl2k.h +++ b/drivers/net/ethernet/dlink/dl2k.h @@ -211,6 +211,10 @@ enum ASICCtrl_HiWord_bits { ResetBusy = 0x0400, }; +#define IPG_AC_LED_MODE BIT(14) +#define IPG_AC_LED_SPEED BIT(27) +#define IPG_AC_LED_MODE_BIT_1 BIT(29) + /* Transmit Frame Control bits */ enum TFC_bits { DwordAlign = 0x00000000, @@ -332,7 +336,10 @@ typedef struct t_SROM { u16 asic_ctrl; /* 0x02 */ u16 sub_vendor_id; /* 0x04 */ u16 sub_system_id; /* 0x06 */ - u16 reserved1[12]; /* 0x08-0x1f */ + u16 pci_base_1; /* 0x08 (IP1000A only) */ + u16 pci_base_2; /* 0x0a (IP1000A only) */ + u16 led_mode; /* 0x0c (IP1000A only) */ + u16 reserved1[9]; /* 0x0e-0x1f */ u8 mac_addr[6]; /* 0x20-0x25 */ u8 reserved2[10]; /* 0x26-0x2f */ u8 sib[204]; /* 0x30-0xfb */ @@ -397,6 +404,7 @@ struct netdev_private { u16 advertising; /* NWay media advertisement */ u16 negotiate; /* Negotiated media */ int phy_addr; /* PHY addresses. */ + u16 led_mode; /* LED mode read from EEPROM (IP1000A only) */ }; /* The station address location in the EEPROM. */ @@ -407,10 +415,15 @@ struct netdev_private { class_mask of the class are honored during the comparison. driver_data Data private to the driver. 
*/ +#define CHIP_IP1000A 1 static const struct pci_device_id rio_pci_tbl[] = { {0x1186, 0x4000, PCI_ANY_ID, PCI_ANY_ID, }, {0x13f0, 0x1021, PCI_ANY_ID, PCI_ANY_ID, }, + { PCI_VDEVICE(SUNDANCE, 0x1023), CHIP_IP1000A }, + { PCI_VDEVICE(SUNDANCE, 0x2021), CHIP_IP1000A }, + { PCI_VDEVICE(DLINK, 0x9021), CHIP_IP1000A }, + { PCI_VDEVICE(DLINK, 0x4020), CHIP_IP1000A }, { } }; MODULE_DEVICE_TABLE (pci, rio_pci_tbl); diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c index f4cb8e425853..734f655c99c1 100644 --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c @@ -1062,9 +1062,7 @@ static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, static int be_set_rss_hash_opts(struct be_adapter *adapter, struct ethtool_rxnfc *cmd) { - struct be_rx_obj *rxo; - int status = 0, i, j; - u8 rsstable[128]; + int status; u32 rss_flags = adapter->rss_info.rss_flags; if (cmd->data != L3_RSS_FLAGS && @@ -1113,20 +1111,11 @@ static int be_set_rss_hash_opts(struct be_adapter *adapter, } if (rss_flags == adapter->rss_info.rss_flags) - return status; - - if (be_multi_rxq(adapter)) { - for (j = 0; j < 128; j += adapter->num_rss_qs) { - for_all_rss_queues(adapter, rxo, i) { - if ((j + i) >= 128) - break; - rsstable[j + i] = rxo->rss_id; - } - } - } + return 0; status = be_cmd_rss_config(adapter, adapter->rss_info.rsstable, - rss_flags, 128, adapter->rss_info.rss_hkey); + rss_flags, RSS_INDIR_TABLE_LEN, + adapter->rss_info.rss_hkey); if (!status) adapter->rss_info.rss_flags = rss_flags; diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index eb48a977f8da..b6ad02909d6b 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -3518,7 +3518,7 @@ static int be_rx_qs_create(struct be_adapter *adapter) netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN); rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags, - 128, rss_key); + RSS_INDIR_TABLE_LEN, rss_key); if (rc) { rss->rss_flags = RSS_ENABLE_NONE; return rc; diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig index ff76d4e9dc1b..bee32a9d9876 100644 --- a/drivers/net/ethernet/freescale/Kconfig +++ b/drivers/net/ethernet/freescale/Kconfig @@ -7,7 +7,8 @@ config NET_VENDOR_FREESCALE default y depends on FSL_SOC || QUICC_ENGINE || CPM1 || CPM2 || PPC_MPC512x || \ M523x || M527x || M5272 || M528x || M520x || M532x || \ - ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM) + ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM) || \ + ARCH_LAYERSCAPE ---help--- If you have a network (Ethernet) card belonging to this class, say Y. 
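Editor's illustration, not part of this series: the be_ethtool.c hunk above drops an open-coded round-robin fill of the 128-entry RSS indirection table in favour of the table already cached in adapter->rss_info. The effect of the deleted loop, as a standalone sketch (the queue count and variable names are assumptions, not driver code):

#include <stdio.h>

#define RSS_INDIR_TABLE_LEN 128	/* matches the 128 entries used above */

int main(void)
{
	unsigned char rsstable[RSS_INDIR_TABLE_LEN];
	unsigned int num_rss_qs = 4;	/* assumed number of rx queues */
	unsigned int i;

	/* hash bucket i steers traffic to rx queue (i mod num_rss_qs) */
	for (i = 0; i < RSS_INDIR_TABLE_LEN; i++)
		rsstable[i] = i % num_rss_qs;

	for (i = 0; i < 8; i++)
		printf("bucket %u -> rxq %u\n", i, rsstable[i]);

	return 0;
}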
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 3e6b9b437497..7cf898455e60 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -647,9 +647,9 @@ static int gfar_parse_group(struct device_node *np, if (model && strcasecmp(model, "FEC")) { gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1); gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2); - if (gfar_irq(grp, TX)->irq == NO_IRQ || - gfar_irq(grp, RX)->irq == NO_IRQ || - gfar_irq(grp, ER)->irq == NO_IRQ) + if (!gfar_irq(grp, TX)->irq || + !gfar_irq(grp, RX)->irq || + !gfar_irq(grp, ER)->irq) return -EINVAL; } diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c index 664d0c261269..b40fba929d65 100644 --- a/drivers/net/ethernet/freescale/gianfar_ptp.c +++ b/drivers/net/ethernet/freescale/gianfar_ptp.c @@ -467,7 +467,7 @@ static int gianfar_ptp_probe(struct platform_device *dev) etsects->irq = platform_get_irq(dev, 0); - if (etsects->irq == NO_IRQ) { + if (etsects->irq < 0) { pr_err("irq not in device tree\n"); goto no_node; } diff --git a/drivers/net/ethernet/icplus/Kconfig b/drivers/net/ethernet/icplus/Kconfig deleted file mode 100644 index 14a66e9d2e26..000000000000 --- a/drivers/net/ethernet/icplus/Kconfig +++ /dev/null @@ -1,13 +0,0 @@ -# -# IC Plus device configuration -# - -config IP1000 - tristate "IP1000 Gigabit Ethernet support" - depends on PCI - select MII - ---help--- - This driver supports IP1000 gigabit Ethernet cards. - - To compile this driver as a module, choose M here: the module - will be called ipg. This is recommended. diff --git a/drivers/net/ethernet/icplus/Makefile b/drivers/net/ethernet/icplus/Makefile deleted file mode 100644 index 5bc87c1f36aa..000000000000 --- a/drivers/net/ethernet/icplus/Makefile +++ /dev/null @@ -1,5 +0,0 @@ -# -# Makefile for the IC Plus device drivers -# - -obj-$(CONFIG_IP1000) += ipg.o diff --git a/drivers/net/ethernet/icplus/ipg.c b/drivers/net/ethernet/icplus/ipg.c deleted file mode 100644 index c3b6af83f070..000000000000 --- a/drivers/net/ethernet/icplus/ipg.c +++ /dev/null @@ -1,2300 +0,0 @@ -/* - * ipg.c: Device Driver for the IP1000 Gigabit Ethernet Adapter - * - * Copyright (C) 2003, 2007 IC Plus Corp - * - * Original Author: - * - * Craig Rich - * Sundance Technology, Inc. - * www.sundanceti.com - * craig_rich@sundanceti.com - * - * Current Maintainer: - * - * Sorbica Shieh. 
- * http://www.icplus.com.tw - * sorbica@icplus.com.tw - * - * Jesse Huang - * http://www.icplus.com.tw - * jesse@icplus.com.tw - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/crc32.h> -#include <linux/ethtool.h> -#include <linux/interrupt.h> -#include <linux/gfp.h> -#include <linux/mii.h> -#include <linux/mutex.h> - -#include <asm/div64.h> - -#define IPG_RX_RING_BYTES (sizeof(struct ipg_rx) * IPG_RFDLIST_LENGTH) -#define IPG_TX_RING_BYTES (sizeof(struct ipg_tx) * IPG_TFDLIST_LENGTH) -#define IPG_RESET_MASK \ - (IPG_AC_GLOBAL_RESET | IPG_AC_RX_RESET | IPG_AC_TX_RESET | \ - IPG_AC_DMA | IPG_AC_FIFO | IPG_AC_NETWORK | IPG_AC_HOST | \ - IPG_AC_AUTO_INIT) - -#define ipg_w32(val32, reg) iowrite32((val32), ioaddr + (reg)) -#define ipg_w16(val16, reg) iowrite16((val16), ioaddr + (reg)) -#define ipg_w8(val8, reg) iowrite8((val8), ioaddr + (reg)) - -#define ipg_r32(reg) ioread32(ioaddr + (reg)) -#define ipg_r16(reg) ioread16(ioaddr + (reg)) -#define ipg_r8(reg) ioread8(ioaddr + (reg)) - -enum { - netdev_io_size = 128 -}; - -#include "ipg.h" -#define DRV_NAME "ipg" - -MODULE_AUTHOR("IC Plus Corp. 2003"); -MODULE_DESCRIPTION("IC Plus IP1000 Gigabit Ethernet Adapter Linux Driver"); -MODULE_LICENSE("GPL"); - -/* - * Defaults - */ -#define IPG_MAX_RXFRAME_SIZE 0x0600 -#define IPG_RXFRAG_SIZE 0x0600 -#define IPG_RXSUPPORT_SIZE 0x0600 -#define IPG_IS_JUMBO false - -/* - * Variable record -- index by leading revision/length - * Revision/Length(=N*4), Address1, Data1, Address2, Data2,...,AddressN,DataN - */ -static const unsigned short DefaultPhyParam[] = { - /* 11/12/03 IP1000A v1-3 rev=0x40 */ - /*-------------------------------------------------------------------------- - (0x4000|(15*4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 22, 0x85bd, 24, 0xfff2, - 27, 0x0c10, 28, 0x0c10, 29, 0x2c10, 31, 0x0003, 23, 0x92f6, - 31, 0x0000, 23, 0x003d, 30, 0x00de, 20, 0x20e7, 9, 0x0700, - --------------------------------------------------------------------------*/ - /* 12/17/03 IP1000A v1-4 rev=0x40 */ - (0x4000 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e, 31, - 0x0000, - 30, 0x005e, 9, 0x0700, - /* 01/09/04 IP1000A v1-5 rev=0x41 */ - (0x4100 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e, 31, - 0x0000, - 30, 0x005e, 9, 0x0700, - 0x0000 -}; - -static const char * const ipg_brand_name[] = { - "IC PLUS IP1000 1000/100/10 based NIC", - "Sundance Technology ST2021 based NIC", - "Tamarack Microelectronics TC9020/9021 based NIC", - "D-Link NIC IP1000A" -}; - -static const struct pci_device_id ipg_pci_tbl[] = { - { PCI_VDEVICE(SUNDANCE, 0x1023), 0 }, - { PCI_VDEVICE(SUNDANCE, 0x2021), 1 }, - { PCI_VDEVICE(DLINK, 0x9021), 2 }, - { PCI_VDEVICE(DLINK, 0x4020), 3 }, - { 0, } -}; - -MODULE_DEVICE_TABLE(pci, ipg_pci_tbl); - -static inline void __iomem *ipg_ioaddr(struct net_device *dev) -{ - struct ipg_nic_private *sp = netdev_priv(dev); - return sp->ioaddr; -} - -#ifdef IPG_DEBUG -static void ipg_dump_rfdlist(struct net_device *dev) -{ - struct ipg_nic_private *sp = netdev_priv(dev); - void __iomem *ioaddr = sp->ioaddr; - unsigned int i; - u32 offset; - - IPG_DEBUG_MSG("_dump_rfdlist\n"); - - netdev_info(dev, "rx_current = %02x\n", sp->rx_current); - netdev_info(dev, "rx_dirty = %02x\n", sp->rx_dirty); - netdev_info(dev, "RFDList start address = %016lx\n", - (unsigned long)sp->rxd_map); - netdev_info(dev, "RFDListPtr register = %08x%08x\n", - ipg_r32(IPG_RFDLISTPTR1), ipg_r32(IPG_RFDLISTPTR0)); - - for (i = 0; i < IPG_RFDLIST_LENGTH; i++) { - offset = (u32) &sp->rxd[i].next_desc - 
(u32) sp->rxd; - netdev_info(dev, "%02x %04x RFDNextPtr = %016lx\n", - i, offset, (unsigned long)sp->rxd[i].next_desc); - offset = (u32) &sp->rxd[i].rfs - (u32) sp->rxd; - netdev_info(dev, "%02x %04x RFS = %016lx\n", - i, offset, (unsigned long)sp->rxd[i].rfs); - offset = (u32) &sp->rxd[i].frag_info - (u32) sp->rxd; - netdev_info(dev, "%02x %04x frag_info = %016lx\n", - i, offset, (unsigned long)sp->rxd[i].frag_info); - } -} - -static void ipg_dump_tfdlist(struct net_device *dev) -{ - struct ipg_nic_private *sp = netdev_priv(dev); - void __iomem *ioaddr = sp->ioaddr; - unsigned int i; - u32 offset; - - IPG_DEBUG_MSG("_dump_tfdlist\n"); - - netdev_info(dev, "tx_current = %02x\n", sp->tx_current); - netdev_info(dev, "tx_dirty = %02x\n", sp->tx_dirty); - netdev_info(dev, "TFDList start address = %016lx\n", - (unsigned long) sp->txd_map); - netdev_info(dev, "TFDListPtr register = %08x%08x\n", - ipg_r32(IPG_TFDLISTPTR1), ipg_r32(IPG_TFDLISTPTR0)); - - for (i = 0; i < IPG_TFDLIST_LENGTH; i++) { - offset = (u32) &sp->txd[i].next_desc - (u32) sp->txd; - netdev_info(dev, "%02x %04x TFDNextPtr = %016lx\n", - i, offset, (unsigned long)sp->txd[i].next_desc); - - offset = (u32) &sp->txd[i].tfc - (u32) sp->txd; - netdev_info(dev, "%02x %04x TFC = %016lx\n", - i, offset, (unsigned long) sp->txd[i].tfc); - offset = (u32) &sp->txd[i].frag_info - (u32) sp->txd; - netdev_info(dev, "%02x %04x frag_info = %016lx\n", - i, offset, (unsigned long) sp->txd[i].frag_info); - } -} -#endif - -static void ipg_write_phy_ctl(void __iomem *ioaddr, u8 data) -{ - ipg_w8(IPG_PC_RSVD_MASK & data, PHY_CTRL); - ndelay(IPG_PC_PHYCTRLWAIT_NS); -} - -static void ipg_drive_phy_ctl_low_high(void __iomem *ioaddr, u8 data) -{ - ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | data); - ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | data); -} - -static void send_three_state(void __iomem *ioaddr, u8 phyctrlpolarity) -{ - phyctrlpolarity |= (IPG_PC_MGMTDATA & 0) | IPG_PC_MGMTDIR; - - ipg_drive_phy_ctl_low_high(ioaddr, phyctrlpolarity); -} - -static void send_end(void __iomem *ioaddr, u8 phyctrlpolarity) -{ - ipg_w8((IPG_PC_MGMTCLK_LO | (IPG_PC_MGMTDATA & 0) | IPG_PC_MGMTDIR | - phyctrlpolarity) & IPG_PC_RSVD_MASK, PHY_CTRL); -} - -static u16 read_phy_bit(void __iomem *ioaddr, u8 phyctrlpolarity) -{ - u16 bit_data; - - ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | phyctrlpolarity); - - bit_data = ((ipg_r8(PHY_CTRL) & IPG_PC_MGMTDATA) >> 1) & 1; - - ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | phyctrlpolarity); - - return bit_data; -} - -/* - * Read a register from the Physical Layer device located - * on the IPG NIC, using the IPG PHYCTRL register. - */ -static int mdio_read(struct net_device *dev, int phy_id, int phy_reg) -{ - void __iomem *ioaddr = ipg_ioaddr(dev); - /* - * The GMII mangement frame structure for a read is as follows: - * - * |Preamble|st|op|phyad|regad|ta| data |idle| - * |< 32 1s>|01|10|AAAAA|RRRRR|z0|DDDDDDDDDDDDDDDD|z | - * - * <32 1s> = 32 consecutive logic 1 values - * A = bit of Physical Layer device address (MSB first) - * R = bit of register address (MSB first) - * z = High impedance state - * D = bit of read data (MSB first) - * - * Transmission order is 'Preamble' field first, bits transmitted - * left to right (first to last). 
- */ - struct { - u32 field; - unsigned int len; - } p[] = { - { GMII_PREAMBLE, 32 }, /* Preamble */ - { GMII_ST, 2 }, /* ST */ - { GMII_READ, 2 }, /* OP */ - { phy_id, 5 }, /* PHYAD */ - { phy_reg, 5 }, /* REGAD */ - { 0x0000, 2 }, /* TA */ - { 0x0000, 16 }, /* DATA */ - { 0x0000, 1 } /* IDLE */ - }; - unsigned int i, j; - u8 polarity, data; - - polarity = ipg_r8(PHY_CTRL); - polarity &= (IPG_PC_DUPLEX_POLARITY | IPG_PC_LINK_POLARITY); - - /* Create the Preamble, ST, OP, PHYAD, and REGAD field. */ - for (j = 0; j < 5; j++) { - for (i = 0; i < p[j].len; i++) { - /* For each variable length field, the MSB must be - * transmitted first. Rotate through the field bits, - * starting with the MSB, and move each bit into the - * the 1st (2^1) bit position (this is the bit position - * corresponding to the MgmtData bit of the PhyCtrl - * register for the IPG). - * - * Example: ST = 01; - * - * First write a '0' to bit 1 of the PhyCtrl - * register, then write a '1' to bit 1 of the - * PhyCtrl register. - * - * To do this, right shift the MSB of ST by the value: - * [field length - 1 - #ST bits already written] - * then left shift this result by 1. - */ - data = (p[j].field >> (p[j].len - 1 - i)) << 1; - data &= IPG_PC_MGMTDATA; - data |= polarity | IPG_PC_MGMTDIR; - - ipg_drive_phy_ctl_low_high(ioaddr, data); - } - } - - send_three_state(ioaddr, polarity); - - read_phy_bit(ioaddr, polarity); - - /* - * For a read cycle, the bits for the next two fields (TA and - * DATA) are driven by the PHY (the IPG reads these bits). - */ - for (i = 0; i < p[6].len; i++) { - p[6].field |= - (read_phy_bit(ioaddr, polarity) << (p[6].len - 1 - i)); - } - - send_three_state(ioaddr, polarity); - send_three_state(ioaddr, polarity); - send_three_state(ioaddr, polarity); - send_end(ioaddr, polarity); - - /* Return the value of the DATA field. */ - return p[6].field; -} - -/* - * Write to a register from the Physical Layer device located - * on the IPG NIC, using the IPG PHYCTRL register. - */ -static void mdio_write(struct net_device *dev, int phy_id, int phy_reg, int val) -{ - void __iomem *ioaddr = ipg_ioaddr(dev); - /* - * The GMII mangement frame structure for a read is as follows: - * - * |Preamble|st|op|phyad|regad|ta| data |idle| - * |< 32 1s>|01|10|AAAAA|RRRRR|z0|DDDDDDDDDDDDDDDD|z | - * - * <32 1s> = 32 consecutive logic 1 values - * A = bit of Physical Layer device address (MSB first) - * R = bit of register address (MSB first) - * z = High impedance state - * D = bit of write data (MSB first) - * - * Transmission order is 'Preamble' field first, bits transmitted - * left to right (first to last). - */ - struct { - u32 field; - unsigned int len; - } p[] = { - { GMII_PREAMBLE, 32 }, /* Preamble */ - { GMII_ST, 2 }, /* ST */ - { GMII_WRITE, 2 }, /* OP */ - { phy_id, 5 }, /* PHYAD */ - { phy_reg, 5 }, /* REGAD */ - { 0x0002, 2 }, /* TA */ - { val & 0xffff, 16 }, /* DATA */ - { 0x0000, 1 } /* IDLE */ - }; - unsigned int i, j; - u8 polarity, data; - - polarity = ipg_r8(PHY_CTRL); - polarity &= (IPG_PC_DUPLEX_POLARITY | IPG_PC_LINK_POLARITY); - - /* Create the Preamble, ST, OP, PHYAD, and REGAD field. */ - for (j = 0; j < 7; j++) { - for (i = 0; i < p[j].len; i++) { - /* For each variable length field, the MSB must be - * transmitted first. Rotate through the field bits, - * starting with the MSB, and move each bit into the - * the 1st (2^1) bit position (this is the bit position - * corresponding to the MgmtData bit of the PhyCtrl - * register for the IPG). 
- * - * Example: ST = 01; - * - * First write a '0' to bit 1 of the PhyCtrl - * register, then write a '1' to bit 1 of the - * PhyCtrl register. - * - * To do this, right shift the MSB of ST by the value: - * [field length - 1 - #ST bits already written] - * then left shift this result by 1. - */ - data = (p[j].field >> (p[j].len - 1 - i)) << 1; - data &= IPG_PC_MGMTDATA; - data |= polarity | IPG_PC_MGMTDIR; - - ipg_drive_phy_ctl_low_high(ioaddr, data); - } - } - - /* The last cycle is a tri-state, so read from the PHY. */ - ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | polarity); - ipg_r8(PHY_CTRL); - ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | polarity); -} - -static void ipg_set_led_mode(struct net_device *dev) -{ - struct ipg_nic_private *sp = netdev_priv(dev); - void __iomem *ioaddr = sp->ioaddr; - u32 mode; - - mode = ipg_r32(ASIC_CTRL); - mode &= ~(IPG_AC_LED_MODE_BIT_1 | IPG_AC_LED_MODE | IPG_AC_LED_SPEED); - - if ((sp->led_mode & 0x03) > 1) - mode |= IPG_AC_LED_MODE_BIT_1; /* Write Asic Control Bit 29 */ - - if ((sp->led_mode & 0x01) == 1) - mode |= IPG_AC_LED_MODE; /* Write Asic Control Bit 14 */ - - if ((sp->led_mode & 0x08) == 8) - mode |= IPG_AC_LED_SPEED; /* Write Asic Control Bit 27 */ - - ipg_w32(mode, ASIC_CTRL); -} - -static void ipg_set_phy_set(struct net_device *dev) -{ - struct ipg_nic_private *sp = netdev_priv(dev); - void __iomem *ioaddr = sp->ioaddr; - int physet; - - physet = ipg_r8(PHY_SET); - physet &= ~(IPG_PS_MEM_LENB9B | IPG_PS_MEM_LEN9 | IPG_PS_NON_COMPDET); - physet |= ((sp->led_mode & 0x70) >> 4); - ipg_w8(physet, PHY_SET); -} - -static int ipg_reset(struct net_device *dev, u32 resetflags) -{ - /* Assert functional resets via the IPG AsicCtrl - * register as specified by the 'resetflags' input - * parameter. - */ - void __iomem *ioaddr = ipg_ioaddr(dev); - unsigned int timeout_count = 0; - - IPG_DEBUG_MSG("_reset\n"); - - ipg_w32(ipg_r32(ASIC_CTRL) | resetflags, ASIC_CTRL); - - /* Delay added to account for problem with 10Mbps reset. */ - mdelay(IPG_AC_RESETWAIT); - - while (IPG_AC_RESET_BUSY & ipg_r32(ASIC_CTRL)) { - mdelay(IPG_AC_RESETWAIT); - if (++timeout_count > IPG_AC_RESET_TIMEOUT) - return -ETIME; - } - /* Set LED Mode in Asic Control */ - ipg_set_led_mode(dev); - - /* Set PHYSet Register Value */ - ipg_set_phy_set(dev); - return 0; -} - -/* Find the GMII PHY address. */ -static int ipg_find_phyaddr(struct net_device *dev) -{ - unsigned int phyaddr, i; - - for (i = 0; i < 32; i++) { - u32 status; - - /* Search for the correct PHY address among 32 possible. */ - phyaddr = (IPG_NIC_PHY_ADDRESS + i) % 32; - - /* 10/22/03 Grace change verify from GMII_PHY_STATUS to - GMII_PHY_ID1 - */ - - status = mdio_read(dev, phyaddr, MII_BMSR); - - if ((status != 0xFFFF) && (status != 0)) - return phyaddr; - } - - return 0x1f; -} - -/* - * Configure IPG based on result of IEEE 802.3 PHY - * auto-negotiation. - */ -static int ipg_config_autoneg(struct net_device *dev) -{ - struct ipg_nic_private *sp = netdev_priv(dev); - void __iomem *ioaddr = sp->ioaddr; - unsigned int txflowcontrol; - unsigned int rxflowcontrol; - unsigned int fullduplex; - u32 mac_ctrl_val; - u32 asicctrl; - u8 phyctrl; - const char *speed; - const char *duplex; - const char *tx_desc; - const char *rx_desc; - - IPG_DEBUG_MSG("_config_autoneg\n"); - - asicctrl = ipg_r32(ASIC_CTRL); - phyctrl = ipg_r8(PHY_CTRL); - mac_ctrl_val = ipg_r32(MAC_CTRL); - - /* Set flags for use in resolving auto-negotiation, assuming - * non-1000Mbps, half duplex, no flow control. 
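/*
 * [Editor's example] ipg_find_phyaddr() above probes all 32 MDIO
 * addresses, starting at the preferred IPG_NIC_PHY_ADDRESS and wrapping
 * modulo 32, and treats an all-zeros or all-ones BMSR as "no PHY at
 * this address".  Standalone model with a stubbed register read (the
 * stub places a PHY at address 16 purely for illustration):
 */
#include <stdio.h>

#define PREFERRED_PHY_ADDR      1       /* illustrative, not the driver's value */

static unsigned int stub_mdio_read(unsigned int phyaddr)
{
        return (phyaddr == 16) ? 0x7949 : 0xFFFF;       /* BMSR or idle bus */
}

static int find_phyaddr(void)
{
        unsigned int i, phyaddr, status;

        for (i = 0; i < 32; i++) {
                phyaddr = (PREFERRED_PHY_ADDR + i) % 32;
                status = stub_mdio_read(phyaddr);
                if (status != 0xFFFF && status != 0)
                        return phyaddr;
        }
        return 0x1f;    /* same "not found" sentinel as the driver */
}

int main(void)
{
        printf("PHY at address %d\n", find_phyaddr());
        return 0;
}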
- */ - fullduplex = 0; - txflowcontrol = 0; - rxflowcontrol = 0; - - /* To accommodate a problem in 10Mbps operation, - * set a global flag if PHY running in 10Mbps mode. - */ - sp->tenmbpsmode = 0; - - /* Determine actual speed of operation. */ - switch (phyctrl & IPG_PC_LINK_SPEED) { - case IPG_PC_LINK_SPEED_10MBPS: - speed = "10Mbps"; - sp->tenmbpsmode = 1; - break; - case IPG_PC_LINK_SPEED_100MBPS: - speed = "100Mbps"; - break; - case IPG_PC_LINK_SPEED_1000MBPS: - speed = "1000Mbps"; - break; - default: - speed = "undefined!"; - return 0; - } - - netdev_info(dev, "Link speed = %s\n", speed); - if (sp->tenmbpsmode == 1) - netdev_info(dev, "10Mbps operational mode enabled\n"); - - if (phyctrl & IPG_PC_DUPLEX_STATUS) { - fullduplex = 1; - txflowcontrol = 1; - rxflowcontrol = 1; - } - - /* Configure full duplex, and flow control. */ - if (fullduplex == 1) { - - /* Configure IPG for full duplex operation. */ - - duplex = "full"; - - mac_ctrl_val |= IPG_MC_DUPLEX_SELECT_FD; - - if (txflowcontrol == 1) { - tx_desc = ""; - mac_ctrl_val |= IPG_MC_TX_FLOW_CONTROL_ENABLE; - } else { - tx_desc = "no "; - mac_ctrl_val &= ~IPG_MC_TX_FLOW_CONTROL_ENABLE; - } - - if (rxflowcontrol == 1) { - rx_desc = ""; - mac_ctrl_val |= IPG_MC_RX_FLOW_CONTROL_ENABLE; - } else { - rx_desc = "no "; - mac_ctrl_val &= ~IPG_MC_RX_FLOW_CONTROL_ENABLE; - } - } else { - duplex = "half"; - tx_desc = "no "; - rx_desc = "no "; - mac_ctrl_val &= (~IPG_MC_DUPLEX_SELECT_FD & - ~IPG_MC_TX_FLOW_CONTROL_ENABLE & - ~IPG_MC_RX_FLOW_CONTROL_ENABLE); - } - - netdev_info(dev, "setting %s duplex, %sTX, %sRX flow control\n", - duplex, tx_desc, rx_desc); - ipg_w32(mac_ctrl_val, MAC_CTRL); - - return 0; -} - -/* Determine and configure multicast operation and set - * receive mode for IPG. - */ -static void ipg_nic_set_multicast_list(struct net_device *dev) -{ - void __iomem *ioaddr = ipg_ioaddr(dev); - struct netdev_hw_addr *ha; - unsigned int hashindex; - u32 hashtable[2]; - u8 receivemode; - - IPG_DEBUG_MSG("_nic_set_multicast_list\n"); - - receivemode = IPG_RM_RECEIVEUNICAST | IPG_RM_RECEIVEBROADCAST; - - if (dev->flags & IFF_PROMISC) { - /* NIC to be configured in promiscuous mode. */ - receivemode = IPG_RM_RECEIVEALLFRAMES; - } else if ((dev->flags & IFF_ALLMULTI) || - ((dev->flags & IFF_MULTICAST) && - (netdev_mc_count(dev) > IPG_MULTICAST_HASHTABLE_SIZE))) { - /* NIC to be configured to receive all multicast - * frames. */ - receivemode |= IPG_RM_RECEIVEMULTICAST; - } else if ((dev->flags & IFF_MULTICAST) && !netdev_mc_empty(dev)) { - /* NIC to be configured to receive selected - * multicast addresses. */ - receivemode |= IPG_RM_RECEIVEMULTICASTHASH; - } - - /* Calculate the bits to set for the 64 bit, IPG HASHTABLE. - * The IPG applies a cyclic-redundancy-check (the same CRC - * used to calculate the frame data FCS) to the destination - * address all incoming multicast frames whose destination - * address has the multicast bit set. The least significant - * 6 bits of the CRC result are used as an addressing index - * into the hash table. If the value of the bit addressed by - * this index is a 1, the frame is passed to the host system. - */ - - /* Clear hashtable. */ - hashtable[0] = 0x00000000; - hashtable[1] = 0x00000000; - - /* Cycle through all multicast addresses to filter. */ - netdev_for_each_mc_addr(ha, dev) { - /* Calculate CRC result for each multicast address. */ - hashindex = crc32_le(0xffffffff, ha->addr, - ETH_ALEN); - - /* Use only the least significant 6 bits. 
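/*
 * [Editor's example] The hashing described above is the classic
 * CRC-32-based multicast filter: run the little-endian Ethernet CRC
 * over the 6-byte address, keep the low 6 bits, and set that bit in a
 * 64-bit table.  The bitwise CRC below is an assumed model of the
 * kernel's crc32_le() (reflected polynomial 0xEDB88320), shown only to
 * make the index computation concrete.
 */
#include <stdio.h>

static unsigned int crc32_le_model(unsigned int crc,
                                   const unsigned char *p, int len)
{
        int i, k;

        for (i = 0; i < len; i++) {
                crc ^= p[i];
                for (k = 0; k < 8; k++)
                        crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1));
        }
        return crc;
}

int main(void)
{
        const unsigned char mc_addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
        unsigned int hashtable[2] = { 0, 0 };
        unsigned int hashindex;

        hashindex = crc32_le_model(0xffffffff, mc_addr, 6) & 0x3F;
        hashtable[hashindex >> 5] |= 1u << (hashindex & 31);

        printf("bit %u -> HASHTABLE_%u = %08x\n",
               hashindex, hashindex >> 5, hashtable[hashindex >> 5]);
        return 0;
}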
*/ - hashindex = hashindex & 0x3F; - - /* Within "hashtable", set bit number "hashindex" - * to a logic 1. - */ - set_bit(hashindex, (void *)hashtable); - } - - /* Write the value of the hashtable, to the 4, 16 bit - * HASHTABLE IPG registers. - */ - ipg_w32(hashtable[0], HASHTABLE_0); - ipg_w32(hashtable[1], HASHTABLE_1); - - ipg_w8(IPG_RM_RSVD_MASK & receivemode, RECEIVE_MODE); - - IPG_DEBUG_MSG("ReceiveMode = %x\n", ipg_r8(RECEIVE_MODE)); -} - -static int ipg_io_config(struct net_device *dev) -{ - struct ipg_nic_private *sp = netdev_priv(dev); - void __iomem *ioaddr = ipg_ioaddr(dev); - u32 origmacctrl; - u32 restoremacctrl; - - IPG_DEBUG_MSG("_io_config\n"); - - origmacctrl = ipg_r32(MAC_CTRL); - - restoremacctrl = origmacctrl | IPG_MC_STATISTICS_ENABLE; - - /* Based on compilation option, determine if FCS is to be - * stripped on receive frames by IPG. - */ - if (!IPG_STRIP_FCS_ON_RX) - restoremacctrl |= IPG_MC_RCV_FCS; - - /* Determine if transmitter and/or receiver are - * enabled so we may restore MACCTRL correctly. - */ - if (origmacctrl & IPG_MC_TX_ENABLED) - restoremacctrl |= IPG_MC_TX_ENABLE; - - if (origmacctrl & IPG_MC_RX_ENABLED) - restoremacctrl |= IPG_MC_RX_ENABLE; - - /* Transmitter and receiver must be disabled before setting - * IFSSelect. - */ - ipg_w32((origmacctrl & (IPG_MC_RX_DISABLE | IPG_MC_TX_DISABLE)) & - IPG_MC_RSVD_MASK, MAC_CTRL); - - /* Now that transmitter and receiver are disabled, write - * to IFSSelect. - */ - ipg_w32((origmacctrl & IPG_MC_IFS_96BIT) & IPG_MC_RSVD_MASK, MAC_CTRL); - - /* Set RECEIVEMODE register. */ - ipg_nic_set_multicast_list(dev); - - ipg_w16(sp->max_rxframe_size, MAX_FRAME_SIZE); - - ipg_w8(IPG_RXDMAPOLLPERIOD_VALUE, RX_DMA_POLL_PERIOD); - ipg_w8(IPG_RXDMAURGENTTHRESH_VALUE, RX_DMA_URGENT_THRESH); - ipg_w8(IPG_RXDMABURSTTHRESH_VALUE, RX_DMA_BURST_THRESH); - ipg_w8(IPG_TXDMAPOLLPERIOD_VALUE, TX_DMA_POLL_PERIOD); - ipg_w8(IPG_TXDMAURGENTTHRESH_VALUE, TX_DMA_URGENT_THRESH); - ipg_w8(IPG_TXDMABURSTTHRESH_VALUE, TX_DMA_BURST_THRESH); - ipg_w16((IPG_IE_HOST_ERROR | IPG_IE_TX_DMA_COMPLETE | - IPG_IE_TX_COMPLETE | IPG_IE_INT_REQUESTED | - IPG_IE_UPDATE_STATS | IPG_IE_LINK_EVENT | - IPG_IE_RX_DMA_COMPLETE | IPG_IE_RX_DMA_PRIORITY), INT_ENABLE); - ipg_w16(IPG_FLOWONTHRESH_VALUE, FLOW_ON_THRESH); - ipg_w16(IPG_FLOWOFFTHRESH_VALUE, FLOW_OFF_THRESH); - - /* IPG multi-frag frame bug workaround. - * Per silicon revision B3 eratta. - */ - ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0200, DEBUG_CTRL); - - /* IPG TX poll now bug workaround. - * Per silicon revision B3 eratta. - */ - ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0010, DEBUG_CTRL); - - /* IPG RX poll now bug workaround. - * Per silicon revision B3 eratta. - */ - ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0020, DEBUG_CTRL); - - /* Now restore MACCTRL to original setting. */ - ipg_w32(IPG_MC_RSVD_MASK & restoremacctrl, MAC_CTRL); - - /* Disable unused RMON statistics. */ - ipg_w32(IPG_RZ_ALL, RMON_STATISTICS_MASK); - - /* Disable unused MIB statistics. */ - ipg_w32(IPG_SM_MACCONTROLFRAMESXMTD | IPG_SM_MACCONTROLFRAMESRCVD | - IPG_SM_BCSTOCTETXMTOK_BCSTFRAMESXMTDOK | IPG_SM_TXJUMBOFRAMES | - IPG_SM_MCSTOCTETXMTOK_MCSTFRAMESXMTDOK | IPG_SM_RXJUMBOFRAMES | - IPG_SM_BCSTOCTETRCVDOK_BCSTFRAMESRCVDOK | - IPG_SM_UDPCHECKSUMERRORS | IPG_SM_TCPCHECKSUMERRORS | - IPG_SM_IPCHECKSUMERRORS, STATISTICS_MASK); - - return 0; -} - -/* - * Create a receive buffer within system memory and update - * NIC private structure appropriately. 
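/*
 * [Editor's example] Every register write in ipg_io_config() above is
 * ANDed with the corresponding IPG_*_RSVD_MASK first, so reserved bits
 * are never driven.  A generic helper of that shape, over a fake
 * register file so it runs standalone (regs[] and the offsets are
 * illustrative, not the driver's MMIO accessors):
 */
#include <stdio.h>

static unsigned int regs[4];    /* stand-in for the MMIO window */

static void reg_update_masked(unsigned int off, unsigned int set,
                              unsigned int clear, unsigned int rsvd_mask)
{
        unsigned int v = regs[off];

        v = (v & ~clear) | set;
        regs[off] = v & rsvd_mask;      /* reserved bits always written as 0 */
}

int main(void)
{
        regs[0] = 0xffffffff;
        reg_update_masked(0, 0x00000010, 0x00000003, 0x0000ffff);
        printf("reg0 = %08x\n", regs[0]);       /* high half masked away */
        return 0;
}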
- */ -static int ipg_get_rxbuff(struct net_device *dev, int entry) -{ - struct ipg_nic_private *sp = netdev_priv(dev); - struct ipg_rx *rxfd = sp->rxd + entry; - struct sk_buff *skb; - u64 rxfragsize; - - IPG_DEBUG_MSG("_get_rxbuff\n"); - - skb = netdev_alloc_skb_ip_align(dev, sp->rxsupport_size); - if (!skb) { - sp->rx_buff[entry] = NULL; - return -ENOMEM; - } - - /* Save the address of the sk_buff structure. */ - sp->rx_buff[entry] = skb; - - rxfd->frag_info = cpu_to_le64(pci_map_single(sp->pdev, skb->data, - sp->rx_buf_sz, PCI_DMA_FROMDEVICE)); - - /* Set the RFD fragment length. */ - rxfragsize = sp->rxfrag_size; - rxfd->frag_info |= cpu_to_le64((rxfragsize << 48) & IPG_RFI_FRAGLEN); - - return 0; -} - -static int init_rfdlist(struct net_device *dev) -{ - struct ipg_nic_private *sp = netdev_priv(dev); - void __iomem *ioaddr = sp->ioaddr; - unsigned int i; - - IPG_DEBUG_MSG("_init_rfdlist\n"); - - for (i = 0; i < IPG_RFDLIST_LENGTH; i++) { - struct ipg_rx *rxfd = sp->rxd + i; - - if (sp->rx_buff[i]) { - pci_unmap_single(sp->pdev, - le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN, - sp->rx_buf_sz, PCI_DMA_FROMDEVICE); - dev_kfree_skb_irq(sp->rx_buff[i]); - sp->rx_buff[i] = NULL; - } - - /* Clear out the RFS field. */ - rxfd->rfs = 0x0000000000000000; - - if (ipg_get_rxbuff(dev, i) < 0) { - /* - * A receive buffer was not ready, break the - * RFD list here. - */ - IPG_DEBUG_MSG("Cannot allocate Rx buffer\n"); - - /* Just in case we cannot allocate a single RFD. - * Should not occur. - */ - if (i == 0) { - netdev_err(dev, "No memory available for RFD list\n"); - return -ENOMEM; - } - } - - rxfd->next_desc = cpu_to_le64(sp->rxd_map + - sizeof(struct ipg_rx)*(i + 1)); - } - sp->rxd[i - 1].next_desc = cpu_to_le64(sp->rxd_map); - - sp->rx_current = 0; - sp->rx_dirty = 0; - - /* Write the location of the RFDList to the IPG. */ - ipg_w32((u32) sp->rxd_map, RFD_LIST_PTR_0); - ipg_w32(0x00000000, RFD_LIST_PTR_1); - - return 0; -} - -static void init_tfdlist(struct net_device *dev) -{ - struct ipg_nic_private *sp = netdev_priv(dev); - void __iomem *ioaddr = sp->ioaddr; - unsigned int i; - - IPG_DEBUG_MSG("_init_tfdlist\n"); - - for (i = 0; i < IPG_TFDLIST_LENGTH; i++) { - struct ipg_tx *txfd = sp->txd + i; - - txfd->tfc = cpu_to_le64(IPG_TFC_TFDDONE); - - if (sp->tx_buff[i]) { - dev_kfree_skb_irq(sp->tx_buff[i]); - sp->tx_buff[i] = NULL; - } - - txfd->next_desc = cpu_to_le64(sp->txd_map + - sizeof(struct ipg_tx)*(i + 1)); - } - sp->txd[i - 1].next_desc = cpu_to_le64(sp->txd_map); - - sp->tx_current = 0; - sp->tx_dirty = 0; - - /* Write the location of the TFDList to the IPG. */ - IPG_DDEBUG_MSG("Starting TFDListPtr = %08x\n", - (u32) sp->txd_map); - ipg_w32((u32) sp->txd_map, TFD_LIST_PTR_0); - ipg_w32(0x00000000, TFD_LIST_PTR_1); - - sp->reset_current_tfd = 1; -} - -/* - * Free all transmit buffers which have already been transferred - * via DMA to the IPG. - */ -static void ipg_nic_txfree(struct net_device *dev) -{ - struct ipg_nic_private *sp = netdev_priv(dev); - unsigned int released, pending, dirty; - - IPG_DEBUG_MSG("_nic_txfree\n"); - - pending = sp->tx_current - sp->tx_dirty; - dirty = sp->tx_dirty % IPG_TFDLIST_LENGTH; - - for (released = 0; released < pending; released++) { - struct sk_buff *skb = sp->tx_buff[dirty]; - struct ipg_tx *txfd = sp->txd + dirty; - - IPG_DEBUG_MSG("TFC = %016lx\n", (unsigned long) txfd->tfc); - - /* Look at each TFD's TFC field beginning - * at the last freed TFD up to the current TFD. - * If the TFDDone bit is set, free the associated - * buffer. 
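/*
 * [Editor's example] init_rfdlist() and init_tfdlist() above chain each
 * descriptor's next_desc to the bus address of the following slot and
 * finally point the last slot back at the ring base.  The address
 * arithmetic in isolation (ring length and base are illustrative):
 */
#include <stdint.h>
#include <stdio.h>

#define RING_LEN 4

struct desc {
        uint64_t next_desc;     /* bus address of the next descriptor */
};

static void link_ring(struct desc *d, uint64_t base)
{
        unsigned int i;

        for (i = 0; i < RING_LEN; i++)
                d[i].next_desc = base +
                        sizeof(struct desc) * ((i + 1) % RING_LEN);
}

int main(void)
{
        struct desc ring[RING_LEN];
        unsigned int i;

        link_ring(ring, 0x1000);
        for (i = 0; i < RING_LEN; i++)
                printf("desc %u -> %#llx\n", i,
                       (unsigned long long)ring[i].next_desc);
        return 0;
}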
- */ - if (!(txfd->tfc & cpu_to_le64(IPG_TFC_TFDDONE))) - break; - - /* Free the transmit buffer. */ - if (skb) { - pci_unmap_single(sp->pdev, - le64_to_cpu(txfd->frag_info) & ~IPG_TFI_FRAGLEN, - skb->len, PCI_DMA_TODEVICE); - - dev_kfree_skb_irq(skb); - - sp->tx_buff[dirty] = NULL; - } - dirty = (dirty + 1) % IPG_TFDLIST_LENGTH; - } - - sp->tx_dirty += released; - - if (netif_queue_stopped(dev) && - (sp->tx_current != (sp->tx_dirty + IPG_TFDLIST_LENGTH))) { - netif_wake_queue(dev); - } -} - -static void ipg_tx_timeout(struct net_device *dev) -{ - struct ipg_nic_private *sp = netdev_priv(dev); - void __iomem *ioaddr = sp->ioaddr; - - ipg_reset(dev, IPG_AC_TX_RESET | IPG_AC_DMA | IPG_AC_NETWORK | - IPG_AC_FIFO); - - spin_lock_irq(&sp->lock); - - /* Re-configure after DMA reset. */ - if (ipg_io_config(dev) < 0) - netdev_info(dev, "Error during re-configuration\n"); - - init_tfdlist(dev); - - spin_unlock_irq(&sp->lock); - - ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) & IPG_MC_RSVD_MASK, - MAC_CTRL); -} - -/* - * For TxComplete interrupts, free all transmit - * buffers which have already been transferred via DMA - * to the IPG. - */ -static void ipg_nic_txcleanup(struct net_device *dev) -{ - struct ipg_nic_private *sp = netdev_priv(dev); - void __iomem *ioaddr = sp->ioaddr; - unsigned int i; - - IPG_DEBUG_MSG("_nic_txcleanup\n"); - - for (i = 0; i < IPG_TFDLIST_LENGTH; i++) { - /* Reading the TXSTATUS register clears the - * TX_COMPLETE interrupt. - */ - u32 txstatusdword = ipg_r32(TX_STATUS); - - IPG_DEBUG_MSG("TxStatus = %08x\n", txstatusdword); - - /* Check for Transmit errors. Error bits only valid if - * TX_COMPLETE bit in the TXSTATUS register is a 1. - */ - if (!(txstatusdword & IPG_TS_TX_COMPLETE)) - break; - - /* If in 10Mbps mode, indicate transmit is ready. */ - if (sp->tenmbpsmode) { - netif_wake_queue(dev); - } - - /* Transmit error, increment stat counters. */ - if (txstatusdword & IPG_TS_TX_ERROR) { - IPG_DEBUG_MSG("Transmit error\n"); - sp->stats.tx_errors++; - } - - /* Late collision, re-enable transmitter. */ - if (txstatusdword & IPG_TS_LATE_COLLISION) { - IPG_DEBUG_MSG("Late collision on transmit\n"); - ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) & - IPG_MC_RSVD_MASK, MAC_CTRL); - } - - /* Maximum collisions, re-enable transmitter. */ - if (txstatusdword & IPG_TS_TX_MAX_COLL) { - IPG_DEBUG_MSG("Maximum collisions on transmit\n"); - ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) & - IPG_MC_RSVD_MASK, MAC_CTRL); - } - - /* Transmit underrun, reset and re-enable - * transmitter. - */ - if (txstatusdword & IPG_TS_TX_UNDERRUN) { - IPG_DEBUG_MSG("Transmitter underrun\n"); - sp->stats.tx_fifo_errors++; - ipg_reset(dev, IPG_AC_TX_RESET | IPG_AC_DMA | - IPG_AC_NETWORK | IPG_AC_FIFO); - - /* Re-configure after DMA reset. */ - if (ipg_io_config(dev) < 0) { - netdev_info(dev, "Error during re-configuration\n"); - } - init_tfdlist(dev); - - ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) & - IPG_MC_RSVD_MASK, MAC_CTRL); - } - } - - ipg_nic_txfree(dev); -} - -/* Provides statistical information about the IPG NIC. */ -static struct net_device_stats *ipg_nic_get_stats(struct net_device *dev) -{ - struct ipg_nic_private *sp = netdev_priv(dev); - void __iomem *ioaddr = sp->ioaddr; - u16 temp1; - u16 temp2; - - IPG_DEBUG_MSG("_nic_get_stats\n"); - - /* Check to see if the NIC has been initialized via nic_open, - * before trying to read statistic registers. 
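/*
 * [Editor's example] ipg_nic_txfree() above reclaims from the oldest
 * outstanding slot toward the producer index and stops at the first
 * descriptor whose hardware done bit is still clear.  Skeleton of that
 * walk over a fake ring (the DONE flag value and sizes are illustrative):
 */
#include <stdio.h>

#define TFD_DONE        0x80000000u
#define LIST_LEN        8

static unsigned int tfc[LIST_LEN];

static unsigned int reclaim(unsigned int tx_current, unsigned int *tx_dirty)
{
        unsigned int released = 0, pending = tx_current - *tx_dirty;
        unsigned int dirty = *tx_dirty % LIST_LEN;

        while (released < pending) {
                if (!(tfc[dirty] & TFD_DONE))
                        break;          /* hardware still owns this one */
                /* ...unmap and free the buffer here... */
                dirty = (dirty + 1) % LIST_LEN;
                released++;
        }
        *tx_dirty += released;
        return released;
}

int main(void)
{
        unsigned int dirty = 0;

        tfc[0] = tfc[1] = TFD_DONE;     /* two frames done, one pending */
        printf("released %u\n", reclaim(3, &dirty));
        return 0;
}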
- */ - if (!netif_running(dev)) - return &sp->stats; - - sp->stats.rx_packets += ipg_r32(IPG_FRAMESRCVDOK); - sp->stats.tx_packets += ipg_r32(IPG_FRAMESXMTDOK); - sp->stats.rx_bytes += ipg_r32(IPG_OCTETRCVOK); - sp->stats.tx_bytes += ipg_r32(IPG_OCTETXMTOK); - temp1 = ipg_r16(IPG_FRAMESLOSTRXERRORS); - sp->stats.rx_errors += temp1; - sp->stats.rx_missed_errors += temp1; - temp1 = ipg_r32(IPG_SINGLECOLFRAMES) + ipg_r32(IPG_MULTICOLFRAMES) + - ipg_r32(IPG_LATECOLLISIONS); - temp2 = ipg_r16(IPG_CARRIERSENSEERRORS); - sp->stats.collisions += temp1; - sp->stats.tx_dropped += ipg_r16(IPG_FRAMESABORTXSCOLLS); - sp->stats.tx_errors += ipg_r16(IPG_FRAMESWEXDEFERRAL) + - ipg_r32(IPG_FRAMESWDEFERREDXMT) + temp1 + temp2; - sp->stats.multicast += ipg_r32(IPG_MCSTOCTETRCVDOK); - - /* detailed tx_errors */ - sp->stats.tx_carrier_errors += temp2; - - /* detailed rx_errors */ - sp->stats.rx_length_errors += ipg_r16(IPG_INRANGELENGTHERRORS) + - ipg_r16(IPG_FRAMETOOLONGERRORS); - sp->stats.rx_crc_errors += ipg_r16(IPG_FRAMECHECKSEQERRORS); - - /* Unutilized IPG statistic registers. */ - ipg_r32(IPG_MCSTFRAMESRCVDOK); - - return &sp->stats; -} - -/* Restore used receive buffers. */ -static int ipg_nic_rxrestore(struct net_device *dev) -{ - struct ipg_nic_private *sp = netdev_priv(dev); - const unsigned int curr = sp->rx_current; - unsigned int dirty = sp->rx_dirty; - - IPG_DEBUG_MSG("_nic_rxrestore\n"); - - for (dirty = sp->rx_dirty; curr - dirty > 0; dirty++) { - unsigned int entry = dirty % IPG_RFDLIST_LENGTH; - - /* rx_copybreak may poke hole here and there. */ - if (sp->rx_buff[entry]) - continue; - - /* Generate a new receive buffer to replace the - * current buffer (which will be released by the - * Linux system). - */ - if (ipg_get_rxbuff(dev, entry) < 0) { - IPG_DEBUG_MSG("Cannot allocate new Rx buffer\n"); - - break; - } - - /* Reset the RFS field. */ - sp->rxd[entry].rfs = 0x0000000000000000; - } - sp->rx_dirty = dirty; - - return 0; -} - -/* use jumboindex and jumbosize to control jumbo frame status - * initial status is jumboindex=-1 and jumbosize=0 - * 1. jumboindex = -1 and jumbosize=0 : previous jumbo frame has been done. - * 2. jumboindex != -1 and jumbosize != 0 : jumbo frame is not over size and receiving - * 3. 
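/*
 * [Editor's example] ipg_nic_rxrestore() above relies on free-running
 * unsigned indices: rx_current and rx_dirty only ever increase, the
 * ring slot is index % IPG_RFDLIST_LENGTH, and "curr - dirty" stays
 * correct even when the counters wrap, thanks to unsigned arithmetic.
 * Small demonstration of that invariant:
 */
#include <stdio.h>

#define RING_LEN 8

int main(void)
{
        unsigned int dirty = 0xfffffffe;        /* about to wrap */
        unsigned int curr  = dirty + 5;         /* 5 buffers consumed */

        printf("outstanding = %u\n", curr - dirty);     /* still 5 */
        for (; curr - dirty > 0; dirty++)
                printf("refill slot %u\n", dirty % RING_LEN);
        return 0;
}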
jumboindex = -1 and jumbosize != 0 : jumbo frame is over size, already dump - * previous receiving and need to continue dumping the current one - */ -enum { - NORMAL_PACKET, - ERROR_PACKET -}; - -enum { - FRAME_NO_START_NO_END = 0, - FRAME_WITH_START = 1, - FRAME_WITH_END = 10, - FRAME_WITH_START_WITH_END = 11 -}; - -static void ipg_nic_rx_free_skb(struct net_device *dev) -{ - struct ipg_nic_private *sp = netdev_priv(dev); - unsigned int entry = sp->rx_current % IPG_RFDLIST_LENGTH; - - if (sp->rx_buff[entry]) { - struct ipg_rx *rxfd = sp->rxd + entry; - - pci_unmap_single(sp->pdev, - le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN, - sp->rx_buf_sz, PCI_DMA_FROMDEVICE); - dev_kfree_skb_irq(sp->rx_buff[entry]); - sp->rx_buff[entry] = NULL; - } -} - -static int ipg_nic_rx_check_frame_type(struct net_device *dev) -{ - struct ipg_nic_private *sp = netdev_priv(dev); - struct ipg_rx *rxfd = sp->rxd + (sp->rx_current % IPG_RFDLIST_LENGTH); - int type = FRAME_NO_START_NO_END; - - if (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMESTART) - type += FRAME_WITH_START; - if (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMEEND) - type += FRAME_WITH_END; - return type; -} - -static int ipg_nic_rx_check_error(struct net_device *dev) -{ - struct ipg_nic_private *sp = netdev_priv(dev); - unsigned int entry = sp->rx_current % IPG_RFDLIST_LENGTH; - struct ipg_rx *rxfd = sp->rxd + entry; - - if (IPG_DROP_ON_RX_ETH_ERRORS && (le64_to_cpu(rxfd->rfs) & - (IPG_RFS_RXFIFOOVERRUN | IPG_RFS_RXRUNTFRAME | - IPG_RFS_RXALIGNMENTERROR | IPG_RFS_RXFCSERROR | - IPG_RFS_RXOVERSIZEDFRAME | IPG_RFS_RXLENGTHERROR))) { - IPG_DEBUG_MSG("Rx error, RFS = %016lx\n", - (unsigned long) rxfd->rfs); - - /* Increment general receive error statistic. */ - sp->stats.rx_errors++; - - /* Increment detailed receive error statistics. */ - if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFIFOOVERRUN) { - IPG_DEBUG_MSG("RX FIFO overrun occurred\n"); - - sp->stats.rx_fifo_errors++; - } - - if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXRUNTFRAME) { - IPG_DEBUG_MSG("RX runt occurred\n"); - sp->stats.rx_length_errors++; - } - - /* Do nothing for IPG_RFS_RXOVERSIZEDFRAME, - * error count handled by a IPG statistic register. - */ - - if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXALIGNMENTERROR) { - IPG_DEBUG_MSG("RX alignment error occurred\n"); - sp->stats.rx_frame_errors++; - } - - /* Do nothing for IPG_RFS_RXFCSERROR, error count - * handled by a IPG statistic register. - */ - - /* Free the memory associated with the RX - * buffer since it is erroneous and we will - * not pass it to higher layer processes. 
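/*
 * [Editor's example] The enum above is chosen so the two RFS flags can
 * simply be added: 1 (start) + 10 (end) = 11 (start and end in one
 * RFD).  Standalone classifier over the two status bits (mask values
 * follow IPG_RFS_FRAMESTART/IPG_RFS_FRAMEEND from ipg.h further below):
 */
#include <stdint.h>
#include <stdio.h>

#define RFS_FRAMESTART  0x0000000020000000ULL
#define RFS_FRAMEEND    0x0000000040000000ULL

static int frame_type(uint64_t rfs)
{
        int type = 0;                   /* FRAME_NO_START_NO_END */

        if (rfs & RFS_FRAMESTART)
                type += 1;              /* FRAME_WITH_START */
        if (rfs & RFS_FRAMEEND)
                type += 10;             /* FRAME_WITH_END */
        return type;                    /* 11 == both flags set */
}

int main(void)
{
        printf("%d\n", frame_type(RFS_FRAMESTART | RFS_FRAMEEND));
        return 0;
}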
- */ - if (sp->rx_buff[entry]) { - pci_unmap_single(sp->pdev, - le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN, - sp->rx_buf_sz, PCI_DMA_FROMDEVICE); - - dev_kfree_skb_irq(sp->rx_buff[entry]); - sp->rx_buff[entry] = NULL; - } - return ERROR_PACKET; - } - return NORMAL_PACKET; -} - -static void ipg_nic_rx_with_start_and_end(struct net_device *dev, - struct ipg_nic_private *sp, - struct ipg_rx *rxfd, unsigned entry) -{ - struct ipg_jumbo *jumbo = &sp->jumbo; - struct sk_buff *skb; - int framelen; - - if (jumbo->found_start) { - dev_kfree_skb_irq(jumbo->skb); - jumbo->found_start = 0; - jumbo->current_size = 0; - jumbo->skb = NULL; - } - - /* 1: found error, 0 no error */ - if (ipg_nic_rx_check_error(dev) != NORMAL_PACKET) - return; - - skb = sp->rx_buff[entry]; - if (!skb) - return; - - /* accept this frame and send to upper layer */ - framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN; - if (framelen > sp->rxfrag_size) - framelen = sp->rxfrag_size; - - skb_put(skb, framelen); - skb->protocol = eth_type_trans(skb, dev); - skb_checksum_none_assert(skb); - netif_rx(skb); - sp->rx_buff[entry] = NULL; -} - -static void ipg_nic_rx_with_start(struct net_device *dev, - struct ipg_nic_private *sp, - struct ipg_rx *rxfd, unsigned entry) -{ - struct ipg_jumbo *jumbo = &sp->jumbo; - struct pci_dev *pdev = sp->pdev; - struct sk_buff *skb; - - /* 1: found error, 0 no error */ - if (ipg_nic_rx_check_error(dev) != NORMAL_PACKET) - return; - - /* accept this frame and send to upper layer */ - skb = sp->rx_buff[entry]; - if (!skb) - return; - - if (jumbo->found_start) - dev_kfree_skb_irq(jumbo->skb); - - pci_unmap_single(pdev, le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN, - sp->rx_buf_sz, PCI_DMA_FROMDEVICE); - - skb_put(skb, sp->rxfrag_size); - - jumbo->found_start = 1; - jumbo->current_size = sp->rxfrag_size; - jumbo->skb = skb; - - sp->rx_buff[entry] = NULL; -} - -static void ipg_nic_rx_with_end(struct net_device *dev, - struct ipg_nic_private *sp, - struct ipg_rx *rxfd, unsigned entry) -{ - struct ipg_jumbo *jumbo = &sp->jumbo; - - /* 1: found error, 0 no error */ - if (ipg_nic_rx_check_error(dev) == NORMAL_PACKET) { - struct sk_buff *skb = sp->rx_buff[entry]; - - if (!skb) - return; - - if (jumbo->found_start) { - int framelen, endframelen; - - framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN; - - endframelen = framelen - jumbo->current_size; - if (framelen > sp->rxsupport_size) - dev_kfree_skb_irq(jumbo->skb); - else { - memcpy(skb_put(jumbo->skb, endframelen), - skb->data, endframelen); - - jumbo->skb->protocol = - eth_type_trans(jumbo->skb, dev); - - skb_checksum_none_assert(jumbo->skb); - netif_rx(jumbo->skb); - } - } - - jumbo->found_start = 0; - jumbo->current_size = 0; - jumbo->skb = NULL; - - ipg_nic_rx_free_skb(dev); - } else { - dev_kfree_skb_irq(jumbo->skb); - jumbo->found_start = 0; - jumbo->current_size = 0; - jumbo->skb = NULL; - } -} - -static void ipg_nic_rx_no_start_no_end(struct net_device *dev, - struct ipg_nic_private *sp, - struct ipg_rx *rxfd, unsigned entry) -{ - struct ipg_jumbo *jumbo = &sp->jumbo; - - /* 1: found error, 0 no error */ - if (ipg_nic_rx_check_error(dev) == NORMAL_PACKET) { - struct sk_buff *skb = sp->rx_buff[entry]; - - if (skb) { - if (jumbo->found_start) { - jumbo->current_size += sp->rxfrag_size; - if (jumbo->current_size <= sp->rxsupport_size) { - memcpy(skb_put(jumbo->skb, - sp->rxfrag_size), - skb->data, sp->rxfrag_size); - } - } - ipg_nic_rx_free_skb(dev); - } - } else { - dev_kfree_skb_irq(jumbo->skb); - jumbo->found_start = 0; - 
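/*
 * [Editor's example] Taken together, the four handlers above implement
 * a small reassembly state machine: a START fragment begins a pending
 * jumbo buffer, middle fragments append rxfrag_size bytes while the
 * running total stays within rxsupport_size, and the END fragment
 * appends only the remainder (framelen - current_size) before delivery.
 * Condensed sketch of the bookkeeping, minus all skb/DMA handling:
 */
#include <stdio.h>

struct jumbo_state {
        int found_start;
        unsigned int current_size;      /* bytes accumulated so far */
};

static void on_start(struct jumbo_state *j, unsigned int fragsize)
{
        j->found_start = 1;
        j->current_size = fragsize;
}

static void on_middle(struct jumbo_state *j, unsigned int fragsize,
                      unsigned int support)
{
        if (j->found_start && j->current_size + fragsize <= support)
                j->current_size += fragsize;    /* append a full fragment */
}

static unsigned int on_end(struct jumbo_state *j, unsigned int framelen)
{
        unsigned int tail = framelen - j->current_size;

        j->found_start = 0;
        j->current_size = 0;
        return tail;    /* bytes to copy out of the final fragment */
}

int main(void)
{
        struct jumbo_state j = { 0, 0 };

        on_start(&j, 4088);
        on_middle(&j, 4088, 16368);
        printf("tail = %u\n", on_end(&j, 9000));        /* 9000 - 8176 */
        return 0;
}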
jumbo->current_size = 0; - jumbo->skb = NULL; - } -} - -static int ipg_nic_rx_jumbo(struct net_device *dev) -{ - struct ipg_nic_private *sp = netdev_priv(dev); - unsigned int curr = sp->rx_current; - void __iomem *ioaddr = sp->ioaddr; - unsigned int i; - - IPG_DEBUG_MSG("_nic_rx\n"); - - for (i = 0; i < IPG_MAXRFDPROCESS_COUNT; i++, curr++) { - unsigned int entry = curr % IPG_RFDLIST_LENGTH; - struct ipg_rx *rxfd = sp->rxd + entry; - - if (!(rxfd->rfs & cpu_to_le64(IPG_RFS_RFDDONE))) - break; - - switch (ipg_nic_rx_check_frame_type(dev)) { - case FRAME_WITH_START_WITH_END: - ipg_nic_rx_with_start_and_end(dev, sp, rxfd, entry); - break; - case FRAME_WITH_START: - ipg_nic_rx_with_start(dev, sp, rxfd, entry); - break; - case FRAME_WITH_END: - ipg_nic_rx_with_end(dev, sp, rxfd, entry); - break; - case FRAME_NO_START_NO_END: - ipg_nic_rx_no_start_no_end(dev, sp, rxfd, entry); - break; - } - } - - sp->rx_current = curr; - - if (i == IPG_MAXRFDPROCESS_COUNT) { - /* There are more RFDs to process, however the - * allocated amount of RFD processing time has - * expired. Assert Interrupt Requested to make - * sure we come back to process the remaining RFDs. - */ - ipg_w32(ipg_r32(ASIC_CTRL) | IPG_AC_INT_REQUEST, ASIC_CTRL); - } - - ipg_nic_rxrestore(dev); - - return 0; -} - -static int ipg_nic_rx(struct net_device *dev) -{ - /* Transfer received Ethernet frames to higher network layers. */ - struct ipg_nic_private *sp = netdev_priv(dev); - unsigned int curr = sp->rx_current; - void __iomem *ioaddr = sp->ioaddr; - struct ipg_rx *rxfd; - unsigned int i; - - IPG_DEBUG_MSG("_nic_rx\n"); - -#define __RFS_MASK \ - cpu_to_le64(IPG_RFS_RFDDONE | IPG_RFS_FRAMESTART | IPG_RFS_FRAMEEND) - - for (i = 0; i < IPG_MAXRFDPROCESS_COUNT; i++, curr++) { - unsigned int entry = curr % IPG_RFDLIST_LENGTH; - struct sk_buff *skb = sp->rx_buff[entry]; - unsigned int framelen; - - rxfd = sp->rxd + entry; - - if (((rxfd->rfs & __RFS_MASK) != __RFS_MASK) || !skb) - break; - - /* Get received frame length. */ - framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN; - - /* Check for jumbo frame arrival with too small - * RXFRAG_SIZE. - */ - if (framelen > sp->rxfrag_size) { - IPG_DEBUG_MSG - ("RFS FrameLen > allocated fragment size\n"); - - framelen = sp->rxfrag_size; - } - - if ((IPG_DROP_ON_RX_ETH_ERRORS && (le64_to_cpu(rxfd->rfs) & - (IPG_RFS_RXFIFOOVERRUN | IPG_RFS_RXRUNTFRAME | - IPG_RFS_RXALIGNMENTERROR | IPG_RFS_RXFCSERROR | - IPG_RFS_RXOVERSIZEDFRAME | IPG_RFS_RXLENGTHERROR)))) { - - IPG_DEBUG_MSG("Rx error, RFS = %016lx\n", - (unsigned long int) rxfd->rfs); - - /* Increment general receive error statistic. */ - sp->stats.rx_errors++; - - /* Increment detailed receive error statistics. */ - if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFIFOOVERRUN) { - IPG_DEBUG_MSG("RX FIFO overrun occurred\n"); - sp->stats.rx_fifo_errors++; - } - - if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXRUNTFRAME) { - IPG_DEBUG_MSG("RX runt occurred\n"); - sp->stats.rx_length_errors++; - } - - if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXOVERSIZEDFRAME) ; - /* Do nothing, error count handled by a IPG - * statistic register. - */ - - if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXALIGNMENTERROR) { - IPG_DEBUG_MSG("RX alignment error occurred\n"); - sp->stats.rx_frame_errors++; - } - - if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFCSERROR) ; - /* Do nothing, error count handled by a IPG - * statistic register. - */ - - /* Free the memory associated with the RX - * buffer since it is erroneous and we will - * not pass it to higher layer processes. 
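/*
 * [Editor's example] Both receive paths above cap the work done per
 * interrupt at IPG_MAXRFDPROCESS_COUNT descriptors; when the budget
 * runs out with work left, the driver sets IPG_AC_INT_REQUEST so the
 * chip raises a fresh interrupt and the remainder is handled on the
 * next pass.  Control-flow skeleton (budget and the descriptor test
 * are illustrative):
 */
#include <stdio.h>

#define BUDGET 4

static int descriptor_done(unsigned int idx)
{
        return idx < 6;         /* pretend 6 frames are waiting */
}

int main(void)
{
        unsigned int i, curr = 0;

        for (i = 0; i < BUDGET; i++, curr++) {
                if (!descriptor_done(curr))
                        break;
                printf("processed descriptor %u\n", curr);
        }
        if (i == BUDGET)
                printf("budget exhausted: request a follow-up interrupt\n");
        return 0;
}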
- */ - if (skb) { - __le64 info = rxfd->frag_info; - - pci_unmap_single(sp->pdev, - le64_to_cpu(info) & ~IPG_RFI_FRAGLEN, - sp->rx_buf_sz, PCI_DMA_FROMDEVICE); - - dev_kfree_skb_irq(skb); - } - } else { - - /* Adjust the new buffer length to accommodate the size - * of the received frame. - */ - skb_put(skb, framelen); - - /* Set the buffer's protocol field to Ethernet. */ - skb->protocol = eth_type_trans(skb, dev); - - /* The IPG encountered an error with (or - * there were no) IP/TCP/UDP checksums. - * This may or may not indicate an invalid - * IP/TCP/UDP frame was received. Let the - * upper layer decide. - */ - skb_checksum_none_assert(skb); - - /* Hand off frame for higher layer processing. - * The function netif_rx() releases the sk_buff - * when processing completes. - */ - netif_rx(skb); - } - - /* Assure RX buffer is not reused by IPG. */ - sp->rx_buff[entry] = NULL; - } - - /* - * If there are more RFDs to process and the allocated amount of RFD - * processing time has expired, assert Interrupt Requested to make - * sure we come back to process the remaining RFDs. - */ - if (i == IPG_MAXRFDPROCESS_COUNT) - ipg_w32(ipg_r32(ASIC_CTRL) | IPG_AC_INT_REQUEST, ASIC_CTRL); - -#ifdef IPG_DEBUG - /* Check if the RFD list contained no receive frame data. */ - if (!i) - sp->EmptyRFDListCount++; -#endif - while ((le64_to_cpu(rxfd->rfs) & IPG_RFS_RFDDONE) && - !((le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMESTART) && - (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMEEND))) { - unsigned int entry = curr++ % IPG_RFDLIST_LENGTH; - - rxfd = sp->rxd + entry; - - IPG_DEBUG_MSG("Frame requires multiple RFDs\n"); - - /* An unexpected event, additional code needed to handle - * properly. So for the time being, just disregard the - * frame. - */ - - /* Free the memory associated with the RX - * buffer since it is erroneous and we will - * not pass it to higher layer processes. - */ - if (sp->rx_buff[entry]) { - pci_unmap_single(sp->pdev, - le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN, - sp->rx_buf_sz, PCI_DMA_FROMDEVICE); - dev_kfree_skb_irq(sp->rx_buff[entry]); - } - - /* Assure RX buffer is not reused by IPG. */ - sp->rx_buff[entry] = NULL; - } - - sp->rx_current = curr; - - /* Check to see if there are a minimum number of used - * RFDs before restoring any (should improve performance.) - */ - if ((curr - sp->rx_dirty) >= IPG_MINUSEDRFDSTOFREE) - ipg_nic_rxrestore(dev); - - return 0; -} - -static void ipg_reset_after_host_error(struct work_struct *work) -{ - struct ipg_nic_private *sp = - container_of(work, struct ipg_nic_private, task.work); - struct net_device *dev = sp->dev; - - /* - * Acknowledge HostError interrupt by resetting - * IPG DMA and HOST. - */ - ipg_reset(dev, IPG_AC_GLOBAL_RESET | IPG_AC_HOST | IPG_AC_DMA); - - init_rfdlist(dev); - init_tfdlist(dev); - - if (ipg_io_config(dev) < 0) { - netdev_info(dev, "Cannot recover from PCI error\n"); - schedule_delayed_work(&sp->task, HZ); - } -} - -static irqreturn_t ipg_interrupt_handler(int irq, void *dev_inst) -{ - struct net_device *dev = dev_inst; - struct ipg_nic_private *sp = netdev_priv(dev); - void __iomem *ioaddr = sp->ioaddr; - unsigned int handled = 0; - u16 status; - - IPG_DEBUG_MSG("_interrupt_handler\n"); - - if (sp->is_jumbo) - ipg_nic_rxrestore(dev); - - spin_lock(&sp->lock); - - /* Get interrupt source information, and acknowledge - * some (i.e. TxDMAComplete, RxDMAComplete, RxEarly, - * IntRequested, MacControlFrame, LinkEvent) interrupts - * if issued. Also, all IPG interrupts are disabled by - * reading IntStatusAck. 
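/*
 * [Editor's example] The handler above follows the usual shared-IRQ
 * shape: reading INT_STATUS_ACK both acknowledges and masks the chip's
 * interrupts, a zero status means "not ours", and the enable mask is
 * rewritten on the way out.  Skeleton of that flow (both register stubs
 * and the mask value are illustrative; the real handler also takes
 * sp->lock around the dispatch):
 */
#include <stdio.h>

static unsigned int read_int_status_ack(void)
{
        return 0x0400;                  /* pretend: one event pending */
}

static void write_int_enable(unsigned int mask)
{
        printf("re-enable with mask %04x\n", mask);
}

static int isr(void)
{
        unsigned int status = read_int_status_ack();    /* ack + disable */
        int handled = 0;

        if (status) {
                handled = 1;
                /* ...dispatch on the individual status bits here... */
        }
        write_int_enable(0x3fff);       /* illustrative enable mask */
        return handled;
}

int main(void)
{
        printf("handled = %d\n", isr());
        return 0;
}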
- */ - status = ipg_r16(INT_STATUS_ACK); - - IPG_DEBUG_MSG("IntStatusAck = %04x\n", status); - - /* Shared IRQ of remove event. */ - if (!(status & IPG_IS_RSVD_MASK)) - goto out_enable; - - handled = 1; - - if (unlikely(!netif_running(dev))) - goto out_unlock; - - /* If RFDListEnd interrupt, restore all used RFDs. */ - if (status & IPG_IS_RFD_LIST_END) { - IPG_DEBUG_MSG("RFDListEnd Interrupt\n"); - - /* The RFD list end indicates an RFD was encountered - * with a 0 NextPtr, or with an RFDDone bit set to 1 - * (indicating the RFD is not read for use by the - * IPG.) Try to restore all RFDs. - */ - ipg_nic_rxrestore(dev); - -#ifdef IPG_DEBUG - /* Increment the RFDlistendCount counter. */ - sp->RFDlistendCount++; -#endif - } - - /* If RFDListEnd, RxDMAPriority, RxDMAComplete, or - * IntRequested interrupt, process received frames. */ - if ((status & IPG_IS_RX_DMA_PRIORITY) || - (status & IPG_IS_RFD_LIST_END) || - (status & IPG_IS_RX_DMA_COMPLETE) || - (status & IPG_IS_INT_REQUESTED)) { -#ifdef IPG_DEBUG - /* Increment the RFD list checked counter if interrupted - * only to check the RFD list. */ - if (status & (~(IPG_IS_RX_DMA_PRIORITY | IPG_IS_RFD_LIST_END | - IPG_IS_RX_DMA_COMPLETE | IPG_IS_INT_REQUESTED) & - (IPG_IS_HOST_ERROR | IPG_IS_TX_DMA_COMPLETE | - IPG_IS_LINK_EVENT | IPG_IS_TX_COMPLETE | - IPG_IS_UPDATE_STATS))) - sp->RFDListCheckedCount++; -#endif - - if (sp->is_jumbo) - ipg_nic_rx_jumbo(dev); - else - ipg_nic_rx(dev); - } - - /* If TxDMAComplete interrupt, free used TFDs. */ - if (status & IPG_IS_TX_DMA_COMPLETE) - ipg_nic_txfree(dev); - - /* TxComplete interrupts indicate one of numerous actions. - * Determine what action to take based on TXSTATUS register. - */ - if (status & IPG_IS_TX_COMPLETE) - ipg_nic_txcleanup(dev); - - /* If UpdateStats interrupt, update Linux Ethernet statistics */ - if (status & IPG_IS_UPDATE_STATS) - ipg_nic_get_stats(dev); - - /* If HostError interrupt, reset IPG. */ - if (status & IPG_IS_HOST_ERROR) { - IPG_DDEBUG_MSG("HostError Interrupt\n"); - - schedule_delayed_work(&sp->task, 0); - } - - /* If LinkEvent interrupt, resolve autonegotiation. */ - if (status & IPG_IS_LINK_EVENT) { - if (ipg_config_autoneg(dev) < 0) - netdev_info(dev, "Auto-negotiation error\n"); - } - - /* If MACCtrlFrame interrupt, do nothing. */ - if (status & IPG_IS_MAC_CTRL_FRAME) - IPG_DEBUG_MSG("MACCtrlFrame interrupt\n"); - - /* If RxComplete interrupt, do nothing. */ - if (status & IPG_IS_RX_COMPLETE) - IPG_DEBUG_MSG("RxComplete interrupt\n"); - - /* If RxEarly interrupt, do nothing. */ - if (status & IPG_IS_RX_EARLY) - IPG_DEBUG_MSG("RxEarly interrupt\n"); - -out_enable: - /* Re-enable IPG interrupts. 
*/ - ipg_w16(IPG_IE_TX_DMA_COMPLETE | IPG_IE_RX_DMA_COMPLETE | - IPG_IE_HOST_ERROR | IPG_IE_INT_REQUESTED | IPG_IE_TX_COMPLETE | - IPG_IE_LINK_EVENT | IPG_IE_UPDATE_STATS, INT_ENABLE); -out_unlock: - spin_unlock(&sp->lock); - - return IRQ_RETVAL(handled); -} - -static void ipg_rx_clear(struct ipg_nic_private *sp) -{ - unsigned int i; - - for (i = 0; i < IPG_RFDLIST_LENGTH; i++) { - if (sp->rx_buff[i]) { - struct ipg_rx *rxfd = sp->rxd + i; - - dev_kfree_skb_irq(sp->rx_buff[i]); - sp->rx_buff[i] = NULL; - pci_unmap_single(sp->pdev, - le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN, - sp->rx_buf_sz, PCI_DMA_FROMDEVICE); - } - } -} - -static void ipg_tx_clear(struct ipg_nic_private *sp) -{ - unsigned int i; - - for (i = 0; i < IPG_TFDLIST_LENGTH; i++) { - if (sp->tx_buff[i]) { - struct ipg_tx *txfd = sp->txd + i; - - pci_unmap_single(sp->pdev, - le64_to_cpu(txfd->frag_info) & ~IPG_TFI_FRAGLEN, - sp->tx_buff[i]->len, PCI_DMA_TODEVICE); - - dev_kfree_skb_irq(sp->tx_buff[i]); - - sp->tx_buff[i] = NULL; - } - } -} - -static int ipg_nic_open(struct net_device *dev) -{ - struct ipg_nic_private *sp = netdev_priv(dev); - void __iomem *ioaddr = sp->ioaddr; - struct pci_dev *pdev = sp->pdev; - int rc; - - IPG_DEBUG_MSG("_nic_open\n"); - - sp->rx_buf_sz = sp->rxsupport_size; - - /* Check for interrupt line conflicts, and request interrupt - * line for IPG. - * - * IMPORTANT: Disable IPG interrupts prior to registering - * IRQ. - */ - ipg_w16(0x0000, INT_ENABLE); - - /* Register the interrupt line to be used by the IPG within - * the Linux system. - */ - rc = request_irq(pdev->irq, ipg_interrupt_handler, IRQF_SHARED, - dev->name, dev); - if (rc < 0) { - netdev_info(dev, "Error when requesting interrupt\n"); - goto out; - } - - dev->irq = pdev->irq; - - rc = -ENOMEM; - - sp->rxd = dma_alloc_coherent(&pdev->dev, IPG_RX_RING_BYTES, - &sp->rxd_map, GFP_KERNEL); - if (!sp->rxd) - goto err_free_irq_0; - - sp->txd = dma_alloc_coherent(&pdev->dev, IPG_TX_RING_BYTES, - &sp->txd_map, GFP_KERNEL); - if (!sp->txd) - goto err_free_rx_1; - - rc = init_rfdlist(dev); - if (rc < 0) { - netdev_info(dev, "Error during configuration\n"); - goto err_free_tx_2; - } - - init_tfdlist(dev); - - rc = ipg_io_config(dev); - if (rc < 0) { - netdev_info(dev, "Error during configuration\n"); - goto err_release_tfdlist_3; - } - - /* Resolve autonegotiation. */ - if (ipg_config_autoneg(dev) < 0) - netdev_info(dev, "Auto-negotiation error\n"); - - /* initialize JUMBO Frame control variable */ - sp->jumbo.found_start = 0; - sp->jumbo.current_size = 0; - sp->jumbo.skb = NULL; - - /* Enable transmit and receive operation of the IPG. 
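/*
 * [Editor's example] ipg_nic_open() above uses the kernel's standard
 * goto-unwind ladder: each acquisition gets a numbered error label, and
 * a failure at step N jumps to the label that releases steps N-1..1 in
 * reverse order.  The shape in miniature (resource names are
 * illustrative; flip acquire() to return -1 to exercise the unwind):
 */
#include <stdio.h>

static int acquire(const char *what)
{
        printf("acquire %s\n", what);
        return 0;
}

static void release(const char *what)
{
        printf("release %s\n", what);
}

static int open_example(void)
{
        int rc;

        rc = acquire("irq");
        if (rc < 0)
                goto out;
        rc = acquire("rx ring");
        if (rc < 0)
                goto err_free_irq_0;
        rc = acquire("tx ring");
        if (rc < 0)
                goto err_free_rx_1;
        return 0;

err_free_rx_1:
        release("rx ring");
err_free_irq_0:
        release("irq");
out:
        return rc;
}

int main(void)
{
        return open_example() ? 1 : 0;
}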
*/ - ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_RX_ENABLE | IPG_MC_TX_ENABLE) & - IPG_MC_RSVD_MASK, MAC_CTRL); - - netif_start_queue(dev); -out: - return rc; - -err_release_tfdlist_3: - ipg_tx_clear(sp); - ipg_rx_clear(sp); -err_free_tx_2: - dma_free_coherent(&pdev->dev, IPG_TX_RING_BYTES, sp->txd, sp->txd_map); -err_free_rx_1: - dma_free_coherent(&pdev->dev, IPG_RX_RING_BYTES, sp->rxd, sp->rxd_map); -err_free_irq_0: - free_irq(pdev->irq, dev); - goto out; -} - -static int ipg_nic_stop(struct net_device *dev) -{ - struct ipg_nic_private *sp = netdev_priv(dev); - void __iomem *ioaddr = sp->ioaddr; - struct pci_dev *pdev = sp->pdev; - - IPG_DEBUG_MSG("_nic_stop\n"); - - netif_stop_queue(dev); - - IPG_DUMPTFDLIST(dev); - - do { - (void) ipg_r16(INT_STATUS_ACK); - - ipg_reset(dev, IPG_AC_GLOBAL_RESET | IPG_AC_HOST | IPG_AC_DMA); - - synchronize_irq(pdev->irq); - } while (ipg_r16(INT_ENABLE) & IPG_IE_RSVD_MASK); - - ipg_rx_clear(sp); - - ipg_tx_clear(sp); - - pci_free_consistent(pdev, IPG_RX_RING_BYTES, sp->rxd, sp->rxd_map); - pci_free_consistent(pdev, IPG_TX_RING_BYTES, sp->txd, sp->txd_map); - - free_irq(pdev->irq, dev); - - return 0; -} - -static netdev_tx_t ipg_nic_hard_start_xmit(struct sk_buff *skb, - struct net_device *dev) -{ - struct ipg_nic_private *sp = netdev_priv(dev); - void __iomem *ioaddr = sp->ioaddr; - unsigned int entry = sp->tx_current % IPG_TFDLIST_LENGTH; - unsigned long flags; - struct ipg_tx *txfd; - - IPG_DDEBUG_MSG("_nic_hard_start_xmit\n"); - - /* If in 10Mbps mode, stop the transmit queue so - * no more transmit frames are accepted. - */ - if (sp->tenmbpsmode) - netif_stop_queue(dev); - - if (sp->reset_current_tfd) { - sp->reset_current_tfd = 0; - entry = 0; - } - - txfd = sp->txd + entry; - - sp->tx_buff[entry] = skb; - - /* Clear all TFC fields, except TFDDONE. */ - txfd->tfc = cpu_to_le64(IPG_TFC_TFDDONE); - - /* Specify the TFC field within the TFD. */ - txfd->tfc |= cpu_to_le64(IPG_TFC_WORDALIGNDISABLED | - (IPG_TFC_FRAMEID & sp->tx_current) | - (IPG_TFC_FRAGCOUNT & (1 << 24))); - /* - * 16--17 (WordAlign) <- 3 (disable), - * 0--15 (FrameId) <- sp->tx_current, - * 24--27 (FragCount) <- 1 - */ - - /* Request TxComplete interrupts at an interval defined - * by the constant IPG_FRAMESBETWEENTXCOMPLETES. - * Request TxComplete interrupt for every frame - * if in 10Mbps mode to accommodate problem with 10Mbps - * processing. - */ - if (sp->tenmbpsmode) - txfd->tfc |= cpu_to_le64(IPG_TFC_TXINDICATE); - txfd->tfc |= cpu_to_le64(IPG_TFC_TXDMAINDICATE); - /* Based on compilation option, determine if FCS is to be - * appended to transmit frame by IPG. - */ - if (!(IPG_APPEND_FCS_ON_TX)) - txfd->tfc |= cpu_to_le64(IPG_TFC_FCSAPPENDDISABLE); - - /* Based on compilation option, determine if IP, TCP and/or - * UDP checksums are to be added to transmit frame by IPG. - */ - if (IPG_ADD_IPCHECKSUM_ON_TX) - txfd->tfc |= cpu_to_le64(IPG_TFC_IPCHECKSUMENABLE); - - if (IPG_ADD_TCPCHECKSUM_ON_TX) - txfd->tfc |= cpu_to_le64(IPG_TFC_TCPCHECKSUMENABLE); - - if (IPG_ADD_UDPCHECKSUM_ON_TX) - txfd->tfc |= cpu_to_le64(IPG_TFC_UDPCHECKSUMENABLE); - - /* Based on compilation option, determine if VLAN tag info is to be - * inserted into transmit frame by IPG. - */ - if (IPG_INSERT_MANUAL_VLAN_TAG) { - txfd->tfc |= cpu_to_le64(IPG_TFC_VLANTAGINSERT | - ((u64) IPG_MANUAL_VLAN_VID << 32) | - ((u64) IPG_MANUAL_VLAN_CFI << 44) | - ((u64) IPG_MANUAL_VLAN_USERPRIORITY << 45)); - } - - /* The fragment start location within system memory is defined - * by the sk_buff structure's data field. 
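/*
 * [Editor's example] The transmit path above builds the 64-bit TFC and
 * FragInfo words by masking shifted fields into place.  The FragInfo
 * composition in isolation, using the IPG_TFI_* mask values defined in
 * ipg.h further below (40-bit bus address, 16-bit length in bits 48..63):
 */
#include <stdint.h>
#include <stdio.h>

#define TFI_FRAGADDR    0x000000FFFFFFFFFFULL
#define TFI_FRAGLEN     0xFFFF000000000000ULL

static uint64_t pack_frag_info(uint64_t dma_addr, unsigned int len)
{
        uint64_t v = dma_addr & TFI_FRAGADDR;

        v |= ((uint64_t)(len & 0xffff) << 48) & TFI_FRAGLEN;
        return v;
}

int main(void)
{
        printf("%016llx\n",
               (unsigned long long)pack_frag_info(0x12345678ULL, 1514));
        return 0;
}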
The physical address - * of this location within the system's virtual memory space - * is determined using the IPG_HOST2BUS_MAP function. - */ - txfd->frag_info = cpu_to_le64(pci_map_single(sp->pdev, skb->data, - skb->len, PCI_DMA_TODEVICE)); - - /* The length of the fragment within system memory is defined by - * the sk_buff structure's len field. - */ - txfd->frag_info |= cpu_to_le64(IPG_TFI_FRAGLEN & - ((u64) (skb->len & 0xffff) << 48)); - - /* Clear the TFDDone bit last to indicate the TFD is ready - * for transfer to the IPG. - */ - txfd->tfc &= cpu_to_le64(~IPG_TFC_TFDDONE); - - spin_lock_irqsave(&sp->lock, flags); - - sp->tx_current++; - - mmiowb(); - - ipg_w32(IPG_DC_TX_DMA_POLL_NOW, DMA_CTRL); - - if (sp->tx_current == (sp->tx_dirty + IPG_TFDLIST_LENGTH)) - netif_stop_queue(dev); - - spin_unlock_irqrestore(&sp->lock, flags); - - return NETDEV_TX_OK; -} - -static void ipg_set_phy_default_param(unsigned char rev, - struct net_device *dev, int phy_address) -{ - unsigned short length; - unsigned char revision; - const unsigned short *phy_param; - unsigned short address, value; - - phy_param = &DefaultPhyParam[0]; - length = *phy_param & 0x00FF; - revision = (unsigned char)((*phy_param) >> 8); - phy_param++; - while (length != 0) { - if (rev == revision) { - while (length > 1) { - address = *phy_param; - value = *(phy_param + 1); - phy_param += 2; - mdio_write(dev, phy_address, address, value); - length -= 4; - } - break; - } else { - phy_param += length / 2; - length = *phy_param & 0x00FF; - revision = (unsigned char)((*phy_param) >> 8); - phy_param++; - } - } -} - -static int read_eeprom(struct net_device *dev, int eep_addr) -{ - void __iomem *ioaddr = ipg_ioaddr(dev); - unsigned int i; - int ret = 0; - u16 value; - - value = IPG_EC_EEPROM_READOPCODE | (eep_addr & 0xff); - ipg_w16(value, EEPROM_CTRL); - - for (i = 0; i < 1000; i++) { - u16 data; - - mdelay(10); - data = ipg_r16(EEPROM_CTRL); - if (!(data & IPG_EC_EEPROM_BUSY)) { - ret = ipg_r16(EEPROM_DATA); - break; - } - } - return ret; -} - -static void ipg_init_mii(struct net_device *dev) -{ - struct ipg_nic_private *sp = netdev_priv(dev); - struct mii_if_info *mii_if = &sp->mii_if; - int phyaddr; - - mii_if->dev = dev; - mii_if->mdio_read = mdio_read; - mii_if->mdio_write = mdio_write; - mii_if->phy_id_mask = 0x1f; - mii_if->reg_num_mask = 0x1f; - - mii_if->phy_id = phyaddr = ipg_find_phyaddr(dev); - - if (phyaddr != 0x1f) { - u16 mii_phyctrl, mii_1000cr; - - mii_1000cr = mdio_read(dev, phyaddr, MII_CTRL1000); - mii_1000cr |= ADVERTISE_1000FULL | ADVERTISE_1000HALF | - GMII_PHY_1000BASETCONTROL_PreferMaster; - mdio_write(dev, phyaddr, MII_CTRL1000, mii_1000cr); - - mii_phyctrl = mdio_read(dev, phyaddr, MII_BMCR); - - /* Set default phyparam */ - ipg_set_phy_default_param(sp->pdev->revision, dev, phyaddr); - - /* Reset PHY */ - mii_phyctrl |= BMCR_RESET | BMCR_ANRESTART; - mdio_write(dev, phyaddr, MII_BMCR, mii_phyctrl); - - } -} - -static int ipg_hw_init(struct net_device *dev) -{ - struct ipg_nic_private *sp = netdev_priv(dev); - void __iomem *ioaddr = sp->ioaddr; - unsigned int i; - int rc; - - /* Read/Write and Reset EEPROM Value */ - /* Read LED Mode Configuration from EEPROM */ - sp->led_mode = read_eeprom(dev, 6); - - /* Reset all functions within the IPG. Do not assert - * RST_OUT as not compatible with some PHYs. 
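/*
 * [Editor's example] read_eeprom() above is a bounded busy-wait: issue
 * the read opcode, then poll the BUSY flag with a delay between
 * samples, giving up after a fixed number of iterations.  The loop
 * shape on its own (the fake controller clears BUSY after three polls):
 */
#include <stdio.h>

#define EC_BUSY 0x8000

static unsigned int poll_ctrl(void)
{
        static int polls;

        return (++polls < 3) ? EC_BUSY : 0;
}

int main(void)
{
        int i;

        for (i = 0; i < 1000; i++) {
                /* the driver sleeps ~10 ms here between samples */
                if (!(poll_ctrl() & EC_BUSY)) {
                        printf("data ready after %d polls\n", i + 1);
                        return 0;
                }
        }
        printf("timed out\n");
        return 1;
}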
- */ - rc = ipg_reset(dev, IPG_RESET_MASK); - if (rc < 0) - goto out; - - ipg_init_mii(dev); - - /* Read MAC Address from EEPROM */ - for (i = 0; i < 3; i++) - sp->station_addr[i] = read_eeprom(dev, 16 + i); - - for (i = 0; i < 3; i++) - ipg_w16(sp->station_addr[i], STATION_ADDRESS_0 + 2*i); - - /* Set station address in ethernet_device structure. */ - dev->dev_addr[0] = ipg_r16(STATION_ADDRESS_0) & 0x00ff; - dev->dev_addr[1] = (ipg_r16(STATION_ADDRESS_0) & 0xff00) >> 8; - dev->dev_addr[2] = ipg_r16(STATION_ADDRESS_1) & 0x00ff; - dev->dev_addr[3] = (ipg_r16(STATION_ADDRESS_1) & 0xff00) >> 8; - dev->dev_addr[4] = ipg_r16(STATION_ADDRESS_2) & 0x00ff; - dev->dev_addr[5] = (ipg_r16(STATION_ADDRESS_2) & 0xff00) >> 8; -out: - return rc; -} - -static int ipg_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) -{ - struct ipg_nic_private *sp = netdev_priv(dev); - int rc; - - mutex_lock(&sp->mii_mutex); - rc = generic_mii_ioctl(&sp->mii_if, if_mii(ifr), cmd, NULL); - mutex_unlock(&sp->mii_mutex); - - return rc; -} - -static int ipg_nic_change_mtu(struct net_device *dev, int new_mtu) -{ - struct ipg_nic_private *sp = netdev_priv(dev); - int err; - - /* Function to accommodate changes to Maximum Transfer Unit - * (or MTU) of IPG NIC. Cannot use default function since - * the default will not allow for MTU > 1500 bytes. - */ - - IPG_DEBUG_MSG("_nic_change_mtu\n"); - - /* - * Check that the new MTU value is between 68 (14 byte header, 46 byte - * payload, 4 byte FCS) and 10 KB, which is the largest supported MTU. - */ - if (new_mtu < 68 || new_mtu > 10240) - return -EINVAL; - - err = ipg_nic_stop(dev); - if (err) - return err; - - dev->mtu = new_mtu; - - sp->max_rxframe_size = new_mtu; - - sp->rxfrag_size = new_mtu; - if (sp->rxfrag_size > 4088) - sp->rxfrag_size = 4088; - - sp->rxsupport_size = sp->max_rxframe_size; - - if (new_mtu > 0x0600) - sp->is_jumbo = true; - else - sp->is_jumbo = false; - - return ipg_nic_open(dev); -} - -static int ipg_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct ipg_nic_private *sp = netdev_priv(dev); - int rc; - - mutex_lock(&sp->mii_mutex); - rc = mii_ethtool_gset(&sp->mii_if, cmd); - mutex_unlock(&sp->mii_mutex); - - return rc; -} - -static int ipg_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct ipg_nic_private *sp = netdev_priv(dev); - int rc; - - mutex_lock(&sp->mii_mutex); - rc = mii_ethtool_sset(&sp->mii_if, cmd); - mutex_unlock(&sp->mii_mutex); - - return rc; -} - -static int ipg_nway_reset(struct net_device *dev) -{ - struct ipg_nic_private *sp = netdev_priv(dev); - int rc; - - mutex_lock(&sp->mii_mutex); - rc = mii_nway_restart(&sp->mii_if); - mutex_unlock(&sp->mii_mutex); - - return rc; -} - -static const struct ethtool_ops ipg_ethtool_ops = { - .get_settings = ipg_get_settings, - .set_settings = ipg_set_settings, - .nway_reset = ipg_nway_reset, -}; - -static void ipg_remove(struct pci_dev *pdev) -{ - struct net_device *dev = pci_get_drvdata(pdev); - struct ipg_nic_private *sp = netdev_priv(dev); - - IPG_DEBUG_MSG("_remove\n"); - - /* Un-register Ethernet device. 
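/*
 * [Editor's example] ipg_nic_change_mtu() above derives three sizes
 * from the new MTU: the maximum frame size, a per-RFD fragment size
 * clamped to 4088 bytes, and a jumbo flag that trips once the MTU
 * exceeds 0x600 (1536) bytes.  The same derivation as a pure function:
 */
#include <stdio.h>

struct rx_sizing {
        int max_rxframe_size;
        int rxfrag_size;
        int is_jumbo;
};

static struct rx_sizing size_for_mtu(int mtu)
{
        struct rx_sizing s;

        s.max_rxframe_size = mtu;
        s.rxfrag_size = (mtu > 4088) ? 4088 : mtu;
        s.is_jumbo = (mtu > 0x0600);
        return s;
}

int main(void)
{
        struct rx_sizing s = size_for_mtu(9000);

        printf("frag %d, jumbo %d\n", s.rxfrag_size, s.is_jumbo);
        return 0;
}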
*/ - unregister_netdev(dev); - - pci_iounmap(pdev, sp->ioaddr); - - pci_release_regions(pdev); - - free_netdev(dev); - pci_disable_device(pdev); -} - -static const struct net_device_ops ipg_netdev_ops = { - .ndo_open = ipg_nic_open, - .ndo_stop = ipg_nic_stop, - .ndo_start_xmit = ipg_nic_hard_start_xmit, - .ndo_get_stats = ipg_nic_get_stats, - .ndo_set_rx_mode = ipg_nic_set_multicast_list, - .ndo_do_ioctl = ipg_ioctl, - .ndo_tx_timeout = ipg_tx_timeout, - .ndo_change_mtu = ipg_nic_change_mtu, - .ndo_set_mac_address = eth_mac_addr, - .ndo_validate_addr = eth_validate_addr, -}; - -static int ipg_probe(struct pci_dev *pdev, const struct pci_device_id *id) -{ - unsigned int i = id->driver_data; - struct ipg_nic_private *sp; - struct net_device *dev; - void __iomem *ioaddr; - int rc; - - rc = pci_enable_device(pdev); - if (rc < 0) - goto out; - - pr_info("%s: %s\n", pci_name(pdev), ipg_brand_name[i]); - - pci_set_master(pdev); - - rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(40)); - if (rc < 0) { - rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); - if (rc < 0) { - pr_err("%s: DMA config failed\n", pci_name(pdev)); - goto err_disable_0; - } - } - - /* - * Initialize net device. - */ - dev = alloc_etherdev(sizeof(struct ipg_nic_private)); - if (!dev) { - rc = -ENOMEM; - goto err_disable_0; - } - - sp = netdev_priv(dev); - spin_lock_init(&sp->lock); - mutex_init(&sp->mii_mutex); - - sp->is_jumbo = IPG_IS_JUMBO; - sp->rxfrag_size = IPG_RXFRAG_SIZE; - sp->rxsupport_size = IPG_RXSUPPORT_SIZE; - sp->max_rxframe_size = IPG_MAX_RXFRAME_SIZE; - - /* Declare IPG NIC functions for Ethernet device methods. - */ - dev->netdev_ops = &ipg_netdev_ops; - SET_NETDEV_DEV(dev, &pdev->dev); - dev->ethtool_ops = &ipg_ethtool_ops; - - rc = pci_request_regions(pdev, DRV_NAME); - if (rc) - goto err_free_dev_1; - - ioaddr = pci_iomap(pdev, 1, pci_resource_len(pdev, 1)); - if (!ioaddr) { - pr_err("%s: cannot map MMIO\n", pci_name(pdev)); - rc = -EIO; - goto err_release_regions_2; - } - - /* Save the pointer to the PCI device information. */ - sp->ioaddr = ioaddr; - sp->pdev = pdev; - sp->dev = dev; - - INIT_DELAYED_WORK(&sp->task, ipg_reset_after_host_error); - - pci_set_drvdata(pdev, dev); - - rc = ipg_hw_init(dev); - if (rc < 0) - goto err_unmap_3; - - rc = register_netdev(dev); - if (rc < 0) - goto err_unmap_3; - - netdev_info(dev, "Ethernet device registered\n"); -out: - return rc; - -err_unmap_3: - pci_iounmap(pdev, ioaddr); -err_release_regions_2: - pci_release_regions(pdev); -err_free_dev_1: - free_netdev(dev); -err_disable_0: - pci_disable_device(pdev); - goto out; -} - -static struct pci_driver ipg_pci_driver = { - .name = IPG_DRIVER_NAME, - .id_table = ipg_pci_tbl, - .probe = ipg_probe, - .remove = ipg_remove, -}; - -module_pci_driver(ipg_pci_driver); diff --git a/drivers/net/ethernet/icplus/ipg.h b/drivers/net/ethernet/icplus/ipg.h deleted file mode 100644 index de606281f97b..000000000000 --- a/drivers/net/ethernet/icplus/ipg.h +++ /dev/null @@ -1,748 +0,0 @@ -/* - * Include file for Gigabit Ethernet device driver for Network - * Interface Cards (NICs) utilizing the Tamarack Microelectronics - * Inc. IPG Gigabit or Triple Speed Ethernet Media Access - * Controller. 
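/*
 * [Editor's example] ipg_probe() above first requests a 40-bit DMA
 * mask, which matches the 40-bit FragAddr field in the descriptors, and
 * only falls back to 32 bits (and then fails) if the platform refuses.
 * Decision flow in isolation (try_set_dma_mask() is a hypothetical
 * stand-in for pci_set_dma_mask()):
 */
#include <stdio.h>

static int try_set_dma_mask(int bits)
{
        return (bits <= 32) ? 0 : -5;   /* pretend only 32-bit DMA works */
}

int main(void)
{
        if (try_set_dma_mask(40) < 0) {
                if (try_set_dma_mask(32) < 0) {
                        printf("DMA config failed\n");
                        return 1;
                }
                printf("using 32-bit DMA\n");
        } else {
                printf("using 40-bit DMA\n");
        }
        return 0;
}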
- */ -#ifndef __LINUX_IPG_H -#define __LINUX_IPG_H - -#include <linux/module.h> - -#include <linux/kernel.h> -#include <linux/pci.h> -#include <linux/ioport.h> -#include <linux/errno.h> -#include <asm/io.h> -#include <linux/delay.h> -#include <linux/types.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/skbuff.h> -#include <asm/bitops.h> - -/* - * Constants - */ - -/* GMII based PHY IDs */ -#define NS 0x2000 -#define MARVELL 0x0141 -#define ICPLUS_PHY 0x243 - -/* NIC Physical Layer Device MII register fields. */ -#define MII_PHY_SELECTOR_IEEE8023 0x0001 -#define MII_PHY_TECHABILITYFIELD 0x1FE0 - -/* GMII_PHY_1000 need to set to prefer master */ -#define GMII_PHY_1000BASETCONTROL_PreferMaster 0x0400 - -/* NIC Physical Layer Device GMII constants. */ -#define GMII_PREAMBLE 0xFFFFFFFF -#define GMII_ST 0x1 -#define GMII_READ 0x2 -#define GMII_WRITE 0x1 -#define GMII_TA_READ_MASK 0x1 -#define GMII_TA_WRITE 0x2 - -/* I/O register offsets. */ -enum ipg_regs { - DMA_CTRL = 0x00, - RX_DMA_STATUS = 0x08, /* Unused + reserved */ - TFD_LIST_PTR_0 = 0x10, - TFD_LIST_PTR_1 = 0x14, - TX_DMA_BURST_THRESH = 0x18, - TX_DMA_URGENT_THRESH = 0x19, - TX_DMA_POLL_PERIOD = 0x1a, - RFD_LIST_PTR_0 = 0x1c, - RFD_LIST_PTR_1 = 0x20, - RX_DMA_BURST_THRESH = 0x24, - RX_DMA_URGENT_THRESH = 0x25, - RX_DMA_POLL_PERIOD = 0x26, - DEBUG_CTRL = 0x2c, - ASIC_CTRL = 0x30, - FIFO_CTRL = 0x38, /* Unused */ - FLOW_OFF_THRESH = 0x3c, - FLOW_ON_THRESH = 0x3e, - EEPROM_DATA = 0x48, - EEPROM_CTRL = 0x4a, - EXPROM_ADDR = 0x4c, /* Unused */ - EXPROM_DATA = 0x50, /* Unused */ - WAKE_EVENT = 0x51, /* Unused */ - COUNTDOWN = 0x54, /* Unused */ - INT_STATUS_ACK = 0x5a, - INT_ENABLE = 0x5c, - INT_STATUS = 0x5e, /* Unused */ - TX_STATUS = 0x60, - MAC_CTRL = 0x6c, - VLAN_TAG = 0x70, /* Unused */ - PHY_SET = 0x75, - PHY_CTRL = 0x76, - STATION_ADDRESS_0 = 0x78, - STATION_ADDRESS_1 = 0x7a, - STATION_ADDRESS_2 = 0x7c, - MAX_FRAME_SIZE = 0x86, - RECEIVE_MODE = 0x88, - HASHTABLE_0 = 0x8c, - HASHTABLE_1 = 0x90, - RMON_STATISTICS_MASK = 0x98, - STATISTICS_MASK = 0x9c, - RX_JUMBO_FRAMES = 0xbc, /* Unused */ - TCP_CHECKSUM_ERRORS = 0xc0, /* Unused */ - IP_CHECKSUM_ERRORS = 0xc2, /* Unused */ - UDP_CHECKSUM_ERRORS = 0xc4, /* Unused */ - TX_JUMBO_FRAMES = 0xf4 /* Unused */ -}; - -/* Ethernet MIB statistic register offsets. */ -#define IPG_OCTETRCVOK 0xA8 -#define IPG_MCSTOCTETRCVDOK 0xAC -#define IPG_BCSTOCTETRCVOK 0xB0 -#define IPG_FRAMESRCVDOK 0xB4 -#define IPG_MCSTFRAMESRCVDOK 0xB8 -#define IPG_BCSTFRAMESRCVDOK 0xBE -#define IPG_MACCONTROLFRAMESRCVD 0xC6 -#define IPG_FRAMETOOLONGERRORS 0xC8 -#define IPG_INRANGELENGTHERRORS 0xCA -#define IPG_FRAMECHECKSEQERRORS 0xCC -#define IPG_FRAMESLOSTRXERRORS 0xCE -#define IPG_OCTETXMTOK 0xD0 -#define IPG_MCSTOCTETXMTOK 0xD4 -#define IPG_BCSTOCTETXMTOK 0xD8 -#define IPG_FRAMESXMTDOK 0xDC -#define IPG_MCSTFRAMESXMTDOK 0xE0 -#define IPG_FRAMESWDEFERREDXMT 0xE4 -#define IPG_LATECOLLISIONS 0xE8 -#define IPG_MULTICOLFRAMES 0xEC -#define IPG_SINGLECOLFRAMES 0xF0 -#define IPG_BCSTFRAMESXMTDOK 0xF6 -#define IPG_CARRIERSENSEERRORS 0xF8 -#define IPG_MACCONTROLFRAMESXMTDOK 0xFA -#define IPG_FRAMESABORTXSCOLLS 0xFC -#define IPG_FRAMESWEXDEFERRAL 0xFE - -/* RMON statistic register offsets. 
*/ -#define IPG_ETHERSTATSCOLLISIONS 0x100 -#define IPG_ETHERSTATSOCTETSTRANSMIT 0x104 -#define IPG_ETHERSTATSPKTSTRANSMIT 0x108 -#define IPG_ETHERSTATSPKTS64OCTESTSTRANSMIT 0x10C -#define IPG_ETHERSTATSPKTS65TO127OCTESTSTRANSMIT 0x110 -#define IPG_ETHERSTATSPKTS128TO255OCTESTSTRANSMIT 0x114 -#define IPG_ETHERSTATSPKTS256TO511OCTESTSTRANSMIT 0x118 -#define IPG_ETHERSTATSPKTS512TO1023OCTESTSTRANSMIT 0x11C -#define IPG_ETHERSTATSPKTS1024TO1518OCTESTSTRANSMIT 0x120 -#define IPG_ETHERSTATSCRCALIGNERRORS 0x124 -#define IPG_ETHERSTATSUNDERSIZEPKTS 0x128 -#define IPG_ETHERSTATSFRAGMENTS 0x12C -#define IPG_ETHERSTATSJABBERS 0x130 -#define IPG_ETHERSTATSOCTETS 0x134 -#define IPG_ETHERSTATSPKTS 0x138 -#define IPG_ETHERSTATSPKTS64OCTESTS 0x13C -#define IPG_ETHERSTATSPKTS65TO127OCTESTS 0x140 -#define IPG_ETHERSTATSPKTS128TO255OCTESTS 0x144 -#define IPG_ETHERSTATSPKTS256TO511OCTESTS 0x148 -#define IPG_ETHERSTATSPKTS512TO1023OCTESTS 0x14C -#define IPG_ETHERSTATSPKTS1024TO1518OCTESTS 0x150 - -/* RMON statistic register equivalents. */ -#define IPG_ETHERSTATSMULTICASTPKTSTRANSMIT 0xE0 -#define IPG_ETHERSTATSBROADCASTPKTSTRANSMIT 0xF6 -#define IPG_ETHERSTATSMULTICASTPKTS 0xB8 -#define IPG_ETHERSTATSBROADCASTPKTS 0xBE -#define IPG_ETHERSTATSOVERSIZEPKTS 0xC8 -#define IPG_ETHERSTATSDROPEVENTS 0xCE - -/* Serial EEPROM offsets */ -#define IPG_EEPROM_CONFIGPARAM 0x00 -#define IPG_EEPROM_ASICCTRL 0x01 -#define IPG_EEPROM_SUBSYSTEMVENDORID 0x02 -#define IPG_EEPROM_SUBSYSTEMID 0x03 -#define IPG_EEPROM_STATIONADDRESS0 0x10 -#define IPG_EEPROM_STATIONADDRESS1 0x11 -#define IPG_EEPROM_STATIONADDRESS2 0x12 - -/* Register & data structure bit masks */ - -/* PCI register masks. */ - -/* IOBaseAddress */ -#define IPG_PIB_RSVD_MASK 0xFFFFFE01 -#define IPG_PIB_IOBASEADDRESS 0xFFFFFF00 -#define IPG_PIB_IOBASEADDRIND 0x00000001 - -/* MemBaseAddress */ -#define IPG_PMB_RSVD_MASK 0xFFFFFE07 -#define IPG_PMB_MEMBASEADDRIND 0x00000001 -#define IPG_PMB_MEMMAPTYPE 0x00000006 -#define IPG_PMB_MEMMAPTYPE0 0x00000002 -#define IPG_PMB_MEMMAPTYPE1 0x00000004 -#define IPG_PMB_MEMBASEADDRESS 0xFFFFFE00 - -/* ConfigStatus */ -#define IPG_CS_RSVD_MASK 0xFFB0 -#define IPG_CS_CAPABILITIES 0x0010 -#define IPG_CS_66MHZCAPABLE 0x0020 -#define IPG_CS_FASTBACK2BACK 0x0080 -#define IPG_CS_DATAPARITYREPORTED 0x0100 -#define IPG_CS_DEVSELTIMING 0x0600 -#define IPG_CS_SIGNALEDTARGETABORT 0x0800 -#define IPG_CS_RECEIVEDTARGETABORT 0x1000 -#define IPG_CS_RECEIVEDMASTERABORT 0x2000 -#define IPG_CS_SIGNALEDSYSTEMERROR 0x4000 -#define IPG_CS_DETECTEDPARITYERROR 0x8000 - -/* TFD data structure masks. 
*/ - -/* TFDList, TFC */ -#define IPG_TFC_RSVD_MASK 0x0000FFFF9FFFFFFFULL -#define IPG_TFC_FRAMEID 0x000000000000FFFFULL -#define IPG_TFC_WORDALIGN 0x0000000000030000ULL -#define IPG_TFC_WORDALIGNTODWORD 0x0000000000000000ULL -#define IPG_TFC_WORDALIGNTOWORD 0x0000000000020000ULL -#define IPG_TFC_WORDALIGNDISABLED 0x0000000000030000ULL -#define IPG_TFC_TCPCHECKSUMENABLE 0x0000000000040000ULL -#define IPG_TFC_UDPCHECKSUMENABLE 0x0000000000080000ULL -#define IPG_TFC_IPCHECKSUMENABLE 0x0000000000100000ULL -#define IPG_TFC_FCSAPPENDDISABLE 0x0000000000200000ULL -#define IPG_TFC_TXINDICATE 0x0000000000400000ULL -#define IPG_TFC_TXDMAINDICATE 0x0000000000800000ULL -#define IPG_TFC_FRAGCOUNT 0x000000000F000000ULL -#define IPG_TFC_VLANTAGINSERT 0x0000000010000000ULL -#define IPG_TFC_TFDDONE 0x0000000080000000ULL -#define IPG_TFC_VID 0x00000FFF00000000ULL -#define IPG_TFC_CFI 0x0000100000000000ULL -#define IPG_TFC_USERPRIORITY 0x0000E00000000000ULL - -/* TFDList, FragInfo */ -#define IPG_TFI_RSVD_MASK 0xFFFF00FFFFFFFFFFULL -#define IPG_TFI_FRAGADDR 0x000000FFFFFFFFFFULL -#define IPG_TFI_FRAGLEN 0xFFFF000000000000ULL - -/* RFD data structure masks. */ - -/* RFDList, RFS */ -#define IPG_RFS_RSVD_MASK 0x0000FFFFFFFFFFFFULL -#define IPG_RFS_RXFRAMELEN 0x000000000000FFFFULL -#define IPG_RFS_RXFIFOOVERRUN 0x0000000000010000ULL -#define IPG_RFS_RXRUNTFRAME 0x0000000000020000ULL -#define IPG_RFS_RXALIGNMENTERROR 0x0000000000040000ULL -#define IPG_RFS_RXFCSERROR 0x0000000000080000ULL -#define IPG_RFS_RXOVERSIZEDFRAME 0x0000000000100000ULL -#define IPG_RFS_RXLENGTHERROR 0x0000000000200000ULL -#define IPG_RFS_VLANDETECTED 0x0000000000400000ULL -#define IPG_RFS_TCPDETECTED 0x0000000000800000ULL -#define IPG_RFS_TCPERROR 0x0000000001000000ULL -#define IPG_RFS_UDPDETECTED 0x0000000002000000ULL -#define IPG_RFS_UDPERROR 0x0000000004000000ULL -#define IPG_RFS_IPDETECTED 0x0000000008000000ULL -#define IPG_RFS_IPERROR 0x0000000010000000ULL -#define IPG_RFS_FRAMESTART 0x0000000020000000ULL -#define IPG_RFS_FRAMEEND 0x0000000040000000ULL -#define IPG_RFS_RFDDONE 0x0000000080000000ULL -#define IPG_RFS_TCI 0x0000FFFF00000000ULL - -/* RFDList, FragInfo */ -#define IPG_RFI_RSVD_MASK 0xFFFF00FFFFFFFFFFULL -#define IPG_RFI_FRAGADDR 0x000000FFFFFFFFFFULL -#define IPG_RFI_FRAGLEN 0xFFFF000000000000ULL - -/* I/O Register masks. 
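The TFC and RFS masks above each describe a single 64-bit descriptor word, so a driver builds and parses them with plain mask-and-shift arithmetic. A sketch under that assumption (the shift of 24 is the least significant bit of IPG_TFC_FRAGCOUNT; helper names are illustrative):

#include <stdint.h>

/* Build a minimal TFC word: one fragment, request a TxComplete
 * indication, frame id in the low 16 bits (IPG_TFC_FRAMEID). */
static uint64_t ipg_make_tfc(uint16_t frame_id)
{
	return (uint64_t)frame_id |
	       ((uint64_t)1 << 24) |     /* fragment count = 1 */
	       IPG_TFC_TXINDICATE;
}

/* Check a completed RFS word and extract the frame length. */
static int ipg_rfs_ok(uint64_t rfs, uint16_t *len)
{
	if (!(rfs & IPG_RFS_RFDDONE))
		return 0;                /* descriptor not done yet */
	*len = rfs & IPG_RFS_RXFRAMELEN;
	return !(rfs & (IPG_RFS_RXFCSERROR | IPG_RFS_RXLENGTHERROR));
}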
*/ - -/* RMON Statistics Mask */ -#define IPG_RZ_ALL 0x0FFFFFFF - -/* Statistics Mask */ -#define IPG_SM_ALL 0x0FFFFFFF -#define IPG_SM_OCTETRCVOK_FRAMESRCVDOK 0x00000001 -#define IPG_SM_MCSTOCTETRCVDOK_MCSTFRAMESRCVDOK 0x00000002 -#define IPG_SM_BCSTOCTETRCVDOK_BCSTFRAMESRCVDOK 0x00000004 -#define IPG_SM_RXJUMBOFRAMES 0x00000008 -#define IPG_SM_TCPCHECKSUMERRORS 0x00000010 -#define IPG_SM_IPCHECKSUMERRORS 0x00000020 -#define IPG_SM_UDPCHECKSUMERRORS 0x00000040 -#define IPG_SM_MACCONTROLFRAMESRCVD 0x00000080 -#define IPG_SM_FRAMESTOOLONGERRORS 0x00000100 -#define IPG_SM_INRANGELENGTHERRORS 0x00000200 -#define IPG_SM_FRAMECHECKSEQERRORS 0x00000400 -#define IPG_SM_FRAMESLOSTRXERRORS 0x00000800 -#define IPG_SM_OCTETXMTOK_FRAMESXMTOK 0x00001000 -#define IPG_SM_MCSTOCTETXMTOK_MCSTFRAMESXMTDOK 0x00002000 -#define IPG_SM_BCSTOCTETXMTOK_BCSTFRAMESXMTDOK 0x00004000 -#define IPG_SM_FRAMESWDEFERREDXMT 0x00008000 -#define IPG_SM_LATECOLLISIONS 0x00010000 -#define IPG_SM_MULTICOLFRAMES 0x00020000 -#define IPG_SM_SINGLECOLFRAMES 0x00040000 -#define IPG_SM_TXJUMBOFRAMES 0x00080000 -#define IPG_SM_CARRIERSENSEERRORS 0x00100000 -#define IPG_SM_MACCONTROLFRAMESXMTD 0x00200000 -#define IPG_SM_FRAMESABORTXSCOLLS 0x00400000 -#define IPG_SM_FRAMESWEXDEFERAL 0x00800000 - -/* Countdown */ -#define IPG_CD_RSVD_MASK 0x0700FFFF -#define IPG_CD_COUNT 0x0000FFFF -#define IPG_CD_COUNTDOWNSPEED 0x01000000 -#define IPG_CD_COUNTDOWNMODE 0x02000000 -#define IPG_CD_COUNTINTENABLED 0x04000000 - -/* TxDMABurstThresh */ -#define IPG_TB_RSVD_MASK 0xFF - -/* TxDMAUrgentThresh */ -#define IPG_TU_RSVD_MASK 0xFF - -/* TxDMAPollPeriod */ -#define IPG_TP_RSVD_MASK 0xFF - -/* RxDMAUrgentThresh */ -#define IPG_RU_RSVD_MASK 0xFF - -/* RxDMAPollPeriod */ -#define IPG_RP_RSVD_MASK 0xFF - -/* ReceiveMode */ -#define IPG_RM_RSVD_MASK 0x3F -#define IPG_RM_RECEIVEUNICAST 0x01 -#define IPG_RM_RECEIVEMULTICAST 0x02 -#define IPG_RM_RECEIVEBROADCAST 0x04 -#define IPG_RM_RECEIVEALLFRAMES 0x08 -#define IPG_RM_RECEIVEMULTICASTHASH 0x10 -#define IPG_RM_RECEIVEIPMULTICAST 0x20 - -/* PhySet */ -#define IPG_PS_MEM_LENB9B 0x01 -#define IPG_PS_MEM_LEN9 0x02 -#define IPG_PS_NON_COMPDET 0x04 - -/* PhyCtrl */ -#define IPG_PC_RSVD_MASK 0xFF -#define IPG_PC_MGMTCLK_LO 0x00 -#define IPG_PC_MGMTCLK_HI 0x01 -#define IPG_PC_MGMTCLK 0x01 -#define IPG_PC_MGMTDATA 0x02 -#define IPG_PC_MGMTDIR 0x04 -#define IPG_PC_DUPLEX_POLARITY 0x08 -#define IPG_PC_DUPLEX_STATUS 0x10 -#define IPG_PC_LINK_POLARITY 0x20 -#define IPG_PC_LINK_SPEED 0xC0 -#define IPG_PC_LINK_SPEED_10MBPS 0x40 -#define IPG_PC_LINK_SPEED_100MBPS 0x80 -#define IPG_PC_LINK_SPEED_1000MBPS 0xC0 - -/* DMACtrl */ -#define IPG_DC_RSVD_MASK 0xC07D9818 -#define IPG_DC_RX_DMA_COMPLETE 0x00000008 -#define IPG_DC_RX_DMA_POLL_NOW 0x00000010 -#define IPG_DC_TX_DMA_COMPLETE 0x00000800 -#define IPG_DC_TX_DMA_POLL_NOW 0x00001000 -#define IPG_DC_TX_DMA_IN_PROG 0x00008000 -#define IPG_DC_RX_EARLY_DISABLE 0x00010000 -#define IPG_DC_MWI_DISABLE 0x00040000 -#define IPG_DC_TX_WRITE_BACK_DISABLE 0x00080000 -#define IPG_DC_TX_BURST_LIMIT 0x00700000 -#define IPG_DC_TARGET_ABORT 0x40000000 -#define IPG_DC_MASTER_ABORT 0x80000000 - -/* ASICCtrl */ -#define IPG_AC_RSVD_MASK 0x07FFEFF2 -#define IPG_AC_EXP_ROM_SIZE 0x00000002 -#define IPG_AC_PHY_SPEED10 0x00000010 -#define IPG_AC_PHY_SPEED100 0x00000020 -#define IPG_AC_PHY_SPEED1000 0x00000040 -#define IPG_AC_PHY_MEDIA 0x00000080 -#define IPG_AC_FORCED_CFG 0x00000700 -#define IPG_AC_D3RESETDISABLE 0x00000800 -#define IPG_AC_SPEED_UP_MODE 0x00002000 -#define IPG_AC_LED_MODE 0x00004000 
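The IPG_RM_* bits above compose the usual rx-filter lattice; a sketch of the selection logic a set_rx_mode handler would apply, with the input flags as illustrative stand-ins for netdev state:

#include <stdbool.h>
#include <stdint.h>

static uint8_t ipg_receive_mode(bool promisc, bool all_multi, int mc_count)
{
	uint8_t mode = IPG_RM_RECEIVEUNICAST | IPG_RM_RECEIVEBROADCAST;

	if (promisc)
		return mode | IPG_RM_RECEIVEALLFRAMES;
	if (all_multi)
		mode |= IPG_RM_RECEIVEMULTICAST;
	else if (mc_count > 0)
		mode |= IPG_RM_RECEIVEMULTICASTHASH; /* via HASHTABLE_0/1 */
	return mode;
}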
-#define IPG_AC_RST_OUT_POLARITY 0x00008000 -#define IPG_AC_GLOBAL_RESET 0x00010000 -#define IPG_AC_RX_RESET 0x00020000 -#define IPG_AC_TX_RESET 0x00040000 -#define IPG_AC_DMA 0x00080000 -#define IPG_AC_FIFO 0x00100000 -#define IPG_AC_NETWORK 0x00200000 -#define IPG_AC_HOST 0x00400000 -#define IPG_AC_AUTO_INIT 0x00800000 -#define IPG_AC_RST_OUT 0x01000000 -#define IPG_AC_INT_REQUEST 0x02000000 -#define IPG_AC_RESET_BUSY 0x04000000 -#define IPG_AC_LED_SPEED 0x08000000 -#define IPG_AC_LED_MODE_BIT_1 0x20000000 - -/* EepromCtrl */ -#define IPG_EC_RSVD_MASK 0x83FF -#define IPG_EC_EEPROM_ADDR 0x00FF -#define IPG_EC_EEPROM_OPCODE 0x0300 -#define IPG_EC_EEPROM_SUBCOMMAD 0x0000 -#define IPG_EC_EEPROM_WRITEOPCODE 0x0100 -#define IPG_EC_EEPROM_READOPCODE 0x0200 -#define IPG_EC_EEPROM_ERASEOPCODE 0x0300 -#define IPG_EC_EEPROM_BUSY 0x8000 - -/* FIFOCtrl */ -#define IPG_FC_RSVD_MASK 0xC001 -#define IPG_FC_RAM_TEST_MODE 0x0001 -#define IPG_FC_TRANSMITTING 0x4000 -#define IPG_FC_RECEIVING 0x8000 - -/* TxStatus */ -#define IPG_TS_RSVD_MASK 0xFFFF00DD -#define IPG_TS_TX_ERROR 0x00000001 -#define IPG_TS_LATE_COLLISION 0x00000004 -#define IPG_TS_TX_MAX_COLL 0x00000008 -#define IPG_TS_TX_UNDERRUN 0x00000010 -#define IPG_TS_TX_IND_REQD 0x00000040 -#define IPG_TS_TX_COMPLETE 0x00000080 -#define IPG_TS_TX_FRAMEID 0xFFFF0000 - -/* WakeEvent */ -#define IPG_WE_WAKE_PKT_ENABLE 0x01 -#define IPG_WE_MAGIC_PKT_ENABLE 0x02 -#define IPG_WE_LINK_EVT_ENABLE 0x04 -#define IPG_WE_WAKE_POLARITY 0x08 -#define IPG_WE_WAKE_PKT_EVT 0x10 -#define IPG_WE_MAGIC_PKT_EVT 0x20 -#define IPG_WE_LINK_EVT 0x40 -#define IPG_WE_WOL_ENABLE 0x80 - -/* IntEnable */ -#define IPG_IE_RSVD_MASK 0x1FFE -#define IPG_IE_HOST_ERROR 0x0002 -#define IPG_IE_TX_COMPLETE 0x0004 -#define IPG_IE_MAC_CTRL_FRAME 0x0008 -#define IPG_IE_RX_COMPLETE 0x0010 -#define IPG_IE_RX_EARLY 0x0020 -#define IPG_IE_INT_REQUESTED 0x0040 -#define IPG_IE_UPDATE_STATS 0x0080 -#define IPG_IE_LINK_EVENT 0x0100 -#define IPG_IE_TX_DMA_COMPLETE 0x0200 -#define IPG_IE_RX_DMA_COMPLETE 0x0400 -#define IPG_IE_RFD_LIST_END 0x0800 -#define IPG_IE_RX_DMA_PRIORITY 0x1000 - -/* IntStatus */ -#define IPG_IS_RSVD_MASK 0x1FFF -#define IPG_IS_INTERRUPT_STATUS 0x0001 -#define IPG_IS_HOST_ERROR 0x0002 -#define IPG_IS_TX_COMPLETE 0x0004 -#define IPG_IS_MAC_CTRL_FRAME 0x0008 -#define IPG_IS_RX_COMPLETE 0x0010 -#define IPG_IS_RX_EARLY 0x0020 -#define IPG_IS_INT_REQUESTED 0x0040 -#define IPG_IS_UPDATE_STATS 0x0080 -#define IPG_IS_LINK_EVENT 0x0100 -#define IPG_IS_TX_DMA_COMPLETE 0x0200 -#define IPG_IS_RX_DMA_COMPLETE 0x0400 -#define IPG_IS_RFD_LIST_END 0x0800 -#define IPG_IS_RX_DMA_PRIORITY 0x1000 - -/* MACCtrl */ -#define IPG_MC_RSVD_MASK 0x7FE33FA3 -#define IPG_MC_IFS_SELECT 0x00000003 -#define IPG_MC_IFS_4352BIT 0x00000003 -#define IPG_MC_IFS_1792BIT 0x00000002 -#define IPG_MC_IFS_1024BIT 0x00000001 -#define IPG_MC_IFS_96BIT 0x00000000 -#define IPG_MC_DUPLEX_SELECT 0x00000020 -#define IPG_MC_DUPLEX_SELECT_FD 0x00000020 -#define IPG_MC_DUPLEX_SELECT_HD 0x00000000 -#define IPG_MC_TX_FLOW_CONTROL_ENABLE 0x00000080 -#define IPG_MC_RX_FLOW_CONTROL_ENABLE 0x00000100 -#define IPG_MC_RCV_FCS 0x00000200 -#define IPG_MC_FIFO_LOOPBACK 0x00000400 -#define IPG_MC_MAC_LOOPBACK 0x00000800 -#define IPG_MC_AUTO_VLAN_TAGGING 0x00001000 -#define IPG_MC_AUTO_VLAN_UNTAGGING 0x00002000 -#define IPG_MC_COLLISION_DETECT 0x00010000 -#define IPG_MC_CARRIER_SENSE 0x00020000 -#define IPG_MC_STATISTICS_ENABLE 0x00200000 -#define IPG_MC_STATISTICS_DISABLE 0x00400000 -#define IPG_MC_STATISTICS_ENABLED 0x00800000 -#define 
IPG_MC_TX_ENABLE 0x01000000 -#define IPG_MC_TX_DISABLE 0x02000000 -#define IPG_MC_TX_ENABLED 0x04000000 -#define IPG_MC_RX_ENABLE 0x08000000 -#define IPG_MC_RX_DISABLE 0x10000000 -#define IPG_MC_RX_ENABLED 0x20000000 -#define IPG_MC_PAUSED 0x40000000 - -/* - * Tune - */ - -/* Assign IPG_APPEND_FCS_ON_TX > 0 for auto FCS append on TX. */ -#define IPG_APPEND_FCS_ON_TX 1 - -/* Assign IPG_APPEND_FCS_ON_TX > 0 for auto FCS strip on RX. */ -#define IPG_STRIP_FCS_ON_RX 1 - -/* Assign IPG_DROP_ON_RX_ETH_ERRORS > 0 to drop RX frames with - * Ethernet errors. - */ -#define IPG_DROP_ON_RX_ETH_ERRORS 1 - -/* Assign IPG_INSERT_MANUAL_VLAN_TAG > 0 to insert VLAN tags manually - * (via TFC). - */ -#define IPG_INSERT_MANUAL_VLAN_TAG 0 - -/* Assign IPG_ADD_IPCHECKSUM_ON_TX > 0 for auto IP checksum on TX. */ -#define IPG_ADD_IPCHECKSUM_ON_TX 0 - -/* Assign IPG_ADD_TCPCHECKSUM_ON_TX > 0 for auto TCP checksum on TX. - * DO NOT USE FOR SILICON REVISIONS B3 AND EARLIER. - */ -#define IPG_ADD_TCPCHECKSUM_ON_TX 0 - -/* Assign IPG_ADD_UDPCHECKSUM_ON_TX > 0 for auto UDP checksum on TX. - * DO NOT USE FOR SILICON REVISIONS B3 AND EARLIER. - */ -#define IPG_ADD_UDPCHECKSUM_ON_TX 0 - -/* If inserting VLAN tags manually, assign the IPG_MANUAL_VLAN_xx - * constants as desired. - */ -#define IPG_MANUAL_VLAN_VID 0xABC -#define IPG_MANUAL_VLAN_CFI 0x1 -#define IPG_MANUAL_VLAN_USERPRIORITY 0x5 - -#define IPG_IO_REG_RANGE 0xFF -#define IPG_MEM_REG_RANGE 0x154 -#define IPG_DRIVER_NAME "Sundance Technology IPG Triple-Speed Ethernet" -#define IPG_NIC_PHY_ADDRESS 0x01 -#define IPG_DMALIST_ALIGN_PAD 0x07 -#define IPG_MULTICAST_HASHTABLE_SIZE 0x40 - -/* Number of milliseconds to wait after issuing a software reset. - * 0x05 <= IPG_AC_RESETWAIT to account for proper 10Mbps operation. - */ -#define IPG_AC_RESETWAIT 0x05 - -/* Number of IPG_AC_RESETWAIT timeperiods before declaring timeout. */ -#define IPG_AC_RESET_TIMEOUT 0x0A - -/* Minimum number of nanoseconds used to toggle MDC clock during - * MII/GMII register access. - */ -#define IPG_PC_PHYCTRLWAIT_NS 200 - -#define IPG_TFDLIST_LENGTH 0x100 - -/* Number of frames between TxDMAComplete interrupt. - * 0 < IPG_FRAMESBETWEENTXDMACOMPLETES <= IPG_TFDLIST_LENGTH - */ -#define IPG_FRAMESBETWEENTXDMACOMPLETES 0x1 - -#define IPG_RFDLIST_LENGTH 0x100 - -/* Maximum number of RFDs to process per interrupt. - * 1 < IPG_MAXRFDPROCESS_COUNT < IPG_RFDLIST_LENGTH - */ -#define IPG_MAXRFDPROCESS_COUNT 0x80 - -/* Minimum margin between last freed RFD, and current RFD. - * 1 < IPG_MINUSEDRFDSTOFREE < IPG_RFDLIST_LENGTH - */ -#define IPG_MINUSEDRFDSTOFREE 0x80 - -/* specify the jumbo frame maximum size - * per unit is 0x600 (the rx_buffer size that one RFD can carry) - */ -#define MAX_JUMBOSIZE 0x8 /* max is 12K */ - -/* Key register values loaded at driver start up. */ - -/* TXDMAPollPeriod is specified in 320ns increments. - * - * Value Time - * --------------------- - * 0x00-0x01 320ns - * 0x03 ~1us - * 0x1F ~10us - * 0xFF ~82us - */ -#define IPG_TXDMAPOLLPERIOD_VALUE 0x26 - -/* TxDMAUrgentThresh specifies the minimum amount of - * data in the transmit FIFO before asserting an - * urgent transmit DMA request. 
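Worth noting in the MACCtrl block just above: as the naming suggests, IPG_MC_x_ENABLE and _DISABLE read like write-side commands while IPG_MC_x_ENABLED reflects latched state, which leads to an enable-then-poll idiom. A sketch of that shape; read_reg32/write_reg32 are hypothetical MMIO accessors and the retry bound is arbitrary:

#include <stdint.h>

extern uint32_t read_reg32(unsigned int off);          /* hypothetical */
extern void write_reg32(unsigned int off, uint32_t v); /* hypothetical */

static int ipg_mac_tx_enable(void)
{
	int tries = 1000;

	write_reg32(MAC_CTRL, read_reg32(MAC_CTRL) | IPG_MC_TX_ENABLE);
	while (tries-- > 0) {
		if (read_reg32(MAC_CTRL) & IPG_MC_TX_ENABLED)
			return 0;       /* state latched */
	}
	return -1;                      /* never came up */
}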
- * - * Value Min TxFIFO occupied space before urgent TX request - * --------------------------------------------------------------- - * 0x00-0x04 128 bytes (1024 bits) - * 0x27 1248 bytes (~10000 bits) - * 0x30 1536 bytes (12288 bits) - * 0xFF 8192 bytes (65535 bits) - */ -#define IPG_TXDMAURGENTTHRESH_VALUE 0x04 - -/* TxDMABurstThresh specifies the minimum amount of - * free space in the transmit FIFO before asserting an - * transmit DMA request. - * - * Value Min TxFIFO free space before TX request - * ---------------------------------------------------- - * 0x00-0x08 256 bytes - * 0x30 1536 bytes - * 0xFF 8192 bytes - */ -#define IPG_TXDMABURSTTHRESH_VALUE 0x30 - -/* RXDMAPollPeriod is specified in 320ns increments. - * - * Value Time - * --------------------- - * 0x00-0x01 320ns - * 0x03 ~1us - * 0x1F ~10us - * 0xFF ~82us - */ -#define IPG_RXDMAPOLLPERIOD_VALUE 0x01 - -/* RxDMAUrgentThresh specifies the minimum amount of - * free space within the receive FIFO before asserting - * a urgent receive DMA request. - * - * Value Min RxFIFO free space before urgent RX request - * --------------------------------------------------------------- - * 0x00-0x04 128 bytes (1024 bits) - * 0x27 1248 bytes (~10000 bits) - * 0x30 1536 bytes (12288 bits) - * 0xFF 8192 bytes (65535 bits) - */ -#define IPG_RXDMAURGENTTHRESH_VALUE 0x30 - -/* RxDMABurstThresh specifies the minimum amount of - * occupied space within the receive FIFO before asserting - * a receive DMA request. - * - * Value Min TxFIFO free space before TX request - * ---------------------------------------------------- - * 0x00-0x08 256 bytes - * 0x30 1536 bytes - * 0xFF 8192 bytes - */ -#define IPG_RXDMABURSTTHRESH_VALUE 0x30 - -/* FlowOnThresh specifies the maximum amount of occupied - * space in the receive FIFO before a PAUSE frame with - * maximum pause time transmitted. - * - * Value Max RxFIFO occupied space before PAUSE - * --------------------------------------------------- - * 0x0000 0 bytes - * 0x0740 29,696 bytes - * 0x07FF 32,752 bytes - */ -#define IPG_FLOWONTHRESH_VALUE 0x0740 - -/* FlowOffThresh specifies the minimum amount of occupied - * space in the receive FIFO before a PAUSE frame with - * zero pause time is transmitted. - * - * Value Max RxFIFO occupied space before PAUSE - * --------------------------------------------------- - * 0x0000 0 bytes - * 0x00BF 3056 bytes - * 0x07FF 32,752 bytes - */ -#define IPG_FLOWOFFTHRESH_VALUE 0x00BF - -/* - * Miscellaneous macros. - */ - -/* Macros for printing debug statements. */ -#ifdef IPG_DEBUG -# define IPG_DEBUG_MSG(fmt, args...) \ -do { \ - if (0) \ - printk(KERN_DEBUG "IPG: " fmt, ##args); \ -} while (0) -# define IPG_DDEBUG_MSG(fmt, args...) \ - printk(KERN_DEBUG "IPG: " fmt, ##args) -# define IPG_DUMPRFDLIST(args) ipg_dump_rfdlist(args) -# define IPG_DUMPTFDLIST(args) ipg_dump_tfdlist(args) -#else -# define IPG_DEBUG_MSG(fmt, args...) \ -do { \ - if (0) \ - printk(KERN_DEBUG "IPG: " fmt, ##args); \ -} while (0) -# define IPG_DDEBUG_MSG(fmt, args...) \ -do { \ - if (0) \ - printk(KERN_DEBUG "IPG: " fmt, ##args); \ -} while (0) -# define IPG_DUMPRFDLIST(args) -# define IPG_DUMPTFDLIST(args) -#endif - -/* - * End miscellaneous macros. - */ - -/* Transmit Frame Descriptor. The IPG supports 15 fragments, - * however Linux requires only a single fragment. Note, each - * TFD field is 64 bits wide. - */ -struct ipg_tx { - __le64 next_desc; - __le64 tfc; - __le64 frag_info; -}; - -/* Receive Frame Descriptor. Note, each RFD field is 64 bits wide. 
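The 320 ns poll-period granularity in the tables above makes the chosen defaults easy to sanity-check with one macro (illustrative, not from the driver):

/* Each TX/RX DMA poll-period unit is 320 ns. */
#define IPG_POLL_PERIOD_NS(val)	((val) * 320)

/* IPG_TXDMAPOLLPERIOD_VALUE 0x26 = 38 units -> ~12.2 us between polls;
 * the table's 0xFF = 255 units -> 81.6 us, matching its "~82us" row. */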
- */ -struct ipg_rx { - __le64 next_desc; - __le64 rfs; - __le64 frag_info; -}; - -struct ipg_jumbo { - int found_start; - int current_size; - struct sk_buff *skb; -}; - -/* Structure of IPG NIC specific data. */ -struct ipg_nic_private { - void __iomem *ioaddr; - struct ipg_tx *txd; - struct ipg_rx *rxd; - dma_addr_t txd_map; - dma_addr_t rxd_map; - struct sk_buff *tx_buff[IPG_TFDLIST_LENGTH]; - struct sk_buff *rx_buff[IPG_RFDLIST_LENGTH]; - unsigned int tx_current; - unsigned int tx_dirty; - unsigned int rx_current; - unsigned int rx_dirty; - bool is_jumbo; - struct ipg_jumbo jumbo; - unsigned long rxfrag_size; - unsigned long rxsupport_size; - unsigned long max_rxframe_size; - unsigned int rx_buf_sz; - struct pci_dev *pdev; - struct net_device *dev; - struct net_device_stats stats; - spinlock_t lock; - int tenmbpsmode; - - u16 led_mode; - u16 station_addr[3]; /* Station Address in EEPROM Reg 0x10..0x12 */ - - struct mutex mii_mutex; - struct mii_if_info mii_if; - int reset_current_tfd; -#ifdef IPG_DEBUG - int RFDlistendCount; - int RFDListCheckedCount; - int EmptyRFDListCount; -#endif - struct delayed_work task; -}; - -#endif /* __LINUX_IPG_H */ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index 639263d5e833..7781e80896a6 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -627,8 +627,10 @@ static netdev_tx_t fm10k_xmit_frame(struct sk_buff *skb, struct net_device *dev) /* verify the skb head is not shared */ err = skb_cow_head(skb, 0); - if (err) + if (err) { + dev_kfree_skb(skb); return NETDEV_TX_OK; + } /* locate vlan header */ vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN); diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index e84c7f2634d3..ed622fa29dfa 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -36,7 +36,7 @@ /* Registers */ #define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2)) -#define MVNETA_RXQ_HW_BUF_ALLOC BIT(1) +#define MVNETA_RXQ_HW_BUF_ALLOC BIT(0) #define MVNETA_RXQ_PKT_OFFSET_ALL_MASK (0xf << 8) #define MVNETA_RXQ_PKT_OFFSET_MASK(offs) ((offs) << 8) #define MVNETA_RXQ_THRESHOLD_REG(q) (0x14c0 + ((q) << 2)) @@ -62,6 +62,7 @@ #define MVNETA_WIN_SIZE(w) (0x2204 + ((w) << 3)) #define MVNETA_WIN_REMAP(w) (0x2280 + ((w) << 2)) #define MVNETA_BASE_ADDR_ENABLE 0x2290 +#define MVNETA_ACCESS_PROTECT_ENABLE 0x2294 #define MVNETA_PORT_CONFIG 0x2400 #define MVNETA_UNI_PROMISC_MODE BIT(0) #define MVNETA_DEF_RXQ(q) ((q) << 1) @@ -159,7 +160,7 @@ #define MVNETA_INTR_ENABLE 0x25b8 #define MVNETA_TXQ_INTR_ENABLE_ALL_MASK 0x0000ff00 -#define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0xff000000 // note: neta says it's 0x000000FF +#define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0x000000ff #define MVNETA_RXQ_CMD 0x2680 #define MVNETA_RXQ_DISABLE_SHIFT 8 @@ -242,6 +243,7 @@ #define MVNETA_VLAN_TAG_LEN 4 #define MVNETA_CPU_D_CACHE_LINE_SIZE 32 +#define MVNETA_TX_CSUM_DEF_SIZE 1600 #define MVNETA_TX_CSUM_MAX_SIZE 9800 #define MVNETA_ACC_MODE_EXT 1 @@ -1579,12 +1581,16 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo, } skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size); - if (!skb) - goto err_drop_frame; + /* After refill old buffer has to be unmapped regardless + * the skb is successfully built or not. 
+ */ dma_unmap_single(dev->dev.parent, phys_addr, MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE); + if (!skb) + goto err_drop_frame; + rcvd_pkts++; rcvd_bytes += rx_bytes; @@ -3191,6 +3197,7 @@ static void mvneta_conf_mbus_windows(struct mvneta_port *pp, } mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable); + mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect); } /* Power up the port */ @@ -3250,6 +3257,7 @@ static int mvneta_probe(struct platform_device *pdev) char hw_mac_addr[ETH_ALEN]; const char *mac_from; const char *managed; + int tx_csum_limit; int phy_mode; int err; int cpu; @@ -3350,8 +3358,21 @@ static int mvneta_probe(struct platform_device *pdev) } } - if (of_device_is_compatible(dn, "marvell,armada-370-neta")) - pp->tx_csum_limit = 1600; + if (!of_property_read_u32(dn, "tx-csum-limit", &tx_csum_limit)) { + if (tx_csum_limit < 0 || + tx_csum_limit > MVNETA_TX_CSUM_MAX_SIZE) { + tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE; + dev_info(&pdev->dev, + "Wrong TX csum limit in DT, set to %dB\n", + MVNETA_TX_CSUM_DEF_SIZE); + } + } else if (of_device_is_compatible(dn, "marvell,armada-370-neta")) { + tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE; + } else { + tx_csum_limit = MVNETA_TX_CSUM_MAX_SIZE; + } + + pp->tx_csum_limit = tx_csum_limit; pp->tx_ring_size = MVNETA_MAX_TXD; pp->rx_ring_size = MVNETA_MAX_RXD; diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 85f1b1e7e505..31c491e02e69 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -892,9 +892,10 @@ static int mlx4_slave_cap(struct mlx4_dev *dev) dev->caps.qp1_proxy[i - 1] = func_cap.qp1_proxy_qpn; dev->caps.port_mask[i] = dev->caps.port_type[i]; dev->caps.phys_port_id[i] = func_cap.phys_port_id; - if (mlx4_get_slave_pkey_gid_tbl_len(dev, i, - &dev->caps.gid_table_len[i], - &dev->caps.pkey_table_len[i])) + err = mlx4_get_slave_pkey_gid_tbl_len(dev, i, + &dev->caps.gid_table_len[i], + &dev->caps.pkey_table_len[i]); + if (err) goto err_mem; } @@ -906,6 +907,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev) dev->caps.uar_page_size * dev->caps.num_uars, (unsigned long long) pci_resource_len(dev->persist->pdev, 2)); + err = -ENOMEM; goto err_mem; } diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 9813d34f3e5b..6fec3e993d02 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -4952,26 +4952,41 @@ static void rem_slave_counters(struct mlx4_dev *dev, int slave) struct res_counter *counter; struct res_counter *tmp; int err; - int index; + int *counters_arr = NULL; + int i, j; err = move_all_busy(dev, slave, RES_COUNTER); if (err) mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n", slave); - spin_lock_irq(mlx4_tlock(dev)); - list_for_each_entry_safe(counter, tmp, counter_list, com.list) { - if (counter->com.owner == slave) { - index = counter->com.res_id; - rb_erase(&counter->com.node, - &tracker->res_tree[RES_COUNTER]); - list_del(&counter->com.list); - kfree(counter); - __mlx4_counter_free(dev, index); + counters_arr = kmalloc_array(dev->caps.max_counters, + sizeof(*counters_arr), GFP_KERNEL); + if (!counters_arr) + return; + + do { + i = 0; + j = 0; + spin_lock_irq(mlx4_tlock(dev)); + list_for_each_entry_safe(counter, tmp, counter_list, com.list) { + if (counter->com.owner == slave) { + counters_arr[i++] = counter->com.res_id; + 
rb_erase(&counter->com.node, + &tracker->res_tree[RES_COUNTER]); + list_del(&counter->com.list); + kfree(counter); + } + } + spin_unlock_irq(mlx4_tlock(dev)); + + while (j < i) { + __mlx4_counter_free(dev, counters_arr[j++]); mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0); } - } - spin_unlock_irq(mlx4_tlock(dev)); + } while (i); + + kfree(counters_arr); } static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index f2ae62dd8c09..22e72bf1ae48 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -334,9 +334,15 @@ struct mlx5e_tx_skb_cb { #define MLX5E_TX_SKB_CB(__skb) ((struct mlx5e_tx_skb_cb *)__skb->cb) +enum mlx5e_dma_map_type { + MLX5E_DMA_MAP_SINGLE, + MLX5E_DMA_MAP_PAGE +}; + struct mlx5e_sq_dma { - dma_addr_t addr; - u32 size; + dma_addr_t addr; + u32 size; + enum mlx5e_dma_map_type type; }; enum { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 5fc4d2d78cdf..1e52db32c73d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -1332,6 +1332,42 @@ static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt) return err; } +static int mlx5e_refresh_tir_self_loopback_enable(struct mlx5_core_dev *mdev, + u32 tirn) +{ + void *in; + int inlen; + int err; + + inlen = MLX5_ST_SZ_BYTES(modify_tir_in); + in = mlx5_vzalloc(inlen); + if (!in) + return -ENOMEM; + + MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1); + + err = mlx5_core_modify_tir(mdev, tirn, in, inlen); + + kvfree(in); + + return err; +} + +static int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5e_priv *priv) +{ + int err; + int i; + + for (i = 0; i < MLX5E_NUM_TT; i++) { + err = mlx5e_refresh_tir_self_loopback_enable(priv->mdev, + priv->tirn[i]); + if (err) + return err; + } + + return 0; +} + static int mlx5e_set_dev_port_mtu(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); @@ -1376,6 +1412,13 @@ int mlx5e_open_locked(struct net_device *netdev) goto err_clear_state_opened_flag; } + err = mlx5e_refresh_tirs_self_loopback_enable(priv); + if (err) { + netdev_err(netdev, "%s: mlx5e_refresh_tirs_self_loopback_enable failed, %d\n", + __func__, err); + goto err_close_channels; + } + mlx5e_update_carrier(priv); mlx5e_redirect_rqts(priv); @@ -1383,6 +1426,8 @@ int mlx5e_open_locked(struct net_device *netdev) return 0; +err_close_channels: + mlx5e_close_channels(priv); err_clear_state_opened_flag: clear_bit(MLX5E_STATE_OPENED, &priv->state); return err; @@ -1856,6 +1901,8 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu) mlx5_query_port_max_mtu(mdev, &max_mtu, 1); + max_mtu = MLX5E_HW2SW_MTU(max_mtu); + if (new_mtu > max_mtu) { netdev_err(netdev, "%s: Bad MTU (%d) > (%d) Max\n", @@ -1909,6 +1956,9 @@ static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev) "Not creating net device, some required device capabilities are missing\n"); return -ENOTSUPP; } + if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable)) + mlx5_core_warn(mdev, "Self loop back prevention is not supported\n"); + return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index cd8f85a251d7..1341b1d3c421 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -61,39 +61,47 @@ 
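The rem_slave_counters rework completed above is the standard two-phase teardown: resource ids are harvested and entries unlinked under the spinlock, then the release call, which may sleep, runs only after unlock. A self-contained sketch of that shape using a plain mutex and singly linked list; all names are illustrative, not mlx4's:

#include <pthread.h>
#include <stdlib.h>

struct ent { int id; int owner; struct ent *next; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct ent *head;

extern void release_id(int id);   /* may block; must not hold 'lock' */

/* Caller guarantees at most 'max' entries belong to 'owner'. */
static void remove_owner(int owner, int max)
{
	int *ids = malloc(max * sizeof(*ids));
	struct ent **pp = &head, *e;
	int i = 0, j = 0;

	if (!ids)
		return;
	pthread_mutex_lock(&lock);
	while ((e = *pp) != NULL) {
		if (e->owner == owner) {
			ids[i++] = e->id;  /* phase 1: record + unlink */
			*pp = e->next;
			free(e);
		} else {
			pp = &e->next;
		}
	}
	pthread_mutex_unlock(&lock);
	while (j < i)
		release_id(ids[j++]);      /* phase 2: outside the lock */
	free(ids);
}

The mlx4 version additionally loops until the list stays empty, since new entries can appear between passes; the sketch omits that.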
void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw) } } -static void mlx5e_dma_pop_last_pushed(struct mlx5e_sq *sq, dma_addr_t *addr, - u32 *size) +static inline void mlx5e_tx_dma_unmap(struct device *pdev, + struct mlx5e_sq_dma *dma) { - sq->dma_fifo_pc--; - *addr = sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr; - *size = sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size; -} - -static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, struct sk_buff *skb) -{ - dma_addr_t addr; - u32 size; - int i; - - for (i = 0; i < MLX5E_TX_SKB_CB(skb)->num_dma; i++) { - mlx5e_dma_pop_last_pushed(sq, &addr, &size); - dma_unmap_single(sq->pdev, addr, size, DMA_TO_DEVICE); + switch (dma->type) { + case MLX5E_DMA_MAP_SINGLE: + dma_unmap_single(pdev, dma->addr, dma->size, DMA_TO_DEVICE); + break; + case MLX5E_DMA_MAP_PAGE: + dma_unmap_page(pdev, dma->addr, dma->size, DMA_TO_DEVICE); + break; + default: + WARN_ONCE(true, "mlx5e_tx_dma_unmap unknown DMA type!\n"); } } -static inline void mlx5e_dma_push(struct mlx5e_sq *sq, dma_addr_t addr, - u32 size) +static inline void mlx5e_dma_push(struct mlx5e_sq *sq, + dma_addr_t addr, + u32 size, + enum mlx5e_dma_map_type map_type) { sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr = addr; sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size = size; + sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].type = map_type; sq->dma_fifo_pc++; } -static inline void mlx5e_dma_get(struct mlx5e_sq *sq, u32 i, dma_addr_t *addr, - u32 *size) +static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_sq *sq, u32 i) { - *addr = sq->dma_fifo[i & sq->dma_fifo_mask].addr; - *size = sq->dma_fifo[i & sq->dma_fifo_mask].size; + return &sq->dma_fifo[i & sq->dma_fifo_mask]; +} + +static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, struct sk_buff *skb) +{ + int i; + + for (i = 0; i < MLX5E_TX_SKB_CB(skb)->num_dma; i++) { + struct mlx5e_sq_dma *last_pushed_dma = + mlx5e_dma_get(sq, --sq->dma_fifo_pc); + + mlx5e_tx_dma_unmap(sq->pdev, last_pushed_dma); + } } u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, @@ -118,8 +126,15 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq, */ #define MLX5E_MIN_INLINE ETH_HLEN - if (bf && (skb_headlen(skb) <= sq->max_inline)) - return skb_headlen(skb); + if (bf) { + u16 ihs = skb_headlen(skb); + + if (skb_vlan_tag_present(skb)) + ihs += VLAN_HLEN; + + if (ihs <= sq->max_inline) + return skb_headlen(skb); + } return MLX5E_MIN_INLINE; } @@ -218,7 +233,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) dseg->lkey = sq->mkey_be; dseg->byte_count = cpu_to_be32(headlen); - mlx5e_dma_push(sq, dma_addr, headlen); + mlx5e_dma_push(sq, dma_addr, headlen, MLX5E_DMA_MAP_SINGLE); MLX5E_TX_SKB_CB(skb)->num_dma++; dseg++; @@ -237,7 +252,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) dseg->lkey = sq->mkey_be; dseg->byte_count = cpu_to_be32(fsz); - mlx5e_dma_push(sq, dma_addr, fsz); + mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE); MLX5E_TX_SKB_CB(skb)->num_dma++; dseg++; @@ -353,13 +368,10 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq) } for (j = 0; j < MLX5E_TX_SKB_CB(skb)->num_dma; j++) { - dma_addr_t addr; - u32 size; + struct mlx5e_sq_dma *dma = + mlx5e_dma_get(sq, dma_fifo_cc++); - mlx5e_dma_get(sq, dma_fifo_cc, &addr, &size); - dma_fifo_cc++; - dma_unmap_single(sq->pdev, addr, size, - DMA_TO_DEVICE); + mlx5e_tx_dma_unmap(sq->pdev, dma); } npkts++; diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c 
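The en_tx.c rework whose hunks end above exists because dma_map_single() and dma_map_page() mappings must be released by their matching unmap calls; recording the mapping type next to each address lets the completion path dispatch correctly. A stripped-down sketch of the pattern, with stand-ins for the DMA API:

#include <stdint.h>

enum map_type { MAP_SINGLE, MAP_PAGE };

struct dma_rec {
	uintptr_t addr;
	uint32_t size;
	enum map_type type;     /* remembered at map time */
};

extern void unmap_single(uintptr_t addr, uint32_t size); /* stand-in */
extern void unmap_page(uintptr_t addr, uint32_t size);   /* stand-in */

static void dma_rec_unmap(const struct dma_rec *r)
{
	switch (r->type) {      /* dispatch on how it was mapped */
	case MAP_SINGLE:
		unmap_single(r->addr, r->size);
		break;
	case MAP_PAGE:
		unmap_page(r->addr, r->size);
		break;
	}
}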
index b159ef8303cc..057665180f13 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -1326,7 +1326,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev) /* Get platform resources */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = platform_get_irq(pdev, 0); - if ((!res) || (irq < 0) || (irq >= NR_IRQS)) { + if (!res || irq < 0) { dev_err(&pdev->dev, "error getting resources.\n"); ret = -ENXIO; goto err_exit; diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index b4f21232019a..79ef799f88ab 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -7429,15 +7429,15 @@ process_pkt: rtl8169_rx_vlan_tag(desc, skb); + if (skb->pkt_type == PACKET_MULTICAST) + dev->stats.multicast++; + napi_gro_receive(&tp->napi, skb); u64_stats_update_begin(&tp->rx_stats.syncp); tp->rx_stats.packets++; tp->rx_stats.bytes += pkt_size; u64_stats_update_end(&tp->rx_stats.syncp); - - if (skb->pkt_type == PACKET_MULTICAST) - dev->stats.multicast++; } release_descriptor: desc->opts2 = 0; diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index aa7b2083cb53..ed5da4d47668 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -408,8 +408,6 @@ static int ravb_dmac_init(struct net_device *ndev) /* Interrupt enable: */ /* Frame receive */ ravb_write(ndev, RIC0_FRE0 | RIC0_FRE1, RIC0); - /* Receive FIFO full warning */ - ravb_write(ndev, RIC1_RFWE, RIC1); /* Receive FIFO full error, descriptor empty */ ravb_write(ndev, RIC2_QFE0 | RIC2_QFE1 | RIC2_RFFE, RIC2); /* Frame transmitted, timestamp FIFO updated */ @@ -733,8 +731,10 @@ static irqreturn_t ravb_interrupt(int irq, void *dev_id) ((tis & tic) & BIT(q))) { if (napi_schedule_prep(&priv->napi[q])) { /* Mask RX and TX interrupts */ - ravb_write(ndev, ric0 & ~BIT(q), RIC0); - ravb_write(ndev, tic & ~BIT(q), TIC); + ric0 &= ~BIT(q); + tic &= ~BIT(q); + ravb_write(ndev, ric0, RIC0); + ravb_write(ndev, tic, TIC); __napi_schedule(&priv->napi[q]); } else { netdev_warn(ndev, @@ -1225,7 +1225,7 @@ static int ravb_open(struct net_device *ndev) /* Device init */ error = ravb_dmac_init(ndev); if (error) - goto out_free_irq; + goto out_free_irq2; ravb_emac_init(ndev); /* Initialise PTP Clock driver */ @@ -1243,9 +1243,11 @@ static int ravb_open(struct net_device *ndev) out_ptp_stop: /* Stop PTP Clock driver */ ravb_ptp_stop(ndev); +out_free_irq2: + if (priv->chip_id == RCAR_GEN3) + free_irq(priv->emac_irq, ndev); out_free_irq: free_irq(ndev->irq, ndev); - free_irq(priv->emac_irq, ndev); out_napi_off: napi_disable(&priv->napi[RAVB_NC]); napi_disable(&priv->napi[RAVB_BE]); diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index d288f1c928de..a3c42a376741 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -3422,7 +3422,7 @@ out: * with our request for slot reset the mmio_enabled callback will never be * called, and the link_reset callback is not used by AER or EEH mechanisms. 
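One theme in this stretch: the r8169 hunk above moves multicast accounting ahead of napi_gro_receive(), because once the skb is handed to GRO it may be merged or freed, and touching skb->pkt_type afterwards is a use-after-free. The safe ordering, sketched generically (names are illustrative):

extern void hand_to_stack(void *skb);   /* may consume or free skb */

static void rx_one(void *skb, int is_multicast, unsigned long *mcast)
{
	if (is_multicast)
		(*mcast)++;      /* read stats inputs first... */
	hand_to_stack(skb);      /* ...then give up ownership; no skb
				  * dereference is legal past here */
}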
*/ -static struct pci_error_handlers efx_err_handlers = { +static const struct pci_error_handlers efx_err_handlers = { .error_detected = efx_io_error_detected, .slot_reset = efx_io_slot_reset, .resume = efx_io_resume, diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index c860c9007e49..219a99b7a631 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -809,22 +809,17 @@ static int smsc911x_phy_check_loopbackpkt(struct smsc911x_data *pdata) static int smsc911x_phy_reset(struct smsc911x_data *pdata) { - struct phy_device *phy_dev = pdata->phy_dev; unsigned int temp; unsigned int i = 100000; - BUG_ON(!phy_dev); - BUG_ON(!phy_dev->bus); - - SMSC_TRACE(pdata, hw, "Performing PHY BCR Reset"); - smsc911x_mii_write(phy_dev->bus, phy_dev->addr, MII_BMCR, BMCR_RESET); + temp = smsc911x_reg_read(pdata, PMT_CTRL); + smsc911x_reg_write(pdata, PMT_CTRL, temp | PMT_CTRL_PHY_RST_); do { msleep(1); - temp = smsc911x_mii_read(phy_dev->bus, phy_dev->addr, - MII_BMCR); - } while ((i--) && (temp & BMCR_RESET)); + temp = smsc911x_reg_read(pdata, PMT_CTRL); + } while ((i--) && (temp & PMT_CTRL_PHY_RST_)); - if (temp & BMCR_RESET) { + if (unlikely(temp & PMT_CTRL_PHY_RST_)) { SMSC_WARN(pdata, hw, "PHY reset failed to complete"); return -EIO; } @@ -2296,7 +2291,7 @@ static int smsc911x_init(struct net_device *dev) } /* Reset the LAN911x */ - if (smsc911x_soft_reset(pdata)) + if (smsc911x_phy_reset(pdata) || smsc911x_soft_reset(pdata)) return -ENODEV; dev->flags |= IFF_MULTICAST; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c index 9d89bdbf029f..82de68b1a452 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c @@ -337,11 +337,11 @@ static int ipq806x_gmac_probe(struct platform_device *pdev) QSGMII_PHY_RX_SIGNAL_DETECT_EN | QSGMII_PHY_TX_DRIVER_EN | QSGMII_PHY_QSGMII_EN | - 0x4 << QSGMII_PHY_PHASE_LOOP_GAIN_OFFSET | - 0x3 << QSGMII_PHY_RX_DC_BIAS_OFFSET | - 0x1 << QSGMII_PHY_RX_INPUT_EQU_OFFSET | - 0x2 << QSGMII_PHY_CDR_PI_SLEW_OFFSET | - 0xC << QSGMII_PHY_TX_DRV_AMP_OFFSET); + 0x4ul << QSGMII_PHY_PHASE_LOOP_GAIN_OFFSET | + 0x3ul << QSGMII_PHY_RX_DC_BIAS_OFFSET | + 0x1ul << QSGMII_PHY_RX_INPUT_EQU_OFFSET | + 0x2ul << QSGMII_PHY_CDR_PI_SLEW_OFFSET | + 0xCul << QSGMII_PHY_TX_DRV_AMP_OFFSET); } plat_dat->has_gmac = true; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c index 7f6f4a4fcc70..58c05acc2aab 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c @@ -299,16 +299,17 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac, if (IS_PHY_IF_MODE_GBIT(dwmac->interface)) { const char *rs; + dwmac->tx_retime_src = TX_RETIME_SRC_CLKGEN; + err = of_property_read_string(np, "st,tx-retime-src", &rs); if (err < 0) { dev_warn(dev, "Use internal clock source\n"); - dwmac->tx_retime_src = TX_RETIME_SRC_CLKGEN; - } else if (!strcasecmp(rs, "clk_125")) { - dwmac->tx_retime_src = TX_RETIME_SRC_CLK_125; - } else if (!strcasecmp(rs, "txclk")) { - dwmac->tx_retime_src = TX_RETIME_SRC_TXCLK; + } else { + if (!strcasecmp(rs, "clk_125")) + dwmac->tx_retime_src = TX_RETIME_SRC_CLK_125; + else if (!strcasecmp(rs, "txclk")) + dwmac->tx_retime_src = TX_RETIME_SRC_TXCLK; } - dwmac->speed = SPEED_1000; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 
b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 64d8aa4e0cad..3c6549aee11d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -185,7 +185,7 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv) priv->clk_csr = STMMAC_CSR_100_150M; else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M)) priv->clk_csr = STMMAC_CSR_150_250M; - else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M)) + else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M)) priv->clk_csr = STMMAC_CSR_250_300M; } } @@ -2232,6 +2232,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit) frame_len = priv->hw->desc->get_rx_frame_len(p, coe); + /* check if frame_len fits the preallocated memory */ + if (frame_len > priv->dma_buf_sz) { + priv->dev->stats.rx_length_errors++; + break; + } + /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 * Type frames (LLC/LLC-SNAP) */ @@ -3102,6 +3108,7 @@ int stmmac_resume(struct net_device *ndev) init_dma_desc_rings(ndev, GFP_ATOMIC); stmmac_hw_setup(ndev, false); stmmac_init_tx_coalesce(priv); + stmmac_set_rx_mode(ndev); napi_enable(&priv->napi); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index ebf6abc4853f..bba670c42e37 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -138,7 +138,6 @@ int stmmac_mdio_reset(struct mii_bus *bus) #ifdef CONFIG_OF if (priv->device->of_node) { - int reset_gpio, active_low; if (data->reset_gpio < 0) { struct device_node *np = priv->device->of_node; @@ -154,24 +153,23 @@ int stmmac_mdio_reset(struct mii_bus *bus) "snps,reset-active-low"); of_property_read_u32_array(np, "snps,reset-delays-us", data->delays, 3); - } - reset_gpio = data->reset_gpio; - active_low = data->active_low; + if (gpio_request(data->reset_gpio, "mdio-reset")) + return 0; + } - if (!gpio_request(reset_gpio, "mdio-reset")) { - gpio_direction_output(reset_gpio, active_low ? 1 : 0); - if (data->delays[0]) - msleep(DIV_ROUND_UP(data->delays[0], 1000)); + gpio_direction_output(data->reset_gpio, + data->active_low ? 1 : 0); + if (data->delays[0]) + msleep(DIV_ROUND_UP(data->delays[0], 1000)); - gpio_set_value(reset_gpio, active_low ? 0 : 1); - if (data->delays[1]) - msleep(DIV_ROUND_UP(data->delays[1], 1000)); + gpio_set_value(data->reset_gpio, data->active_low ? 0 : 1); + if (data->delays[1]) + msleep(DIV_ROUND_UP(data->delays[1], 1000)); - gpio_set_value(reset_gpio, active_low ? 1 : 0); - if (data->delays[2]) - msleep(DIV_ROUND_UP(data->delays[2], 1000)); - } + gpio_set_value(data->reset_gpio, data->active_low ? 
1 : 0); + if (data->delays[2]) + msleep(DIV_ROUND_UP(data->delays[2], 1000)); } #endif diff --git a/drivers/net/ethernet/ti/cpsw-common.c b/drivers/net/ethernet/ti/cpsw-common.c index c08be62bceba..1562ab4151e1 100644 --- a/drivers/net/ethernet/ti/cpsw-common.c +++ b/drivers/net/ethernet/ti/cpsw-common.c @@ -78,6 +78,9 @@ static int cpsw_am33xx_cm_get_macid(struct device *dev, u16 offset, int slave, int ti_cm_get_macid(struct device *dev, int slave, u8 *mac_addr) { + if (of_machine_is_compatible("ti,dm8148")) + return cpsw_am33xx_cm_get_macid(dev, 0x630, slave, mac_addr); + if (of_machine_is_compatible("ti,am33xx")) return cpsw_am33xx_cm_get_macid(dev, 0x630, slave, mac_addr); diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c index ae68afd50a15..f38696ceee74 100644 --- a/drivers/net/ethernet/via/via-velocity.c +++ b/drivers/net/ethernet/via/via-velocity.c @@ -345,13 +345,6 @@ VELOCITY_PARAM(flow_control, "Enable flow control ability"); */ VELOCITY_PARAM(speed_duplex, "Setting the speed and duplex mode"); -#define VAL_PKT_LEN_DEF 0 -/* ValPktLen[] is used for setting the checksum offload ability of NIC. - 0: Receive frame with invalid layer 2 length (Default) - 1: Drop frame with invalid layer 2 length -*/ -VELOCITY_PARAM(ValPktLen, "Receiving or Drop invalid 802.3 frame"); - #define WOL_OPT_DEF 0 #define WOL_OPT_MIN 0 #define WOL_OPT_MAX 7 @@ -494,7 +487,6 @@ static void velocity_get_options(struct velocity_opt *opts, int index, velocity_set_int_opt(&opts->flow_cntl, flow_control[index], FLOW_CNTL_MIN, FLOW_CNTL_MAX, FLOW_CNTL_DEF, "flow_control", devname); velocity_set_bool_opt(&opts->flags, IP_byte_align[index], IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN, "IP_byte_align", devname); - velocity_set_bool_opt(&opts->flags, ValPktLen[index], VAL_PKT_LEN_DEF, VELOCITY_FLAGS_VAL_PKT_LEN, "ValPktLen", devname); velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index], MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF, "Media link mode", devname); velocity_set_int_opt(&opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname); opts->numrx = (opts->numrx & ~3); @@ -2055,8 +2047,9 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx) int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff; struct sk_buff *skb; - if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) { - VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame spans multiple RDs.\n", vptr->netdev->name); + if (unlikely(rd->rdesc0.RSR & (RSR_STP | RSR_EDP | RSR_RL))) { + if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) + VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame spans multiple RDs.\n", vptr->netdev->name); stats->rx_length_errors++; return -EINVAL; } @@ -2069,17 +2062,6 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx) dma_sync_single_for_cpu(vptr->dev, rd_info->skb_dma, vptr->rx.buf_sz, DMA_FROM_DEVICE); - /* - * Drop frame not meeting IEEE 802.3 - */ - - if (vptr->flags & VELOCITY_FLAGS_VAL_PKT_LEN) { - if (rd->rdesc0.RSR & RSR_RL) { - stats->rx_length_errors++; - return -EINVAL; - } - } - velocity_rx_csum(rd, skb); if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) { diff --git a/drivers/net/fjes/fjes_hw.c b/drivers/net/fjes/fjes_hw.c index bb8b5304d851..b103adb8d62e 100644 --- a/drivers/net/fjes/fjes_hw.c +++ b/drivers/net/fjes/fjes_hw.c @@ -599,7 +599,7 @@ int fjes_hw_unregister_buff_addr(struct fjes_hw *hw, int dest_epid) FJES_CMD_REQ_RES_CODE_BUSY) && (timeout > 0)) { msleep(200 + hw->my_epid * 
20); - timeout -= (200 + hw->my_epid * 20); + timeout -= (200 + hw->my_epid * 20); res_buf->unshare_buffer.length = 0; res_buf->unshare_buffer.code = 0; diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c index d50887e3df6d..8c48bb2a94ea 100644 --- a/drivers/net/ipvlan/ipvlan_core.c +++ b/drivers/net/ipvlan/ipvlan_core.c @@ -254,7 +254,7 @@ acct: } } -static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff *skb, +static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff **pskb, bool local) { struct ipvl_dev *ipvlan = addr->master; @@ -262,6 +262,7 @@ static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff *skb, unsigned int len; rx_handler_result_t ret = RX_HANDLER_CONSUMED; bool success = false; + struct sk_buff *skb = *pskb; len = skb->len + ETH_HLEN; if (unlikely(!(dev->flags & IFF_UP))) { @@ -273,6 +274,7 @@ static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff *skb, if (!skb) goto out; + *pskb = skb; skb->dev = dev; skb->pkt_type = PACKET_HOST; @@ -486,7 +488,7 @@ static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev) addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true); if (addr) - return ipvlan_rcv_frame(addr, skb, true); + return ipvlan_rcv_frame(addr, &skb, true); out: skb->dev = ipvlan->phy_dev; @@ -506,7 +508,7 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev) if (lyr3h) { addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true); if (addr) - return ipvlan_rcv_frame(addr, skb, true); + return ipvlan_rcv_frame(addr, &skb, true); } skb = skb_share_check(skb, GFP_ATOMIC); if (!skb) @@ -589,7 +591,7 @@ static rx_handler_result_t ipvlan_handle_mode_l3(struct sk_buff **pskb, addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true); if (addr) - ret = ipvlan_rcv_frame(addr, skb, false); + ret = ipvlan_rcv_frame(addr, pskb, false); out: return ret; @@ -626,7 +628,7 @@ static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb, addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true); if (addr) - ret = ipvlan_rcv_frame(addr, skb, false); + ret = ipvlan_rcv_frame(addr, pskb, false); } return ret; @@ -651,5 +653,5 @@ rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb) WARN_ONCE(true, "ipvlan_handle_frame() called for mode = [%hx]\n", port->mode); kfree_skb(skb); - return NET_RX_DROP; + return RX_HANDLER_CONSUMED; } diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 86f6c6292c27..06c8bfeaccd6 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -415,6 +415,7 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb) skb = ip_check_defrag(dev_net(skb->dev), skb, IP_DEFRAG_MACVLAN); if (!skb) return RX_HANDLER_CONSUMED; + *pskb = skb; eth = eth_hdr(skb); macvlan_forward_source(skb, port, eth->h_source); src = macvlan_hash_lookup(port, eth->h_source); @@ -456,6 +457,7 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb) goto out; } + *pskb = skb; skb->dev = dev; skb->pkt_type = PACKET_HOST; diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 54036ae0a388..0fc521941c71 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -498,7 +498,7 @@ static void macvtap_sock_write_space(struct sock *sk) wait_queue_head_t *wqueue; if (!sock_writeable(sk) || - !test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags)) + !test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags)) return; wqueue = sk_sleep(sk); @@ -585,7 +585,7 @@ static unsigned int 
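The ipvlan and macvlan changes earlier in this stretch enforce one rule: an rx_handler works on struct sk_buff **pskb, and any helper that may reallocate the skb (skb_share_check(), defragmentation) obliges the handler to write the fresh pointer back through *pskb, or the caller keeps using freed memory. A generic sketch; 'reallocate' is a hypothetical stand-in for such helpers:

extern void *reallocate(void *skb);  /* may free old, return new/NULL */

static int handle_frame(void **pskb)
{
	void *skb = reallocate(*pskb);

	if (!skb)
		return -1;       /* consumed; nothing to hand back */
	*pskb = skb;             /* caller must see the new pointer */
	/* ... continue processing 'skb' ... */
	return 0;
}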
macvtap_poll(struct file *file, poll_table * wait) mask |= POLLIN | POLLRDNORM; if (sock_writeable(&q->sk) || - (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &q->sock.flags) && + (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &q->sock.flags) && sock_writeable(&q->sk))) mask |= POLLOUT | POLLWRNORM; diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c index fabf11d32d27..2d020a3ec0b5 100644 --- a/drivers/net/phy/at803x.c +++ b/drivers/net/phy/at803x.c @@ -308,6 +308,8 @@ static struct phy_driver at803x_driver[] = { .flags = PHY_HAS_INTERRUPT, .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, + .ack_interrupt = at803x_ack_interrupt, + .config_intr = at803x_config_intr, .driver = { .owner = THIS_MODULE, }, @@ -327,6 +329,8 @@ static struct phy_driver at803x_driver[] = { .flags = PHY_HAS_INTERRUPT, .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, + .ack_interrupt = at803x_ack_interrupt, + .config_intr = at803x_config_intr, .driver = { .owner = THIS_MODULE, }, diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c index 07a6119121c3..3ce5d9514623 100644 --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c @@ -614,7 +614,7 @@ static struct mdio_device_id __maybe_unused broadcom_tbl[] = { { PHY_ID_BCM5461, 0xfffffff0 }, { PHY_ID_BCM54616S, 0xfffffff0 }, { PHY_ID_BCM5464, 0xfffffff0 }, - { PHY_ID_BCM5482, 0xfffffff0 }, + { PHY_ID_BCM5481, 0xfffffff0 }, { PHY_ID_BCM5482, 0xfffffff0 }, { PHY_ID_BCM50610, 0xfffffff0 }, { PHY_ID_BCM50610M, 0xfffffff0 }, diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 5de8d5827536..0240552b50f3 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -1154,6 +1154,21 @@ static struct phy_driver marvell_drivers[] = { .driver = { .owner = THIS_MODULE }, }, { + .phy_id = MARVELL_PHY_ID_88E1540, + .phy_id_mask = MARVELL_PHY_ID_MASK, + .name = "Marvell 88E1540", + .features = PHY_GBIT_FEATURES, + .flags = PHY_HAS_INTERRUPT, + .config_aneg = &m88e1510_config_aneg, + .read_status = &marvell_read_status, + .ack_interrupt = &marvell_ack_interrupt, + .config_intr = &marvell_config_intr, + .did_interrupt = &m88e1121_did_interrupt, + .resume = &genphy_resume, + .suspend = &genphy_suspend, + .driver = { .owner = THIS_MODULE }, + }, + { .phy_id = MARVELL_PHY_ID_88E3016, .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E3016", @@ -1186,6 +1201,7 @@ static struct mdio_device_id __maybe_unused marvell_tbl[] = { { MARVELL_PHY_ID_88E1318S, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E1116R, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E1510, MARVELL_PHY_ID_MASK }, + { MARVELL_PHY_ID_88E1540, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E3016, MARVELL_PHY_ID_MASK }, { } }; diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index adb48abafc87..47cd306dbb3c 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -448,7 +448,8 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd) mdiobus_write(phydev->bus, mii_data->phy_id, mii_data->reg_num, val); - if (mii_data->reg_num == MII_BMCR && + if (mii_data->phy_id == phydev->addr && + mii_data->reg_num == MII_BMCR && val & BMCR_RESET) return phy_init_hw(phydev); @@ -863,6 +864,9 @@ void phy_state_machine(struct work_struct *work) needs_aneg = true; break; case PHY_NOLINK: + if (phy_interrupt_is_valid(phydev)) + break; + err = phy_read_status(phydev); if (err) break; diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c index 76cad712ddb2..dd295dbaa074 100644 --- a/drivers/net/phy/vitesse.c 
+++ b/drivers/net/phy/vitesse.c @@ -66,6 +66,7 @@ #define PHY_ID_VSC8244 0x000fc6c0 #define PHY_ID_VSC8514 0x00070670 #define PHY_ID_VSC8574 0x000704a0 +#define PHY_ID_VSC8601 0x00070420 #define PHY_ID_VSC8662 0x00070660 #define PHY_ID_VSC8221 0x000fc550 #define PHY_ID_VSC8211 0x000fc4b0 @@ -133,7 +134,8 @@ static int vsc82xx_config_intr(struct phy_device *phydev) (phydev->drv->phy_id == PHY_ID_VSC8234 || phydev->drv->phy_id == PHY_ID_VSC8244 || phydev->drv->phy_id == PHY_ID_VSC8514 || - phydev->drv->phy_id == PHY_ID_VSC8574) ? + phydev->drv->phy_id == PHY_ID_VSC8574 || + phydev->drv->phy_id == PHY_ID_VSC8601) ? MII_VSC8244_IMASK_MASK : MII_VSC8221_IMASK_MASK); else { @@ -272,6 +274,18 @@ static struct phy_driver vsc82xx_driver[] = { .config_intr = &vsc82xx_config_intr, .driver = { .owner = THIS_MODULE,}, }, { + .phy_id = PHY_ID_VSC8601, + .name = "Vitesse VSC8601", + .phy_id_mask = 0x000ffff0, + .features = PHY_GBIT_FEATURES, + .flags = PHY_HAS_INTERRUPT, + .config_init = &genphy_config_init, + .config_aneg = &genphy_config_aneg, + .read_status = &genphy_read_status, + .ack_interrupt = &vsc824x_ack_interrupt, + .config_intr = &vsc82xx_config_intr, + .driver = { .owner = THIS_MODULE,}, +}, { .phy_id = PHY_ID_VSC8662, .name = "Vitesse VSC8662", .phy_id_mask = 0x000ffff0, diff --git a/drivers/net/tun.c b/drivers/net/tun.c index b1878faea397..f0db770e8b2f 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1040,7 +1040,7 @@ static unsigned int tun_chr_poll(struct file *file, poll_table *wait) mask |= POLLIN | POLLRDNORM; if (sock_writeable(sk) || - (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags) && + (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) && sock_writeable(sk))) mask |= POLLOUT | POLLWRNORM; @@ -1488,7 +1488,7 @@ static void tun_sock_write_space(struct sock *sk) if (!sock_writeable(sk)) return; - if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags)) + if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags)) return; wqueue = sk_sleep(sk); diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index c78d3cb1b464..3da70bf9936a 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -696,6 +696,11 @@ static const struct usb_device_id products[] = { USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), .driver_info = (kernel_ulong_t) &wwan_info, }, { + /* Dell DW5580 modules */ + USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, 0x81ba, USB_CLASS_COMM, + USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), + .driver_info = (kernel_ulong_t)&wwan_info, +}, { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), .driver_info = (unsigned long) &cdc_info, diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index a187f08113ec..3b1ba8237768 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -691,7 +691,6 @@ static void cdc_ncm_free(struct cdc_ncm_ctx *ctx) int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting, int drvflags) { - const struct usb_cdc_union_desc *union_desc = NULL; struct cdc_ncm_ctx *ctx; struct usb_driver *driver; u8 *buf; @@ -725,15 +724,16 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_ /* parse through descriptors associated with control interface */ cdc_parse_cdc_header(&hdr, intf, buf, len); - ctx->data = usb_ifnum_to_if(dev->udev, - hdr.usb_cdc_union_desc->bSlaveInterface0); + if (hdr.usb_cdc_union_desc) + ctx->data = usb_ifnum_to_if(dev->udev, + 
hdr.usb_cdc_union_desc->bSlaveInterface0); ctx->ether_desc = hdr.usb_cdc_ether_desc; ctx->func_desc = hdr.usb_cdc_ncm_desc; ctx->mbim_desc = hdr.usb_cdc_mbim_desc; ctx->mbim_extended_desc = hdr.usb_cdc_mbim_extended_desc; /* some buggy devices have an IAD but no CDC Union */ - if (!union_desc && intf->intf_assoc && intf->intf_assoc->bInterfaceCount == 2) { + if (!hdr.usb_cdc_union_desc && intf->intf_assoc && intf->intf_assoc->bInterfaceCount == 2) { ctx->data = usb_ifnum_to_if(dev->udev, intf->cur_altsetting->desc.bInterfaceNumber + 1); dev_dbg(&intf->dev, "CDC Union missing - got slave from IAD\n"); } diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 34799eaace41..9a5be8b85186 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -725,6 +725,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */ + {QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */ {QMI_FIXED_INTF(0x0b3c, 0xc000, 4)}, /* Olivetti Olicard 100 */ {QMI_FIXED_INTF(0x0b3c, 0xc001, 4)}, /* Olivetti Olicard 120 */ {QMI_FIXED_INTF(0x0b3c, 0xc002, 4)}, /* Olivetti Olicard 140 */ diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 46f4caddccbe..417903715437 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -587,6 +587,12 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx, &adapter->pdev->dev, rbi->skb->data, rbi->len, PCI_DMA_FROMDEVICE); + if (dma_mapping_error(&adapter->pdev->dev, + rbi->dma_addr)) { + dev_kfree_skb_any(rbi->skb); + rq->stats.rx_buf_alloc_failure++; + break; + } } else { /* rx buffer skipped by the device */ } @@ -605,13 +611,18 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx, &adapter->pdev->dev, rbi->page, 0, PAGE_SIZE, PCI_DMA_FROMDEVICE); + if (dma_mapping_error(&adapter->pdev->dev, + rbi->dma_addr)) { + put_page(rbi->page); + rq->stats.rx_buf_alloc_failure++; + break; + } } else { /* rx buffers skipped by the device */ } val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT; } - BUG_ON(rbi->dma_addr == 0); gd->rxd.addr = cpu_to_le64(rbi->dma_addr); gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT) | val | rbi->len); @@ -655,7 +666,7 @@ vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd, } -static void +static int vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx, struct vmxnet3_tx_queue *tq, struct pci_dev *pdev, struct vmxnet3_adapter *adapter) @@ -715,6 +726,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx, tbi->dma_addr = dma_map_single(&adapter->pdev->dev, skb->data + buf_offset, buf_size, PCI_DMA_TODEVICE); + if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr)) + return -EFAULT; tbi->len = buf_size; @@ -755,6 +768,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx, tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag, buf_offset, buf_size, DMA_TO_DEVICE); + if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr)) + return -EFAULT; tbi->len = buf_size; @@ -782,6 +797,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx, /* set the last buf_info for the pkt */ tbi->skb = skb; tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base; + + return 0; } @@ -1020,7 +1037,8 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, } /* fill tx 
descs related to addr & len */ - vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter); + if (vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter)) + goto unlock_drop_pkt; /* setup the EOP desc */ ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP); @@ -1231,6 +1249,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, struct vmxnet3_rx_buf_info *rbi; struct sk_buff *skb, *new_skb = NULL; struct page *new_page = NULL; + dma_addr_t new_dma_addr; int num_to_alloc; struct Vmxnet3_RxDesc *rxd; u32 idx, ring_idx; @@ -1287,6 +1306,21 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, skip_page_frags = true; goto rcd_done; } + new_dma_addr = dma_map_single(&adapter->pdev->dev, + new_skb->data, rbi->len, + PCI_DMA_FROMDEVICE); + if (dma_mapping_error(&adapter->pdev->dev, + new_dma_addr)) { + dev_kfree_skb(new_skb); + /* Skb allocation failed, do not handover this + * skb to stack. Reuse it. Drop the existing pkt + */ + rq->stats.rx_buf_alloc_failure++; + ctx->skb = NULL; + rq->stats.drop_total++; + skip_page_frags = true; + goto rcd_done; + } dma_unmap_single(&adapter->pdev->dev, rbi->dma_addr, rbi->len, @@ -1303,9 +1337,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, /* Immediate refill */ rbi->skb = new_skb; - rbi->dma_addr = dma_map_single(&adapter->pdev->dev, - rbi->skb->data, rbi->len, - PCI_DMA_FROMDEVICE); + rbi->dma_addr = new_dma_addr; rxd->addr = cpu_to_le64(rbi->dma_addr); rxd->len = rbi->len; if (adapter->version == 2 && @@ -1348,6 +1380,19 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, skip_page_frags = true; goto rcd_done; } + new_dma_addr = dma_map_page(&adapter->pdev->dev + , rbi->page, + 0, PAGE_SIZE, + PCI_DMA_FROMDEVICE); + if (dma_mapping_error(&adapter->pdev->dev, + new_dma_addr)) { + put_page(new_page); + rq->stats.rx_buf_alloc_failure++; + dev_kfree_skb(ctx->skb); + ctx->skb = NULL; + skip_page_frags = true; + goto rcd_done; + } dma_unmap_page(&adapter->pdev->dev, rbi->dma_addr, rbi->len, @@ -1357,10 +1402,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, /* Immediate refill */ rbi->page = new_page; - rbi->dma_addr = dma_map_page(&adapter->pdev->dev - , rbi->page, - 0, PAGE_SIZE, - PCI_DMA_FROMDEVICE); + rbi->dma_addr = new_dma_addr; rxd->addr = cpu_to_le64(rbi->dma_addr); rxd->len = rbi->len; } @@ -2157,16 +2199,18 @@ vmxnet3_set_mc(struct net_device *netdev) if (!netdev_mc_empty(netdev)) { new_table = vmxnet3_copy_mc(netdev); if (new_table) { - rxConf->mfTableLen = cpu_to_le16( - netdev_mc_count(netdev) * ETH_ALEN); + size_t sz = netdev_mc_count(netdev) * ETH_ALEN; + + rxConf->mfTableLen = cpu_to_le16(sz); new_table_pa = dma_map_single( &adapter->pdev->dev, new_table, - rxConf->mfTableLen, + sz, PCI_DMA_TODEVICE); } - if (new_table_pa) { + if (!dma_mapping_error(&adapter->pdev->dev, + new_table_pa)) { new_mode |= VMXNET3_RXM_MCAST; rxConf->mfTablePA = cpu_to_le64(new_table_pa); } else { @@ -3074,6 +3118,11 @@ vmxnet3_probe_device(struct pci_dev *pdev, adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter, sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE); + if (dma_mapping_error(&adapter->pdev->dev, adapter->adapter_pa)) { + dev_err(&pdev->dev, "Failed to map dma\n"); + err = -EFAULT; + goto err_dma_map; + } adapter->shared = dma_alloc_coherent( &adapter->pdev->dev, sizeof(struct Vmxnet3_DriverShared), @@ -3232,6 +3281,7 @@ err_alloc_queue_desc: err_alloc_shared: dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa, sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE); +err_dma_map: free_netdev(netdev); 
return err; } diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h index 3f859a55c035..4c58c83dc225 100644 --- a/drivers/net/vmxnet3/vmxnet3_int.h +++ b/drivers/net/vmxnet3/vmxnet3_int.h @@ -69,10 +69,10 @@ /* * Version numbers */ -#define VMXNET3_DRIVER_VERSION_STRING "1.4.3.0-k" +#define VMXNET3_DRIVER_VERSION_STRING "1.4.4.0-k" /* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */ -#define VMXNET3_DRIVER_VERSION_NUM 0x01040300 +#define VMXNET3_DRIVER_VERSION_NUM 0x01040400 #if defined(CONFIG_PCI_MSI) /* RSS only makes sense if MSI-X is supported. */ diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 92fa3e1ea65c..4f9748457f5a 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -907,7 +907,6 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { struct net_vrf *vrf = netdev_priv(dev); - int err; if (!data || !data[IFLA_VRF_TABLE]) return -EINVAL; @@ -916,15 +915,7 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev, dev->priv_flags |= IFF_L3MDEV_MASTER; - err = register_netdevice(dev); - if (err < 0) - goto out_fail; - - return 0; - -out_fail: - free_netdev(dev); - return err; + return register_netdevice(dev); } static size_t vrf_nl_getsize(const struct net_device *dev) diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c index e92aaf615901..89541cc90e87 100644 --- a/drivers/net/wan/hdlc_fr.c +++ b/drivers/net/wan/hdlc_fr.c @@ -1075,11 +1075,10 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type) used = pvc_is_used(pvc); - if (type == ARPHRD_ETHER) { + if (type == ARPHRD_ETHER) dev = alloc_netdev(0, "pvceth%d", NET_NAME_UNKNOWN, ether_setup); - dev->priv_flags &= ~IFF_TX_SKB_SHARING; - } else + else dev = alloc_netdev(0, "pvc%d", NET_NAME_UNKNOWN, pvc_setup); if (!dev) { @@ -1088,9 +1087,10 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type) return -ENOBUFS; } - if (type == ARPHRD_ETHER) + if (type == ARPHRD_ETHER) { + dev->priv_flags &= ~IFF_TX_SKB_SHARING; eth_hw_addr_random(dev); - else { + } else { *(__be16*)dev->dev_addr = htons(dlci); dlci_to_q922(dev->broadcast, dlci); } diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c index 5c47b011a9d7..cd39025d2abf 100644 --- a/drivers/net/wan/x25_asy.c +++ b/drivers/net/wan/x25_asy.c @@ -549,16 +549,12 @@ static void x25_asy_receive_buf(struct tty_struct *tty, static int x25_asy_open_tty(struct tty_struct *tty) { - struct x25_asy *sl = tty->disc_data; + struct x25_asy *sl; int err; if (tty->ops->write == NULL) return -EOPNOTSUPP; - /* First make sure we're not already connected. */ - if (sl && sl->magic == X25_ASY_MAGIC) - return -EEXIST; - /* OK. Find a free X.25 channel to use. 
*/ sl = x25_asy_alloc(); if (sl == NULL) diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index aa9bd92ac4ed..0947cc271e69 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -51,6 +51,7 @@ MODULE_PARM_DESC(rawmode, "Use raw 802.11 frame datapath"); static const struct ath10k_hw_params ath10k_hw_params_list[] = { { .id = QCA988X_HW_2_0_VERSION, + .dev_id = QCA988X_2_0_DEVICE_ID, .name = "qca988x hw2.0", .patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR, .uart_pin = 7, @@ -69,6 +70,25 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { }, { .id = QCA6174_HW_2_1_VERSION, + .dev_id = QCA6164_2_1_DEVICE_ID, + .name = "qca6164 hw2.1", + .patch_load_addr = QCA6174_HW_2_1_PATCH_LOAD_ADDR, + .uart_pin = 6, + .otp_exe_param = 0, + .channel_counters_freq_hz = 88000, + .max_probe_resp_desc_thres = 0, + .fw = { + .dir = QCA6174_HW_2_1_FW_DIR, + .fw = QCA6174_HW_2_1_FW_FILE, + .otp = QCA6174_HW_2_1_OTP_FILE, + .board = QCA6174_HW_2_1_BOARD_DATA_FILE, + .board_size = QCA6174_BOARD_DATA_SZ, + .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ, + }, + }, + { + .id = QCA6174_HW_2_1_VERSION, + .dev_id = QCA6174_2_1_DEVICE_ID, .name = "qca6174 hw2.1", .patch_load_addr = QCA6174_HW_2_1_PATCH_LOAD_ADDR, .uart_pin = 6, @@ -86,6 +106,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { }, { .id = QCA6174_HW_3_0_VERSION, + .dev_id = QCA6174_2_1_DEVICE_ID, .name = "qca6174 hw3.0", .patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR, .uart_pin = 6, @@ -103,6 +124,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { }, { .id = QCA6174_HW_3_2_VERSION, + .dev_id = QCA6174_2_1_DEVICE_ID, .name = "qca6174 hw3.2", .patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR, .uart_pin = 6, @@ -121,6 +143,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { }, { .id = QCA99X0_HW_2_0_DEV_VERSION, + .dev_id = QCA99X0_2_0_DEVICE_ID, .name = "qca99x0 hw2.0", .patch_load_addr = QCA99X0_HW_2_0_PATCH_LOAD_ADDR, .uart_pin = 7, @@ -139,10 +162,31 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { }, { .id = QCA9377_HW_1_0_DEV_VERSION, + .dev_id = QCA9377_1_0_DEVICE_ID, .name = "qca9377 hw1.0", .patch_load_addr = QCA9377_HW_1_0_PATCH_LOAD_ADDR, - .uart_pin = 7, + .uart_pin = 6, .otp_exe_param = 0, + .channel_counters_freq_hz = 88000, + .max_probe_resp_desc_thres = 0, + .fw = { + .dir = QCA9377_HW_1_0_FW_DIR, + .fw = QCA9377_HW_1_0_FW_FILE, + .otp = QCA9377_HW_1_0_OTP_FILE, + .board = QCA9377_HW_1_0_BOARD_DATA_FILE, + .board_size = QCA9377_BOARD_DATA_SZ, + .board_ext_size = QCA9377_BOARD_EXT_DATA_SZ, + }, + }, + { + .id = QCA9377_HW_1_1_DEV_VERSION, + .dev_id = QCA9377_1_0_DEVICE_ID, + .name = "qca9377 hw1.1", + .patch_load_addr = QCA9377_HW_1_0_PATCH_LOAD_ADDR, + .uart_pin = 6, + .otp_exe_param = 0, + .channel_counters_freq_hz = 88000, + .max_probe_resp_desc_thres = 0, .fw = { .dir = QCA9377_HW_1_0_FW_DIR, .fw = QCA9377_HW_1_0_FW_FILE, @@ -1263,7 +1307,8 @@ static int ath10k_init_hw_params(struct ath10k *ar) for (i = 0; i < ARRAY_SIZE(ath10k_hw_params_list); i++) { hw_params = &ath10k_hw_params_list[i]; - if (hw_params->id == ar->target_version) + if (hw_params->id == ar->target_version && + hw_params->dev_id == ar->dev_id) break; } diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 018c64f4fd25..858d75f49a9f 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -636,6 +636,7 @@ struct 
ath10k { struct ath10k_hw_params { u32 id; + u16 dev_id; const char *name; u32 patch_load_addr; int uart_pin; diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index 39966a05c1cc..713c2bcea178 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -22,6 +22,12 @@ #define ATH10K_FW_DIR "ath10k" +#define QCA988X_2_0_DEVICE_ID (0x003c) +#define QCA6164_2_1_DEVICE_ID (0x0041) +#define QCA6174_2_1_DEVICE_ID (0x003e) +#define QCA99X0_2_0_DEVICE_ID (0x0040) +#define QCA9377_1_0_DEVICE_ID (0x0042) + /* QCA988X 1.0 definitions (unsupported) */ #define QCA988X_HW_1_0_CHIP_ID_REV 0x0 @@ -42,6 +48,10 @@ #define QCA6174_HW_3_0_VERSION 0x05020000 #define QCA6174_HW_3_2_VERSION 0x05030000 +/* QCA9377 target BMI version signatures */ +#define QCA9377_HW_1_0_DEV_VERSION 0x05020000 +#define QCA9377_HW_1_1_DEV_VERSION 0x05020001 + enum qca6174_pci_rev { QCA6174_PCI_REV_1_1 = 0x11, QCA6174_PCI_REV_1_3 = 0x13, @@ -60,6 +70,11 @@ enum qca6174_chip_id_rev { QCA6174_HW_3_2_CHIP_ID_REV = 10, }; +enum qca9377_chip_id_rev { + QCA9377_HW_1_0_CHIP_ID_REV = 0x0, + QCA9377_HW_1_1_CHIP_ID_REV = 0x1, +}; + #define QCA6174_HW_2_1_FW_DIR "ath10k/QCA6174/hw2.1" #define QCA6174_HW_2_1_FW_FILE "firmware.bin" #define QCA6174_HW_2_1_OTP_FILE "otp.bin" @@ -85,8 +100,6 @@ enum qca6174_chip_id_rev { #define QCA99X0_HW_2_0_PATCH_LOAD_ADDR 0x1234 /* QCA9377 1.0 definitions */ -#define QCA9377_HW_1_0_DEV_VERSION 0x05020001 -#define QCA9377_HW_1_0_CHIP_ID_REV 0x1 #define QCA9377_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA9377/hw1.0" #define QCA9377_HW_1_0_FW_FILE "firmware.bin" #define QCA9377_HW_1_0_OTP_FILE "otp.bin" diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index a7411fe90cc4..95a55405ebf0 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -4225,7 +4225,7 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed) static u32 get_nss_from_chainmask(u16 chain_mask) { - if ((chain_mask & 0x15) == 0x15) + if ((chain_mask & 0xf) == 0xf) return 4; else if ((chain_mask & 0x7) == 0x7) return 3; diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 3fca200b986c..930785a724e1 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -57,12 +57,6 @@ MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)"); #define ATH10K_PCI_TARGET_WAIT 3000 #define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3 -#define QCA988X_2_0_DEVICE_ID (0x003c) -#define QCA6164_2_1_DEVICE_ID (0x0041) -#define QCA6174_2_1_DEVICE_ID (0x003e) -#define QCA99X0_2_0_DEVICE_ID (0x0040) -#define QCA9377_1_0_DEVICE_ID (0x0042) - static const struct pci_device_id ath10k_pci_id_table[] = { { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */ { PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */ @@ -92,7 +86,9 @@ static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = { { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV }, { QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV }, + { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_0_CHIP_ID_REV }, + { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_1_CHIP_ID_REV }, }; static void ath10k_pci_buffer_cleanup(struct ath10k *ar); @@ -111,8 +107,9 @@ static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state); static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state); static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state); static void 
ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state); +static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state); -static const struct ce_attr host_ce_config_wlan[] = { +static struct ce_attr host_ce_config_wlan[] = { /* CE0: host->target HTC control and raw streams */ { .flags = CE_ATTR_FLAGS, @@ -128,7 +125,7 @@ static const struct ce_attr host_ce_config_wlan[] = { .src_nentries = 0, .src_sz_max = 2048, .dest_nentries = 512, - .recv_cb = ath10k_pci_htc_rx_cb, + .recv_cb = ath10k_pci_htt_htc_rx_cb, }, /* CE2: target->host WMI */ @@ -217,7 +214,7 @@ static const struct ce_attr host_ce_config_wlan[] = { }; /* Target firmware's Copy Engine configuration. */ -static const struct ce_pipe_config target_ce_config_wlan[] = { +static struct ce_pipe_config target_ce_config_wlan[] = { /* CE0: host->target HTC control and raw streams */ { .pipenum = __cpu_to_le32(0), @@ -330,7 +327,7 @@ static const struct ce_pipe_config target_ce_config_wlan[] = { * This table is derived from the CE_PCI TABLE, above. * It is passed to the Target at startup for use by firmware. */ -static const struct service_to_pipe target_service_to_ce_map_wlan[] = { +static struct service_to_pipe target_service_to_ce_map_wlan[] = { { __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO), __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ @@ -1208,6 +1205,16 @@ static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state) ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler); } +static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state) +{ + /* CE4 polling needs to be done whenever the CE pipe which transports + * HTT Rx (target->host) is processed. + */ + ath10k_ce_per_engine_service(ce_state->ar, 4); + + ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler); +} + /* Called by lower (CE) layer when a send to HTT Target completes. */ static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state) { @@ -2027,6 +2034,29 @@ static int ath10k_pci_init_config(struct ath10k *ar) return 0; } +static void ath10k_pci_override_ce_config(struct ath10k *ar) +{ + struct ce_attr *attr; + struct ce_pipe_config *config; + + /* For QCA6174 we're overriding the Copy Engine 5 configuration, + * since it is currently used for another feature. 
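+ *
+ * In sketch form, the overrides below amount to (using the tables
+ * defined at the top of this file, no extra behaviour implied):
+ *
+ *   host_ce_config_wlan[5].src_sz_max = 0;      no RX buffer space
+ *   host_ce_config_wlan[5].dest_nentries = 0;   post no RX buffers
+ *   target_ce_config_wlan[5].pipedir = PIPEDIR_OUT;  host->target only
+ *   target_service_to_ce_map_wlan[15].pipenum = 1;   reroute service
+ *
+ * so on this chip the host never services CE5 as an RX pipe.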
+ */ + + /* Override Host's Copy Engine 5 configuration */ + attr = &host_ce_config_wlan[5]; + attr->src_sz_max = 0; + attr->dest_nentries = 0; + + /* Override Target firmware's Copy Engine configuration */ + config = &target_ce_config_wlan[5]; + config->pipedir = __cpu_to_le32(PIPEDIR_OUT); + config->nbytes_max = __cpu_to_le32(2048); + + /* Map from service/endpoint to Copy Engine */ + target_service_to_ce_map_wlan[15].pipenum = __cpu_to_le32(1); +} + static int ath10k_pci_alloc_pipes(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); @@ -3020,6 +3050,9 @@ static int ath10k_pci_probe(struct pci_dev *pdev, goto err_core_destroy; } + if (QCA_REV_6174(ar)) + ath10k_pci_override_ce_config(ar); + ret = ath10k_pci_alloc_pipes(ar); if (ret) { ath10k_err(ar, "failed to allocate copy engine pipes: %d\n", diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c index 1a73c7a1da77..bf88ec3a65fa 100644 --- a/drivers/net/wireless/iwlwifi/iwl-7000.c +++ b/drivers/net/wireless/iwlwifi/iwl-7000.c @@ -69,7 +69,7 @@ #include "iwl-agn-hw.h" /* Highest firmware API version supported */ -#define IWL7260_UCODE_API_MAX 17 +#define IWL7260_UCODE_API_MAX 19 /* Oldest version we won't warn about */ #define IWL7260_UCODE_API_OK 13 diff --git a/drivers/net/wireless/iwlwifi/iwl-8000.c b/drivers/net/wireless/iwlwifi/iwl-8000.c index 0116e5a4c393..9bcc0bf937d8 100644 --- a/drivers/net/wireless/iwlwifi/iwl-8000.c +++ b/drivers/net/wireless/iwlwifi/iwl-8000.c @@ -69,7 +69,7 @@ #include "iwl-agn-hw.h" /* Highest firmware API version supported */ -#define IWL8000_UCODE_API_MAX 17 +#define IWL8000_UCODE_API_MAX 19 /* Oldest version we won't warn about */ #define IWL8000_UCODE_API_OK 13 diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c index 85ae902df7c0..29ae58ebf223 100644 --- a/drivers/net/wireless/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/iwlwifi/mvm/d3.c @@ -309,9 +309,9 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw, * to transmit packets to the AP, i.e. the PTK. */ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) { - key->hw_key_idx = 0; mvm->ptk_ivlen = key->iv_len; mvm->ptk_icvlen = key->icv_len; + ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 0); } else { /* * firmware only supports TSC/RSC for a single key, @@ -319,12 +319,11 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw, * with new ones -- this relies on mac80211 doing * list_add_tail(). 
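* The net effect is the fixed layout the D3 firmware expects: the
* PTK always sits at hw key offset 0 and the most recently
* programmed GTK at offset 1, which is exactly what the two
* iwl_mvm_set_sta_key() calls here pass as the offset argument.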
*/ - key->hw_key_idx = 1; mvm->gtk_ivlen = key->iv_len; mvm->gtk_icvlen = key->icv_len; + ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 1); } - ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, true); data->error = ret != 0; out_unlock: mutex_unlock(&mvm->mutex); @@ -772,9 +771,6 @@ static int iwl_mvm_switch_to_d3(struct iwl_mvm *mvm) */ set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); - /* We reprogram keys and shouldn't allocate new key indices */ - memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table)); - mvm->ptk_ivlen = 0; mvm->ptk_icvlen = 0; mvm->ptk_ivlen = 0; diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index 1fb684693040..e88afac51c5d 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -2941,6 +2941,7 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; + u8 key_offset; if (iwlwifi_mod_params.sw_crypto) { IWL_DEBUG_MAC80211(mvm, "leave - hwcrypto disabled\n"); @@ -3006,10 +3007,14 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, break; } + /* in HW restart reuse the index, otherwise request a new one */ + if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) + key_offset = key->hw_key_idx; + else + key_offset = STA_KEY_IDX_INVALID; + IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n"); - ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, - test_bit(IWL_MVM_STATUS_IN_HW_RESTART, - &mvm->status)); + ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset); if (ret) { IWL_WARN(mvm, "set key failed\n"); /* diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c index 300a249486e4..354acbde088e 100644 --- a/drivers/net/wireless/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/iwlwifi/mvm/sta.c @@ -1201,7 +1201,8 @@ static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm) return max_offs; } -static u8 iwl_mvm_get_key_sta_id(struct ieee80211_vif *vif, +static u8 iwl_mvm_get_key_sta_id(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); @@ -1218,8 +1219,21 @@ static u8 iwl_mvm_get_key_sta_id(struct ieee80211_vif *vif, * station ID, then use AP's station ID. */ if (vif->type == NL80211_IFTYPE_STATION && - mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) - return mvmvif->ap_sta_id; + mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) { + u8 sta_id = mvmvif->ap_sta_id; + + sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], + lockdep_is_held(&mvm->mutex)); + /* + * It is possible that the 'sta' parameter is NULL, + * for example when a GTK is removed - the sta_id will then + * be the AP ID, and no station was passed by mac80211. 
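+ * Using IS_ERR_OR_NULL() rather than a plain NULL check should also
+ * cover entries left as ERR_PTR values during station teardown, so
+ * either way the caller gets IWL_MVM_STATION_COUNT back and treats
+ * it as "no station".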
+ */ + if (IS_ERR_OR_NULL(sta)) + return IWL_MVM_STATION_COUNT; + + return sta_id; + } return IWL_MVM_STATION_COUNT; } @@ -1227,7 +1241,8 @@ static u8 iwl_mvm_get_key_sta_id(struct ieee80211_vif *vif, static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvm_sta, struct ieee80211_key_conf *keyconf, bool mcast, - u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags) + u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags, + u8 key_offset) { struct iwl_mvm_add_sta_key_cmd cmd = {}; __le16 key_flags; @@ -1269,7 +1284,7 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm, if (mcast) key_flags |= cpu_to_le16(STA_KEY_MULTICAST); - cmd.key_offset = keyconf->hw_key_idx; + cmd.key_offset = key_offset; cmd.key_flags = key_flags; cmd.sta_id = sta_id; @@ -1360,6 +1375,7 @@ static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *keyconf, + u8 key_offset, bool mcast) { struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); @@ -1375,17 +1391,17 @@ static int __iwl_mvm_set_sta_key(struct iwl_mvm, ieee80211_get_key_rx_seq(keyconf, 0, &seq); ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k); ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast, - seq.tkip.iv32, p1k, 0); + seq.tkip.iv32, p1k, 0, key_offset); break; case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast, - 0, NULL, 0); + 0, NULL, 0, key_offset); break; default: ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast, - 0, NULL, 0); + 0, NULL, 0, key_offset); } return ret; @@ -1433,7 +1449,7 @@ static int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *keyconf, - bool have_key_offset) + u8 key_offset) { bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); u8 sta_id; @@ -1443,7 +1459,7 @@ static int iwl_mvm_set_sta_key(struct iwl_mvm, lockdep_assert_held(&mvm->mutex); /* Get the station id from the mvm local station table */ - sta_id = iwl_mvm_get_key_sta_id(vif, sta); + sta_id = iwl_mvm_get_key_sta_id(mvm, vif, sta); if (sta_id == IWL_MVM_STATION_COUNT) { IWL_ERR(mvm, "Failed to find station id\n"); return -EINVAL; @@ -1470,18 +1486,25 @@ int iwl_mvm_set_sta_key(struct iwl_mvm, if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif)) return -EINVAL; - if (!have_key_offset) { - /* - * The D3 firmware hardcodes the PTK offset to 0, so we have to - * configure it there. As a result, this workaround exists to - * let the caller set the key offset (hw_key_idx), see d3.c. - */ - keyconf->hw_key_idx = iwl_mvm_set_fw_key_idx(mvm); - if (keyconf->hw_key_idx == STA_KEY_IDX_INVALID) + /* If the key_offset is not pre-assigned, we need to find a + * new offset to use. In normal cases, the offset is not + * pre-assigned, but during HW_RESTART we want to reuse the + * same indices, so we pass them when this function is called. + * + * In D3 entry, we need to hardcode the indices (because the + * firmware hardcodes the PTK offset to 0). In this case, we + * need to make sure we don't overwrite the hw_key_idx in the + * keyconf structure, because otherwise we cannot configure + * the original ones back when resuming. 
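+ *
+ * In sketch form, the three callers are:
+ *
+ *   mac80211 set_key:  key_offset == STA_KEY_IDX_INVALID, allocate here
+ *   HW restart:        key_offset == key->hw_key_idx, reuse old index
+ *   D3 entry (d3.c):   key_offset == 0 (PTK) or 1 (GTK), fixed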
+ */ + if (key_offset == STA_KEY_IDX_INVALID) { + key_offset = iwl_mvm_set_fw_key_idx(mvm); + if (key_offset == STA_KEY_IDX_INVALID) return -ENOSPC; + keyconf->hw_key_idx = key_offset; } - ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, mcast); + ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast); if (ret) { __clear_bit(keyconf->hw_key_idx, mvm->fw_key_table); goto end; @@ -1495,7 +1518,8 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, */ if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 || keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) { - ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, !mcast); + ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, + key_offset, !mcast); if (ret) { __clear_bit(keyconf->hw_key_idx, mvm->fw_key_table); __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast); @@ -1521,7 +1545,7 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, lockdep_assert_held(&mvm->mutex); /* Get the station id from the mvm local station table */ - sta_id = iwl_mvm_get_key_sta_id(vif, sta); + sta_id = iwl_mvm_get_key_sta_id(mvm, vif, sta); IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n", keyconf->keyidx, sta_id); @@ -1547,24 +1571,6 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, return 0; } - /* - * It is possible that the 'sta' parameter is NULL, and thus - * there is a need to retrieve the sta from the local station table, - * for example when a GTK is removed (where the sta_id will then be - * the AP ID, and no station was passed by mac80211.) - */ - if (!sta) { - sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], - lockdep_is_held(&mvm->mutex)); - if (!sta) { - IWL_ERR(mvm, "Invalid station id\n"); - return -EINVAL; - } - } - - if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif)) - return -EINVAL; - ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast); if (ret) return ret; @@ -1584,7 +1590,7 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm, u16 *phase1key) { struct iwl_mvm_sta *mvm_sta; - u8 sta_id = iwl_mvm_get_key_sta_id(vif, sta); + u8 sta_id = iwl_mvm_get_key_sta_id(mvm, vif, sta); bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); if (WARN_ON_ONCE(sta_id == IWL_MVM_STATION_COUNT)) @@ -1602,7 +1608,7 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm, mvm_sta = iwl_mvm_sta_from_mac80211(sta); iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast, - iv32, phase1key, CMD_ASYNC); + iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx); rcu_read_unlock(); } diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.h b/drivers/net/wireless/iwlwifi/mvm/sta.h index eedb215eba3f..0631cc0a6d3c 100644 --- a/drivers/net/wireless/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/iwlwifi/mvm/sta.h @@ -365,8 +365,8 @@ int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm, int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, - struct ieee80211_key_conf *key, - bool have_key_offset); + struct ieee80211_key_conf *keyconf, + u8 key_offset); int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c index 644b58bc5226..639761fb2bfb 100644 --- a/drivers/net/wireless/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c @@ -423,14 +423,21 @@ static const struct pci_device_id iwl_hw_card_ids[] = { /* 8000 Series */ {IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x1010, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x0130, 
iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x1130, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x0132, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x1132, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0110, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x01F0, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x0012, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x1012, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x1110, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0050, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0250, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x1050, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0150, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x1150, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F4, 0x0030, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F4, 0x1130, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0xC110, iwl8260_2ac_cfg)}, @@ -438,18 +445,28 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x8110, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x9010, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x9110, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F4, 0x8030, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F4, 0x9030, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x8130, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x9130, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x8132, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x9132, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x8050, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x8150, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x9050, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x9150, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0004, iwl8260_2n_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x0044, iwl8260_2n_cfg)}, {IWL_PCI_DEVICE(0x24F5, 0x0010, iwl4165_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F6, 0x0030, iwl4165_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0810, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0910, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0850, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0950, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x0930, iwl8260_2ac_cfg)}, #endif /* CONFIG_IWLMVM */ {0} diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c index 6e9418ed90c2..bbb789f8990b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c @@ -2272,7 +2272,7 @@ void rtl8821ae_enable_interrupt(struct ieee80211_hw *hw) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); - if (!rtlpci->int_clear) + if (rtlpci->int_clear) rtl8821ae_clear_interrupt(hw);/*clear it here first*/ rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c index 8ee141a55bc5..142bdff4ed60 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c @@ -448,7 +448,7 @@ MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n"); MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 1)\n"); MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)"); MODULE_PARM_DESC(disable_watchdog, "Set to 1 to disable the 
watchdog (default 0)\n"); -MODULE_PARM_DESC(int_clear, "Set to 1 to disable interrupt clear before set (default 0)\n"); +MODULE_PARM_DESC(int_clear, "Set to 0 to disable interrupt clear before set (default 1)\n"); static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume); diff --git a/drivers/nvme/host/Makefile b/drivers/nvme/host/Makefile index 219dc206fa5f..a5fe23952586 100644 --- a/drivers/nvme/host/Makefile +++ b/drivers/nvme/host/Makefile @@ -1,4 +1,5 @@ obj-$(CONFIG_BLK_DEV_NVME) += nvme.o -nvme-y += pci.o scsi.o lightnvm.o +lightnvm-$(CONFIG_NVM) := lightnvm.o +nvme-y += pci.o scsi.o $(lightnvm-y) diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c index e0b7b95813bc..06c336410235 100644 --- a/drivers/nvme/host/lightnvm.c +++ b/drivers/nvme/host/lightnvm.c @@ -22,8 +22,6 @@ #include "nvme.h" -#ifdef CONFIG_NVM - #include <linux/nvme.h> #include <linux/bitops.h> #include <linux/lightnvm.h> @@ -93,7 +91,7 @@ struct nvme_nvm_l2ptbl { __le16 cdw14[6]; }; -struct nvme_nvm_bbtbl { +struct nvme_nvm_getbbtbl { __u8 opcode; __u8 flags; __u16 command_id; @@ -101,10 +99,23 @@ struct nvme_nvm_bbtbl { __u64 rsvd[2]; __le64 prp1; __le64 prp2; - __le32 prp1_len; - __le32 prp2_len; - __le32 lbb; - __u32 rsvd11[3]; + __le64 spba; + __u32 rsvd4[4]; +}; + +struct nvme_nvm_setbbtbl { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __le64 rsvd[2]; + __le64 prp1; + __le64 prp2; + __le64 spba; + __le16 nlb; + __u8 value; + __u8 rsvd3; + __u32 rsvd4[3]; }; struct nvme_nvm_erase_blk { @@ -129,8 +140,8 @@ struct nvme_nvm_command { struct nvme_nvm_hb_rw hb_rw; struct nvme_nvm_ph_rw ph_rw; struct nvme_nvm_l2ptbl l2p; - struct nvme_nvm_bbtbl get_bb; - struct nvme_nvm_bbtbl set_bb; + struct nvme_nvm_getbbtbl get_bb; + struct nvme_nvm_setbbtbl set_bb; struct nvme_nvm_erase_blk erase; }; }; @@ -142,11 +153,13 @@ struct nvme_nvm_id_group { __u8 num_ch; __u8 num_lun; __u8 num_pln; + __u8 rsvd1; __le16 num_blk; __le16 num_pg; __le16 fpg_sz; __le16 csecs; __le16 sos; + __le16 rsvd2; __le32 trdt; __le32 trdm; __le32 tprt; @@ -154,8 +167,9 @@ struct nvme_nvm_id_group { __le32 tbet; __le32 tbem; __le32 mpos; + __le32 mccap; __le16 cpar; - __u8 reserved[913]; + __u8 reserved[906]; } __packed; struct nvme_nvm_addr_format { @@ -178,15 +192,28 @@ struct nvme_nvm_id { __u8 ver_id; __u8 vmnt; __u8 cgrps; - __u8 res[5]; + __u8 res; __le32 cap; __le32 dom; struct nvme_nvm_addr_format ppaf; - __u8 ppat; - __u8 resv[223]; + __u8 resv[228]; struct nvme_nvm_id_group groups[4]; } __packed; +struct nvme_nvm_bb_tbl { + __u8 tblid[4]; + __le16 verid; + __le16 revid; + __le32 rvsd1; + __le32 tblks; + __le32 tfact; + __le32 tgrown; + __le32 tdresv; + __le32 thresv; + __le32 rsvd2[8]; + __u8 blk[0]; +}; + /* * Check we didn't inadvertently grow the command struct */ @@ -195,12 +222,14 @@ static inline void _nvme_nvm_check_size(void) BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64); BUILD_BUG_ON(sizeof(struct nvme_nvm_hb_rw) != 64); BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64); - BUILD_BUG_ON(sizeof(struct nvme_nvm_bbtbl) != 64); + BUILD_BUG_ON(sizeof(struct nvme_nvm_getbbtbl) != 64); + BUILD_BUG_ON(sizeof(struct nvme_nvm_setbbtbl) != 64); BUILD_BUG_ON(sizeof(struct nvme_nvm_l2ptbl) != 64); BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64); BUILD_BUG_ON(sizeof(struct nvme_nvm_id_group) != 960); BUILD_BUG_ON(sizeof(struct nvme_nvm_addr_format) != 128); BUILD_BUG_ON(sizeof(struct nvme_nvm_id) != 4096); + BUILD_BUG_ON(sizeof(struct nvme_nvm_bb_tbl) != 512); } static int 
init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id) @@ -234,6 +263,7 @@ static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id) dst->tbet = le32_to_cpu(src->tbet); dst->tbem = le32_to_cpu(src->tbem); dst->mpos = le32_to_cpu(src->mpos); + dst->mccap = le32_to_cpu(src->mccap); dst->cpar = le16_to_cpu(src->cpar); } @@ -244,6 +274,7 @@ static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id) static int nvme_nvm_identity(struct request_queue *q, struct nvm_id *nvm_id) { struct nvme_ns *ns = q->queuedata; + struct nvme_dev *dev = ns->dev; struct nvme_nvm_id *nvme_nvm_id; struct nvme_nvm_command c = {}; int ret; @@ -256,8 +287,8 @@ static int nvme_nvm_identity(struct request_queue *q, struct nvm_id *nvm_id) if (!nvme_nvm_id) return -ENOMEM; - ret = nvme_submit_sync_cmd(q, (struct nvme_command *)&c, nvme_nvm_id, - sizeof(struct nvme_nvm_id)); + ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c, + nvme_nvm_id, sizeof(struct nvme_nvm_id)); if (ret) { ret = -EIO; goto out; @@ -268,6 +299,8 @@ static int nvme_nvm_identity(struct request_queue *q, struct nvm_id *nvm_id) nvm_id->cgrps = nvme_nvm_id->cgrps; nvm_id->cap = le32_to_cpu(nvme_nvm_id->cap); nvm_id->dom = le32_to_cpu(nvme_nvm_id->dom); + memcpy(&nvm_id->ppaf, &nvme_nvm_id->ppaf, + sizeof(struct nvme_nvm_addr_format)); ret = init_grps(nvm_id, nvme_nvm_id); out: @@ -281,7 +314,7 @@ static int nvme_nvm_get_l2p_tbl(struct request_queue *q, u64 slba, u32 nlb, struct nvme_ns *ns = q->queuedata; struct nvme_dev *dev = ns->dev; struct nvme_nvm_command c = {}; - u32 len = queue_max_hw_sectors(q) << 9; + u32 len = queue_max_hw_sectors(dev->admin_q) << 9; u32 nlb_pr_rq = len / sizeof(u64); u64 cmd_slba = slba; void *entries; @@ -299,8 +332,8 @@ static int nvme_nvm_get_l2p_tbl(struct request_queue *q, u64 slba, u32 nlb, c.l2p.slba = cpu_to_le64(cmd_slba); c.l2p.nlb = cpu_to_le32(cmd_nlb); - ret = nvme_submit_sync_cmd(q, (struct nvme_command *)&c, - entries, len); + ret = nvme_submit_sync_cmd(dev->admin_q, + (struct nvme_command *)&c, entries, len); if (ret) { dev_err(dev->dev, "L2P table transfer failed (%d)\n", ret); @@ -322,43 +355,84 @@ out: return ret; } -static int nvme_nvm_get_bb_tbl(struct request_queue *q, int lunid, - unsigned int nr_blocks, - nvm_bb_update_fn *update_bbtbl, void *priv) +static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa, + int nr_blocks, nvm_bb_update_fn *update_bbtbl, + void *priv) { + struct request_queue *q = nvmdev->q; struct nvme_ns *ns = q->queuedata; struct nvme_dev *dev = ns->dev; struct nvme_nvm_command c = {}; - void *bb_bitmap; - u16 bb_bitmap_size; + struct nvme_nvm_bb_tbl *bb_tbl; + int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blocks; int ret = 0; c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl; c.get_bb.nsid = cpu_to_le32(ns->ns_id); - c.get_bb.lbb = cpu_to_le32(lunid); - bb_bitmap_size = ((nr_blocks >> 15) + 1) * PAGE_SIZE; - bb_bitmap = kmalloc(bb_bitmap_size, GFP_KERNEL); - if (!bb_bitmap) - return -ENOMEM; + c.get_bb.spba = cpu_to_le64(ppa.ppa); - bitmap_zero(bb_bitmap, nr_blocks); + bb_tbl = kzalloc(tblsz, GFP_KERNEL); + if (!bb_tbl) + return -ENOMEM; - ret = nvme_submit_sync_cmd(q, (struct nvme_command *)&c, bb_bitmap, - bb_bitmap_size); + ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c, + bb_tbl, tblsz); if (ret) { dev_err(dev->dev, "get bad block table failed (%d)\n", ret); ret = -EIO; goto out; } - ret = update_bbtbl(lunid, bb_bitmap, nr_blocks, priv); + if (bb_tbl->tblid[0] != 'B' || 
bb_tbl->tblid[1] != 'B' || + bb_tbl->tblid[2] != 'L' || bb_tbl->tblid[3] != 'T') { + dev_err(dev->dev, "bbt format mismatch\n"); + ret = -EINVAL; + goto out; + } + + if (le16_to_cpu(bb_tbl->verid) != 1) { + ret = -EINVAL; + dev_err(dev->dev, "bbt version not supported\n"); + goto out; + } + + if (le32_to_cpu(bb_tbl->tblks) != nr_blocks) { + ret = -EINVAL; + dev_err(dev->dev, "bbt unexpected blocks returned (%u!=%u)", + le32_to_cpu(bb_tbl->tblks), nr_blocks); + goto out; + } + + ppa = dev_to_generic_addr(nvmdev, ppa); + ret = update_bbtbl(ppa, nr_blocks, bb_tbl->blk, priv); if (ret) { ret = -EINTR; goto out; } out: - kfree(bb_bitmap); + kfree(bb_tbl); + return ret; +} + +static int nvme_nvm_set_bb_tbl(struct request_queue *q, struct nvm_rq *rqd, + int type) +{ + struct nvme_ns *ns = q->queuedata; + struct nvme_dev *dev = ns->dev; + struct nvme_nvm_command c = {}; + int ret = 0; + + c.set_bb.opcode = nvme_nvm_admin_set_bb_tbl; + c.set_bb.nsid = cpu_to_le32(ns->ns_id); + c.set_bb.spba = cpu_to_le64(rqd->ppa_addr.ppa); + c.set_bb.nlb = cpu_to_le16(rqd->nr_pages - 1); + c.set_bb.value = type; + + ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c, + NULL, 0); + if (ret) + dev_err(dev->dev, "set bad block table failed (%d)\n", ret); return ret; } @@ -474,6 +548,7 @@ static struct nvm_dev_ops nvme_nvm_dev_ops = { .get_l2p_tbl = nvme_nvm_get_l2p_tbl, .get_bb_tbl = nvme_nvm_get_bb_tbl, + .set_bb_tbl = nvme_nvm_set_bb_tbl, .submit_io = nvme_nvm_submit_io, .erase_block = nvme_nvm_erase_block, @@ -496,31 +571,27 @@ void nvme_nvm_unregister(struct request_queue *q, char *disk_name) nvm_unregister(disk_name); } +/* move to a shared place when used in multiple places. */ +#define PCI_VENDOR_ID_CNEX 0x1d1d +#define PCI_DEVICE_ID_CNEX_WL 0x2807 +#define PCI_DEVICE_ID_CNEX_QEMU 0x1f1f + int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id) { struct nvme_dev *dev = ns->dev; struct pci_dev *pdev = to_pci_dev(dev->dev); /* QEMU NVMe simulator - PCI ID + Vendor specific bit */ - if (pdev->vendor == PCI_VENDOR_ID_INTEL && pdev->device == 0x5845 && + if (pdev->vendor == PCI_VENDOR_ID_CNEX && + pdev->device == PCI_DEVICE_ID_CNEX_QEMU && id->vs[0] == 0x1) return 1; /* CNEX Labs - PCI ID + Vendor specific bit */ - if (pdev->vendor == 0x1d1d && pdev->device == 0x2807 && + if (pdev->vendor == PCI_VENDOR_ID_CNEX && + pdev->device == PCI_DEVICE_ID_CNEX_WL && id->vs[0] == 0x1) return 1; return 0; } -#else -int nvme_nvm_register(struct request_queue *q, char *disk_name) -{ - return 0; -} -void nvme_nvm_unregister(struct request_queue *q, char *disk_name) {}; -int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id) -{ - return 0; -} -#endif /* CONFIG_NVM */ diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index fdb4e5bad9ac..044253dca30a 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -136,8 +136,22 @@ int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr); int nvme_sg_io32(struct nvme_ns *ns, unsigned long arg); int nvme_sg_get_version_num(int __user *ip); +#ifdef CONFIG_NVM int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id); int nvme_nvm_register(struct request_queue *q, char *disk_name); void nvme_nvm_unregister(struct request_queue *q, char *disk_name); +#else +static inline int nvme_nvm_register(struct request_queue *q, char *disk_name) +{ + return 0; +} + +static inline void nvme_nvm_unregister(struct request_queue *q, char *disk_name) {}; + +static inline int nvme_nvm_ns_supported(struct nvme_ns *ns, 
struct nvme_id_ns *id) +{ + return 0; +} +#endif /* CONFIG_NVM */ #endif /* _NVME_H */ diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 8187df204695..9e294ff4e652 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -896,19 +896,28 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx, goto retry_cmd; } if (blk_integrity_rq(req)) { - if (blk_rq_count_integrity_sg(req->q, req->bio) != 1) + if (blk_rq_count_integrity_sg(req->q, req->bio) != 1) { + dma_unmap_sg(dev->dev, iod->sg, iod->nents, + dma_dir); goto error_cmd; + } sg_init_table(iod->meta_sg, 1); if (blk_rq_map_integrity_sg( - req->q, req->bio, iod->meta_sg) != 1) + req->q, req->bio, iod->meta_sg) != 1) { + dma_unmap_sg(dev->dev, iod->sg, iod->nents, + dma_dir); goto error_cmd; + } if (rq_data_dir(req)) nvme_dif_remap(req, nvme_dif_prep); - if (!dma_map_sg(nvmeq->q_dmadev, iod->meta_sg, 1, dma_dir)) + if (!dma_map_sg(nvmeq->q_dmadev, iod->meta_sg, 1, dma_dir)) { + dma_unmap_sg(dev->dev, iod->sg, iod->nents, + dma_dir); goto error_cmd; + } } } @@ -968,7 +977,8 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag) if (head == nvmeq->cq_head && phase == nvmeq->cq_phase) return; - writel(head, nvmeq->q_db + nvmeq->dev->db_stride); + if (likely(nvmeq->cq_vector >= 0)) + writel(head, nvmeq->q_db + nvmeq->dev->db_stride); nvmeq->cq_head = head; nvmeq->cq_phase = phase; @@ -1727,9 +1737,13 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev) u32 aqa; u64 cap = lo_hi_readq(&dev->bar->cap); struct nvme_queue *nvmeq; - unsigned page_shift = PAGE_SHIFT; + /* + * default to a 4K page size, with the intention to update this + * path in the future to accommodate architectures with differing + * kernel and IO page sizes. + */ + unsigned page_shift = 12; unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12; - unsigned dev_page_max = NVME_CAP_MPSMAX(cap) + 12; if (page_shift < dev_page_min) { dev_err(dev->dev, @@ -1738,13 +1752,6 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev) 1 << page_shift); return -ENODEV; } - if (page_shift > dev_page_max) { - dev_info(dev->dev, - "Device maximum page size (%u) smaller than " - "host (%u); enabling work-around\n", - 1 << dev_page_max, 1 << page_shift); - page_shift = dev_page_max; - } dev->subsystem = readl(&dev->bar->vs) >= NVME_VS(1, 1) ? NVME_CAP_NSSRC(cap) : 0; @@ -2268,7 +2275,7 @@ static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid) if (dev->max_hw_sectors) { blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors); blk_queue_max_segments(ns->queue, - ((dev->max_hw_sectors << 9) / dev->page_size) + 1); + (dev->max_hw_sectors / (dev->page_size >> 9)) + 1); } if (dev->stripe_size) blk_queue_chunk_sectors(ns->queue, dev->stripe_size >> 9); @@ -2701,6 +2708,18 @@ static int nvme_dev_map(struct nvme_dev *dev) dev->q_depth = min_t(int, NVME_CAP_MQES(cap) + 1, NVME_Q_DEPTH); dev->db_stride = 1 << NVME_CAP_STRIDE(cap); dev->dbs = ((void __iomem *)dev->bar) + 4096; + + /* + * Temporary fix for the Apple controller found in the MacBook8,1 and + * some MacBook7,1 to avoid controller resets and data loss. 
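+ * Clamping q_depth to 2 effectively serialises I/O: one submission
+ * queue slot is always left empty to distinguish a full ring from an
+ * empty one, so at most one command is in flight per queue on these
+ * controllers, trading throughput for stability.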
+ */ + if (pdev->vendor == PCI_VENDOR_ID_APPLE && pdev->device == 0x2001) { + dev->q_depth = 2; + dev_warn(dev->dev, "detected Apple NVMe controller, set " + "queue depth=%u to work around controller resets\n", + dev->q_depth); + } + if (readl(&dev->bar->vs) >= NVME_VS(1, 2)) dev->cmb = nvme_map_cmb(dev); @@ -2787,6 +2806,10 @@ static void nvme_del_queue_end(struct nvme_queue *nvmeq) { struct nvme_delq_ctx *dq = nvmeq->cmdinfo.ctx; nvme_put_dq(dq); + + spin_lock_irq(&nvmeq->q_lock); + nvme_process_cq(nvmeq); + spin_unlock_irq(&nvmeq->q_lock); } static int adapter_async_del_queue(struct nvme_queue *nvmeq, u8 opcode, diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c index 540f077c37ea..02a7452bdf23 100644 --- a/drivers/pci/host/pcie-designware.c +++ b/drivers/pci/host/pcie-designware.c @@ -440,7 +440,6 @@ int dw_pcie_host_init(struct pcie_port *pp) ret, pp->io); continue; } - pp->io_base = pp->io->start; break; case IORESOURCE_MEM: pp->mem = win->res; diff --git a/drivers/pci/host/pcie-hisi.c b/drivers/pci/host/pcie-hisi.c index 35457ecd8e70..163671a4f798 100644 --- a/drivers/pci/host/pcie-hisi.c +++ b/drivers/pci/host/pcie-hisi.c @@ -111,7 +111,7 @@ static struct pcie_host_ops hisi_pcie_host_ops = { .link_up = hisi_pcie_link_up, }; -static int __init hisi_add_pcie_port(struct pcie_port *pp, +static int hisi_add_pcie_port(struct pcie_port *pp, struct platform_device *pdev) { int ret; @@ -139,7 +139,7 @@ static int __init hisi_add_pcie_port(struct pcie_port *pp, return 0; } -static int __init hisi_pcie_probe(struct platform_device *pdev) +static int hisi_pcie_probe(struct platform_device *pdev) { struct hisi_pcie *hisi_pcie; struct pcie_port *pp; diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 4446fcb5effd..d7ffd66814bb 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -1146,9 +1146,21 @@ static int pci_pm_runtime_suspend(struct device *dev) pci_dev->state_saved = false; pci_dev->no_d3cold = false; error = pm->runtime_suspend(dev); - suspend_report_result(pm->runtime_suspend, error); - if (error) + if (error) { + /* + * -EBUSY and -EAGAIN are used to request the runtime PM core + * to schedule a new suspend, so log the event only with debug + * log level. 
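+ *
+ * A driver callback can lean on this behaviour, e.g. (hypothetical
+ * driver, for illustration only):
+ *
+ *   static int foo_runtime_suspend(struct device *dev)
+ *   {
+ *           return foo_device_busy(dev) ? -EBUSY : 0;
+ *   }
+ *
+ * where the -EBUSY return is logged at debug level and retried later.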
+ */ + if (error == -EBUSY || error == -EAGAIN) + dev_dbg(dev, "can't suspend now (%pf returned %d)\n", + pm->runtime_suspend, error); + else + dev_err(dev, "can't suspend (%pf returned %d)\n", + pm->runtime_suspend, error); + return error; + } if (!pci_dev->d3cold_allowed) pci_dev->no_d3cold = true; diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 92618686604c..eead54cd01b2 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -216,7 +216,10 @@ static ssize_t numa_node_store(struct device *dev, if (ret) return ret; - if (node >= MAX_NUMNODES || !node_online(node)) + if ((node < 0 && node != NUMA_NO_NODE) || node >= MAX_NUMNODES) + return -EINVAL; + + if (node != NUMA_NO_NODE && !node_online(node)) return -EINVAL; add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK); diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index fd2f03fa53f3..d390fc1475ec 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -337,6 +337,4 @@ static inline int pci_dev_specific_reset(struct pci_dev *dev, int probe) } #endif -struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus); - #endif /* DRIVERS_PCI_H */ diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index e735c728e3b3..edb1984201e9 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -1685,8 +1685,8 @@ static void pci_dma_configure(struct pci_dev *dev) { struct device *bridge = pci_get_host_bridge_device(dev); - if (IS_ENABLED(CONFIG_OF) && dev->dev.of_node) { - if (bridge->parent) + if (IS_ENABLED(CONFIG_OF) && + bridge->parent && bridge->parent->of_node) { of_dma_configure(&dev->dev, bridge->parent->of_node); } else if (has_acpi_companion(bridge)) { struct acpi_device *adev = to_acpi_device_node(bridge->fwnode); diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig index b422e4ed73f4..312c78b27a32 100644 --- a/drivers/pinctrl/Kconfig +++ b/drivers/pinctrl/Kconfig @@ -5,8 +5,6 @@ config PINCTRL bool -if PINCTRL - menu "Pin controllers" depends on PINCTRL @@ -274,5 +272,3 @@ config PINCTRL_TB10X select GPIOLIB endmenu - -endif diff --git a/drivers/pinctrl/freescale/pinctrl-imx1-core.c b/drivers/pinctrl/freescale/pinctrl-imx1-core.c index 88a7fac11bd4..acaf84cadca3 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx1-core.c +++ b/drivers/pinctrl/freescale/pinctrl-imx1-core.c @@ -538,8 +538,10 @@ static int imx1_pinctrl_parse_functions(struct device_node *np, func->groups[i] = child->name; grp = &info->groups[grp_index++]; ret = imx1_pinctrl_parse_groups(child, grp, info, i++); - if (ret == -ENOMEM) + if (ret == -ENOMEM) { + of_node_put(child); return ret; + } } return 0; @@ -582,8 +584,10 @@ static int imx1_pinctrl_parse_dt(struct platform_device *pdev, for_each_child_of_node(np, child) { ret = imx1_pinctrl_parse_functions(child, info, ifunc++); - if (ret == -ENOMEM) + if (ret == -ENOMEM) { + of_node_put(child); return -ENOMEM; + } } return 0; diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c index f307f1d27d64..5c717275a7fa 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c +++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c @@ -747,7 +747,7 @@ static int mtk_gpio_get_direction(struct gpio_chip *chip, unsigned offset) reg_addr = mtk_get_port(pctl, offset) + pctl->devdata->dir_offset; bit = BIT(offset & 0xf); regmap_read(pctl->regmap1, reg_addr, &read_val); - return !!(read_val & bit); + return !(read_val & bit); } static int mtk_gpio_get(struct gpio_chip *chip, unsigned offset) @@ -757,12 +757,8 @@ static int 
mtk_gpio_get(struct gpio_chip *chip, unsigned offset) unsigned int read_val = 0; struct mtk_pinctrl *pctl = dev_get_drvdata(chip->dev); - if (mtk_gpio_get_direction(chip, offset)) - reg_addr = mtk_get_port(pctl, offset) + - pctl->devdata->dout_offset; - else - reg_addr = mtk_get_port(pctl, offset) + - pctl->devdata->din_offset; + reg_addr = mtk_get_port(pctl, offset) + + pctl->devdata->din_offset; bit = BIT(offset & 0xf); regmap_read(pctl->regmap1, reg_addr, &read_val); @@ -997,6 +993,7 @@ static struct gpio_chip mtk_gpio_chip = { .owner = THIS_MODULE, .request = gpiochip_generic_request, .free = gpiochip_generic_free, + .get_direction = mtk_gpio_get_direction, .direction_input = mtk_gpio_direction_input, .direction_output = mtk_gpio_direction_output, .get = mtk_gpio_get, diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c index d809c9eaa323..19a3c3bc2f1f 100644 --- a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c +++ b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c @@ -672,7 +672,7 @@ static int pm8xxx_gpio_probe(struct platform_device *pdev) return -ENOMEM; pctrl->dev = &pdev->dev; - pctrl->npins = (unsigned)of_device_get_match_data(&pdev->dev); + pctrl->npins = (unsigned long)of_device_get_match_data(&pdev->dev); pctrl->regmap = dev_get_regmap(pdev->dev.parent, NULL); if (!pctrl->regmap) { diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c index 8982027de8e8..b868ef1766a0 100644 --- a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c +++ b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c @@ -763,7 +763,7 @@ static int pm8xxx_mpp_probe(struct platform_device *pdev) return -ENOMEM; pctrl->dev = &pdev->dev; - pctrl->npins = (unsigned)of_device_get_match_data(&pdev->dev); + pctrl->npins = (unsigned long)of_device_get_match_data(&pdev->dev); pctrl->regmap = dev_get_regmap(pdev->dev.parent, NULL); if (!pctrl->regmap) { diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7734.c b/drivers/pinctrl/sh-pfc/pfc-sh7734.c index e7deb51de7dc..9842bb106796 100644 --- a/drivers/pinctrl/sh-pfc/pfc-sh7734.c +++ b/drivers/pinctrl/sh-pfc/pfc-sh7734.c @@ -31,11 +31,11 @@ PORT_GP_12(5, fn, sfx) #undef _GP_DATA -#define _GP_DATA(bank, pin, name, sfx) \ +#define _GP_DATA(bank, pin, name, sfx, cfg) \ PINMUX_DATA(name##_DATA, name##_FN, name##_IN, name##_OUT) -#define _GP_INOUTSEL(bank, pin, name, sfx) name##_IN, name##_OUT -#define _GP_INDT(bank, pin, name, sfx) name##_DATA +#define _GP_INOUTSEL(bank, pin, name, sfx, cfg) name##_IN, name##_OUT +#define _GP_INDT(bank, pin, name, sfx, cfg) name##_DATA #define GP_INOUTSEL(bank) PORT_GP_32_REV(bank, _GP_INOUTSEL, unused) #define GP_INDT(bank) PORT_GP_32_REV(bank, _GP_INDT, unused) diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index 8b3130f22b42..9e03d158f411 100644 --- a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c @@ -1478,6 +1478,8 @@ module_init(remoteproc_init); static void __exit remoteproc_exit(void) { + ida_destroy(&rproc_dev_index); + rproc_exit_debugfs(); } module_exit(remoteproc_exit); diff --git a/drivers/remoteproc/remoteproc_debugfs.c b/drivers/remoteproc/remoteproc_debugfs.c index 9d30809bb407..916af5096f57 100644 --- a/drivers/remoteproc/remoteproc_debugfs.c +++ b/drivers/remoteproc/remoteproc_debugfs.c @@ -156,7 +156,7 @@ rproc_recovery_write(struct file *filp, const char __user *user_buf, char buf[10]; int ret; - if (count > sizeof(buf)) + if (count < 1 || count > sizeof(buf)) return count; ret = copy_from_user(buf, user_buf, 
count); diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c index 188006c55ce0..aa705bb4748c 100644 --- a/drivers/rtc/rtc-ds1307.c +++ b/drivers/rtc/rtc-ds1307.c @@ -15,9 +15,6 @@ #include <linux/i2c.h> #include <linux/init.h> #include <linux/module.h> -#include <linux/of_device.h> -#include <linux/of_irq.h> -#include <linux/pm_wakeirq.h> #include <linux/rtc/ds1307.h> #include <linux/rtc.h> #include <linux/slab.h> @@ -117,7 +114,6 @@ struct ds1307 { #define HAS_ALARM 1 /* bit 1 == irq claimed */ struct i2c_client *client; struct rtc_device *rtc; - int wakeirq; s32 (*read_block_data)(const struct i2c_client *client, u8 command, u8 length, u8 *values); s32 (*write_block_data)(const struct i2c_client *client, u8 command, @@ -1138,7 +1134,10 @@ read_rtc: bin2bcd(tmp)); } - device_set_wakeup_capable(&client->dev, want_irq); + if (want_irq) { + device_set_wakeup_capable(&client->dev, true); + set_bit(HAS_ALARM, &ds1307->flags); + } ds1307->rtc = devm_rtc_device_register(&client->dev, client->name, rtc_ops, THIS_MODULE); if (IS_ERR(ds1307->rtc)) { @@ -1146,43 +1145,19 @@ read_rtc: } if (want_irq) { - struct device_node *node = client->dev.of_node; - err = devm_request_threaded_irq(&client->dev, client->irq, NULL, irq_handler, IRQF_SHARED | IRQF_ONESHOT, ds1307->rtc->name, client); if (err) { client->irq = 0; + device_set_wakeup_capable(&client->dev, false); + clear_bit(HAS_ALARM, &ds1307->flags); dev_err(&client->dev, "unable to request IRQ!\n"); - goto no_irq; - } - - set_bit(HAS_ALARM, &ds1307->flags); - dev_dbg(&client->dev, "got IRQ %d\n", client->irq); - - /* Currently supported by OF code only! */ - if (!node) - goto no_irq; - - err = of_irq_get(node, 1); - if (err <= 0) { - if (err == -EPROBE_DEFER) - goto exit; - goto no_irq; - } - ds1307->wakeirq = err; - - err = dev_pm_set_dedicated_wake_irq(&client->dev, - ds1307->wakeirq); - if (err) { - dev_err(&client->dev, "unable to setup wakeIRQ %d!\n", - err); - goto exit; - } + } else + dev_dbg(&client->dev, "got IRQ %d\n", client->irq); } -no_irq: if (chip->nvram_size) { ds1307->nvram = devm_kzalloc(&client->dev, @@ -1226,9 +1201,6 @@ static int ds1307_remove(struct i2c_client *client) { struct ds1307 *ds1307 = i2c_get_clientdata(client); - if (ds1307->wakeirq) - dev_pm_clear_wake_irq(&client->dev); - if (test_and_clear_bit(HAS_NVRAM, &ds1307->flags)) sysfs_remove_bin_file(&client->dev.kobj, ds1307->nvram); diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index 548a18916a31..a831d18596a5 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c @@ -1080,28 +1080,10 @@ void __init chsc_init_cleanup(void) free_page((unsigned long)sei_page); } -int chsc_enable_facility(int operation_code) +int __chsc_enable_facility(struct chsc_sda_area *sda_area, int operation_code) { - unsigned long flags; int ret; - struct { - struct chsc_header request; - u8 reserved1:4; - u8 format:4; - u8 reserved2; - u16 operation_code; - u32 reserved3; - u32 reserved4; - u32 operation_data_area[252]; - struct chsc_header response; - u32 reserved5:4; - u32 format2:4; - u32 reserved6:24; - } __attribute__ ((packed)) *sda_area; - spin_lock_irqsave(&chsc_page_lock, flags); - memset(chsc_page, 0, PAGE_SIZE); - sda_area = chsc_page; sda_area->request.length = 0x0400; sda_area->request.code = 0x0031; sda_area->operation_code = operation_code; @@ -1119,10 +1101,25 @@ int chsc_enable_facility(int operation_code) default: ret = chsc_error_from_response(sda_area->response.code); } +out: + return ret; +} + +int chsc_enable_facility(int 
operation_code) +{ + struct chsc_sda_area *sda_area; + unsigned long flags; + int ret; + + spin_lock_irqsave(&chsc_page_lock, flags); + memset(chsc_page, 0, PAGE_SIZE); + sda_area = chsc_page; + + ret = __chsc_enable_facility(sda_area, operation_code); if (ret != 0) CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n", operation_code, sda_area->response.code); -out: + spin_unlock_irqrestore(&chsc_page_lock, flags); return ret; } diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h index 76c9b50700b2..0de134c3a204 100644 --- a/drivers/s390/cio/chsc.h +++ b/drivers/s390/cio/chsc.h @@ -115,6 +115,20 @@ struct chsc_scpd { u8 data[PAGE_SIZE - 20]; } __attribute__ ((packed)); +struct chsc_sda_area { + struct chsc_header request; + u8 :4; + u8 format:4; + u8 :8; + u16 operation_code; + u32 :32; + u32 :32; + u32 operation_data_area[252]; + struct chsc_header response; + u32 :4; + u32 format2:4; + u32 :24; +} __packed __aligned(PAGE_SIZE); extern int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd); @@ -122,6 +136,7 @@ extern int chsc_determine_css_characteristics(void); extern int chsc_init(void); extern void chsc_init_cleanup(void); +int __chsc_enable_facility(struct chsc_sda_area *sda_area, int operation_code); extern int chsc_enable_facility(int); struct channel_subsystem; extern int chsc_secm(struct channel_subsystem *, int); diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index b5620e818d6b..690b8547e828 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c @@ -925,18 +925,32 @@ void reipl_ccw_dev(struct ccw_dev_id *devid) int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo) { + static struct chsc_sda_area sda_area __initdata; struct subchannel_id schid; struct schib schib; schid = *(struct subchannel_id *)&S390_lowcore.subchannel_id; if (!schid.one) return -ENODEV; + + if (schid.ssid) { + /* + * Firmware should have already enabled MSS but whoever started + * the kernel might have initiated a channel subsystem reset. + * Ensure that MSS is enabled. 
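+	 * (MSS here is the "multiple subchannel sets" facility; while it is
+	 * disabled, a subchannel with a nonzero ssid is not addressable, so
+	 * the stsch_err() call below would fail for this schid.)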
+ */ + memset(&sda_area, 0, sizeof(sda_area)); + if (__chsc_enable_facility(&sda_area, CHSC_SDA_OC_MSS)) + return -ENODEV; + } if (stsch_err(schid, &schib)) return -ENODEV; if (schib.pmcw.st != SUBCHANNEL_TYPE_IO) return -ENODEV; if (!schib.pmcw.dnv) return -ENODEV; + + iplinfo->ssid = schid.ssid; iplinfo->devno = schib.pmcw.dev; iplinfo->is_qdio = schib.pmcw.qf; return 0; diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index 2ee3053bdc12..489e703dc82d 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c @@ -702,17 +702,12 @@ css_generate_pgid(struct channel_subsystem *css, u32 tod_high) css->global_pgid.pgid_high.ext_cssid.version = 0x80; css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid; } else { -#ifdef CONFIG_SMP css->global_pgid.pgid_high.cpu_addr = stap(); -#else - css->global_pgid.pgid_high.cpu_addr = 0; -#endif } get_cpu_id(&cpu_id); css->global_pgid.cpu_id = cpu_id.ident; css->global_pgid.cpu_model = cpu_id.machine; css->global_pgid.tod_high = tod_high; - } static void diff --git a/drivers/s390/crypto/Makefile b/drivers/s390/crypto/Makefile index 57f710b3c8a4..b8ab18676e69 100644 --- a/drivers/s390/crypto/Makefile +++ b/drivers/s390/crypto/Makefile @@ -3,6 +3,9 @@ # ap-objs := ap_bus.o -obj-$(CONFIG_ZCRYPT) += ap.o zcrypt_api.o zcrypt_pcixcc.o -obj-$(CONFIG_ZCRYPT) += zcrypt_cex2a.o zcrypt_cex4.o +# zcrypt_api depends on ap +obj-$(CONFIG_ZCRYPT) += ap.o zcrypt_api.o +# msgtype* depend on zcrypt_api obj-$(CONFIG_ZCRYPT) += zcrypt_msgtype6.o zcrypt_msgtype50.o +# adapter drivers depend on ap, zcrypt_api and msgtype* +obj-$(CONFIG_ZCRYPT) += zcrypt_pcixcc.o zcrypt_cex2a.o zcrypt_cex4.o diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index 9cb3dfbcaddb..61f768518a34 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -74,6 +74,7 @@ static struct device *ap_root_device = NULL; static struct ap_config_info *ap_configuration; static DEFINE_SPINLOCK(ap_device_list_lock); static LIST_HEAD(ap_device_list); +static bool initialised; /* * Workqueue timer for bus rescan. 
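The ap_bus hunks that follow gate driver registration on the new initialised flag, so a driver can no longer register while the bus is half set up or being torn down. Reduced to its essentials the pattern looks like this; a minimal sketch with hypothetical names (bus_init, bus_driver_register), not the driver's actual logic:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/module.h>

static bool initialised;

static int __init bus_init(void)
{
	/* ... set up bus infrastructure, kick off scan work ... */
	initialised = true;	/* from here on, registrations are accepted */
	return 0;
}
module_init(bus_init);

static void __exit bus_exit(void)
{
	initialised = false;	/* refuse registrations during teardown */
	/* ... stop timers, unregister devices ... */
}
module_exit(bus_exit);

/* hypothetical entry point mirroring ap_driver_register() below */
int bus_driver_register(struct device_driver *drv)
{
	if (!initialised)
		return -ENODEV;
	return driver_register(drv);
}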
@@ -1384,6 +1385,9 @@ int ap_driver_register(struct ap_driver *ap_drv, struct module *owner, { struct device_driver *drv = &ap_drv->driver; + if (!initialised) + return -ENODEV; + drv->bus = &ap_bus_type; drv->probe = ap_device_probe; drv->remove = ap_device_remove; @@ -1808,6 +1812,7 @@ int __init ap_module_init(void) goto out_pm; queue_work(system_long_wq, &ap_scan_work); + initialised = true; return 0; @@ -1837,6 +1842,7 @@ void ap_module_exit(void) { int i; + initialised = false; ap_reset_domain(); ap_poll_thread_stop(); del_timer_sync(&ap_config_timer); diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c index a9603ebbc1f8..9f8fa42c062c 100644 --- a/drivers/s390/crypto/zcrypt_api.c +++ b/drivers/s390/crypto/zcrypt_api.c @@ -317,11 +317,9 @@ EXPORT_SYMBOL(zcrypt_device_unregister); void zcrypt_msgtype_register(struct zcrypt_ops *zops) { - if (zops->owner) { - spin_lock_bh(&zcrypt_ops_list_lock); - list_add_tail(&zops->list, &zcrypt_ops_list); - spin_unlock_bh(&zcrypt_ops_list_lock); - } + spin_lock_bh(&zcrypt_ops_list_lock); + list_add_tail(&zops->list, &zcrypt_ops_list); + spin_unlock_bh(&zcrypt_ops_list_lock); } EXPORT_SYMBOL(zcrypt_msgtype_register); @@ -342,7 +340,7 @@ struct zcrypt_ops *__ops_lookup(unsigned char *name, int variant) spin_lock_bh(&zcrypt_ops_list_lock); list_for_each_entry(zops, &zcrypt_ops_list, list) { if ((zops->variant == variant) && - (!strncmp(zops->owner->name, name, MODULE_NAME_LEN))) { + (!strncmp(zops->name, name, sizeof(zops->name)))) { found = 1; break; } diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h index 750876891931..38618f05ad92 100644 --- a/drivers/s390/crypto/zcrypt_api.h +++ b/drivers/s390/crypto/zcrypt_api.h @@ -96,6 +96,7 @@ struct zcrypt_ops { struct list_head list; /* zcrypt ops list. 
*/ struct module *owner; int variant; + char name[128]; }; struct zcrypt_device { diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c index 71ceee9137a8..74edf2934e7c 100644 --- a/drivers/s390/crypto/zcrypt_msgtype50.c +++ b/drivers/s390/crypto/zcrypt_msgtype50.c @@ -513,6 +513,7 @@ static struct zcrypt_ops zcrypt_msgtype50_ops = { .rsa_modexpo = zcrypt_cex2a_modexpo, .rsa_modexpo_crt = zcrypt_cex2a_modexpo_crt, .owner = THIS_MODULE, + .name = MSGTYPE50_NAME, .variant = MSGTYPE50_VARIANT_DEFAULT, }; diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c index 74762214193b..9a2dd472c1cc 100644 --- a/drivers/s390/crypto/zcrypt_msgtype6.c +++ b/drivers/s390/crypto/zcrypt_msgtype6.c @@ -1119,6 +1119,7 @@ static long zcrypt_msgtype6_rng(struct zcrypt_device *zdev, */ static struct zcrypt_ops zcrypt_msgtype6_norng_ops = { .owner = THIS_MODULE, + .name = MSGTYPE06_NAME, .variant = MSGTYPE06_VARIANT_NORNG, .rsa_modexpo = zcrypt_msgtype6_modexpo, .rsa_modexpo_crt = zcrypt_msgtype6_modexpo_crt, @@ -1127,6 +1128,7 @@ static struct zcrypt_ops zcrypt_msgtype6_norng_ops = { static struct zcrypt_ops zcrypt_msgtype6_ops = { .owner = THIS_MODULE, + .name = MSGTYPE06_NAME, .variant = MSGTYPE06_VARIANT_DEFAULT, .rsa_modexpo = zcrypt_msgtype6_modexpo, .rsa_modexpo_crt = zcrypt_msgtype6_modexpo_crt, @@ -1136,6 +1138,7 @@ static struct zcrypt_ops zcrypt_msgtype6_ops = { static struct zcrypt_ops zcrypt_msgtype6_ep11_ops = { .owner = THIS_MODULE, + .name = MSGTYPE06_NAME, .variant = MSGTYPE06_VARIANT_EP11, .rsa_modexpo = NULL, .rsa_modexpo_crt = NULL, diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index 3ba2e9564b9a..81af294f15a7 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -902,7 +902,7 @@ static ssize_t tcm_qla2xxx_tpg_fabric_prot_type_show(struct config_item *item, return sprintf(page, "%d\n", tpg->tpg_attrib.fabric_prot_type); } -CONFIGFS_ATTR_WO(tcm_qla2xxx_tpg_, enable); +CONFIGFS_ATTR(tcm_qla2xxx_tpg_, enable); CONFIGFS_ATTR_RO(tcm_qla2xxx_tpg_, dynamic_sessions); CONFIGFS_ATTR(tcm_qla2xxx_tpg_, fabric_prot_type); diff --git a/drivers/sh/pm_runtime.c b/drivers/sh/pm_runtime.c index 25abd4eb7d10..91a003011acf 100644 --- a/drivers/sh/pm_runtime.c +++ b/drivers/sh/pm_runtime.c @@ -34,7 +34,7 @@ static struct pm_clk_notifier_block platform_bus_notifier = { static int __init sh_pm_runtime_init(void) { - if (IS_ENABLED(CONFIG_ARCH_SHMOBILE_MULTI)) { + if (IS_ENABLED(CONFIG_ARCH_SHMOBILE)) { if (!of_find_compatible_node(NULL, NULL, "renesas,cpg-mstp-clocks")) return 0; diff --git a/drivers/soc/mediatek/Kconfig b/drivers/soc/mediatek/Kconfig index 9d5068248aa0..0a4ea809a61b 100644 --- a/drivers/soc/mediatek/Kconfig +++ b/drivers/soc/mediatek/Kconfig @@ -23,6 +23,7 @@ config MTK_PMIC_WRAP config MTK_SCPSYS bool "MediaTek SCPSYS Support" depends on ARCH_MEDIATEK || COMPILE_TEST + default ARM64 && ARCH_MEDIATEK select REGMAP select MTK_INFRACFG select PM_GENERIC_DOMAINS if PM diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c index f3a0b6a4b54e..8c03a80b482d 100644 --- a/drivers/soc/ti/knav_qmss_queue.c +++ b/drivers/soc/ti/knav_qmss_queue.c @@ -1179,7 +1179,7 @@ static int knav_queue_setup_link_ram(struct knav_device *kdev) block++; if (!block->size) - return 0; + continue; dev_dbg(kdev->dev, "linkram1: phys:%x, virt:%p, size:%x\n", block->phys, block->virt, block->size); @@ -1519,9 +1519,9 @@ static int 
knav_queue_load_pdsp(struct knav_device *kdev, for (i = 0; i < ARRAY_SIZE(knav_acc_firmwares); i++) { if (knav_acc_firmwares[i]) { - ret = request_firmware(&fw, - knav_acc_firmwares[i], - kdev->dev); + ret = request_firmware_direct(&fw, + knav_acc_firmwares[i], + kdev->dev); if (!ret) { found = true; break; diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c index 06858e04ec59..bf9a610e5b89 100644 --- a/drivers/spi/spi-bcm63xx.c +++ b/drivers/spi/spi-bcm63xx.c @@ -562,8 +562,8 @@ static int bcm63xx_spi_probe(struct platform_device *pdev) goto out_clk_disable; } - dev_info(dev, "at 0x%08x (irq %d, FIFOs size %d)\n", - r->start, irq, bs->fifo_size); + dev_info(dev, "at %pr (irq %d, FIFOs size %d)\n", + r, irq, bs->fifo_size); return 0; diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c index 563954a61424..7840067062a8 100644 --- a/drivers/spi/spi-mt65xx.c +++ b/drivers/spi/spi-mt65xx.c @@ -410,7 +410,7 @@ static int mtk_spi_setup(struct spi_device *spi) if (!spi->controller_data) spi->controller_data = (void *)&mtk_default_chip_info; - if (mdata->dev_comp->need_pad_sel) + if (mdata->dev_comp->need_pad_sel && gpio_is_valid(spi->cs_gpio)) gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH)); return 0; @@ -632,13 +632,23 @@ static int mtk_spi_probe(struct platform_device *pdev) goto err_put_master; } - for (i = 0; i < master->num_chipselect; i++) { - ret = devm_gpio_request(&pdev->dev, master->cs_gpios[i], - dev_name(&pdev->dev)); - if (ret) { - dev_err(&pdev->dev, - "can't get CS GPIO %i\n", i); - goto err_put_master; + if (!master->cs_gpios && master->num_chipselect > 1) { + dev_err(&pdev->dev, + "cs_gpios not specified and num_chipselect > 1\n"); + ret = -EINVAL; + goto err_put_master; + } + + if (master->cs_gpios) { + for (i = 0; i < master->num_chipselect; i++) { + ret = devm_gpio_request(&pdev->dev, + master->cs_gpios[i], + dev_name(&pdev->dev)); + if (ret) { + dev_err(&pdev->dev, + "can't get CS GPIO %i\n", i); + goto err_put_master; + } } } } diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c index 94af80676684..5e5fd77e2711 100644 --- a/drivers/spi/spi-pl022.c +++ b/drivers/spi/spi-pl022.c @@ -1171,19 +1171,31 @@ err_no_rxchan: static int pl022_dma_autoprobe(struct pl022 *pl022) { struct device *dev = &pl022->adev->dev; + struct dma_chan *chan; + int err; /* automatically configure DMA channels from platform, normally using DT */ - pl022->dma_rx_channel = dma_request_slave_channel(dev, "rx"); - if (!pl022->dma_rx_channel) + chan = dma_request_slave_channel_reason(dev, "rx"); + if (IS_ERR(chan)) { + err = PTR_ERR(chan); goto err_no_rxchan; + } + + pl022->dma_rx_channel = chan; - pl022->dma_tx_channel = dma_request_slave_channel(dev, "tx"); - if (!pl022->dma_tx_channel) + chan = dma_request_slave_channel_reason(dev, "tx"); + if (IS_ERR(chan)) { + err = PTR_ERR(chan); goto err_no_txchan; + } + + pl022->dma_tx_channel = chan; pl022->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL); - if (!pl022->dummypage) + if (!pl022->dummypage) { + err = -ENOMEM; goto err_no_dummypage; + } return 0; @@ -1194,7 +1206,7 @@ err_no_txchan: dma_release_channel(pl022->dma_rx_channel); pl022->dma_rx_channel = NULL; err_no_rxchan: - return -ENODEV; + return err; } static void terminate_dma(struct pl022 *pl022) @@ -2236,6 +2248,10 @@ static int pl022_probe(struct amba_device *adev, const struct amba_id *id) /* Get DMA channels, try autoconfiguration first */ status = pl022_dma_autoprobe(pl022); + if (status == -EPROBE_DEFER) { + dev_dbg(dev, "deferring probe to get 
DMA channel\n"); + goto err_no_irq; + } /* If that failed, use channels from platform_info */ if (status == 0) diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index e2415be209d5..2b0a8ec3affb 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -376,6 +376,7 @@ static void spi_drv_shutdown(struct device *dev) /** * __spi_register_driver - register a SPI driver + * @owner: owner module of the driver to register * @sdrv: the driver to register * Context: can sleep * @@ -2130,6 +2131,7 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message) * Set transfer tx_nbits and rx_nbits as single transfer default * (SPI_NBITS_SINGLE) if it is not set for this transfer. */ + message->frame_length = 0; list_for_each_entry(xfer, &message->transfers, transfer_list) { message->frame_length += xfer->len; if (!xfer->bits_per_word) diff --git a/drivers/staging/iio/Kconfig b/drivers/staging/iio/Kconfig index 6d5b38d69578..9d7f0004d2d7 100644 --- a/drivers/staging/iio/Kconfig +++ b/drivers/staging/iio/Kconfig @@ -18,7 +18,8 @@ source "drivers/staging/iio/resolver/Kconfig" source "drivers/staging/iio/trigger/Kconfig" config IIO_DUMMY_EVGEN - tristate + tristate + select IRQ_WORK config IIO_SIMPLE_DUMMY tristate "An example driver with no hardware requirements" diff --git a/drivers/staging/iio/adc/lpc32xx_adc.c b/drivers/staging/iio/adc/lpc32xx_adc.c index d11c54b72186..b51f237cd817 100644 --- a/drivers/staging/iio/adc/lpc32xx_adc.c +++ b/drivers/staging/iio/adc/lpc32xx_adc.c @@ -76,7 +76,7 @@ static int lpc32xx_read_raw(struct iio_dev *indio_dev, if (mask == IIO_CHAN_INFO_RAW) { mutex_lock(&indio_dev->mlock); - clk_enable(info->clk); + clk_prepare_enable(info->clk); /* Measurement setup */ __raw_writel(AD_INTERNAL | (chan->address) | AD_REFp | AD_REFm, LPC32XX_ADC_SELECT(info->adc_base)); @@ -84,7 +84,7 @@ static int lpc32xx_read_raw(struct iio_dev *indio_dev, __raw_writel(AD_PDN_CTRL | AD_STROBE, LPC32XX_ADC_CTRL(info->adc_base)); wait_for_completion(&info->completion); /* set by ISR */ - clk_disable(info->clk); + clk_disable_unprepare(info->clk); *val = info->value; mutex_unlock(&indio_dev->mlock); diff --git a/drivers/staging/wilc1000/coreconfigurator.c b/drivers/staging/wilc1000/coreconfigurator.c index e10c6ffa698a..9568bdb6319b 100644 --- a/drivers/staging/wilc1000/coreconfigurator.c +++ b/drivers/staging/wilc1000/coreconfigurator.c @@ -13,12 +13,8 @@ #include "wilc_wlan.h" #include <linux/errno.h> #include <linux/slab.h> -#include <linux/etherdevice.h> #define TAG_PARAM_OFFSET (MAC_HDR_LEN + TIME_STAMP_LEN + \ BEACON_INTERVAL_LEN + CAP_INFO_LEN) -#define ADDR1 4 -#define ADDR2 10 -#define ADDR3 16 /* Basic Frame Type Codes (2-bit) */ enum basic_frame_type { @@ -175,32 +171,38 @@ static inline u8 get_from_ds(u8 *header) return ((header[1] & 0x02) >> 1); } +/* This function extracts the MAC Address in 'address1' field of the MAC */ +/* header and updates the MAC Address in the allocated 'addr' variable. */ +static inline void get_address1(u8 *pu8msa, u8 *addr) +{ + memcpy(addr, pu8msa + 4, 6); +} + +/* This function extracts the MAC Address in 'address2' field of the MAC */ +/* header and updates the MAC Address in the allocated 'addr' variable. */ +static inline void get_address2(u8 *pu8msa, u8 *addr) +{ + memcpy(addr, pu8msa + 10, 6); +} + +/* This function extracts the MAC Address in 'address3' field of the MAC */ +/* header and updates the MAC Address in the allocated 'addr' variable. 
*/ +static inline void get_address3(u8 *pu8msa, u8 *addr) +{ + memcpy(addr, pu8msa + 16, 6); +} + /* This function extracts the BSSID from the incoming WLAN packet based on */ -/* the 'from ds' bit, and updates the MAC Address in the allocated 'data' */ +/* the 'from ds' bit, and updates the MAC Address in the allocated 'addr' */ /* variable. */ static inline void get_BSSID(u8 *data, u8 *bssid) { if (get_from_ds(data) == 1) - /* - * Extract the MAC Address in 'address2' field of the MAC - * header and update the MAC Address in the allocated 'data' - * variable. - */ - ether_addr_copy(data, bssid + ADDR2); + get_address2(data, bssid); else if (get_to_ds(data) == 1) - /* - * Extract the MAC Address in 'address1' field of the MAC - * header and update the MAC Address in the allocated 'data' - * variable. - */ - ether_addr_copy(data, bssid + ADDR1); + get_address1(data, bssid); else - /* - * Extract the MAC Address in 'address3' field of the MAC - * header and update the MAC Address in the allocated 'data' - * variable. - */ - ether_addr_copy(data, bssid + ADDR3); + get_address3(data, bssid); } /* This function extracts the SSID from a beacon/probe response frame */ diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 342a07c58d89..72204fbf2bb1 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -4074,6 +4074,17 @@ reject: return iscsit_add_reject(conn, ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf); } +static bool iscsi_target_check_conn_state(struct iscsi_conn *conn) +{ + bool ret; + + spin_lock_bh(&conn->state_lock); + ret = (conn->conn_state != TARG_CONN_STATE_LOGGED_IN); + spin_unlock_bh(&conn->state_lock); + + return ret; +} + int iscsi_target_rx_thread(void *arg) { int ret, rc; @@ -4091,7 +4102,7 @@ int iscsi_target_rx_thread(void *arg) * incoming iscsi/tcp socket I/O, and/or failing the connection. 
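 * (As the iscsi_target_nego.c hunk below shows, the login error path now
 * completes rx_login_comp before stopping the rx thread, so on wakeup this
 * thread must re-check that the connection actually reached
 * TARG_CONN_STATE_LOGGED_IN before entering the receive loop.)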
*/ rc = wait_for_completion_interruptible(&conn->rx_login_comp); - if (rc < 0) + if (rc < 0 || iscsi_target_check_conn_state(conn)) return 0; if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) { diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c index 5c964c09c89f..9fc9117d0f22 100644 --- a/drivers/target/iscsi/iscsi_target_nego.c +++ b/drivers/target/iscsi/iscsi_target_nego.c @@ -388,6 +388,7 @@ err: if (login->login_complete) { if (conn->rx_thread && conn->rx_thread_active) { send_sig(SIGINT, conn->rx_thread, 1); + complete(&conn->rx_login_comp); kthread_stop(conn->rx_thread); } if (conn->tx_thread && conn->tx_thread_active) { diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c index 51d1734d5390..2cbea2af7cd0 100644 --- a/drivers/target/iscsi/iscsi_target_parameters.c +++ b/drivers/target/iscsi/iscsi_target_parameters.c @@ -208,7 +208,7 @@ int iscsi_create_default_params(struct iscsi_param_list **param_list_ptr) if (!pl) { pr_err("Unable to allocate memory for" " struct iscsi_param_list.\n"); - return -1 ; + return -ENOMEM; } INIT_LIST_HEAD(&pl->param_list); INIT_LIST_HEAD(&pl->extra_response_list); @@ -578,7 +578,7 @@ int iscsi_copy_param_list( param_list = kzalloc(sizeof(struct iscsi_param_list), GFP_KERNEL); if (!param_list) { pr_err("Unable to allocate memory for struct iscsi_param_list.\n"); - return -1; + return -ENOMEM; } INIT_LIST_HEAD(¶m_list->param_list); INIT_LIST_HEAD(¶m_list->extra_response_list); @@ -629,7 +629,7 @@ int iscsi_copy_param_list( err_out: iscsi_release_param_list(param_list); - return -1; + return -ENOMEM; } static void iscsi_release_extra_responses(struct iscsi_param_list *param_list) @@ -729,7 +729,7 @@ static int iscsi_add_notunderstood_response( if (!extra_response) { pr_err("Unable to allocate memory for" " struct iscsi_extra_response.\n"); - return -1; + return -ENOMEM; } INIT_LIST_HEAD(&extra_response->er_list); @@ -1370,7 +1370,7 @@ int iscsi_decode_text_input( tmpbuf = kzalloc(length + 1, GFP_KERNEL); if (!tmpbuf) { pr_err("Unable to allocate %u + 1 bytes for tmpbuf.\n", length); - return -1; + return -ENOMEM; } memcpy(tmpbuf, textbuf, length); diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index 0b4b2a67d9f9..98698d875742 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -371,7 +371,8 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o return 0; } -static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success) +static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success, + int *post_ret) { unsigned char *buf, *addr; struct scatterlist *sg; @@ -437,7 +438,8 @@ sbc_execute_rw(struct se_cmd *cmd) cmd->data_direction); } -static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success) +static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success, + int *post_ret) { struct se_device *dev = cmd->se_dev; @@ -447,8 +449,10 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success) * sent to the backend driver. 
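 * (Setting *post_ret = 1 below tells target_complete_ok_work() to carry
 * on and queue a response; when the callback succeeds and leaves
 * *post_ret at 0, the completion path returns early instead, as the
 * target_core_transport.c hunks further down show.)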
*/ spin_lock_irq(&cmd->t_state_lock); - if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status) + if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status) { cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST; + *post_ret = 1; + } spin_unlock_irq(&cmd->t_state_lock); /* @@ -460,7 +464,8 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success) return TCM_NO_SENSE; } -static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success) +static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success, + int *post_ret) { struct se_device *dev = cmd->se_dev; struct scatterlist *write_sg = NULL, *sg; @@ -556,11 +561,11 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool succes if (block_size < PAGE_SIZE) { sg_set_page(&write_sg[i], m.page, block_size, - block_size); + m.piter.sg->offset + block_size); } else { sg_miter_next(&m); sg_set_page(&write_sg[i], m.page, block_size, - 0); + m.piter.sg->offset); } len -= block_size; i++; diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c index 273c72b2b83d..81a6b3e07687 100644 --- a/drivers/target/target_core_stat.c +++ b/drivers/target/target_core_stat.c @@ -246,7 +246,7 @@ static ssize_t target_stat_lu_prod_show(struct config_item *item, char *page) char str[sizeof(dev->t10_wwn.model)+1]; /* scsiLuProductId */ - for (i = 0; i < sizeof(dev->t10_wwn.vendor); i++) + for (i = 0; i < sizeof(dev->t10_wwn.model); i++) str[i] = ISPRINT(dev->t10_wwn.model[i]) ? dev->t10_wwn.model[i] : ' '; str[i] = '\0'; diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index 5b2820312310..28fb3016370f 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c @@ -130,6 +130,9 @@ void core_tmr_abort_task( if (tmr->ref_task_tag != ref_tag) continue; + if (!kref_get_unless_zero(&se_cmd->cmd_kref)) + continue; + printk("ABORT_TASK: Found referenced %s task_tag: %llu\n", se_cmd->se_tfo->get_fabric_name(), ref_tag); @@ -139,13 +142,15 @@ void core_tmr_abort_task( " skipping\n", ref_tag); spin_unlock(&se_cmd->t_state_lock); spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); + + target_put_sess_cmd(se_cmd); + goto out; } se_cmd->transport_state |= CMD_T_ABORTED; spin_unlock(&se_cmd->t_state_lock); list_del_init(&se_cmd->se_cmd_list); - kref_get(&se_cmd->cmd_kref); spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); cancel_work_sync(&se_cmd->work); diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 5bacc7b5ed6d..4fdcee2006d1 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -1658,7 +1658,7 @@ bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags) void transport_generic_request_failure(struct se_cmd *cmd, sense_reason_t sense_reason) { - int ret = 0; + int ret = 0, post_ret = 0; pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08llx" " CDB: 0x%02x\n", cmd, cmd->tag, cmd->t_task_cdb[0]); @@ -1680,7 +1680,7 @@ void transport_generic_request_failure(struct se_cmd *cmd, */ if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && cmd->transport_complete_callback) - cmd->transport_complete_callback(cmd, false); + cmd->transport_complete_callback(cmd, false, &post_ret); switch (sense_reason) { case TCM_NON_EXISTENT_LUN: @@ -2068,11 +2068,13 @@ static void target_complete_ok_work(struct work_struct *work) */ if (cmd->transport_complete_callback) { sense_reason_t rc; + bool caw = (cmd->se_cmd_flags & 
SCF_COMPARE_AND_WRITE); + bool zero_dl = !(cmd->data_length); + int post_ret = 0; - rc = cmd->transport_complete_callback(cmd, true); - if (!rc && !(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE_POST)) { - if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && - !cmd->data_length) + rc = cmd->transport_complete_callback(cmd, true, &post_ret); + if (!rc && !post_ret) { + if (caw && zero_dl) goto queue_rsp; return; @@ -2507,23 +2509,24 @@ out: EXPORT_SYMBOL(target_get_sess_cmd); static void target_release_cmd_kref(struct kref *kref) - __releases(&se_cmd->se_sess->sess_cmd_lock) { struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref); struct se_session *se_sess = se_cmd->se_sess; + unsigned long flags; + spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); if (list_empty(&se_cmd->se_cmd_list)) { - spin_unlock(&se_sess->sess_cmd_lock); + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); se_cmd->se_tfo->release_cmd(se_cmd); return; } if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) { - spin_unlock(&se_sess->sess_cmd_lock); + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); complete(&se_cmd->cmd_wait_comp); return; } list_del(&se_cmd->se_cmd_list); - spin_unlock(&se_sess->sess_cmd_lock); + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); se_cmd->se_tfo->release_cmd(se_cmd); } @@ -2539,8 +2542,7 @@ int target_put_sess_cmd(struct se_cmd *se_cmd) se_cmd->se_tfo->release_cmd(se_cmd); return 1; } - return kref_put_spinlock_irqsave(&se_cmd->cmd_kref, target_release_cmd_kref, - &se_sess->sess_cmd_lock); + return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref); } EXPORT_SYMBOL(target_put_sess_cmd); diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 937cebf76633..5e6d6cb348fc 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -638,7 +638,7 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data) if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) return 0; - if (!time_after(cmd->deadline, jiffies)) + if (!time_after(jiffies, cmd->deadline)) return 0; set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags); @@ -1101,8 +1101,6 @@ tcmu_parse_cdb(struct se_cmd *cmd) static const struct target_backend_ops tcmu_ops = { .name = "user", - .inquiry_prod = "USER", - .inquiry_rev = TCMU_VERSION, .owner = THIS_MODULE, .transport_flags = TRANSPORT_FLAG_PASSTHROUGH, .attach_hba = tcmu_attach_hba, diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig index c463c89b90ef..8cc4ac64a91c 100644 --- a/drivers/thermal/Kconfig +++ b/drivers/thermal/Kconfig @@ -382,7 +382,7 @@ endmenu config QCOM_SPMI_TEMP_ALARM tristate "Qualcomm SPMI PMIC Temperature Alarm" - depends on OF && (SPMI || COMPILE_TEST) && IIO + depends on OF && SPMI && IIO select REGMAP_SPMI help This enables a thermal sysfs driver for Qualcomm plug-and-play (QPNP) diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c index c8fe3cac2e0e..c5547bd711db 100644 --- a/drivers/thermal/imx_thermal.c +++ b/drivers/thermal/imx_thermal.c @@ -55,6 +55,7 @@ #define TEMPSENSE2_PANIC_VALUE_SHIFT 16 #define TEMPSENSE2_PANIC_VALUE_MASK 0xfff0000 +#define OCOTP_MEM0 0x0480 #define OCOTP_ANA1 0x04e0 /* The driver supports 1 passive trip point and 1 critical trip point */ @@ -64,12 +65,6 @@ enum imx_thermal_trip { IMX_TRIP_NUM, }; -/* - * It defines the temperature in millicelsius for passive trip point - * that will trigger cooling action when crossed. 
- */ -#define IMX_TEMP_PASSIVE 85000 - #define IMX_POLLING_DELAY 2000 /* millisecond */ #define IMX_PASSIVE_DELAY 1000 @@ -100,12 +95,14 @@ struct imx_thermal_data { u32 c1, c2; /* See formula in imx_get_sensor_data() */ int temp_passive; int temp_critical; + int temp_max; int alarm_temp; int last_temp; bool irq_enabled; int irq; struct clk *thermal_clk; const struct thermal_soc_data *socdata; + const char *temp_grade; }; static void imx_set_panic_temp(struct imx_thermal_data *data, @@ -285,10 +282,12 @@ static int imx_set_trip_temp(struct thermal_zone_device *tz, int trip, { struct imx_thermal_data *data = tz->devdata; + /* do not allow changing critical threshold */ if (trip == IMX_TRIP_CRITICAL) return -EPERM; - if (temp < 0 || temp > IMX_TEMP_PASSIVE) + /* do not allow passive to be set higher than critical */ + if (temp < 0 || temp > data->temp_critical) return -EINVAL; data->temp_passive = temp; @@ -404,17 +403,39 @@ static int imx_get_sensor_data(struct platform_device *pdev) data->c1 = temp64; data->c2 = n1 * data->c1 + 1000 * t1; - /* - * Set the default passive cooling trip point, - * can be changed from userspace. - */ - data->temp_passive = IMX_TEMP_PASSIVE; + /* use OTP for thermal grade */ + ret = regmap_read(map, OCOTP_MEM0, &val); + if (ret) { + dev_err(&pdev->dev, "failed to read temp grade: %d\n", ret); + return ret; + } + + /* The maximum die temp is specified by the Temperature Grade */ + switch ((val >> 6) & 0x3) { + case 0: /* Commercial (0 to 95C) */ + data->temp_grade = "Commercial"; + data->temp_max = 95000; + break; + case 1: /* Extended Commercial (-20 to 105C) */ + data->temp_grade = "Extended Commercial"; + data->temp_max = 105000; + break; + case 2: /* Industrial (-40 to 105C) */ + data->temp_grade = "Industrial"; + data->temp_max = 105000; + break; + case 3: /* Automotive (-40 to 125C) */ + data->temp_grade = "Automotive"; + data->temp_max = 125000; + break; + } /* - * The maximum die temperature set to 20 C higher than - * IMX_TEMP_PASSIVE. 
+ * Set the critical trip point at 5C under max + * Set the passive trip point at 10C under max (can change via sysfs) */ - data->temp_critical = 1000 * 20 + data->temp_passive; + data->temp_critical = data->temp_max - (1000 * 5); + data->temp_passive = data->temp_max - (1000 * 10); return 0; } @@ -551,6 +572,11 @@ static int imx_thermal_probe(struct platform_device *pdev) return ret; } + dev_info(&pdev->dev, "%s CPU temperature grade - max:%dC" + " critical:%dC passive:%dC\n", data->temp_grade, + data->temp_max / 1000, data->temp_critical / 1000, + data->temp_passive / 1000); + /* Enable measurements at ~ 10 Hz */ regmap_write(map, TEMPSENSE1 + REG_CLR, TEMPSENSE1_MEASURE_FREQ); measure_freq = DIV_ROUND_UP(32768, 10); /* 10 Hz */ diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c index 42b7d4253b94..be4eedcb839a 100644 --- a/drivers/thermal/of-thermal.c +++ b/drivers/thermal/of-thermal.c @@ -964,7 +964,7 @@ void of_thermal_destroy_zones(void) np = of_find_node_by_name(NULL, "thermal-zones"); if (!np) { - pr_err("unable to find thermal zones\n"); + pr_debug("unable to find thermal zones\n"); return; } diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c index f0fbea386869..1246aa6fcab0 100644 --- a/drivers/thermal/power_allocator.c +++ b/drivers/thermal/power_allocator.c @@ -174,7 +174,6 @@ static void estimate_pid_constants(struct thermal_zone_device *tz, /** * pid_controller() - PID controller * @tz: thermal zone we are operating in - * @current_temp: the current temperature in millicelsius * @control_temp: the target temperature in millicelsius * @max_allocatable_power: maximum allocatable power for this thermal zone * @@ -191,7 +190,6 @@ static void estimate_pid_constants(struct thermal_zone_device *tz, * Return: The power budget for the next period. 
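 * (current_temp is gone from the signature: the governor now reads the
 * zone's cached tz->temperature, so the proportional error is simply
 * control_temp - tz->temperature, and the redundant
 * thermal_zone_get_temp() call per throttle pass is avoided.)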
*/ static u32 pid_controller(struct thermal_zone_device *tz, - int current_temp, int control_temp, u32 max_allocatable_power) { @@ -211,7 +209,7 @@ static u32 pid_controller(struct thermal_zone_device *tz, true); } - err = control_temp - current_temp; + err = control_temp - tz->temperature; err = int_to_frac(err); /* Calculate the proportional term */ @@ -332,7 +330,6 @@ static void divvy_up_power(u32 *req_power, u32 *max_power, int num_actors, } static int allocate_power(struct thermal_zone_device *tz, - int current_temp, int control_temp) { struct thermal_instance *instance; @@ -418,8 +415,7 @@ static int allocate_power(struct thermal_zone_device *tz, i++; } - power_range = pid_controller(tz, current_temp, control_temp, - max_allocatable_power); + power_range = pid_controller(tz, control_temp, max_allocatable_power); divvy_up_power(weighted_req_power, max_power, num_actors, total_weighted_req_power, power_range, granted_power, @@ -444,8 +440,8 @@ static int allocate_power(struct thermal_zone_device *tz, trace_thermal_power_allocator(tz, req_power, total_req_power, granted_power, total_granted_power, num_actors, power_range, - max_allocatable_power, current_temp, - control_temp - current_temp); + max_allocatable_power, tz->temperature, + control_temp - tz->temperature); kfree(req_power); unlock: @@ -612,7 +608,7 @@ static void power_allocator_unbind(struct thermal_zone_device *tz) static int power_allocator_throttle(struct thermal_zone_device *tz, int trip) { int ret; - int switch_on_temp, control_temp, current_temp; + int switch_on_temp, control_temp; struct power_allocator_params *params = tz->governor_data; /* @@ -622,15 +618,9 @@ static int power_allocator_throttle(struct thermal_zone_device *tz, int trip) if (trip != params->trip_max_desired_temperature) return 0; - ret = thermal_zone_get_temp(tz, ¤t_temp); - if (ret) { - dev_warn(&tz->device, "Failed to get temperature: %d\n", ret); - return ret; - } - ret = tz->ops->get_trip_temp(tz, params->trip_switch_on, &switch_on_temp); - if (!ret && (current_temp < switch_on_temp)) { + if (!ret && (tz->temperature < switch_on_temp)) { tz->passive = 0; reset_pid_controller(params); allow_maximum_power(tz); @@ -648,7 +638,7 @@ static int power_allocator_throttle(struct thermal_zone_device *tz, int trip) return ret; } - return allocate_power(tz, current_temp, control_temp); + return allocate_power(tz, control_temp); } static struct thermal_governor thermal_gov_power_allocator = { diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c index 5d4ae7d705e0..13d01edc7a04 100644 --- a/drivers/thermal/rcar_thermal.c +++ b/drivers/thermal/rcar_thermal.c @@ -361,6 +361,24 @@ static irqreturn_t rcar_thermal_irq(int irq, void *data) /* * platform functions */ +static int rcar_thermal_remove(struct platform_device *pdev) +{ + struct rcar_thermal_common *common = platform_get_drvdata(pdev); + struct device *dev = &pdev->dev; + struct rcar_thermal_priv *priv; + + rcar_thermal_for_each_priv(priv, common) { + if (rcar_has_irq_support(priv)) + rcar_thermal_irq_disable(priv); + thermal_zone_device_unregister(priv->zone); + } + + pm_runtime_put(dev); + pm_runtime_disable(dev); + + return 0; +} + static int rcar_thermal_probe(struct platform_device *pdev) { struct rcar_thermal_common *common; @@ -377,6 +395,8 @@ static int rcar_thermal_probe(struct platform_device *pdev) if (!common) return -ENOMEM; + platform_set_drvdata(pdev, common); + INIT_LIST_HEAD(&common->head); spin_lock_init(&common->lock); common->dev = dev; @@ -454,43 +474,16 @@ 
static int rcar_thermal_probe(struct platform_device *pdev) rcar_thermal_common_write(common, ENR, enr_bits); } - platform_set_drvdata(pdev, common); - dev_info(dev, "%d sensor probed\n", i); return 0; error_unregister: - rcar_thermal_for_each_priv(priv, common) { - if (rcar_has_irq_support(priv)) - rcar_thermal_irq_disable(priv); - thermal_zone_device_unregister(priv->zone); - } - - pm_runtime_put(dev); - pm_runtime_disable(dev); + rcar_thermal_remove(pdev); return ret; } -static int rcar_thermal_remove(struct platform_device *pdev) -{ - struct rcar_thermal_common *common = platform_get_drvdata(pdev); - struct device *dev = &pdev->dev; - struct rcar_thermal_priv *priv; - - rcar_thermal_for_each_priv(priv, common) { - if (rcar_has_irq_support(priv)) - rcar_thermal_irq_disable(priv); - thermal_zone_device_unregister(priv->zone); - } - - pm_runtime_put(dev); - pm_runtime_disable(dev); - - return 0; -} - static const struct of_device_id rcar_thermal_dt_ids[] = { { .compatible = "renesas,rcar-thermal", }, {},
diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c index 9787e8aa509f..e845841ab036 100644 --- a/drivers/thermal/rockchip_thermal.c +++ b/drivers/thermal/rockchip_thermal.c @@ -1,6 +1,9 @@ /* * Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd * + * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd + * Caesar Wang <wxt@rock-chips.com> + * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. @@ -45,17 +48,50 @@ enum tshut_polarity { }; /** - * The system has three Temperature Sensors. channel 0 is reserved, - * channel 1 is for CPU, and channel 2 is for GPU. + * The system has two Temperature Sensors. + * sensor0 is for CPU, and sensor1 is for GPU. */ enum sensor_id { - SENSOR_CPU = 1, + SENSOR_CPU = 0, SENSOR_GPU, }; +/** + * The conversion table maps ADC value to temperature. + * ADC_DECREMENT: the ADC value decrements through the table (e.g. v2_code_table). + * ADC_INCREMENT: the ADC value increments through the table (e.g. v3_code_table). + */ +enum adc_sort_mode { + ADC_DECREMENT = 0, + ADC_INCREMENT, +}; + +/** + * At most two sensors are present in Rockchip SoCs: + * one for the CPU and one for the GPU.
+ */ +#define SOC_MAX_SENSORS 2 + +struct chip_tsadc_table { + const struct tsadc_table *id; + + /* the array table size*/ + unsigned int length; + + /* that analogic mask data */ + u32 data_mask; + + /* the sort mode is adc value that increment or decrement in table */ + enum adc_sort_mode mode; +}; + struct rockchip_tsadc_chip { + /* The sensor id of chip correspond to the ADC channel */ + int chn_id[SOC_MAX_SENSORS]; + int chn_num; + /* The hardware-controlled tshut property */ - long tshut_temp; + int tshut_temp; enum tshut_mode tshut_mode; enum tshut_polarity tshut_polarity; @@ -65,37 +101,40 @@ struct rockchip_tsadc_chip { void (*control)(void __iomem *reg, bool on); /* Per-sensor methods */ - int (*get_temp)(int chn, void __iomem *reg, int *temp); - void (*set_tshut_temp)(int chn, void __iomem *reg, long temp); + int (*get_temp)(struct chip_tsadc_table table, + int chn, void __iomem *reg, int *temp); + void (*set_tshut_temp)(struct chip_tsadc_table table, + int chn, void __iomem *reg, int temp); void (*set_tshut_mode)(int chn, void __iomem *reg, enum tshut_mode m); + + /* Per-table methods */ + struct chip_tsadc_table table; }; struct rockchip_thermal_sensor { struct rockchip_thermal_data *thermal; struct thermal_zone_device *tzd; - enum sensor_id id; + int id; }; -#define NUM_SENSORS 2 /* Ignore unused sensor 0 */ - struct rockchip_thermal_data { const struct rockchip_tsadc_chip *chip; struct platform_device *pdev; struct reset_control *reset; - struct rockchip_thermal_sensor sensors[NUM_SENSORS]; + struct rockchip_thermal_sensor sensors[SOC_MAX_SENSORS]; struct clk *clk; struct clk *pclk; void __iomem *regs; - long tshut_temp; + int tshut_temp; enum tshut_mode tshut_mode; enum tshut_polarity tshut_polarity; }; -/* TSADC V2 Sensor info define: */ +/* TSADC Sensor info define: */ #define TSADCV2_AUTO_CON 0x04 #define TSADCV2_INT_EN 0x08 #define TSADCV2_INT_PD 0x0c @@ -117,6 +156,8 @@ struct rockchip_thermal_data { #define TSADCV2_INT_PD_CLEAR_MASK ~BIT(8) #define TSADCV2_DATA_MASK 0xfff +#define TSADCV3_DATA_MASK 0x3ff + #define TSADCV2_HIGHT_INT_DEBOUNCE_COUNT 4 #define TSADCV2_HIGHT_TSHUT_DEBOUNCE_COUNT 4 #define TSADCV2_AUTO_PERIOD_TIME 250 /* msec */ @@ -124,7 +165,7 @@ struct rockchip_thermal_data { struct tsadc_table { u32 code; - long temp; + int temp; }; static const struct tsadc_table v2_code_table[] = { @@ -165,21 +206,61 @@ static const struct tsadc_table v2_code_table[] = { {3421, 125000}, }; -static u32 rk_tsadcv2_temp_to_code(long temp) +static const struct tsadc_table v3_code_table[] = { + {0, -40000}, + {106, -40000}, + {108, -35000}, + {110, -30000}, + {112, -25000}, + {114, -20000}, + {116, -15000}, + {118, -10000}, + {120, -5000}, + {122, 0}, + {124, 5000}, + {126, 10000}, + {128, 15000}, + {130, 20000}, + {132, 25000}, + {134, 30000}, + {136, 35000}, + {138, 40000}, + {140, 45000}, + {142, 50000}, + {144, 55000}, + {146, 60000}, + {148, 65000}, + {150, 70000}, + {152, 75000}, + {154, 80000}, + {156, 85000}, + {158, 90000}, + {160, 95000}, + {162, 100000}, + {163, 105000}, + {165, 110000}, + {167, 115000}, + {169, 120000}, + {171, 125000}, + {TSADCV3_DATA_MASK, 125000}, +}; + +static u32 rk_tsadcv2_temp_to_code(struct chip_tsadc_table table, + int temp) { int high, low, mid; low = 0; - high = ARRAY_SIZE(v2_code_table) - 1; + high = table.length - 1; mid = (high + low) / 2; - if (temp < v2_code_table[low].temp || temp > v2_code_table[high].temp) + if (temp < table.id[low].temp || temp > table.id[high].temp) return 0; while (low <= high) { - if (temp == 
v2_code_table[mid].temp) - return v2_code_table[mid].code; - else if (temp < v2_code_table[mid].temp) + if (temp == table.id[mid].temp) + return table.id[mid].code; + else if (temp < table.id[mid].temp) high = mid - 1; else low = mid + 1; @@ -189,29 +270,54 @@ static u32 rk_tsadcv2_temp_to_code(long temp) return 0; } -static int rk_tsadcv2_code_to_temp(u32 code, int *temp) +static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code, + int *temp) { unsigned int low = 1; - unsigned int high = ARRAY_SIZE(v2_code_table) - 1; + unsigned int high = table.length - 1; unsigned int mid = (low + high) / 2; unsigned int num; unsigned long denom; - BUILD_BUG_ON(ARRAY_SIZE(v2_code_table) < 2); - - code &= TSADCV2_DATA_MASK; - if (code < v2_code_table[high].code) - return -EAGAIN; /* Incorrect reading */ - - while (low <= high) { - if (code >= v2_code_table[mid].code && - code < v2_code_table[mid - 1].code) - break; - else if (code < v2_code_table[mid].code) - low = mid + 1; - else - high = mid - 1; - mid = (low + high) / 2; + WARN_ON(table.length < 2); + + switch (table.mode) { + case ADC_DECREMENT: + code &= table.data_mask; + if (code < table.id[high].code) + return -EAGAIN; /* Incorrect reading */ + + while (low <= high) { + if (code >= table.id[mid].code && + code < table.id[mid - 1].code) + break; + else if (code < table.id[mid].code) + low = mid + 1; + else + high = mid - 1; + + mid = (low + high) / 2; + } + break; + case ADC_INCREMENT: + code &= table.data_mask; + if (code < table.id[low].code) + return -EAGAIN; /* Incorrect reading */ + + while (low <= high) { + if (code >= table.id[mid - 1].code && + code < table.id[mid].code) + break; + else if (code > table.id[mid].code) + low = mid + 1; + else + high = mid - 1; + + mid = (low + high) / 2; + } + break; + default: + pr_err("invalid conversion table\n"); } /* @@ -220,24 +326,28 @@ static int rk_tsadcv2_code_to_temp(u32 code, int *temp) * temperature between 2 table entries is linear and interpolate * to produce less granular result. */ - num = v2_code_table[mid].temp - v2_code_table[mid - 1].temp; - num *= v2_code_table[mid - 1].code - code; - denom = v2_code_table[mid - 1].code - v2_code_table[mid].code; - *temp = v2_code_table[mid - 1].temp + (num / denom); + num = table.id[mid].temp - table.id[mid - 1].temp; + num *= abs(table.id[mid - 1].code - code); + denom = abs(table.id[mid - 1].code - table.id[mid].code); + *temp = table.id[mid - 1].temp + (num / denom); return 0; } /** - * rk_tsadcv2_initialize - initialize TASDC Controller - (1) Set TSADCV2_AUTO_PERIOD, configure the interleave between - every two accessing of TSADC in normal operation. - (2) Set TSADCV2_AUTO_PERIOD_HT, configure the interleave between - every two accessing of TSADC after the temperature is higher - than COM_SHUT or COM_INT. - (3) Set TSADCV2_HIGH_INT_DEBOUNCE and TSADC_HIGHT_TSHUT_DEBOUNCE, - if the temperature is higher than COMP_INT or COMP_SHUT for - "debounce" times, TSADC controller will generate interrupt or TSHUT. + * rk_tsadcv2_initialize - initialize TSADC Controller. + + * (1) Set TSADC_V2_AUTO_PERIOD: + * Configure the interleave between every two accessing of + * TSADC in normal operation. + + * (2) Set TSADCV2_AUTO_PERIOD_HT: + * Configure the interleave between every two accessing of + * TSADC after the temperature is higher than COM_SHUT or COM_INT.
+ * + * (3) Set TSADCV2_HIGH_INT_DEBOUNCE and TSADC_HIGHT_TSHUT_DEBOUNCE: + * If the temperature is higher than COMP_INT or COMP_SHUT for + * "debounce" times, TSADC controller will generate interrupt or TSHUT. */ static void rk_tsadcv2_initialize(void __iomem *regs, enum tshut_polarity tshut_polarity) @@ -279,20 +389,22 @@ static void rk_tsadcv2_control(void __iomem *regs, bool enable) writel_relaxed(val, regs + TSADCV2_AUTO_CON); } -static int rk_tsadcv2_get_temp(int chn, void __iomem *regs, int *temp) +static int rk_tsadcv2_get_temp(struct chip_tsadc_table table, + int chn, void __iomem *regs, int *temp) { u32 val; val = readl_relaxed(regs + TSADCV2_DATA(chn)); - return rk_tsadcv2_code_to_temp(val, temp); + return rk_tsadcv2_code_to_temp(table, val, temp); } -static void rk_tsadcv2_tshut_temp(int chn, void __iomem *regs, long temp) +static void rk_tsadcv2_tshut_temp(struct chip_tsadc_table table, + int chn, void __iomem *regs, int temp) { u32 tshut_value, val; - tshut_value = rk_tsadcv2_temp_to_code(temp); + tshut_value = rk_tsadcv2_temp_to_code(table, temp); writel_relaxed(tshut_value, regs + TSADCV2_COMP_SHUT(chn)); /* TSHUT will be valid */ @@ -318,6 +430,10 @@ static void rk_tsadcv2_tshut_mode(int chn, void __iomem *regs, } static const struct rockchip_tsadc_chip rk3288_tsadc_data = { + .chn_id[SENSOR_CPU] = 1, /* cpu sensor is channel 1 */ + .chn_id[SENSOR_GPU] = 2, /* gpu sensor is channel 2 */ + .chn_num = 2, /* two channels for tsadc */ + .tshut_mode = TSHUT_MODE_GPIO, /* default TSHUT via GPIO give PMIC */ .tshut_polarity = TSHUT_LOW_ACTIVE, /* default TSHUT LOW ACTIVE */ .tshut_temp = 95000, @@ -328,6 +444,37 @@ static const struct rockchip_tsadc_chip rk3288_tsadc_data = { .get_temp = rk_tsadcv2_get_temp, .set_tshut_temp = rk_tsadcv2_tshut_temp, .set_tshut_mode = rk_tsadcv2_tshut_mode, + + .table = { + .id = v2_code_table, + .length = ARRAY_SIZE(v2_code_table), + .data_mask = TSADCV2_DATA_MASK, + .mode = ADC_DECREMENT, + }, +}; + +static const struct rockchip_tsadc_chip rk3368_tsadc_data = { + .chn_id[SENSOR_CPU] = 0, /* cpu sensor is channel 0 */ + .chn_id[SENSOR_GPU] = 1, /* gpu sensor is channel 1 */ + .chn_num = 2, /* two channels for tsadc */ + + .tshut_mode = TSHUT_MODE_GPIO, /* default TSHUT via GPIO give PMIC */ + .tshut_polarity = TSHUT_LOW_ACTIVE, /* default TSHUT LOW ACTIVE */ + .tshut_temp = 95000, + + .initialize = rk_tsadcv2_initialize, + .irq_ack = rk_tsadcv2_irq_ack, + .control = rk_tsadcv2_control, + .get_temp = rk_tsadcv2_get_temp, + .set_tshut_temp = rk_tsadcv2_tshut_temp, + .set_tshut_mode = rk_tsadcv2_tshut_mode, + + .table = { + .id = v3_code_table, + .length = ARRAY_SIZE(v3_code_table), + .data_mask = TSADCV3_DATA_MASK, + .mode = ADC_INCREMENT, + }, }; static const struct of_device_id of_rockchip_thermal_match[] = { @@ -335,6 +482,10 @@ static const struct of_device_id of_rockchip_thermal_match[] = { .compatible = "rockchip,rk3288-tsadc", .data = (void *)&rk3288_tsadc_data, }, + { + .compatible = "rockchip,rk3368-tsadc", + .data = (void *)&rk3368_tsadc_data, + }, { /* end */ }, }; MODULE_DEVICE_TABLE(of, of_rockchip_thermal_match); @@ -357,7 +508,7 @@ static irqreturn_t rockchip_thermal_alarm_irq_thread(int irq, void *dev) thermal->chip->irq_ack(thermal->regs); - for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++) + for (i = 0; i < thermal->chip->chn_num; i++) thermal_zone_device_update(thermal->sensors[i].tzd); return IRQ_HANDLED; @@ -370,7 +521,8 @@ static int rockchip_thermal_get_temp(void *_sensor, int *out_temp) const struct rockchip_tsadc_chip 
*tsadc = sensor->thermal->chip; int retval; - retval = tsadc->get_temp(sensor->id, thermal->regs, out_temp); + retval = tsadc->get_temp(tsadc->table, + sensor->id, thermal->regs, out_temp); dev_dbg(&thermal->pdev->dev, "sensor %d - temp: %d, retval: %d\n", sensor->id, *out_temp, retval); @@ -389,7 +541,7 @@ static int rockchip_configure_from_dt(struct device *dev, if (of_property_read_u32(np, "rockchip,hw-tshut-temp", &shut_temp)) { dev_warn(dev, - "Missing tshut temp property, using default %ld\n", + "Missing tshut temp property, using default %d\n", thermal->chip->tshut_temp); thermal->tshut_temp = thermal->chip->tshut_temp; } else { @@ -397,7 +549,7 @@ static int rockchip_configure_from_dt(struct device *dev, } if (thermal->tshut_temp > INT_MAX) { - dev_err(dev, "Invalid tshut temperature specified: %ld\n", + dev_err(dev, "Invalid tshut temperature specified: %d\n", thermal->tshut_temp); return -ERANGE; } @@ -442,13 +594,14 @@ static int rockchip_thermal_register_sensor(struct platform_device *pdev, struct rockchip_thermal_data *thermal, struct rockchip_thermal_sensor *sensor, - enum sensor_id id) + int id) { const struct rockchip_tsadc_chip *tsadc = thermal->chip; int error; tsadc->set_tshut_mode(id, thermal->regs, thermal->tshut_mode); - tsadc->set_tshut_temp(id, thermal->regs, thermal->tshut_temp); + tsadc->set_tshut_temp(tsadc->table, id, thermal->regs, + thermal->tshut_temp); sensor->thermal = thermal; sensor->id = id; @@ -481,7 +634,7 @@ static int rockchip_thermal_probe(struct platform_device *pdev) const struct of_device_id *match; struct resource *res; int irq; - int i; + int i, j; int error; match = of_match_node(of_rockchip_thermal_match, np); @@ -556,22 +709,19 @@ static int rockchip_thermal_probe(struct platform_device *pdev) thermal->chip->initialize(thermal->regs, thermal->tshut_polarity); - error = rockchip_thermal_register_sensor(pdev, thermal, - &thermal->sensors[0], - SENSOR_CPU); - if (error) { - dev_err(&pdev->dev, - "failed to register CPU thermal sensor: %d\n", error); - goto err_disable_pclk; - } - - error = rockchip_thermal_register_sensor(pdev, thermal, - &thermal->sensors[1], - SENSOR_GPU); - if (error) { - dev_err(&pdev->dev, - "failed to register GPU thermal sensor: %d\n", error); - goto err_unregister_cpu_sensor; + for (i = 0; i < thermal->chip->chn_num; i++) { + error = rockchip_thermal_register_sensor(pdev, thermal, + &thermal->sensors[i], + thermal->chip->chn_id[i]); + if (error) { + dev_err(&pdev->dev, + "failed to register sensor[%d] : error = %d\n", + i, error); + for (j = 0; j < i; j++) + thermal_zone_of_sensor_unregister(&pdev->dev, + thermal->sensors[j].tzd); + goto err_disable_pclk; + } } error = devm_request_threaded_irq(&pdev->dev, irq, NULL, @@ -581,22 +731,23 @@ static int rockchip_thermal_probe(struct platform_device *pdev) if (error) { dev_err(&pdev->dev, "failed to request tsadc irq: %d\n", error); - goto err_unregister_gpu_sensor; + goto err_unregister_sensor; } thermal->chip->control(thermal->regs, true); - for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++) + for (i = 0; i < thermal->chip->chn_num; i++) rockchip_thermal_toggle_sensor(&thermal->sensors[i], true); platform_set_drvdata(pdev, thermal); return 0; -err_unregister_gpu_sensor: - thermal_zone_of_sensor_unregister(&pdev->dev, thermal->sensors[1].tzd); -err_unregister_cpu_sensor: - thermal_zone_of_sensor_unregister(&pdev->dev, thermal->sensors[0].tzd); +err_unregister_sensor: + while (i--) + thermal_zone_of_sensor_unregister(&pdev->dev, + thermal->sensors[i].tzd); + 
err_disable_pclk: clk_disable_unprepare(thermal->pclk); err_disable_clk: @@ -610,7 +761,7 @@ static int rockchip_thermal_remove(struct platform_device *pdev) struct rockchip_thermal_data *thermal = platform_get_drvdata(pdev); int i; - for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++) { + for (i = 0; i < thermal->chip->chn_num; i++) { struct rockchip_thermal_sensor *sensor = &thermal->sensors[i]; rockchip_thermal_toggle_sensor(sensor, false); @@ -631,7 +782,7 @@ static int __maybe_unused rockchip_thermal_suspend(struct device *dev) struct rockchip_thermal_data *thermal = platform_get_drvdata(pdev); int i; - for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++) + for (i = 0; i < thermal->chip->chn_num; i++) rockchip_thermal_toggle_sensor(&thermal->sensors[i], false); thermal->chip->control(thermal->regs, false); @@ -663,18 +814,19 @@ static int __maybe_unused rockchip_thermal_resume(struct device *dev) thermal->chip->initialize(thermal->regs, thermal->tshut_polarity); - for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++) { - enum sensor_id id = thermal->sensors[i].id; + for (i = 0; i < thermal->chip->chn_num; i++) { + int id = thermal->sensors[i].id; thermal->chip->set_tshut_mode(id, thermal->regs, thermal->tshut_mode); - thermal->chip->set_tshut_temp(id, thermal->regs, + thermal->chip->set_tshut_temp(thermal->chip->table, + id, thermal->regs, thermal->tshut_temp); } thermal->chip->control(thermal->regs, true); - for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++) + for (i = 0; i < thermal->chip->chn_num; i++) rockchip_thermal_toggle_sensor(&thermal->sensors[i], true); pinctrl_pm_select_default_state(dev); diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index 13844261cd5f..ed776149261e 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c @@ -169,7 +169,7 @@ static inline int tty_copy_to_user(struct tty_struct *tty, { struct n_tty_data *ldata = tty->disc_data; - tty_audit_add_data(tty, to, n, ldata->icanon); + tty_audit_add_data(tty, from, n, ldata->icanon); return copy_to_user(to, from, n); } diff --git a/drivers/tty/serial/8250/8250_fsl.c b/drivers/tty/serial/8250/8250_fsl.c index c0533a57ec53..910bfee5a88b 100644 --- a/drivers/tty/serial/8250/8250_fsl.c +++ b/drivers/tty/serial/8250/8250_fsl.c @@ -60,3 +60,4 @@ int fsl8250_handle_irq(struct uart_port *port) spin_unlock_irqrestore(&up->port.lock, flags); return 1; } +EXPORT_SYMBOL_GPL(fsl8250_handle_irq); diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig index e6f5e12a2d83..6412f1455beb 100644 --- a/drivers/tty/serial/8250/Kconfig +++ b/drivers/tty/serial/8250/Kconfig @@ -373,6 +373,7 @@ config SERIAL_8250_MID depends on SERIAL_8250 && PCI select HSU_DMA if SERIAL_8250_DMA select HSU_DMA_PCI if X86_INTEL_MID + select RATIONAL help Selecting this option will enable handling of the extra features present on the UART found on Intel Medfield SOC and various other diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index 1aec4404062d..f38beb28e7ae 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig @@ -1539,7 +1539,6 @@ config SERIAL_FSL_LPUART tristate "Freescale lpuart serial port support" depends on HAS_DMA select SERIAL_CORE - select SERIAL_EARLYCON help Support for the on-chip lpuart on some Freescale SOCs. 
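The earlycon dependency moves under the console option because the early-console hook is compiled as part of the lpuart console support. For reference, this is the general shape of such a hook; a minimal sketch in which lpuart_early_write is a hypothetical stub (the compatible string follows the mainline lpuart binding):

#include <linux/console.h>
#include <linux/serial_core.h>

static void lpuart_early_write(struct console *con, const char *s, unsigned int n)
{
	/* poll-write n bytes to the already-mapped UART; body elided */
}

static int __init lpuart_early_console_setup(struct earlycon_device *device,
					     const char *opt)
{
	if (!device->port.membase)
		return -ENODEV;

	device->con->write = lpuart_early_write;
	return 0;
}
OF_EARLYCON_DECLARE(lpuart, "fsl,vf610-lpuart", lpuart_early_console_setup);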
@@ -1547,6 +1546,7 @@ config SERIAL_FSL_LPUART_CONSOLE bool "Console on Freescale lpuart serial port" depends on SERIAL_FSL_LPUART=y select SERIAL_CORE_CONSOLE + select SERIAL_EARLYCON help If you have enabled the lpuart serial port on the Freescale SoCs, you can make it the console by answering Y to this option. diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c index 681e0f3d5e0e..a1c0a89d9c7f 100644 --- a/drivers/tty/serial/bcm63xx_uart.c +++ b/drivers/tty/serial/bcm63xx_uart.c @@ -474,7 +474,7 @@ static int bcm_uart_startup(struct uart_port *port) /* register irq and enable rx interrupts */ ret = request_irq(port->irq, bcm_uart_interrupt, 0, - bcm_uart_type(port), port); + dev_name(port->dev), port); if (ret) return ret; bcm_uart_writel(port, UART_RX_INT_MASK, UART_IR_REG); diff --git a/drivers/tty/serial/etraxfs-uart.c b/drivers/tty/serial/etraxfs-uart.c index 6813e316e9ff..2f80bc7e44fb 100644 --- a/drivers/tty/serial/etraxfs-uart.c +++ b/drivers/tty/serial/etraxfs-uart.c @@ -894,7 +894,7 @@ static int etraxfs_uart_probe(struct platform_device *pdev) up->regi_ser = of_iomap(np, 0); up->port.dev = &pdev->dev; - up->gpios = mctrl_gpio_init(&pdev->dev, 0); + up->gpios = mctrl_gpio_init_noauto(&pdev->dev, 0); if (IS_ERR(up->gpios)) return PTR_ERR(up->gpios); diff --git a/drivers/tty/tty_audit.c b/drivers/tty/tty_audit.c index 90ca082935f6..3d245cd3d8e6 100644 --- a/drivers/tty/tty_audit.c +++ b/drivers/tty/tty_audit.c @@ -265,7 +265,7 @@ static struct tty_audit_buf *tty_audit_buf_get(struct tty_struct *tty, * * Audit @data of @size from @tty, if necessary. */ -void tty_audit_add_data(struct tty_struct *tty, unsigned char *data, +void tty_audit_add_data(struct tty_struct *tty, const void *data, size_t size, unsigned icanon) { struct tty_audit_buf *buf; diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index 0c41dbcb90b8..bcc8e1e8bb72 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c @@ -1282,18 +1282,22 @@ int tty_send_xchar(struct tty_struct *tty, char ch) int was_stopped = tty->stopped; if (tty->ops->send_xchar) { + down_read(&tty->termios_rwsem); tty->ops->send_xchar(tty, ch); + up_read(&tty->termios_rwsem); return 0; } if (tty_write_lock(tty, 0) < 0) return -ERESTARTSYS; + down_read(&tty->termios_rwsem); if (was_stopped) start_tty(tty); tty->ops->write(tty, &ch, 1); if (was_stopped) stop_tty(tty); + up_read(&tty->termios_rwsem); tty_write_unlock(tty); return 0; } diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c index 9c5aebfe7053..1445dd39aa62 100644 --- a/drivers/tty/tty_ioctl.c +++ b/drivers/tty/tty_ioctl.c @@ -1147,16 +1147,12 @@ int n_tty_ioctl_helper(struct tty_struct *tty, struct file *file, spin_unlock_irq(&tty->flow_lock); break; case TCIOFF: - down_read(&tty->termios_rwsem); if (STOP_CHAR(tty) != __DISABLED_CHAR) retval = tty_send_xchar(tty, STOP_CHAR(tty)); - up_read(&tty->termios_rwsem); break; case TCION: - down_read(&tty->termios_rwsem); if (START_CHAR(tty) != __DISABLED_CHAR) retval = tty_send_xchar(tty, START_CHAR(tty)); - up_read(&tty->termios_rwsem); break; default: return -EINVAL; diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c index 5af8f1874c1a..629e3c865072 100644 --- a/drivers/tty/tty_ldisc.c +++ b/drivers/tty/tty_ldisc.c @@ -592,7 +592,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc) /* Restart the work queue in case no characters kick it off. 
Safe if already running */ - schedule_work(&tty->port->buf.work); + tty_buffer_restart_work(tty->port); tty_unlock(tty); return retval; diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c index 6ccbf60cdd5c..5a048b7b92e8 100644 --- a/drivers/usb/chipidea/ci_hdrc_imx.c +++ b/drivers/usb/chipidea/ci_hdrc_imx.c @@ -84,6 +84,12 @@ struct ci_hdrc_imx_data { struct imx_usbmisc_data *usbmisc_data; bool supports_runtime_pm; bool in_lpm; + /* SoC before i.mx6 (except imx23/imx28) needs three clks */ + bool need_three_clks; + struct clk *clk_ipg; + struct clk *clk_ahb; + struct clk *clk_per; + /* --------------------------------- */ }; /* Common functions shared by usbmisc drivers */ @@ -135,6 +141,102 @@ static struct imx_usbmisc_data *usbmisc_get_init_data(struct device *dev) } /* End of common functions shared by usbmisc drivers*/ +static int imx_get_clks(struct device *dev) +{ + struct ci_hdrc_imx_data *data = dev_get_drvdata(dev); + int ret = 0; + + data->clk_ipg = devm_clk_get(dev, "ipg"); + if (IS_ERR(data->clk_ipg)) { + /* If the platform only needs one clock */ + data->clk = devm_clk_get(dev, NULL); + if (IS_ERR(data->clk)) { + ret = PTR_ERR(data->clk); + dev_err(dev, + "Failed to get clks, err=%ld,%ld\n", + PTR_ERR(data->clk), PTR_ERR(data->clk_ipg)); + return ret; + } + return ret; + } + + data->clk_ahb = devm_clk_get(dev, "ahb"); + if (IS_ERR(data->clk_ahb)) { + ret = PTR_ERR(data->clk_ahb); + dev_err(dev, + "Failed to get ahb clock, err=%d\n", ret); + return ret; + } + + data->clk_per = devm_clk_get(dev, "per"); + if (IS_ERR(data->clk_per)) { + ret = PTR_ERR(data->clk_per); + dev_err(dev, + "Failed to get per clock, err=%d\n", ret); + return ret; + } + + data->need_three_clks = true; + return ret; +} + +static int imx_prepare_enable_clks(struct device *dev) +{ + struct ci_hdrc_imx_data *data = dev_get_drvdata(dev); + int ret = 0; + + if (data->need_three_clks) { + ret = clk_prepare_enable(data->clk_ipg); + if (ret) { + dev_err(dev, + "Failed to prepare/enable ipg clk, err=%d\n", + ret); + return ret; + } + + ret = clk_prepare_enable(data->clk_ahb); + if (ret) { + dev_err(dev, + "Failed to prepare/enable ahb clk, err=%d\n", + ret); + clk_disable_unprepare(data->clk_ipg); + return ret; + } + + ret = clk_prepare_enable(data->clk_per); + if (ret) { + dev_err(dev, + "Failed to prepare/enable per clk, err=%d\n", + ret); + clk_disable_unprepare(data->clk_ahb); + clk_disable_unprepare(data->clk_ipg); + return ret; + } + } else { + ret = clk_prepare_enable(data->clk); + if (ret) { + dev_err(dev, + "Failed to prepare/enable clk, err=%d\n", + ret); + return ret; + } + } + + return ret; +} + +static void imx_disable_unprepare_clks(struct device *dev) +{ + struct ci_hdrc_imx_data *data = dev_get_drvdata(dev); + + if (data->need_three_clks) { + clk_disable_unprepare(data->clk_per); + clk_disable_unprepare(data->clk_ahb); + clk_disable_unprepare(data->clk_ipg); + } else { + clk_disable_unprepare(data->clk); + } +} static int ci_hdrc_imx_probe(struct platform_device *pdev) { @@ -145,31 +247,31 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev) .flags = CI_HDRC_SET_NON_ZERO_TTHA, }; int ret; - const struct of_device_id *of_id = - of_match_device(ci_hdrc_imx_dt_ids, &pdev->dev); - const struct ci_hdrc_imx_platform_flag *imx_platform_flag = of_id->data; + const struct of_device_id *of_id; + const struct ci_hdrc_imx_platform_flag *imx_platform_flag; + + of_id = of_match_device(ci_hdrc_imx_dt_ids, &pdev->dev); + if (!of_id) + return -ENODEV; + + imx_platform_flag
= of_id->data; data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; + platform_set_drvdata(pdev, data); data->usbmisc_data = usbmisc_get_init_data(&pdev->dev); if (IS_ERR(data->usbmisc_data)) return PTR_ERR(data->usbmisc_data); - data->clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(data->clk)) { - dev_err(&pdev->dev, - "Failed to get clock, err=%ld\n", PTR_ERR(data->clk)); - return PTR_ERR(data->clk); - } + ret = imx_get_clks(&pdev->dev); + if (ret) + return ret; - ret = clk_prepare_enable(data->clk); - if (ret) { - dev_err(&pdev->dev, - "Failed to prepare or enable clock, err=%d\n", ret); + ret = imx_prepare_enable_clks(&pdev->dev); + if (ret) return ret; - } data->phy = devm_usb_get_phy_by_phandle(&pdev->dev, "fsl,usbphy", 0); if (IS_ERR(data->phy)) { @@ -212,8 +314,6 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev) goto disable_device; } - platform_set_drvdata(pdev, data); - if (data->supports_runtime_pm) { pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); @@ -226,7 +326,7 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev) disable_device: ci_hdrc_remove_device(data->ci_pdev); err_clk: - clk_disable_unprepare(data->clk); + imx_disable_unprepare_clks(&pdev->dev); return ret; } @@ -240,7 +340,7 @@ static int ci_hdrc_imx_remove(struct platform_device *pdev) pm_runtime_put_noidle(&pdev->dev); } ci_hdrc_remove_device(data->ci_pdev); - clk_disable_unprepare(data->clk); + imx_disable_unprepare_clks(&pdev->dev); return 0; } @@ -252,7 +352,7 @@ static int imx_controller_suspend(struct device *dev) dev_dbg(dev, "at %s\n", __func__); - clk_disable_unprepare(data->clk); + imx_disable_unprepare_clks(dev); data->in_lpm = true; return 0; @@ -270,7 +370,7 @@ static int imx_controller_resume(struct device *dev) return 0; } - ret = clk_prepare_enable(data->clk); + ret = imx_prepare_enable_clks(dev); if (ret) return ret; @@ -285,7 +385,7 @@ static int imx_controller_resume(struct device *dev) return 0; clk_disable: - clk_disable_unprepare(data->clk); + imx_disable_unprepare_clks(dev); return ret; } diff --git a/drivers/usb/chipidea/debug.c b/drivers/usb/chipidea/debug.c index 080b7be3daf0..58c8485a0715 100644 --- a/drivers/usb/chipidea/debug.c +++ b/drivers/usb/chipidea/debug.c @@ -322,8 +322,10 @@ static ssize_t ci_role_write(struct file *file, const char __user *ubuf, return -EINVAL; pm_runtime_get_sync(ci->dev); + disable_irq(ci->irq); ci_role_stop(ci); ret = ci_role_start(ci, role); + enable_irq(ci->irq); pm_runtime_put_sync(ci->dev); return ret ? 
ret : count; diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c index 8223fe73ea85..391a1225b0ba 100644 --- a/drivers/usb/chipidea/udc.c +++ b/drivers/usb/chipidea/udc.c @@ -1751,6 +1751,22 @@ static int ci_udc_start(struct usb_gadget *gadget, return retval; } +static void ci_udc_stop_for_otg_fsm(struct ci_hdrc *ci) +{ + if (!ci_otg_is_fsm_mode(ci)) + return; + + mutex_lock(&ci->fsm.lock); + if (ci->fsm.otg->state == OTG_STATE_A_PERIPHERAL) { + ci->fsm.a_bidl_adis_tmout = 1; + ci_hdrc_otg_fsm_start(ci); + } else if (ci->fsm.otg->state == OTG_STATE_B_PERIPHERAL) { + ci->fsm.protocol = PROTO_UNDEF; + ci->fsm.otg->state = OTG_STATE_UNDEFINED; + } + mutex_unlock(&ci->fsm.lock); +} + /** * ci_udc_stop: unregister a gadget driver */ @@ -1775,6 +1791,7 @@ static int ci_udc_stop(struct usb_gadget *gadget) ci->driver = NULL; spin_unlock_irqrestore(&ci->lock, flags); + ci_udc_stop_for_otg_fsm(ci); return 0; } diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c index fcea4eb36eee..ab8b027e8cc8 100644 --- a/drivers/usb/chipidea/usbmisc_imx.c +++ b/drivers/usb/chipidea/usbmisc_imx.c @@ -500,7 +500,11 @@ static int usbmisc_imx_probe(struct platform_device *pdev) { struct resource *res; struct imx_usbmisc *data; - struct of_device_id *tmp_dev; + const struct of_device_id *of_id; + + of_id = of_match_device(usbmisc_imx_dt_ids, &pdev->dev); + if (!of_id) + return -ENODEV; data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); if (!data) @@ -513,9 +517,7 @@ static int usbmisc_imx_probe(struct platform_device *pdev) if (IS_ERR(data->base)) return PTR_ERR(data->base); - tmp_dev = (struct of_device_id *) - of_match_device(usbmisc_imx_dt_ids, &pdev->dev); - data->ops = (const struct usbmisc_ops *)tmp_dev->data; + data->ops = (const struct usbmisc_ops *)of_id->data; platform_set_drvdata(pdev, data); return 0; diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c index 433bbc34a8a4..071964c7847f 100644 --- a/drivers/usb/class/usblp.c +++ b/drivers/usb/class/usblp.c @@ -884,11 +884,11 @@ static int usblp_wwait(struct usblp *usblp, int nonblock) add_wait_queue(&usblp->wwait, &waita); for (;;) { - set_current_state(TASK_INTERRUPTIBLE); if (mutex_lock_interruptible(&usblp->mut)) { rc = -EINTR; break; } + set_current_state(TASK_INTERRUPTIBLE); rc = usblp_wtest(usblp, nonblock); mutex_unlock(&usblp->mut); if (rc <= 0) diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig index a99c89e78126..dd280108758f 100644 --- a/drivers/usb/core/Kconfig +++ b/drivers/usb/core/Kconfig @@ -77,8 +77,7 @@ config USB_OTG_BLACKLIST_HUB config USB_OTG_FSM tristate "USB 2.0 OTG FSM implementation" - depends on USB - select USB_OTG + depends on USB && USB_OTG select USB_PHY help Implements OTG Finite State Machine as specified in On-The-Go diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c index e79baf73c234..571c21727ff9 100644 --- a/drivers/usb/dwc2/hcd.c +++ b/drivers/usb/dwc2/hcd.c @@ -324,12 +324,13 @@ void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg) */ static void dwc2_hcd_rem_wakeup(struct dwc2_hsotg *hsotg) { - if (hsotg->lx_state == DWC2_L2) { + if (hsotg->bus_suspended) { hsotg->flags.b.port_suspend_change = 1; usb_hcd_resume_root_hub(hsotg->priv); - } else { - hsotg->flags.b.port_l1_change = 1; } + + if (hsotg->lx_state == DWC2_L1) + hsotg->flags.b.port_l1_change = 1; } /** @@ -1428,8 +1429,8 @@ static void dwc2_wakeup_detected(unsigned long data) dev_dbg(hsotg->dev, "Clear Resume: HPRT0=%0x\n", dwc2_readl(hsotg->regs + HPRT0)); - 
hsotg->bus_suspended = 0; dwc2_hcd_rem_wakeup(hsotg); + hsotg->bus_suspended = 0; /* Change to L0 state */ hsotg->lx_state = DWC2_L0; diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c index 5859b0fa19ee..e61d773cf65e 100644 --- a/drivers/usb/dwc2/platform.c +++ b/drivers/usb/dwc2/platform.c @@ -108,7 +108,8 @@ static const struct dwc2_core_params params_rk3066 = { .host_ls_low_power_phy_clk = -1, .ts_dline = -1, .reload_ctl = -1, - .ahbcfg = 0x7, /* INCR16 */ + .ahbcfg = GAHBCFG_HBSTLEN_INCR16 << + GAHBCFG_HBSTLEN_SHIFT, .uframe_sched = -1, .external_id_pin_ctl = -1, .hibernation = -1, diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index 77a622cb48ab..009d83048c8c 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c @@ -34,6 +34,8 @@ #define PCI_DEVICE_ID_INTEL_BSW 0x22b7 #define PCI_DEVICE_ID_INTEL_SPTLP 0x9d30 #define PCI_DEVICE_ID_INTEL_SPTH 0xa130 +#define PCI_DEVICE_ID_INTEL_BXT 0x0aaa +#define PCI_DEVICE_ID_INTEL_APL 0x5aaa static const struct acpi_gpio_params reset_gpios = { 0, 0, false }; static const struct acpi_gpio_params cs_gpios = { 1, 0, false }; @@ -210,6 +212,8 @@ static const struct pci_device_id dwc3_pci_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MRFLD), }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTLP), }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTH), }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT), }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_APL), }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), }, { } /* Terminating Entry */ }; diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 55ba447fdf8b..e24a01cc98df 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -2744,12 +2744,34 @@ int dwc3_gadget_init(struct dwc3 *dwc) } dwc->gadget.ops = &dwc3_gadget_ops; - dwc->gadget.max_speed = USB_SPEED_SUPER; dwc->gadget.speed = USB_SPEED_UNKNOWN; dwc->gadget.sg_supported = true; dwc->gadget.name = "dwc3-gadget"; /* + * FIXME: We might be setting max_speed to <SUPER, however versions + * <2.20a of dwc3 have an issue with metastability (documented + * elsewhere in this driver) which tells us we can't set max speed to + * anything lower than SUPER. + * + * Because gadget.max_speed is only used by composite.c and function + * drivers (i.e. it won't go into dwc3's registers) we are allowing this + * to happen so we avoid sending SuperSpeed Capability descriptor + * together with our BOS descriptor as that could confuse the host into + * thinking we can handle super speed. + * + * Note that, in fact, we won't even support GetBOS requests when speed + * is less than super speed because we don't have the means yet to tell + * composite.c that we are USB 2.0 + LPM ECN. + */ + if (dwc->revision < DWC3_REVISION_220A) + dwc3_trace(trace_dwc3_gadget, + "Changing max_speed on rev %08x\n", + dwc->revision); + + dwc->gadget.max_speed = dwc->maximum_speed; + + /* * Per databook, DWC3 needs buffer size to be aligned to MaxPacketSize * on ep out.
*/ diff --git a/drivers/usb/gadget/function/f_loopback.c b/drivers/usb/gadget/function/f_loopback.c index 23933bdf2d9d..ddc3aad886b7 100644 --- a/drivers/usb/gadget/function/f_loopback.c +++ b/drivers/usb/gadget/function/f_loopback.c @@ -329,7 +329,7 @@ static int alloc_requests(struct usb_composite_dev *cdev, for (i = 0; i < loop->qlen && result == 0; i++) { result = -ENOMEM; - in_req = usb_ep_alloc_request(loop->in_ep, GFP_KERNEL); + in_req = usb_ep_alloc_request(loop->in_ep, GFP_ATOMIC); if (!in_req) goto fail; diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c index f0f2b066ac08..f92f5aff0dd5 100644 --- a/drivers/usb/gadget/udc/atmel_usba_udc.c +++ b/drivers/usb/gadget/udc/atmel_usba_udc.c @@ -1633,7 +1633,7 @@ static irqreturn_t usba_udc_irq(int irq, void *devid) spin_lock(&udc->lock); int_enb = usba_int_enb_get(udc); - status = usba_readl(udc, INT_STA) & int_enb; + status = usba_readl(udc, INT_STA) & (int_enb | USBA_HIGH_SPEED); DBG(DBG_INT, "irq, status=%#08x\n", status); if (status & USBA_DET_SUSPEND) { diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index 5d2d7e954bd4..0230965fb78c 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -782,12 +782,15 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd, status |= USB_PORT_STAT_SUSPEND; } } - if ((raw_port_status & PORT_PLS_MASK) == XDEV_U0 - && (raw_port_status & PORT_POWER) - && (bus_state->suspended_ports & (1 << wIndex))) { - bus_state->suspended_ports &= ~(1 << wIndex); - if (hcd->speed < HCD_USB3) - bus_state->port_c_suspend |= 1 << wIndex; + if ((raw_port_status & PORT_PLS_MASK) == XDEV_U0 && + (raw_port_status & PORT_POWER)) { + if (bus_state->suspended_ports & (1 << wIndex)) { + bus_state->suspended_ports &= ~(1 << wIndex); + if (hcd->speed < HCD_USB3) + bus_state->port_c_suspend |= 1 << wIndex; + } + bus_state->resume_done[wIndex] = 0; + clear_bit(wIndex, &bus_state->resuming_ports); } if (raw_port_status & PORT_CONNECT) { status |= USB_PORT_STAT_CONNECTION; diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index fa836251ca21..6c5e8133cf87 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -3896,28 +3896,6 @@ cleanup: return ret; } -static int ep_ring_is_processing(struct xhci_hcd *xhci, - int slot_id, unsigned int ep_index) -{ - struct xhci_virt_device *xdev; - struct xhci_ring *ep_ring; - struct xhci_ep_ctx *ep_ctx; - struct xhci_virt_ep *xep; - dma_addr_t hw_deq; - - xdev = xhci->devs[slot_id]; - xep = &xhci->devs[slot_id]->eps[ep_index]; - ep_ring = xep->ring; - ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); - - if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) != EP_STATE_RUNNING) - return 0; - - hw_deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK; - return (hw_deq != - xhci_trb_virt_to_dma(ep_ring->enq_seg, ep_ring->enqueue)); -} - /* * Check transfer ring to guarantee there is enough room for the urb. * Update ISO URB start_frame and interval. @@ -3983,10 +3961,12 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags, } /* Calculate the start frame and put it in urb->start_frame. 
*/ - if (HCC_CFC(xhci->hcc_params) && - ep_ring_is_processing(xhci, slot_id, ep_index)) { - urb->start_frame = xep->next_frame_id; - goto skip_start_over; + if (HCC_CFC(xhci->hcc_params) && !list_empty(&ep_ring->td_list)) { + if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) == + EP_STATE_RUNNING) { + urb->start_frame = xep->next_frame_id; + goto skip_start_over; + } } start_frame = readl(&xhci->run_regs->microframe_index); diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 6e7dc6f93978..dfa44d3e8eee 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -175,6 +175,16 @@ int xhci_reset(struct xhci_hcd *xhci) command |= CMD_RESET; writel(command, &xhci->op_regs->command); + /* Existing Intel xHCI controllers require a delay of 1 ms + * after setting the CMD_RESET bit and before accessing any + * HC registers. This allows the HC to complete the + * reset operation and be ready for HC register access. + * Without this delay, the subsequent HC register access + * may very rarely result in a system hang. + */ + if (xhci->quirks & XHCI_INTEL_HOST) + udelay(1000); + ret = xhci_handshake(&xhci->op_regs->command, CMD_RESET, 0, 10 * 1000 * 1000); if (ret) diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index ba13529cbd52..18cfc0a361cb 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c @@ -132,7 +132,7 @@ static inline struct musb *dev_to_musb(struct device *dev) /*-------------------------------------------------------------------------*/ #ifndef CONFIG_BLACKFIN -static int musb_ulpi_read(struct usb_phy *phy, u32 offset) +static int musb_ulpi_read(struct usb_phy *phy, u32 reg) { void __iomem *addr = phy->io_priv; int i = 0; @@ -151,7 +151,7 @@ static int musb_ulpi_read(struct usb_phy *phy, u32 offset) * ULPICarKitControlDisableUTMI after clearing POWER_SUSPENDM. */ - musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)offset); + musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)reg); musb_writeb(addr, MUSB_ULPI_REG_CONTROL, MUSB_ULPI_REG_REQ | MUSB_ULPI_RDN_WR); @@ -176,7 +176,7 @@ out: return ret; } -static int musb_ulpi_write(struct usb_phy *phy, u32 offset, u32 data) +static int musb_ulpi_write(struct usb_phy *phy, u32 val, u32 reg) { void __iomem *addr = phy->io_priv; int i = 0; @@ -191,8 +191,8 @@ static int musb_ulpi_write(struct usb_phy *phy, u32 offset, u32 data) power &= ~MUSB_POWER_SUSPENDM; musb_writeb(addr, MUSB_POWER, power); - musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)offset); - musb_writeb(addr, MUSB_ULPI_REG_DATA, (u8)data); + musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)reg); + musb_writeb(addr, MUSB_ULPI_REG_DATA, (u8)val); musb_writeb(addr, MUSB_ULPI_REG_CONTROL, MUSB_ULPI_REG_REQ); while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL)
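The musb_ulpi_read()/musb_ulpi_write() renames just above are not cosmetic: these functions are registered as generic USB PHY I/O callbacks, and in that contract the write callback takes the value before the register, so the old (offset, data) naming had the two arguments transposed. The shape of the contract, as it stands in include/linux/usb/phy.h at this point in the series:

struct usb_phy_io_ops {
	int (*read)(struct usb_phy *x, u32 reg);
	int (*write)(struct usb_phy *x, u32 val, u32 reg);
};

/* With the parameters named to match, the register/value writes in
 * musb_ulpi_write() line up the right way around:
 *
 *	musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)reg);
 *	musb_writeb(addr, MUSB_ULPI_REG_DATA, (u8)val);
 */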
@@ -1668,7 +1668,7 @@ EXPORT_SYMBOL_GPL(musb_interrupt); static bool use_dma = 1; /* "modprobe ... use_dma=0" etc */ -module_param(use_dma, bool, 0); +module_param(use_dma, bool, 0644); MODULE_PARM_DESC(use_dma, "enable/disable use of DMA"); void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit) diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c index 26c65e66cc0f..795a45b1b25b 100644 --- a/drivers/usb/musb/musb_host.c +++ b/drivers/usb/musb/musb_host.c @@ -112,22 +112,32 @@ static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep) struct musb *musb = ep->musb; void __iomem *epio = ep->regs; u16 csr; - u16 lastcsr = 0; int retries = 1000; csr = musb_readw(epio, MUSB_TXCSR); while (csr & MUSB_TXCSR_FIFONOTEMPTY) { - if (csr != lastcsr) - dev_dbg(musb->controller, "Host TX FIFONOTEMPTY csr: %02x\n", csr); - lastcsr = csr; csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_TXPKTRDY; musb_writew(epio, MUSB_TXCSR, csr); csr = musb_readw(epio, MUSB_TXCSR); + + /* + * FIXME: sometimes the tx fifo flush fails; this has been + * observed during device disconnect on AM335x. + * + * To reproduce the issue, ensure tx urb(s) are queued when + * unplugging the usb device connected to the AM335x usb + * host port. + * + * Using a usb-ethernet device and running iperf + * (client on AM335x) has a very high chance of triggering it. + * + * Turn on dev_dbg() in musb_cleanup_urb() with CPPI + * enabled to see the issue when aborting the tx channel. + */ + if (dev_WARN_ONCE(musb->controller, retries-- < 1, "Could not flush host TX%d fifo: csr: %04x\n", ep->epnum, csr)) return; - mdelay(1); } } diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig index 173132416170..22e8ecb6bfbd 100644 --- a/drivers/usb/phy/Kconfig +++ b/drivers/usb/phy/Kconfig @@ -21,7 +21,6 @@ config AB8500_USB config FSL_USB2_OTG bool "Freescale USB OTG Transceiver Driver" depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_OTG_FSM && PM - select USB_OTG select USB_PHY help Enable this to support Freescale USB OTG transceiver.
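Two of the hunks above reward a closer look. First, the usblp_wwait() change moves set_current_state() after the interruptible mutex acquisition: mutex_lock_interruptible() may sleep, which both trips the might_sleep check when the task is already TASK_INTERRUPTIBLE and resets the state to TASK_RUNNING behind the caller's back. A hedged sketch of the corrected loop shape (names simplified; condition_test() is a stand-in for usblp_wtest(), and the driver's signal and timeout handling is omitted):

static DECLARE_WAIT_QUEUE_HEAD(waitq);
static DEFINE_MUTEX(lock);

static int wait_for_ready(int nonblock)
{
	DECLARE_WAITQUEUE(wait, current);
	int rc;

	add_wait_queue(&waitq, &wait);
	for (;;) {
		/* may sleep: must run while we are still TASK_RUNNING */
		if (mutex_lock_interruptible(&lock)) {
			rc = -EINTR;
			break;
		}
		/* only now publish the intent to sleep */
		set_current_state(TASK_INTERRUPTIBLE);
		rc = condition_test(nonblock);	/* usblp_wtest() in the driver */
		mutex_unlock(&lock);
		if (rc <= 0)
			break;
		schedule();
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&waitq, &wait);
	return rc;
}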
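Second, the musb_h_tx_flush_fifo() rewrite drops the per-iteration mdelay() and the repetitive FIFONOTEMPTY debug print in favour of a single one-shot warning once the retry budget runs out. Reduced to its skeleton, and with the caveat that this is a simplified sketch rather than the driver's exact code, the bounded-flush pattern reads:

static void flush_tx_fifo(void __iomem *epio)
{
	int retries = 1000;
	u16 csr = musb_readw(epio, MUSB_TXCSR);

	while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
		csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_TXPKTRDY;
		musb_writew(epio, MUSB_TXCSR, csr);
		csr = musb_readw(epio, MUSB_TXCSR);

		/* warn once, then give up rather than spin forever */
		if (WARN_ONCE(retries-- < 1,
			      "could not flush TX fifo, csr %04x\n", csr))
			return;
	}
}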
@@ -168,8 +167,7 @@ config USB_MV_OTG tristate "Marvell USB OTG support" - depends on USB_EHCI_MV && USB_MV_UDC && PM - select USB_OTG + depends on USB_EHCI_MV && USB_MV_UDC && PM && USB_OTG select USB_PHY help Say Y here if you want to build Marvell USB OTG transceiver diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c index 4d863ebc117c..b7536af777ab 100644 --- a/drivers/usb/phy/phy-mxs-usb.c +++ b/drivers/usb/phy/phy-mxs-usb.c @@ -452,10 +452,13 @@ static int mxs_phy_probe(struct platform_device *pdev) struct clk *clk; struct mxs_phy *mxs_phy; int ret; - const struct of_device_id *of_id = - of_match_device(mxs_phy_dt_ids, &pdev->dev); + const struct of_device_id *of_id; struct device_node *np = pdev->dev.of_node; + of_id = of_match_device(mxs_phy_dt_ids, &pdev->dev); + if (!of_id) + return -ENODEV; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(base)) diff --git a/drivers/usb/phy/phy-omap-otg.c b/drivers/usb/phy/phy-omap-otg.c index 1270906ccb95..c4bf2de6d14e 100644 --- a/drivers/usb/phy/phy-omap-otg.c +++ b/drivers/usb/phy/phy-omap-otg.c @@ -105,7 +105,6 @@ static int omap_otg_probe(struct platform_device *pdev) extcon = extcon_get_extcon_dev(config->extcon); if (!extcon) return -EPROBE_DEFER; - otg_dev->extcon = extcon; otg_dev = devm_kzalloc(&pdev->dev, sizeof(*otg_dev), GFP_KERNEL); if (!otg_dev) return -ENOMEM; @@ -115,6 +114,7 @@ static int omap_otg_probe(struct platform_device *pdev) if (IS_ERR(otg_dev->base)) return PTR_ERR(otg_dev->base); + otg_dev->extcon = extcon; otg_dev->id_nb.notifier_call = omap_otg_id_notifier; otg_dev->vbus_nb.notifier_call = omap_otg_vbus_notifier; diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 685fef71d3d1..f2280606b73c 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -161,6 +161,7 @@ static void option_instat_callback(struct urb *urb); #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED 0x9001 #define NOVATELWIRELESS_PRODUCT_E362 0x9010 #define NOVATELWIRELESS_PRODUCT_E371 0x9011 +#define NOVATELWIRELESS_PRODUCT_U620L 0x9022 #define NOVATELWIRELESS_PRODUCT_G2 0xA010 #define NOVATELWIRELESS_PRODUCT_MC551 0xB001 @@ -354,6 +355,7 @@ static void option_instat_callback(struct urb *urb); /* This is the 4G XS Stick W14 a.k.a.
Mobilcom Debitel Surf-Stick * * It seems to contain a Qualcomm QSC6240/6290 chipset */ #define FOUR_G_SYSTEMS_PRODUCT_W14 0x9603 +#define FOUR_G_SYSTEMS_PRODUCT_W100 0x9b01 /* iBall 3.5G connect wireless modem */ #define IBALL_3_5G_CONNECT 0x9605 @@ -519,6 +521,11 @@ static const struct option_blacklist_info four_g_w14_blacklist = { .sendsetup = BIT(0) | BIT(1), }; +static const struct option_blacklist_info four_g_w100_blacklist = { + .sendsetup = BIT(1) | BIT(2), + .reserved = BIT(3), +}; + static const struct option_blacklist_info alcatel_x200_blacklist = { .sendsetup = BIT(0) | BIT(1), .reserved = BIT(4), @@ -1052,6 +1059,7 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC551, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E362, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E371, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U620L, 0xff, 0x00, 0x00) }, { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) }, { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) }, @@ -1641,6 +1649,9 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14), .driver_info = (kernel_ulong_t)&four_g_w14_blacklist }, + { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W100), + .driver_info = (kernel_ulong_t)&four_g_w100_blacklist + }, { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) }, { USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) }, { USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) }, diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index 5022fcfa0260..9919d2a9faf2 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c @@ -22,6 +22,8 @@ #define DRIVER_AUTHOR "Qualcomm Inc" #define DRIVER_DESC "Qualcomm USB Serial driver" +#define QUECTEL_EC20_PID 0x9215 + /* standard device layouts supported by this driver */ enum qcserial_layouts { QCSERIAL_G2K = 0, /* Gobi 2000 */ @@ -171,6 +173,38 @@ static const struct usb_device_id id_table[] = { }; MODULE_DEVICE_TABLE(usb, id_table); +static int handle_quectel_ec20(struct device *dev, int ifnum) +{ + int altsetting = 0; + + /* + * Quectel EC20 Mini PCIe LTE module layout: + * 0: DM/DIAG (use libqcdm from ModemManager for communication) + * 1: NMEA + * 2: AT-capable modem port + * 3: Modem interface + * 4: NDIS + */ + switch (ifnum) { + case 0: + dev_dbg(dev, "Quectel EC20 DM/DIAG interface found\n"); + break; + case 1: + dev_dbg(dev, "Quectel EC20 NMEA GPS interface found\n"); + break; + case 2: + case 3: + dev_dbg(dev, "Quectel EC20 Modem port found\n"); + break; + case 4: + /* Don't claim the QMI/net interface */ + altsetting = -1; + break; + } + + return altsetting; +} + static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) { struct usb_host_interface *intf = serial->interface->cur_altsetting; @@ -181,6 +215,10 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) int altsetting = -1; bool sendsetup = false; + /* we only support vendor specific functions */ + if (intf->desc.bInterfaceClass != USB_CLASS_VENDOR_SPEC) + goto done; + nintf = serial->dev->actconfig->desc.bNumInterfaces; dev_dbg(dev, "Num Interfaces = %d\n", nintf); ifnum = intf->desc.bInterfaceNumber; @@ -240,6 +278,12 @@ static int qcprobe(struct 
usb_serial *serial, const struct usb_device_id *id) altsetting = -1; break; case QCSERIAL_G2K: + /* handle non-standard layouts */ + if (nintf == 5 && id->idProduct == QUECTEL_EC20_PID) { + altsetting = handle_quectel_ec20(dev, ifnum); + goto done; + } + /* * Gobi 2K+ USB layout: * 0: QMI/net @@ -301,29 +345,39 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) break; case QCSERIAL_HWI: /* - * Huawei layout: - * 0: AT-capable modem port - * 1: DM/DIAG - * 2: AT-capable modem port - * 3: CCID-compatible PCSC interface - * 4: QMI/net - * 5: NMEA + * Huawei devices map functions by subclass + protocol + * instead of interface numbers. The protocol identifies + * a specific function, while the subclass indicates a + * specific firmware source. + * + * This is a blacklist of functions known to be + * non-serial. The rest are assumed to be serial and + * will be handled by this driver. */ - switch (ifnum) { - case 0: - case 2: - dev_dbg(dev, "Modem port found\n"); - break; - case 1: - dev_dbg(dev, "DM/DIAG interface found\n"); - break; - case 5: - dev_dbg(dev, "NMEA GPS interface found\n"); - break; - default: - /* don't claim any unsupported interface */ + switch (intf->desc.bInterfaceProtocol) { + /* QMI combined (qmi_wwan) */ + case 0x07: + case 0x37: + case 0x67: + /* QMI data (qmi_wwan) */ + case 0x08: + case 0x38: + case 0x68: + /* QMI control (qmi_wwan) */ + case 0x09: + case 0x39: + case 0x69: + /* NCM like (huawei_cdc_ncm) */ + case 0x16: + case 0x46: + case 0x76: altsetting = -1; break; + default: + dev_dbg(dev, "Huawei type serial port found (%02x/%02x/%02x)\n", + intf->desc.bInterfaceClass, + intf->desc.bInterfaceSubClass, + intf->desc.bInterfaceProtocol); } break; default: diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c index e9da41d9fe7f..2694df2f4559 100644 --- a/drivers/usb/serial/ti_usb_3410_5052.c +++ b/drivers/usb/serial/ti_usb_3410_5052.c @@ -159,6 +159,7 @@ static const struct usb_device_id ti_id_table_3410[] = { { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STEREO_PLUG_ID) }, { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) }, { USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) }, + { USB_DEVICE(HONEYWELL_VENDOR_ID, HONEYWELL_HGI80_PRODUCT_ID) }, { } /* terminator */ }; @@ -191,6 +192,7 @@ static const struct usb_device_id ti_id_table_combined[] = { { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) }, { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) }, { USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) }, + { USB_DEVICE(HONEYWELL_VENDOR_ID, HONEYWELL_HGI80_PRODUCT_ID) }, { } /* terminator */ }; diff --git a/drivers/usb/serial/ti_usb_3410_5052.h b/drivers/usb/serial/ti_usb_3410_5052.h index 4a2423e84d55..98f35c656c02 100644 --- a/drivers/usb/serial/ti_usb_3410_5052.h +++ b/drivers/usb/serial/ti_usb_3410_5052.h @@ -56,6 +56,10 @@ #define ABBOTT_PRODUCT_ID ABBOTT_STEREO_PLUG_ID #define ABBOTT_STRIP_PORT_ID 0x3420 +/* Honeywell vendor and product IDs */ +#define HONEYWELL_VENDOR_ID 0x10ac +#define HONEYWELL_HGI80_PRODUCT_ID 0x0102 /* Honeywell HGI80 */ + /* Commands */ #define TI_GET_VERSION 0x01 #define TI_GET_PORT_STATUS 0x02 diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 7a8a6c6952e9..1c427beffadd 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -446,7 +446,7 @@ config MAX63XX_WATCHDOG config IMX2_WDT tristate "IMX2+ Watchdog" - depends on ARCH_MXC + depends on ARCH_MXC || ARCH_LAYERSCAPE select REGMAP_MMIO select WATCHDOG_CORE help diff --git a/drivers/watchdog/mtk_wdt.c
b/drivers/watchdog/mtk_wdt.c index 6ad9df948711..b751f43d76ed 100644 --- a/drivers/watchdog/mtk_wdt.c +++ b/drivers/watchdog/mtk_wdt.c @@ -123,6 +123,7 @@ static int mtk_wdt_stop(struct watchdog_device *wdt_dev) reg = readl(wdt_base + WDT_MODE); reg &= ~WDT_MODE_EN; + reg |= WDT_MODE_KEY; iowrite32(reg, wdt_base + WDT_MODE); return 0; diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c index d96bee017fd3..6f17c935a6cf 100644 --- a/drivers/watchdog/omap_wdt.c +++ b/drivers/watchdog/omap_wdt.c @@ -205,7 +205,7 @@ static int omap_wdt_set_timeout(struct watchdog_device *wdog, static unsigned int omap_wdt_get_timeleft(struct watchdog_device *wdog) { - struct omap_wdt_dev *wdev = watchdog_get_drvdata(wdog); + struct omap_wdt_dev *wdev = to_omap_wdt_dev(wdog); void __iomem *base = wdev->base; u32 value; diff --git a/drivers/watchdog/pnx4008_wdt.c b/drivers/watchdog/pnx4008_wdt.c index 4224b3ec83a5..313cd1c6fda0 100644 --- a/drivers/watchdog/pnx4008_wdt.c +++ b/drivers/watchdog/pnx4008_wdt.c @@ -80,7 +80,7 @@ static unsigned int heartbeat = DEFAULT_HEARTBEAT; static DEFINE_SPINLOCK(io_lock); static void __iomem *wdt_base; -struct clk *wdt_clk; +static struct clk *wdt_clk; static int pnx4008_wdt_start(struct watchdog_device *wdd) { @@ -161,7 +161,7 @@ static int pnx4008_wdt_probe(struct platform_device *pdev) if (IS_ERR(wdt_clk)) return PTR_ERR(wdt_clk); - ret = clk_enable(wdt_clk); + ret = clk_prepare_enable(wdt_clk); if (ret) return ret; @@ -184,7 +184,7 @@ static int pnx4008_wdt_probe(struct platform_device *pdev) return 0; disable_clk: - clk_disable(wdt_clk); + clk_disable_unprepare(wdt_clk); return ret; } @@ -192,7 +192,7 @@ static int pnx4008_wdt_remove(struct platform_device *pdev) { watchdog_unregister_device(&pnx4008_wdd); - clk_disable(wdt_clk); + clk_disable_unprepare(wdt_clk); return 0; } diff --git a/drivers/watchdog/tegra_wdt.c b/drivers/watchdog/tegra_wdt.c index 7f97cdd53f29..9ec57608da82 100644 --- a/drivers/watchdog/tegra_wdt.c +++ b/drivers/watchdog/tegra_wdt.c @@ -140,8 +140,10 @@ static int tegra_wdt_set_timeout(struct watchdog_device *wdd, { wdd->timeout = timeout; - if (watchdog_active(wdd)) + if (watchdog_active(wdd)) { + tegra_wdt_stop(wdd); return tegra_wdt_start(wdd); + } return 0; } diff --git a/drivers/watchdog/w83977f_wdt.c b/drivers/watchdog/w83977f_wdt.c index 91bf55a20024..20e2bba10400 100644 --- a/drivers/watchdog/w83977f_wdt.c +++ b/drivers/watchdog/w83977f_wdt.c @@ -224,7 +224,7 @@ static int wdt_keepalive(void) static int wdt_set_timeout(int t) { - int tmrval; + unsigned int tmrval; /* * Convert seconds to watchdog counter time units, rounding up. diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index 849500e4e14d..524c22146429 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c @@ -39,6 +39,7 @@ #include <asm/irq.h> #include <asm/idle.h> #include <asm/io_apic.h> +#include <asm/i8259.h> #include <asm/xen/pci.h> #endif #include <asm/sync_bitops.h> @@ -420,7 +421,7 @@ static int __must_check xen_allocate_irq_gsi(unsigned gsi) return xen_allocate_irq_dynamic(); /* Legacy IRQ descriptors are already allocated by the arch. */ - if (gsi < NR_IRQS_LEGACY) + if (gsi < nr_legacy_irqs()) irq = gsi; else irq = irq_alloc_desc_at(gsi, -1); @@ -446,7 +447,7 @@ static void xen_free_irq(unsigned irq) kfree(info); /* Legacy IRQ descriptors are managed by the arch. 
*/ - if (irq < NR_IRQS_LEGACY) + if (irq < nr_legacy_irqs()) return; irq_free_desc(irq); diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c index 00f40f051d95..38272ad24551 100644 --- a/drivers/xen/evtchn.c +++ b/drivers/xen/evtchn.c @@ -49,6 +49,8 @@ #include <linux/init.h> #include <linux/mutex.h> #include <linux/cpu.h> +#include <linux/mm.h> +#include <linux/vmalloc.h> #include <xen/xen.h> #include <xen/events.h> @@ -58,10 +60,10 @@ struct per_user_data { struct mutex bind_mutex; /* serialize bind/unbind operations */ struct rb_root evtchns; + unsigned int nr_evtchns; /* Notification ring, accessed via /dev/xen/evtchn. */ -#define EVTCHN_RING_SIZE (PAGE_SIZE / sizeof(evtchn_port_t)) -#define EVTCHN_RING_MASK(_i) ((_i)&(EVTCHN_RING_SIZE-1)) + unsigned int ring_size; evtchn_port_t *ring; unsigned int ring_cons, ring_prod, ring_overflow; struct mutex ring_cons_mutex; /* protect against concurrent readers */ @@ -80,10 +82,41 @@ struct user_evtchn { bool enabled; }; +static evtchn_port_t *evtchn_alloc_ring(unsigned int size) +{ + evtchn_port_t *ring; + size_t s = size * sizeof(*ring); + + ring = kmalloc(s, GFP_KERNEL); + if (!ring) + ring = vmalloc(s); + + return ring; +} + +static void evtchn_free_ring(evtchn_port_t *ring) +{ + kvfree(ring); +} + +static unsigned int evtchn_ring_offset(struct per_user_data *u, + unsigned int idx) +{ + return idx & (u->ring_size - 1); +} + +static evtchn_port_t *evtchn_ring_entry(struct per_user_data *u, + unsigned int idx) +{ + return u->ring + evtchn_ring_offset(u, idx); +} + static int add_evtchn(struct per_user_data *u, struct user_evtchn *evtchn) { struct rb_node **new = &(u->evtchns.rb_node), *parent = NULL; + u->nr_evtchns++; + while (*new) { struct user_evtchn *this; @@ -107,6 +140,7 @@ static int add_evtchn(struct per_user_data *u, struct user_evtchn *evtchn) static void del_evtchn(struct per_user_data *u, struct user_evtchn *evtchn) { + u->nr_evtchns--; rb_erase(&evtchn->node, &u->evtchns); kfree(evtchn); } @@ -144,8 +178,8 @@ static irqreturn_t evtchn_interrupt(int irq, void *data) spin_lock(&u->ring_prod_lock); - if ((u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE) { - u->ring[EVTCHN_RING_MASK(u->ring_prod)] = evtchn->port; + if ((u->ring_prod - u->ring_cons) < u->ring_size) { + *evtchn_ring_entry(u, u->ring_prod) = evtchn->port; wmb(); /* Ensure ring contents visible */ if (u->ring_cons == u->ring_prod++) { wake_up_interruptible(&u->evtchn_wait); @@ -200,10 +234,10 @@ static ssize_t evtchn_read(struct file *file, char __user *buf, } /* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */ - if (((c ^ p) & EVTCHN_RING_SIZE) != 0) { - bytes1 = (EVTCHN_RING_SIZE - EVTCHN_RING_MASK(c)) * + if (((c ^ p) & u->ring_size) != 0) { + bytes1 = (u->ring_size - evtchn_ring_offset(u, c)) * sizeof(evtchn_port_t); - bytes2 = EVTCHN_RING_MASK(p) * sizeof(evtchn_port_t); + bytes2 = evtchn_ring_offset(u, p) * sizeof(evtchn_port_t); } else { bytes1 = (p - c) * sizeof(evtchn_port_t); bytes2 = 0; @@ -219,7 +253,7 @@ static ssize_t evtchn_read(struct file *file, char __user *buf, rc = -EFAULT; rmb(); /* Ensure that we see the port before we copy it. 
*/ - if (copy_to_user(buf, &u->ring[EVTCHN_RING_MASK(c)], bytes1) || + if (copy_to_user(buf, evtchn_ring_entry(u, c), bytes1) || ((bytes2 != 0) && copy_to_user(&buf[bytes1], &u->ring[0], bytes2))) goto unlock_out; @@ -278,6 +312,66 @@ static ssize_t evtchn_write(struct file *file, const char __user *buf, return rc; } +static int evtchn_resize_ring(struct per_user_data *u) +{ + unsigned int new_size; + evtchn_port_t *new_ring, *old_ring; + unsigned int p, c; + + /* + * Ensure the ring is large enough to capture all possible + * events. i.e., one free slot for each bound event. + */ + if (u->nr_evtchns <= u->ring_size) + return 0; + + if (u->ring_size == 0) + new_size = 64; + else + new_size = 2 * u->ring_size; + + new_ring = evtchn_alloc_ring(new_size); + if (!new_ring) + return -ENOMEM; + + old_ring = u->ring; + + /* + * Access to the ring contents is serialized by either the + * prod /or/ cons lock so take both when resizing. + */ + mutex_lock(&u->ring_cons_mutex); + spin_lock_irq(&u->ring_prod_lock); + + /* + * Copy the old ring contents to the new ring. + * + * If the ring contents crosses the end of the current ring, + * it needs to be copied in two chunks. + * + * +---------+ +------------------+ + * |34567 12| -> | 1234567 | + * +-----p-c-+ +------------------+ + */ + p = evtchn_ring_offset(u, u->ring_prod); + c = evtchn_ring_offset(u, u->ring_cons); + if (p < c) { + memcpy(new_ring + c, u->ring + c, (u->ring_size - c) * sizeof(*u->ring)); + memcpy(new_ring + u->ring_size, u->ring, p * sizeof(*u->ring)); + } else + memcpy(new_ring + c, u->ring + c, (p - c) * sizeof(*u->ring)); + + u->ring = new_ring; + u->ring_size = new_size; + + spin_unlock_irq(&u->ring_prod_lock); + mutex_unlock(&u->ring_cons_mutex); + + evtchn_free_ring(old_ring); + + return 0; +} + static int evtchn_bind_to_user(struct per_user_data *u, int port) { struct user_evtchn *evtchn; @@ -305,6 +399,10 @@ static int evtchn_bind_to_user(struct per_user_data *u, int port) if (rc < 0) goto err; + rc = evtchn_resize_ring(u); + if (rc < 0) + goto err; + rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, 0, u->name, evtchn); if (rc < 0) @@ -503,13 +601,6 @@ static int evtchn_open(struct inode *inode, struct file *filp) init_waitqueue_head(&u->evtchn_wait); - u->ring = (evtchn_port_t *)__get_free_page(GFP_KERNEL); - if (u->ring == NULL) { - kfree(u->name); - kfree(u); - return -ENOMEM; - } - mutex_init(&u->bind_mutex); mutex_init(&u->ring_cons_mutex); spin_lock_init(&u->ring_prod_lock); @@ -532,7 +623,7 @@ static int evtchn_release(struct inode *inode, struct file *filp) evtchn_unbind_from_user(u, evtchn); } - free_page((unsigned long)u->ring); + evtchn_free_ring(u->ring); kfree(u->name); kfree(u); diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index 2ea0b3b2a91d..1be5dd048622 100644 --- a/drivers/xen/gntdev.c +++ b/drivers/xen/gntdev.c @@ -804,7 +804,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma) vma->vm_ops = &gntdev_vmops; - vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; + vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_IO; if (use_ptemod) vma->vm_flags |= VM_DONTCOPY; |
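A closing note on the evtchn ring above: evtchn_alloc_ring() prefers kmalloc() and falls back to vmalloc() when physically contiguous memory is tight, and kvfree() releases either kind, so no flag has to record which allocator succeeded. The offset helpers likewise depend on the ring size staying a power of two, since idx & (size - 1) only equals idx % size under that invariant. The allocation pattern in miniature (a sketch, not the driver's exact code):

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static void *ring_alloc(size_t size)
{
	/* prefer physically contiguous memory... */
	void *p = kmalloc(size, GFP_KERNEL);

	/* ...but fall back to vmalloc() under memory pressure */
	if (!p)
		p = vmalloc(size);
	return p;
}

static void ring_free(void *p)
{
	kvfree(p);	/* frees kmalloc()ed and vmalloc()ed memory alike */
}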