Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c           |  2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c           |  2
-rw-r--r--  drivers/gpu/drm/i915/i915_pmu.c                          |  2
-rw-r--r--  drivers/iio/dummy/iio_simple_dummy_buffer.c              | 48
-rw-r--r--  drivers/net/dsa/b53/b53_common.c                         |  6
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c               |  6
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c  |  4
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c     |  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cmd.c                 | 23
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_rdma.c               | 45
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_roce.c               |  2
11 files changed, 48 insertions, 94 deletions
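
For context, the hunks below repeat two conversions: an emptiness test that used to be spelled as bitmap_weight(...) == 0 (or !bitmap_weight(...)) becomes bitmap_empty(), and open-coded find_first_bit()/find_next_bit() loops become for_each_set_bit(). A minimal, hypothetical sketch of both idioms follows; the helper and its names are illustrative only and are not taken from this diff:

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/printk.h>

/* Hypothetical helper, for illustration only. */
static void example_scan(const unsigned long *mask, unsigned int nbits)
{
        unsigned int bit;

        /* Was: if (bitmap_weight(mask, nbits) == 0) -- no need to count every bit */
        if (bitmap_empty(mask, nbits))
                return;

        /* Was: a manual for () loop around find_next_bit(mask, nbits, bit) */
        for_each_set_bit(bit, mask, nbits)
                pr_debug("bit %u is set\n", bit);
}

bitmap_empty() can stop at the first non-zero word, whereas bitmap_weight() always walks the whole bitmap, so the converted checks are also marginally cheaper.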
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index b87f550af26b..5f8809f6990d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -781,7 +781,7 @@ int smu_v11_0_set_allowed_mask(struct smu_context *smu)
                 goto failed;
         }
 
-        bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64);
+        bitmap_to_arr32(feature_mask, feature->allowed, 64);
 
         ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
                                               feature_mask[1], NULL);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index 7be4f6875a7b..ef9b56de143b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -837,7 +837,7 @@ int smu_v13_0_set_allowed_mask(struct smu_context *smu)
             feature->feature_num < 64)
                 return -EINVAL;
 
-        bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64);
+        bitmap_to_arr32(feature_mask, feature->allowed, 64);
 
         ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
                                               feature_mask[1], NULL);
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 3e3b09588fd3..958b37123bf1 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -1047,7 +1047,7 @@ static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
         GEM_BUG_ON(!pmu->base.event_init);
 
         /* Select the first online CPU as a designated reader. */
-        if (!cpumask_weight(&i915_pmu_cpumask))
+        if (cpumask_empty(&i915_pmu_cpumask))
                 cpumask_set_cpu(cpu, &i915_pmu_cpumask);
 
         return 0;
diff --git a/drivers/iio/dummy/iio_simple_dummy_buffer.c b/drivers/iio/dummy/iio_simple_dummy_buffer.c
index d81c2b2dad82..9b2f99449a82 100644
--- a/drivers/iio/dummy/iio_simple_dummy_buffer.c
+++ b/drivers/iio/dummy/iio_simple_dummy_buffer.c
@@ -45,41 +45,31 @@ static irqreturn_t iio_simple_dummy_trigger_h(int irq, void *p)
 {
         struct iio_poll_func *pf = p;
         struct iio_dev *indio_dev = pf->indio_dev;
+        int i = 0, j;
         u16 *data;
 
         data = kmalloc(indio_dev->scan_bytes, GFP_KERNEL);
         if (!data)
                 goto done;
 
-        if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength)) {
-                /*
-                 * Three common options here:
-                 * hardware scans: certain combinations of channels make
-                 * up a fast read. The capture will consist of all of them.
-                 * Hence we just call the grab data function and fill the
-                 * buffer without processing.
-                 * software scans: can be considered to be random access
-                 * so efficient reading is just a case of minimal bus
-                 * transactions.
-                 * software culled hardware scans:
-                 * occasionally a driver may process the nearest hardware
-                 * scan to avoid storing elements that are not desired. This
-                 * is the fiddliest option by far.
-                 * Here let's pretend we have random access. And the values are
-                 * in the constant table fakedata.
-                 */
-                int i, j;
-
-                for (i = 0, j = 0;
-                     i < bitmap_weight(indio_dev->active_scan_mask,
-                                       indio_dev->masklength);
-                     i++, j++) {
-                        j = find_next_bit(indio_dev->active_scan_mask,
-                                          indio_dev->masklength, j);
-                        /* random access read from the 'device' */
-                        data[i] = fakedata[j];
-                }
-        }
+        /*
+         * Three common options here:
+         * hardware scans:
+         * certain combinations of channels make up a fast read. The capture
+         * will consist of all of them. Hence we just call the grab data
+         * function and fill the buffer without processing.
+         * software scans:
+         * can be considered to be random access so efficient reading is just
+         * a case of minimal bus transactions.
+         * software culled hardware scans:
+         * occasionally a driver may process the nearest hardware scan to avoid
+         * storing elements that are not desired. This is the fiddliest option
+         * by far.
+         * Here let's pretend we have random access. And the values are in the
+         * constant table fakedata.
+         */
+        for_each_set_bit(j, indio_dev->active_scan_mask, indio_dev->masklength)
+                data[i++] = fakedata[j];
 
         iio_push_to_buffers_with_timestamp(indio_dev, data,
                                            iio_get_time_ns(indio_dev));
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index fbb32aa49b24..48cf344750ff 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -1603,12 +1603,8 @@ static int b53_arl_read(struct b53_device *dev, u64 mac,
                 return 0;
         }
 
-        if (bitmap_weight(free_bins, dev->num_arl_bins) == 0)
-                return -ENOSPC;
-
         *idx = find_first_bit(free_bins, dev->num_arl_bins);
-
-        return -ENOENT;
+        return *idx >= dev->num_arl_bins ? -ENOSPC : -ENOENT;
 }
 
 static int b53_arl_op(struct b53_device *dev, int op, int port,
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 3272aca496dc..47fc8e6963d5 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -2180,13 +2180,9 @@ static int bcm_sysport_rule_set(struct bcm_sysport_priv *priv,
         if (nfc->fs.ring_cookie != RX_CLS_FLOW_WAKE)
                 return -EOPNOTSUPP;
 
-        /* All filters are already in use, we cannot match more rules */
-        if (bitmap_weight(priv->filters, RXCHK_BRCM_TAG_MAX) ==
-            RXCHK_BRCM_TAG_MAX)
-                return -ENOSPC;
-
         index = find_first_zero_bit(priv->filters, RXCHK_BRCM_TAG_MAX);
         if (index >= RXCHK_BRCM_TAG_MAX)
+                /* All filters are already in use, we cannot match more rules */
                 return -ENOSPC;
 
         /* Location is the classification ID, and index is the position
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
index 54f235c216a9..2dd192b5e4e0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
@@ -355,7 +355,7 @@ int otx2_add_macfilter(struct net_device *netdev, const u8 *mac)
 {
         struct otx2_nic *pf = netdev_priv(netdev);
 
-        if (bitmap_weight(&pf->flow_cfg->dmacflt_bmap,
+        if (!bitmap_empty(&pf->flow_cfg->dmacflt_bmap,
                           pf->flow_cfg->dmacflt_max_flows))
                 netdev_warn(netdev,
                             "Add %pM to CGX/RPM DMAC filters list as well\n",
@@ -438,7 +438,7 @@ int otx2_get_maxflows(struct otx2_flow_config *flow_cfg)
                 return 0;
 
         if (flow_cfg->nr_flows == flow_cfg->max_flows ||
-            bitmap_weight(&flow_cfg->dmacflt_bmap,
+            !bitmap_empty(&flow_cfg->dmacflt_bmap,
                           flow_cfg->dmacflt_max_flows))
                 return flow_cfg->max_flows + flow_cfg->dmacflt_max_flows;
         else
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index fe3472e04c23..9106c359e64c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -1120,7 +1120,7 @@ static int otx2_cgx_config_loopback(struct otx2_nic *pf, bool enable)
         struct msg_req *msg;
         int err;
 
-        if (enable && bitmap_weight(&pf->flow_cfg->dmacflt_bmap,
+        if (enable && !bitmap_empty(&pf->flow_cfg->dmacflt_bmap,
                                     pf->flow_cfg->dmacflt_max_flows))
                 netdev_warn(pf->netdev,
                             "CGX/RPM internal loopback might not work as DMAC filters are active\n");
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index e10b7b04b894..c56d2194cbfc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1994,21 +1994,16 @@ static void mlx4_allocate_port_vpps(struct mlx4_dev *dev, int port)
 
 static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
 {
-        int port, err;
+        int p, port, err;
         struct mlx4_vport_state *vp_admin;
         struct mlx4_vport_oper_state *vp_oper;
         struct mlx4_slave_state *slave_state =
                 &priv->mfunc.master.slave_state[slave];
         struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
                         &priv->dev, slave);
-        int min_port = find_first_bit(actv_ports.ports,
-                                      priv->dev.caps.num_ports) + 1;
-        int max_port = min_port - 1 +
-                bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);
 
-        for (port = min_port; port <= max_port; port++) {
-                if (!test_bit(port - 1, actv_ports.ports))
-                        continue;
+        for_each_set_bit(p, actv_ports.ports, priv->dev.caps.num_ports) {
+                port = p + 1;
                 priv->mfunc.master.vf_oper[slave].smi_enabled[port] =
                         priv->mfunc.master.vf_admin[slave].enable_smi[port];
                 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
@@ -2063,19 +2058,13 @@ static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
 
 static void mlx4_master_deactivate_admin_state(struct mlx4_priv *priv,
                                                int slave)
 {
-        int port;
+        int p, port;
         struct mlx4_vport_oper_state *vp_oper;
         struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
                         &priv->dev, slave);
-        int min_port = find_first_bit(actv_ports.ports,
-                                      priv->dev.caps.num_ports) + 1;
-        int max_port = min_port - 1 +
-                bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);
-
-        for (port = min_port; port <= max_port; port++) {
-                if (!test_bit(port - 1, actv_ports.ports))
-                        continue;
+        for_each_set_bit(p, actv_ports.ports, priv->dev.caps.num_ports) {
+                port = p + 1;
                 priv->mfunc.master.vf_oper[slave].smi_enabled[port] =
                         MLX4_VF_SMI_DISABLED;
                 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index 23b668de4640..69b0ede75cae 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -319,44 +319,27 @@ free_rdma_dev:
 void qed_rdma_bmap_free(struct qed_hwfn *p_hwfn,
                         struct qed_bmap *bmap, bool check)
 {
-        int weight = bitmap_weight(bmap->bitmap, bmap->max_count);
-        int last_line = bmap->max_count / (64 * 8);
-        int last_item = last_line * 8 +
-            DIV_ROUND_UP(bmap->max_count % (64 * 8), 64);
-        u64 *pmap = (u64 *)bmap->bitmap;
-        int line, item, offset;
-        u8 str_last_line[200] = { 0 };
-
-        if (!weight || !check)
+        unsigned int bit, weight, nbits;
+        unsigned long *b;
+
+        if (!check)
+                goto end;
+
+        weight = bitmap_weight(bmap->bitmap, bmap->max_count);
+        if (!weight)
                 goto end;
 
         DP_NOTICE(p_hwfn,
                   "%s bitmap not free - size=%d, weight=%d, 512 bits per line\n",
                   bmap->name, bmap->max_count, weight);
 
-        /* print aligned non-zero lines, if any */
-        for (item = 0, line = 0; line < last_line; line++, item += 8)
-                if (bitmap_weight((unsigned long *)&pmap[item], 64 * 8))
+        for (bit = 0; bit < bmap->max_count; bit += 512) {
+                b = bmap->bitmap + BITS_TO_LONGS(bit);
+                nbits = min(bmap->max_count - bit, 512U);
+
+                if (!bitmap_empty(b, nbits))
                         DP_NOTICE(p_hwfn,
-                                  "line 0x%04x: 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
-                                  line,
-                                  pmap[item],
-                                  pmap[item + 1],
-                                  pmap[item + 2],
-                                  pmap[item + 3],
-                                  pmap[item + 4],
-                                  pmap[item + 5],
-                                  pmap[item + 6], pmap[item + 7]);
-
-        /* print last unaligned non-zero line, if any */
-        if ((bmap->max_count % (64 * 8)) &&
-            (bitmap_weight((unsigned long *)&pmap[item],
-                           bmap->max_count - item * 64))) {
-                offset = sprintf(str_last_line, "line 0x%04x: ", line);
-                for (; item < last_item; item++)
-                        offset += sprintf(str_last_line + offset,
-                                          "0x%016llx ", pmap[item]);
-                DP_NOTICE(p_hwfn, "%s\n", str_last_line);
+                                  "line 0x%04x: %*pb\n", bit / 512, nbits, b);
         }
 
 end:
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index 071b4aeaddf2..134ecfca96a3 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -76,7 +76,7 @@ void qed_roce_stop(struct qed_hwfn *p_hwfn)
          * We delay for a short while if an async destroy QP is still expected.
          * Beyond the added delay we clear the bitmap anyway.
          */
-        while (bitmap_weight(rcid_map->bitmap, rcid_map->max_count)) {
+        while (!bitmap_empty(rcid_map->bitmap, rcid_map->max_count)) {
                 /* If the HW device is during recovery, all resources are
                  * immediately reset without receiving a per-cid indication
                  * from HW. In this case we don't expect the cid bitmap to be
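
The qed_rdma.c hunk relies on the kernel's "%*pb" printk extension, which prints a bitmap in hex and takes the number of bits as the field width, and the two SMU hunks use bitmap_to_arr32() instead of bitmap_copy() through an (unsigned long *) cast. A small, hypothetical sketch of both helpers; the function and its names are illustrative only and assume nbits <= 64:

#include <linux/bitmap.h>
#include <linux/printk.h>
#include <linux/types.h>

/* Hypothetical dump helper, for illustration only. */
static void example_dump(const unsigned long *map, unsigned int nbits)
{
        u32 words[2];

        pr_info("map: %*pb\n", nbits, map);     /* hex dump of the bitmap */
        pr_info("map: %*pbl\n", nbits, map);    /* same bitmap as a range list, e.g. "1,3,5" */

        /*
         * Unlike bitmap_copy() through an (unsigned long *) cast, this keeps
         * the 32-bit words in the right order regardless of endianness and
         * word size.
         */
        bitmap_to_arr32(words, map, nbits);
}

The two-element u32 array mirrors the shape of the feature_mask that the SMU code above sends to firmware as separate high and low words.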