author     Dave Airlie <airlied@redhat.com>  2015-06-19 12:01:39 +1000
committer  Dave Airlie <airlied@redhat.com>  2015-06-19 12:01:39 +1000
commit     26093813eabebcd3c9967f58a2581df45e91c2fe (patch)
tree       41f1a38ab080b18507110ea06b47ed6d8aa2ec21
parent     0dc499aff57f5e787214d5085dd1c391e0f93710 (diff)
parent     4ed9fb371ccdfe465bd3bbb69e4cad5243e6c4e2 (diff)
Merge tag 'drm-intel-next-fixes-2015-06-18' of git://anongit.freedesktop.org/drm-intel into drm-next
i915 fixes for stuff in next

* tag 'drm-intel-next-fixes-2015-06-18' of git://anongit.freedesktop.org/drm-intel:
  drm/i915: Don't set enabled value of all CRTCs when restoring the mode
  drm/i915: Don't update staged config during force restore modesets
  drm/i915: Don't check modeset state in the hw state force restore path
  drm/i915: Add SCRATCH1 and ROW_CHICKEN3 to the register whitelist.
  drm/i915: Extend the parser to check register writes against a mask/value pair.
  drm/i915: Fix command parser to validate multiple register access with the same command.
  drm/i915: Don't skip request retirement if the active list is empty
-rw-r--r--  drivers/gpu/drm/i915/i915_cmd_parser.c   197
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h             5
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c             3
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c       54
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h     5
5 files changed, 164 insertions, 100 deletions
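
The bulk of the diff below reworks the command parser's register whitelists from flat u32 arrays into descriptor structs that can carry an optional mask/value constraint, and teaches check_cmd() to walk commands that touch several registers at once. As a rough orientation, here is a minimal, self-contained C sketch of the whitelist lookup and mask/value check; the struct and function names are simplified stand-ins, not the kernel's actual interfaces.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Simplified stand-in for the new drm_i915_reg_descriptor. */
struct reg_desc {
	uint32_t addr;   /* whitelisted register offset */
	uint32_t mask;   /* if non-zero, constrain immediate writes */
	uint32_t value;  /* required result of (data & mask) */
};

/* Linear lookup, mirroring find_reg() in the diff; NULL means "not allowed". */
static const struct reg_desc *lookup_reg(const struct reg_desc *table,
					 size_t count, uint32_t addr)
{
	for (size_t i = 0; i < count; i++)
		if (table[i].addr == addr)
			return &table[i];
	return NULL;
}

/* An immediate write is accepted only if it satisfies the mask/value pair. */
static bool write_allowed(const struct reg_desc *reg, uint32_t data)
{
	if (!reg)
		return false;                         /* not whitelisted at all */
	if (reg->mask)
		return (data & reg->mask) == reg->value;
	return true;                                  /* unconstrained register */
}

Applied to the HSW_SCRATCH1 entry added below (mask = ~HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE, value = 0), this means every bit other than the L3 data-atomics disable bit must be written as zero, so that one bit is the only thing a batch may toggle.
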
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 9605ff8f2fcd..306d9e4e5cf3 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -123,7 +123,7 @@ static const struct drm_i915_cmd_descriptor common_cmds[] = {
CMD( MI_SEMAPHORE_MBOX, SMI, !F, 0xFF, R ),
CMD( MI_STORE_DWORD_INDEX, SMI, !F, 0xFF, R ),
CMD( MI_LOAD_REGISTER_IMM(1), SMI, !F, 0xFF, W,
- .reg = { .offset = 1, .mask = 0x007FFFFC } ),
+ .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 2 } ),
CMD( MI_STORE_REGISTER_MEM(1), SMI, !F, 0xFF, W | B,
.reg = { .offset = 1, .mask = 0x007FFFFC },
.bits = {{
@@ -395,16 +395,38 @@ static const struct drm_i915_cmd_table hsw_blt_ring_cmds[] = {
/*
* Register whitelists, sorted by increasing register offset.
+ */
+
+/*
+ * An individual whitelist entry granting access to register addr. If
+ * mask is non-zero the argument of immediate register writes will be
+ * AND-ed with mask, and the command will be rejected if the result
+ * doesn't match value.
+ *
+ * Registers with non-zero mask are only allowed to be written using
+ * LRI.
+ */
+struct drm_i915_reg_descriptor {
+ u32 addr;
+ u32 mask;
+ u32 value;
+};
+
+/* Convenience macro for adding 32-bit registers. */
+#define REG32(address, ...) \
+ { .addr = address, __VA_ARGS__ }
+
+/*
+ * Convenience macro for adding 64-bit registers.
*
* Some registers that userspace accesses are 64 bits. The register
* access commands only allow 32-bit accesses. Hence, we have to include
* entries for both halves of the 64-bit registers.
*/
+#define REG64(addr) \
+ REG32(addr), REG32(addr + sizeof(u32))
-/* Convenience macro for adding 64-bit registers */
-#define REG64(addr) (addr), (addr + sizeof(u32))
-
-static const u32 gen7_render_regs[] = {
+static const struct drm_i915_reg_descriptor gen7_render_regs[] = {
REG64(GPGPU_THREADS_DISPATCHED),
REG64(HS_INVOCATION_COUNT),
REG64(DS_INVOCATION_COUNT),
@@ -417,15 +439,15 @@ static const u32 gen7_render_regs[] = {
REG64(CL_PRIMITIVES_COUNT),
REG64(PS_INVOCATION_COUNT),
REG64(PS_DEPTH_COUNT),
- OACONTROL, /* Only allowed for LRI and SRM. See below. */
+ REG32(OACONTROL), /* Only allowed for LRI and SRM. See below. */
REG64(MI_PREDICATE_SRC0),
REG64(MI_PREDICATE_SRC1),
- GEN7_3DPRIM_END_OFFSET,
- GEN7_3DPRIM_START_VERTEX,
- GEN7_3DPRIM_VERTEX_COUNT,
- GEN7_3DPRIM_INSTANCE_COUNT,
- GEN7_3DPRIM_START_INSTANCE,
- GEN7_3DPRIM_BASE_VERTEX,
+ REG32(GEN7_3DPRIM_END_OFFSET),
+ REG32(GEN7_3DPRIM_START_VERTEX),
+ REG32(GEN7_3DPRIM_VERTEX_COUNT),
+ REG32(GEN7_3DPRIM_INSTANCE_COUNT),
+ REG32(GEN7_3DPRIM_START_INSTANCE),
+ REG32(GEN7_3DPRIM_BASE_VERTEX),
REG64(GEN7_SO_NUM_PRIMS_WRITTEN(0)),
REG64(GEN7_SO_NUM_PRIMS_WRITTEN(1)),
REG64(GEN7_SO_NUM_PRIMS_WRITTEN(2)),
@@ -434,33 +456,41 @@ static const u32 gen7_render_regs[] = {
REG64(GEN7_SO_PRIM_STORAGE_NEEDED(1)),
REG64(GEN7_SO_PRIM_STORAGE_NEEDED(2)),
REG64(GEN7_SO_PRIM_STORAGE_NEEDED(3)),
- GEN7_SO_WRITE_OFFSET(0),
- GEN7_SO_WRITE_OFFSET(1),
- GEN7_SO_WRITE_OFFSET(2),
- GEN7_SO_WRITE_OFFSET(3),
- GEN7_L3SQCREG1,
- GEN7_L3CNTLREG2,
- GEN7_L3CNTLREG3,
+ REG32(GEN7_SO_WRITE_OFFSET(0)),
+ REG32(GEN7_SO_WRITE_OFFSET(1)),
+ REG32(GEN7_SO_WRITE_OFFSET(2)),
+ REG32(GEN7_SO_WRITE_OFFSET(3)),
+ REG32(GEN7_L3SQCREG1),
+ REG32(GEN7_L3CNTLREG2),
+ REG32(GEN7_L3CNTLREG3),
+ REG32(HSW_SCRATCH1,
+ .mask = ~HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE,
+ .value = 0),
+ REG32(HSW_ROW_CHICKEN3,
+ .mask = ~(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE << 16 |
+ HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE),
+ .value = 0),
};
-static const u32 gen7_blt_regs[] = {
- BCS_SWCTRL,
+static const struct drm_i915_reg_descriptor gen7_blt_regs[] = {
+ REG32(BCS_SWCTRL),
};
-static const u32 ivb_master_regs[] = {
- FORCEWAKE_MT,
- DERRMR,
- GEN7_PIPE_DE_LOAD_SL(PIPE_A),
- GEN7_PIPE_DE_LOAD_SL(PIPE_B),
- GEN7_PIPE_DE_LOAD_SL(PIPE_C),
+static const struct drm_i915_reg_descriptor ivb_master_regs[] = {
+ REG32(FORCEWAKE_MT),
+ REG32(DERRMR),
+ REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_A)),
+ REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_B)),
+ REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_C)),
};
-static const u32 hsw_master_regs[] = {
- FORCEWAKE_MT,
- DERRMR,
+static const struct drm_i915_reg_descriptor hsw_master_regs[] = {
+ REG32(FORCEWAKE_MT),
+ REG32(DERRMR),
};
#undef REG64
+#undef REG32
static u32 gen7_render_get_cmd_length_mask(u32 cmd_header)
{
@@ -550,14 +580,16 @@ static bool validate_cmds_sorted(struct intel_engine_cs *ring,
return ret;
}
-static bool check_sorted(int ring_id, const u32 *reg_table, int reg_count)
+static bool check_sorted(int ring_id,
+ const struct drm_i915_reg_descriptor *reg_table,
+ int reg_count)
{
int i;
u32 previous = 0;
bool ret = true;
for (i = 0; i < reg_count; i++) {
- u32 curr = reg_table[i];
+ u32 curr = reg_table[i].addr;
if (curr < previous) {
DRM_ERROR("CMD: table not sorted ring=%d entry=%d reg=0x%08X prev=0x%08X\n",
@@ -804,18 +836,20 @@ find_cmd(struct intel_engine_cs *ring,
return default_desc;
}
-static bool valid_reg(const u32 *table, int count, u32 addr)
+static const struct drm_i915_reg_descriptor *
+find_reg(const struct drm_i915_reg_descriptor *table,
+ int count, u32 addr)
{
- if (table && count != 0) {
+ if (table) {
int i;
for (i = 0; i < count; i++) {
- if (table[i] == addr)
- return true;
+ if (table[i].addr == addr)
+ return &table[i];
}
}
- return false;
+ return NULL;
}
static u32 *vmap_batch(struct drm_i915_gem_object *obj,
@@ -934,7 +968,7 @@ bool i915_needs_cmd_parser(struct intel_engine_cs *ring)
static bool check_cmd(const struct intel_engine_cs *ring,
const struct drm_i915_cmd_descriptor *desc,
- const u32 *cmd,
+ const u32 *cmd, u32 length,
const bool is_master,
bool *oacontrol_set)
{
@@ -950,38 +984,70 @@ static bool check_cmd(const struct intel_engine_cs *ring,
}
if (desc->flags & CMD_DESC_REGISTER) {
- u32 reg_addr = cmd[desc->reg.offset] & desc->reg.mask;
-
/*
- * OACONTROL requires some special handling for writes. We
- * want to make sure that any batch which enables OA also
- * disables it before the end of the batch. The goal is to
- * prevent one process from snooping on the perf data from
- * another process. To do that, we need to check the value
- * that will be written to the register. Hence, limit
- * OACONTROL writes to only MI_LOAD_REGISTER_IMM commands.
+ * Get the distance between individual register offset
+ * fields if the command can perform more than one
+ * access at a time.
*/
- if (reg_addr == OACONTROL) {
- if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
- DRM_DEBUG_DRIVER("CMD: Rejected LRM to OACONTROL\n");
+ const u32 step = desc->reg.step ? desc->reg.step : length;
+ u32 offset;
+
+ for (offset = desc->reg.offset; offset < length;
+ offset += step) {
+ const u32 reg_addr = cmd[offset] & desc->reg.mask;
+ const struct drm_i915_reg_descriptor *reg =
+ find_reg(ring->reg_table, ring->reg_count,
+ reg_addr);
+
+ if (!reg && is_master)
+ reg = find_reg(ring->master_reg_table,
+ ring->master_reg_count,
+ reg_addr);
+
+ if (!reg) {
+ DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (ring=%d)\n",
+ reg_addr, *cmd, ring->id);
return false;
}
- if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1))
- *oacontrol_set = (cmd[2] != 0);
- }
+ /*
+ * OACONTROL requires some special handling for
+ * writes. We want to make sure that any batch which
+ * enables OA also disables it before the end of the
+ * batch. The goal is to prevent one process from
+ * snooping on the perf data from another process. To do
+ * that, we need to check the value that will be written
+ * to the register. Hence, limit OACONTROL writes to
+ * only MI_LOAD_REGISTER_IMM commands.
+ */
+ if (reg_addr == OACONTROL) {
+ if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
+ DRM_DEBUG_DRIVER("CMD: Rejected LRM to OACONTROL\n");
+ return false;
+ }
+
+ if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1))
+ *oacontrol_set = (cmd[offset + 1] != 0);
+ }
- if (!valid_reg(ring->reg_table,
- ring->reg_count, reg_addr)) {
- if (!is_master ||
- !valid_reg(ring->master_reg_table,
- ring->master_reg_count,
- reg_addr)) {
- DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (ring=%d)\n",
- reg_addr,
- *cmd,
- ring->id);
- return false;
+ /*
+ * Check the value written to the register against the
+ * allowed mask/value pair given in the whitelist entry.
+ */
+ if (reg->mask) {
+ if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
+ DRM_DEBUG_DRIVER("CMD: Rejected LRM to masked register 0x%08X\n",
+ reg_addr);
+ return false;
+ }
+
+ if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1) &&
+ (offset + 2 > length ||
+ (cmd[offset + 1] & reg->mask) != reg->value)) {
+ DRM_DEBUG_DRIVER("CMD: Rejected LRI to masked register 0x%08X\n",
+ reg_addr);
+ return false;
+ }
}
}
}
@@ -1105,7 +1171,8 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
break;
}
- if (!check_cmd(ring, desc, cmd, is_master, &oacontrol_set)) {
+ if (!check_cmd(ring, desc, cmd, length, is_master,
+ &oacontrol_set)) {
ret = -EINVAL;
break;
}
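
For readers skimming the check_cmd() hunk above: the parser now derives the distance between register-offset fields from desc->reg.step, because an MI_LOAD_REGISTER_IMM payload is the header dword followed by (offset, value) pairs. A minimal sketch of that walk, with the starting offset, step and address mask hard-coded for illustration rather than read from a descriptor:

#include <stdint.h>

/*
 * Illustrative LRI walk: cmd[0] is the command header, then
 * (register offset, value) pairs follow, so offsets sit two dwords
 * apart -- the .step = 2 used by the common_cmds entry in this diff.
 */
static void walk_lri(const uint32_t *cmd, uint32_t length)
{
	const uint32_t step = 2;                        /* stand-in for desc->reg.step */

	for (uint32_t off = 1; off < length; off += step) {
		uint32_t reg_addr = cmd[off] & 0x007FFFFC;  /* the descriptor's address mask */

		if (off + 2 > length)                   /* offset with no value dword */
			break;

		/* the real parser looks reg_addr up in the whitelist and, for
		 * masked entries, checks cmd[off + 1] against mask/value */
		(void)reg_addr;
		(void)cmd[off + 1];
	}
}
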
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 72f5a3f9dbf2..542fac628b28 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2300,10 +2300,15 @@ struct drm_i915_cmd_descriptor {
* Describes where to find a register address in the command to check
* against the ring's register whitelist. Only valid if flags has the
* CMD_DESC_REGISTER bit set.
+ *
+ * A non-zero step value implies that the command may access multiple
+ * registers in sequence (e.g. LRI), in that case step gives the
+ * distance in dwords between individual offset fields.
*/
struct {
u32 offset;
u32 mask;
+ u32 step;
} reg;
#define MAX_CMD_DESC_BITMASKS 3
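
Tying the two sketches together, here is a small, self-contained toy program (made-up register offsets and header dword, nothing i915-specific) showing how a whitelist of this shape rejects an LRI that sets a forbidden bit:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Toy whitelist entry, same shape as the simplified sketch earlier. */
struct reg_desc {
	uint32_t addr;
	uint32_t mask;
	uint32_t value;
};

/* Toy whitelist: one unconstrained register and one masked register. */
static const struct reg_desc whitelist[] = {
	{ .addr = 0x2300 },                             /* free to write */
	{ .addr = 0xb008, .mask = ~0x20u, .value = 0 }, /* only bit 5 may be set */
};

static const struct reg_desc *find(uint32_t addr)
{
	for (size_t i = 0; i < sizeof(whitelist) / sizeof(whitelist[0]); i++)
		if (whitelist[i].addr == addr)
			return &whitelist[i];
	return NULL;
}

/* Validate a toy LRI: cmd[0] is the header, then (offset, value) pairs. */
static bool check_lri(const uint32_t *cmd, uint32_t length)
{
	for (uint32_t off = 1; off + 1 < length; off += 2) {
		const struct reg_desc *reg = find(cmd[off]);

		if (!reg)
			return false;                   /* unknown register */
		if (reg->mask && (cmd[off + 1] & reg->mask) != reg->value)
			return false;                   /* violates mask/value */
	}
	return true;
}

int main(void)
{
	uint32_t ok[]  = { 0xdead0000, 0xb008, 0x20, 0x2300, 0x1234 };
	uint32_t bad[] = { 0xdead0000, 0xb008, 0x40 }; /* sets a forbidden bit */

	printf("ok:  %d\n", check_lri(ok, 5));          /* prints 1 */
	printf("bad: %d\n", check_lri(bad, 3));         /* prints 0 */
	return 0;
}
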
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index be35f0486202..c35e0352b77f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2813,9 +2813,6 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
{
WARN_ON(i915_verify_lists(ring->dev));
- if (list_empty(&ring->active_list))
- return;
-
/* Retire requests first as we use it above for the early return.
* If we retire requests last, we may use a later seqno and so clear
* the requests lists without clearing the active list, leading to
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 4e3f302d86f7..e047105837c9 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -87,7 +87,8 @@ static void ironlake_pch_clock_get(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
static int intel_set_mode(struct drm_crtc *crtc,
- struct drm_atomic_state *state);
+ struct drm_atomic_state *state,
+ bool force_restore);
static int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *ifb,
struct drm_mode_fb_cmd2 *mode_cmd,
@@ -10096,7 +10097,7 @@ retry:
drm_mode_copy(&crtc_state->base.mode, mode);
- if (intel_set_mode(crtc, state)) {
+ if (intel_set_mode(crtc, state, true)) {
DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
if (old->release_fb)
old->release_fb->funcs->destroy(old->release_fb);
@@ -10170,7 +10171,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
if (ret)
goto fail;
- ret = intel_set_mode(crtc, state);
+ ret = intel_set_mode(crtc, state, true);
if (ret)
goto fail;
@@ -11385,10 +11386,6 @@ static void intel_modeset_fixup_state(struct drm_atomic_state *state)
crtc->base.enabled = crtc->base.state->enable;
crtc->config = to_intel_crtc_state(crtc->base.state);
}
-
- /* Copy the new configuration to the staged state, to keep the few
- * pieces of code that haven't been converted yet happy */
- intel_modeset_update_staged_output_state(state->dev);
}
static void
@@ -12646,20 +12643,24 @@ static int __intel_set_mode(struct drm_crtc *modeset_crtc,
}
static int intel_set_mode_with_config(struct drm_crtc *crtc,
- struct intel_crtc_state *pipe_config)
+ struct intel_crtc_state *pipe_config,
+ bool force_restore)
{
int ret;
ret = __intel_set_mode(crtc, pipe_config);
- if (ret == 0)
+ if (ret == 0 && force_restore) {
+ intel_modeset_update_staged_output_state(crtc->dev);
intel_modeset_check_state(crtc->dev);
+ }
return ret;
}
static int intel_set_mode(struct drm_crtc *crtc,
- struct drm_atomic_state *state)
+ struct drm_atomic_state *state,
+ bool force_restore)
{
struct intel_crtc_state *pipe_config;
int ret = 0;
@@ -12670,7 +12671,7 @@ static int intel_set_mode(struct drm_crtc *crtc,
goto out;
}
- ret = intel_set_mode_with_config(crtc, pipe_config);
+ ret = intel_set_mode_with_config(crtc, pipe_config, force_restore);
if (ret)
goto out;
@@ -12682,7 +12683,6 @@ void intel_crtc_restore_mode(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_atomic_state *state;
- struct intel_crtc *intel_crtc;
struct intel_encoder *encoder;
struct intel_connector *connector;
struct drm_connector_state *connector_state;
@@ -12725,29 +12725,23 @@ void intel_crtc_restore_mode(struct drm_crtc *crtc)
}
}
- for_each_intel_crtc(dev, intel_crtc) {
- if (intel_crtc->new_enabled == intel_crtc->base.enabled)
- continue;
-
- crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
- if (IS_ERR(crtc_state)) {
- DRM_DEBUG_KMS("Failed to add [CRTC:%d] to state: %ld\n",
- intel_crtc->base.base.id,
- PTR_ERR(crtc_state));
- continue;
- }
+ crtc_state = intel_atomic_get_crtc_state(state, to_intel_crtc(crtc));
+ if (IS_ERR(crtc_state)) {
+ DRM_DEBUG_KMS("Failed to add [CRTC:%d] to state: %ld\n",
+ crtc->base.id, PTR_ERR(crtc_state));
+ drm_atomic_state_free(state);
+ return;
+ }
- crtc_state->base.active = crtc_state->base.enable =
- intel_crtc->new_enabled;
+ crtc_state->base.active = crtc_state->base.enable =
+ to_intel_crtc(crtc)->new_enabled;
- if (&intel_crtc->base == crtc)
- drm_mode_copy(&crtc_state->base.mode, &crtc->mode);
- }
+ drm_mode_copy(&crtc_state->base.mode, &crtc->mode);
intel_modeset_setup_plane_state(state, crtc, &crtc->mode,
crtc->primary->fb, crtc->x, crtc->y);
- ret = intel_set_mode(crtc, state);
+ ret = intel_set_mode(crtc, state, false);
if (ret)
drm_atomic_state_free(state);
}
@@ -12947,7 +12941,7 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
primary_plane_was_visible = primary_plane_visible(set->crtc);
- ret = intel_set_mode_with_config(set->crtc, pipe_config);
+ ret = intel_set_mode_with_config(set->crtc, pipe_config, true);
if (ret == 0 &&
pipe_config->base.enable &&
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 39f6dfc0ee54..e539314ae87e 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -118,6 +118,7 @@ struct intel_ringbuffer {
};
struct intel_context;
+struct drm_i915_reg_descriptor;
struct intel_engine_cs {
const char *name;
@@ -300,14 +301,14 @@ struct intel_engine_cs {
/*
* Table of registers allowed in commands that read/write registers.
*/
- const u32 *reg_table;
+ const struct drm_i915_reg_descriptor *reg_table;
int reg_count;
/*
* Table of registers allowed in commands that read/write registers, but
* only from the DRM master.
*/
- const u32 *master_reg_table;
+ const struct drm_i915_reg_descriptor *master_reg_table;
int master_reg_count;
/*