author     Harald Freudenberger <freude@linux.ibm.com>  2023-03-10 17:46:49 +0100
committer  Heiko Carstens <hca@linux.ibm.com>  2023-03-20 11:12:49 +0100
commit     2d72eaf036d2f2b7ec16cda2d0e7ce292537dad9 (patch)
tree       f1bb1e46d956db7f1e6d308b6b869b83ee42727f /drivers/s390
parent     263c8454dbffd4b878ea9bb403e157a56de98aca (diff)
s390/ap: implement SE AP bind, unbind and associate
Implementation of the new functions for SE AP support: bind, unbind
and associate. There are two new sysfs attributes for this:

/sys/devices/ap/cardxx/xx.yyyy/se_bind
/sys/devices/ap/cardxx/xx.yyyy/se_associate

Writing a 1 into the se_bind attribute triggers the SE AP bind for
this AP queue; writing a 0 does an unbind, which is a reset (RAPQ)
with the F bit enabled.

The se_associate attribute takes an integer value in the range
0...2^16-1. This is the index into a secrets table fed into the
ultravisor. For more details please see the architecture documents.

Both new AP queue attributes are only visible inside an SE guest
with SB (Secure Binding) available.

Signed-off-by: Harald Freudenberger <freude@linux.ibm.com>
Reviewed-by: Holger Dengler <dengler@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
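For illustration, a minimal user-space sketch (not part of this patch) that
drives both attributes; the cardxx/xx.yyyy placeholders stand for a real
card and queue id, and error handling is trimmed to the essentials:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Hypothetical helper: write a small string to a sysfs attribute. */
static int sysfs_write(const char *path, const char *val)
{
        int fd = open(path, O_WRONLY);

        if (fd < 0 || write(fd, val, strlen(val)) < 0) {
                perror(path);
                if (fd >= 0)
                        close(fd);
                return -1;
        }
        return close(fd);
}

int main(void)
{
        /* bind the queue (writing "0" instead would unbind it) */
        sysfs_write("/sys/devices/ap/cardxx/xx.yyyy/se_bind", "1");
        /* associate with secret index 5 (valid range is 0..65535) */
        sysfs_write("/sys/devices/ap/cardxx/xx.yyyy/se_associate", "5");
        return 0;
}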
Diffstat (limited to 'drivers/s390')
-rw-r--r--  drivers/s390/crypto/ap_bus.h     46
-rw-r--r--  drivers/s390/crypto/ap_queue.c  262
2 files changed, 291 insertions, 17 deletions
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index f14323c278a3..101fb324476f 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -39,22 +39,32 @@ static inline int ap_test_bit(unsigned int *ptr, unsigned int nr)
return (*ptr & (0x80000000u >> nr)) != 0;
}
-#define AP_RESPONSE_NORMAL 0x00
-#define AP_RESPONSE_Q_NOT_AVAIL 0x01
-#define AP_RESPONSE_RESET_IN_PROGRESS 0x02
-#define AP_RESPONSE_DECONFIGURED 0x03
-#define AP_RESPONSE_CHECKSTOPPED 0x04
-#define AP_RESPONSE_BUSY 0x05
-#define AP_RESPONSE_INVALID_ADDRESS 0x06
-#define AP_RESPONSE_OTHERWISE_CHANGED 0x07
-#define AP_RESPONSE_INVALID_GISA 0x08
-#define AP_RESPONSE_Q_FULL 0x10
-#define AP_RESPONSE_NO_PENDING_REPLY 0x10
-#define AP_RESPONSE_INDEX_TOO_BIG 0x11
-#define AP_RESPONSE_NO_FIRST_PART 0x13
-#define AP_RESPONSE_MESSAGE_TOO_BIG 0x15
-#define AP_RESPONSE_REQ_FAC_NOT_INST 0x16
-#define AP_RESPONSE_INVALID_DOMAIN 0x42
+#define AP_RESPONSE_NORMAL 0x00
+#define AP_RESPONSE_Q_NOT_AVAIL 0x01
+#define AP_RESPONSE_RESET_IN_PROGRESS 0x02
+#define AP_RESPONSE_DECONFIGURED 0x03
+#define AP_RESPONSE_CHECKSTOPPED 0x04
+#define AP_RESPONSE_BUSY 0x05
+#define AP_RESPONSE_INVALID_ADDRESS 0x06
+#define AP_RESPONSE_OTHERWISE_CHANGED 0x07
+#define AP_RESPONSE_INVALID_GISA 0x08
+#define AP_RESPONSE_Q_BOUND_TO_ANOTHER 0x09
+#define AP_RESPONSE_STATE_CHANGE_IN_PROGRESS 0x0A
+#define AP_RESPONSE_Q_NOT_BOUND 0x0B
+#define AP_RESPONSE_Q_FULL 0x10
+#define AP_RESPONSE_NO_PENDING_REPLY 0x10
+#define AP_RESPONSE_INDEX_TOO_BIG 0x11
+#define AP_RESPONSE_NO_FIRST_PART 0x13
+#define AP_RESPONSE_MESSAGE_TOO_BIG 0x15
+#define AP_RESPONSE_REQ_FAC_NOT_INST 0x16
+#define AP_RESPONSE_Q_BIND_ERROR 0x30
+#define AP_RESPONSE_Q_NOT_AVAIL_FOR_ASSOC 0x31
+#define AP_RESPONSE_Q_NOT_EMPTY 0x32
+#define AP_RESPONSE_BIND_LIMIT_EXCEEDED 0x33
+#define AP_RESPONSE_INVALID_ASSOC_SECRET 0x34
+#define AP_RESPONSE_ASSOC_SECRET_NOT_UNIQUE 0x35
+#define AP_RESPONSE_ASSOC_FAILED 0x36
+#define AP_RESPONSE_INVALID_DOMAIN 0x42
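As a reading aid, a hedged sketch (the helper name is invented, it is not
part of the patch) that maps the newly added SE-related response codes to
short strings for debug output:

/* Hypothetical debug helper: decode the SE-related response codes
 * added above; any other code falls through to "other". */
static const char *ap_se_rc_name(unsigned int rc)
{
        switch (rc) {
        case AP_RESPONSE_Q_BOUND_TO_ANOTHER:
                return "queue bound to another configuration";
        case AP_RESPONSE_Q_NOT_BOUND:
                return "queue not bound";
        case AP_RESPONSE_Q_BIND_ERROR:
                return "bind error";
        case AP_RESPONSE_Q_NOT_AVAIL_FOR_ASSOC:
                return "queue not available for association";
        case AP_RESPONSE_BIND_LIMIT_EXCEEDED:
                return "bind limit exceeded";
        case AP_RESPONSE_INVALID_ASSOC_SECRET:
                return "invalid association secret";
        case AP_RESPONSE_ASSOC_SECRET_NOT_UNIQUE:
                return "association secret not unique";
        case AP_RESPONSE_ASSOC_FAILED:
                return "association failed";
        default:
                return "other";
        }
}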
/*
* Known device types
@@ -92,6 +102,7 @@ enum ap_sm_state {
AP_SM_STATE_IDLE,
AP_SM_STATE_WORKING,
AP_SM_STATE_QUEUE_FULL,
+ AP_SM_STATE_ASSOC_WAIT,
NR_AP_SM_STATES
};
@@ -189,6 +200,7 @@ struct ap_card {
};
#define TAPQ_CARD_FUNC_CMP_MASK 0xFFFF0000
+#define ASSOC_IDX_INVALID 0x10000
#define to_ap_card(x) container_of((x), struct ap_card, ap_dev.device)
@@ -202,6 +214,7 @@ struct ap_queue {
bool chkstop; /* checkstop state */
ap_qid_t qid; /* AP queue id. */
bool interrupt; /* indicate if interrupts are enabled */
+ unsigned int assoc_idx; /* SE association index */
int queue_count; /* # messages currently on AP queue. */
int pendingq_count; /* # requests on pendingq list. */
int requestq_count; /* # requests on requestq list. */
@@ -212,6 +225,7 @@ struct ap_queue {
struct list_head requestq; /* List of message yet to be sent. */
struct ap_message *reply; /* Per device reply message. */
enum ap_sm_state sm_state; /* ap queue state machine state */
+ int rapq_fbit; /* fbit arg for next rapq invocation */
int last_err_rc; /* last error state response code */
};
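Note the choice of ASSOC_IDX_INVALID: 0x10000 is the first value outside the
16-bit index range, so it can double as the "no association" sentinel while
every valid index compares strictly smaller. A minimal sketch of the
resulting range check (mirroring what se_associate_store() below does;
EINVAL assumed from linux/errno.h):

/* Sketch: a user supplied association index is valid iff it fits
 * into 16 bits; ASSOC_IDX_INVALID (0x10000) lies just outside. */
static int check_assoc_idx(unsigned int value)
{
        return value < ASSOC_IDX_INVALID ? 0 : -EINVAL;
}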
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index 60dbabec25cf..2be63f2554bd 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -18,6 +18,21 @@
static void __ap_flush_queue(struct ap_queue *aq);
+/*
+ * some AP queue helper functions
+ */
+
+static inline bool ap_q_supports_bind(struct ap_queue *aq)
+{
+ return ap_test_bit(&aq->card->functions, AP_FUNC_EP11) ||
+ ap_test_bit(&aq->card->functions, AP_FUNC_ACCEL);
+}
+
+static inline bool ap_q_supports_assoc(struct ap_queue *aq)
+{
+ return ap_test_bit(&aq->card->functions, AP_FUNC_EP11);
+}
+
/**
* ap_queue_enable_irq(): Enable interrupt support on this AP queue.
* @aq: The AP queue
@@ -322,12 +337,13 @@ static enum ap_sm_wait ap_sm_reset(struct ap_queue *aq)
{
struct ap_queue_status status;
- status = ap_rapq(aq->qid, 0);
+ status = ap_rapq(aq->qid, aq->rapq_fbit);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
case AP_RESPONSE_RESET_IN_PROGRESS:
aq->sm_state = AP_SM_STATE_RESET_WAIT;
aq->interrupt = false;
+ aq->rapq_fbit = 0;
return AP_SM_WAIT_LOW_TIMEOUT;
default:
aq->dev_state = AP_DEV_STATE_ERROR;
@@ -423,6 +439,59 @@ static enum ap_sm_wait ap_sm_setirq_wait(struct ap_queue *aq)
}
}
+/**
+ * ap_sm_assoc_wait(): Test queue for completion of a pending
+ * association request.
+ * @aq: pointer to the AP queue
+ */
+static enum ap_sm_wait ap_sm_assoc_wait(struct ap_queue *aq)
+{
+ struct ap_queue_status status;
+ struct ap_tapq_gr2 info;
+
+ status = ap_test_queue(aq->qid, 1, &info);
+ /* handle asynchronous error on this queue */
+ if (status.async && status.response_code) {
+ aq->dev_state = AP_DEV_STATE_ERROR;
+ aq->last_err_rc = status.response_code;
+ AP_DBF_WARN("%s asynch RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
+ __func__, status.response_code,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+ return AP_SM_WAIT_NONE;
+ }
+ if (status.response_code > AP_RESPONSE_BUSY) {
+ aq->dev_state = AP_DEV_STATE_ERROR;
+ aq->last_err_rc = status.response_code;
+ AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
+ __func__, status.response_code,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+ return AP_SM_WAIT_NONE;
+ }
+
+ /* check bs bits */
+ switch (info.bs) {
+ case AP_BS_Q_USABLE:
+ /* association is through */
+ aq->sm_state = AP_SM_STATE_IDLE;
+ AP_DBF_DBG("%s queue 0x%02x.%04x associated with %u\n",
+ __func__, AP_QID_CARD(aq->qid),
+ AP_QID_QUEUE(aq->qid), aq->assoc_idx);
+ return AP_SM_WAIT_NONE;
+ case AP_BS_Q_USABLE_NO_SECURE_KEY:
+ /* association still pending */
+ return AP_SM_WAIT_LOW_TIMEOUT;
+ default:
+ /* reset from 'outside' happened or no idea at all */
+ aq->assoc_idx = ASSOC_IDX_INVALID;
+ aq->dev_state = AP_DEV_STATE_ERROR;
+ aq->last_err_rc = status.response_code;
+ AP_DBF_WARN("%s bs 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
+ __func__, info.bs,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+ return AP_SM_WAIT_NONE;
+ }
+}
+
/*
* AP state machine jump table
*/
@@ -451,6 +520,10 @@ static ap_func_t *ap_jumptable[NR_AP_SM_STATES][NR_AP_SM_EVENTS] = {
[AP_SM_EVENT_POLL] = ap_sm_read,
[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
},
+ [AP_SM_STATE_ASSOC_WAIT] = {
+ [AP_SM_EVENT_POLL] = ap_sm_assoc_wait,
+ [AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
+ },
};
enum ap_sm_wait ap_sm_event(struct ap_queue *aq, enum ap_sm_event event)
@@ -696,6 +769,9 @@ static ssize_t states_show(struct device *dev,
case AP_SM_STATE_QUEUE_FULL:
rc += sysfs_emit_at(buf, rc, " [FULL]\n");
break;
+ case AP_SM_STATE_ASSOC_WAIT:
+ rc += sysfs_emit_at(buf, rc, " [ASSOC_WAIT]\n");
+ break;
default:
rc += sysfs_emit_at(buf, rc, " [UNKNOWN]\n");
}
@@ -780,6 +856,186 @@ static struct device_type ap_queue_type = {
.groups = ap_queue_dev_attr_groups,
};
+static ssize_t se_bind_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ap_queue *aq = to_ap_queue(dev);
+ struct ap_queue_status status;
+ struct ap_tapq_gr2 info;
+
+ if (!ap_q_supports_bind(aq))
+ return sysfs_emit(buf, "-\n");
+
+ status = ap_test_queue(aq->qid, 1, &info);
+ if (status.response_code > AP_RESPONSE_BUSY) {
+ AP_DBF_DBG("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
+ __func__, status.response_code,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+ return -EIO;
+ }
+ switch (info.bs) {
+ case AP_BS_Q_USABLE:
+ case AP_BS_Q_USABLE_NO_SECURE_KEY:
+ return sysfs_emit(buf, "bound\n");
+ default:
+ return sysfs_emit(buf, "unbound\n");
+ }
+}
+
+static ssize_t se_bind_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ap_queue *aq = to_ap_queue(dev);
+ struct ap_queue_status status;
+ bool value;
+ int rc;
+
+ if (!ap_q_supports_bind(aq))
+ return -EINVAL;
+
+ /* only 0 (unbind) and 1 (bind) allowed */
+ rc = kstrtobool(buf, &value);
+ if (rc)
+ return rc;
+
+ if (value) {
+ /* bind, do BAPQ */
+ spin_lock_bh(&aq->lock);
+ if (aq->sm_state < AP_SM_STATE_IDLE) {
+ spin_unlock_bh(&aq->lock);
+ return -EBUSY;
+ }
+ status = ap_bapq(aq->qid);
+ spin_unlock_bh(&aq->lock);
+ if (status.response_code) {
+ AP_DBF_WARN("%s RC 0x%02x on bapq(0x%02x.%04x)\n",
+ __func__, status.response_code,
+ AP_QID_CARD(aq->qid),
+ AP_QID_QUEUE(aq->qid));
+ return -EIO;
+ }
+ } else {
+ /* unbind, set F bit arg and trigger RAPQ */
+ spin_lock_bh(&aq->lock);
+ __ap_flush_queue(aq);
+ aq->rapq_fbit = 1;
+ aq->assoc_idx = ASSOC_IDX_INVALID;
+ aq->sm_state = AP_SM_STATE_RESET_START;
+ ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
+ spin_unlock_bh(&aq->lock);
+ }
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(se_bind);
+
+static ssize_t se_associate_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ap_queue *aq = to_ap_queue(dev);
+ struct ap_queue_status status;
+ struct ap_tapq_gr2 info;
+
+ if (!ap_q_supports_assoc(aq))
+ return sysfs_emit(buf, "-\n");
+
+ status = ap_test_queue(aq->qid, 1, &info);
+ if (status.response_code > AP_RESPONSE_BUSY) {
+ AP_DBF_DBG("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
+ __func__, status.response_code,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+ return -EIO;
+ }
+
+ switch (info.bs) {
+ case AP_BS_Q_USABLE:
+ if (aq->assoc_idx == ASSOC_IDX_INVALID) {
+ AP_DBF_WARN("%s AP_BS_Q_USABLE but invalid assoc_idx\n", __func__);
+ return -EIO;
+ }
+ return sysfs_emit(buf, "associated %u\n", aq->assoc_idx);
+ case AP_BS_Q_USABLE_NO_SECURE_KEY:
+ if (aq->assoc_idx != ASSOC_IDX_INVALID)
+ return sysfs_emit(buf, "association pending\n");
+ fallthrough;
+ default:
+ return sysfs_emit(buf, "unassociated\n");
+ }
+}
+
+static ssize_t se_associate_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ap_queue *aq = to_ap_queue(dev);
+ struct ap_queue_status status;
+ unsigned int value;
+ int rc;
+
+ if (!ap_q_supports_assoc(aq))
+ return -EINVAL;
+
+ /* association index needs to be >= 0 */
+ rc = kstrtouint(buf, 0, &value);
+ if (rc)
+ return rc;
+ if (value >= ASSOC_IDX_INVALID)
+ return -EINVAL;
+
+ spin_lock_bh(&aq->lock);
+
+ /* sm should be in idle state */
+ if (aq->sm_state != AP_SM_STATE_IDLE) {
+ spin_unlock_bh(&aq->lock);
+ return -EBUSY;
+ }
+
+ /* already associated or association pending ? */
+ if (aq->assoc_idx != ASSOC_IDX_INVALID) {
+ spin_unlock_bh(&aq->lock);
+ return -EINVAL;
+ }
+
+ /* trigger the asynchronous association request */
+ status = ap_aapq(aq->qid, value);
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ case AP_RESPONSE_STATE_CHANGE_IN_PROGRESS:
+ aq->sm_state = AP_SM_STATE_ASSOC_WAIT;
+ aq->assoc_idx = value;
+ ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
+ spin_unlock_bh(&aq->lock);
+ break;
+ default:
+ spin_unlock_bh(&aq->lock);
+ AP_DBF_WARN("%s RC 0x%02x on aapq(0x%02x.%04x)\n",
+ __func__, status.response_code,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+ return -EIO;
+ }
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(se_associate);
+
+static struct attribute *ap_queue_dev_sb_attrs[] = {
+ &dev_attr_se_bind.attr,
+ &dev_attr_se_associate.attr,
+ NULL
+};
+
+static struct attribute_group ap_queue_dev_sb_attr_group = {
+ .attrs = ap_queue_dev_sb_attrs
+};
+
+static const struct attribute_group *ap_queue_dev_sb_attr_groups[] = {
+ &ap_queue_dev_sb_attr_group,
+ NULL
+};
+
static void ap_queue_device_release(struct device *dev)
{
struct ap_queue *aq = to_ap_queue(dev);
@@ -801,6 +1057,9 @@ struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
aq->ap_dev.device.release = ap_queue_device_release;
aq->ap_dev.device.type = &ap_queue_type;
aq->ap_dev.device_type = device_type;
+ // add optional SE secure binding attributes group
+ if (ap_sb_available() && is_prot_virt_guest())
+ aq->ap_dev.device.groups = ap_queue_dev_sb_attr_groups;
aq->qid = qid;
aq->interrupt = false;
spin_lock_init(&aq->lock);
@@ -947,6 +1206,7 @@ void ap_queue_init_state(struct ap_queue *aq)
aq->dev_state = AP_DEV_STATE_OPERATING;
aq->sm_state = AP_SM_STATE_RESET_START;
aq->last_err_rc = 0;
+ aq->assoc_idx = ASSOC_IDX_INVALID;
ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
spin_unlock_bh(&aq->lock);
}
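Since the association completes asynchronously (AP_SM_STATE_ASSOC_WAIT polls
via TAPQ), user space has to read the attribute back to observe the result.
A hedged user-space sketch of that read-back loop, again with the path
placeholders used above:

#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        char line[64];
        FILE *f;

        for (;;) {
                f = fopen("/sys/devices/ap/cardxx/xx.yyyy/se_associate", "r");
                if (!f)
                        return 1;
                if (!fgets(line, sizeof(line), f)) {
                        fclose(f);
                        return 1;
                }
                fclose(f);
                /* se_associate_show() emits "associated <n>",
                 * "association pending" or "unassociated" */
                if (strncmp(line, "association pending", 19) != 0)
                        break;
                sleep(1);
        }
        printf("final state: %s", line);
        return 0;
}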