linux-user, arm: add syscall table generation support
[qemu.git] / hw / tpm / tpm_spapr.c
1 /*
2 * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
3 *
4 * PAPR Virtual TPM
5 *
6 * Copyright (c) 2015, 2017, 2019 IBM Corporation.
7 *
8 * Authors:
9 * Stefan Berger <stefanb@linux.vnet.ibm.com>
10 *
11 * This code is licensed under the GPL version 2 or later. See the
12 * COPYING file in the top-level directory.
13 *
14 */
15
16 #include "qemu/osdep.h"
17 #include "qemu/error-report.h"
18 #include "qapi/error.h"
19 #include "hw/qdev-properties.h"
20 #include "migration/vmstate.h"
21
22 #include "sysemu/tpm_backend.h"
23 #include "tpm_int.h"
24 #include "tpm_util.h"
25
26 #include "hw/ppc/spapr.h"
27 #include "hw/ppc/spapr_vio.h"
28 #include "trace.h"
29
/* Compile-time debug switch; appears unused in this file — TODO confirm */
#define DEBUG_SPAPR 0

/* Checked QOM cast: object pointer -> SpaprTpmState */
#define VIO_SPAPR_VTPM(obj) \
    OBJECT_CHECK(SpaprTpmState, (obj), TYPE_TPM_SPAPR)
34
/*
 * One 16-byte Command/Response Queue (CRQ) entry as exchanged with the
 * guest.  Multi-byte fields are big-endian on the wire; the code below
 * converts with cpu_to_be*() / be*_to_cpu() at the access sites.
 */
typedef struct TpmCrq {
    uint8_t valid;  /* 0x80: cmd; 0xc0: init crq */
                    /* 0x81-0x83: CRQ message response */
    uint8_t msg;    /* see below */
    uint16_t len;   /* len of TPM request; len of TPM response */
    uint32_t data;  /* rtce_dma_handle when sending TPM request */
    uint64_t reserved;
} TpmCrq;
43
/* CRQ 'valid' byte values */
#define SPAPR_VTPM_VALID_INIT_CRQ_COMMAND 0xC0  /* CRQ initialization */
#define SPAPR_VTPM_VALID_COMMAND 0x80           /* command/payload CRQ */
/* OR-ed into 'msg' when a CRQ carries a response rather than a request */
#define SPAPR_VTPM_MSG_RESULT 0x80

/* msg types for valid = SPAPR_VTPM_VALID_INIT_CRQ */
#define SPAPR_VTPM_INIT_CRQ_RESULT 0x1
#define SPAPR_VTPM_INIT_CRQ_COMPLETE_RESULT 0x2

/* msg types for valid = SPAPR_VTPM_VALID_CMD */
#define SPAPR_VTPM_GET_VERSION 0x1
#define SPAPR_VTPM_TPM_COMMAND 0x2
#define SPAPR_VTPM_GET_RTCE_BUFFER_SIZE 0x3
#define SPAPR_VTPM_PREPARE_TO_SUSPEND 0x4

/* response error messages (sent in the 'msg' field) */
#define SPAPR_VTPM_VTPM_ERROR 0xff

/* error codes (sent in the 'data' field alongside SPAPR_VTPM_VTPM_ERROR) */
#define SPAPR_VTPM_ERR_COPY_IN_FAILED 0x3
#define SPAPR_VTPM_ERR_COPY_OUT_FAILED 0x4

/* upper bound for the request/response buffer and RTCE DMA transfers */
#define TPM_SPAPR_BUFFER_MAX 4096
66
/* Per-device state of the PAPR vTPM VIO device */
typedef struct {
    SpaprVioDevice vdev;        /* parent VIO device (must be first) */

    TpmCrq crq; /* track single TPM command */

    /* simple command lifecycle; only one command in flight at a time */
    uint8_t state;
#define SPAPR_VTPM_STATE_NONE         0  /* idle */
#define SPAPR_VTPM_STATE_EXECUTION    1  /* backend is processing a cmd */
#define SPAPR_VTPM_STATE_COMPLETION   2  /* response available */

    /* shared request/response buffer, TPM_SPAPR_BUFFER_MAX bytes */
    unsigned char *buffer;

    uint32_t numbytes; /* number of bytes to deliver on resume */

    TPMBackendCmd cmd;          /* in-flight command descriptor */

    TPMBackend *be_driver;      /* backend set via 'tpmdev' property */
    TPMVersion be_tpm_version;  /* cached backend TPM version */

    size_t be_buffer_size;      /* negotiated transfer size (<= MAX) */
} SpaprTpmState;
88
89 /*
90 * Send a request to the TPM.
91 */
92 static void tpm_spapr_tpm_send(SpaprTpmState *s)
93 {
94 if (trace_event_get_state_backends(TRACE_TPM_SPAPR_SHOW_BUFFER)) {
95 tpm_util_show_buffer(s->buffer, s->be_buffer_size, "To TPM");
96 }
97
98 s->state = SPAPR_VTPM_STATE_EXECUTION;
99 s->cmd = (TPMBackendCmd) {
100 .locty = 0,
101 .in = s->buffer,
102 .in_len = MIN(tpm_cmd_get_size(s->buffer), s->be_buffer_size),
103 .out = s->buffer,
104 .out_len = s->be_buffer_size,
105 };
106
107 tpm_backend_deliver_request(s->be_driver, &s->cmd);
108 }
109
110 static int tpm_spapr_process_cmd(SpaprTpmState *s, uint64_t dataptr)
111 {
112 long rc;
113
114 /* a max. of be_buffer_size bytes can be transported */
115 rc = spapr_vio_dma_read(&s->vdev, dataptr,
116 s->buffer, s->be_buffer_size);
117 if (rc) {
118 error_report("tpm_spapr_got_payload: DMA read failure");
119 }
120 /* let vTPM handle any malformed request */
121 tpm_spapr_tpm_send(s);
122
123 return rc;
124 }
125
/* Post a response CRQ to the guest via the VIO CRQ transport. */
static inline int spapr_tpm_send_crq(struct SpaprVioDevice *dev, TpmCrq *crq)
{
    return spapr_vio_send_crq(dev, (uint8_t *)crq);
}
130
131 static int tpm_spapr_do_crq(struct SpaprVioDevice *dev, uint8_t *crq_data)
132 {
133 SpaprTpmState *s = VIO_SPAPR_VTPM(dev);
134 TpmCrq local_crq;
135 TpmCrq *crq = &s->crq; /* requests only */
136 int rc;
137 uint8_t valid = crq_data[0];
138 uint8_t msg = crq_data[1];
139
140 trace_tpm_spapr_do_crq(valid, msg);
141
142 switch (valid) {
143 case SPAPR_VTPM_VALID_INIT_CRQ_COMMAND: /* Init command/response */
144
145 /* Respond to initialization request */
146 switch (msg) {
147 case SPAPR_VTPM_INIT_CRQ_RESULT:
148 trace_tpm_spapr_do_crq_crq_result();
149 memset(&local_crq, 0, sizeof(local_crq));
150 local_crq.valid = SPAPR_VTPM_VALID_INIT_CRQ_COMMAND;
151 local_crq.msg = SPAPR_VTPM_INIT_CRQ_RESULT;
152 spapr_tpm_send_crq(dev, &local_crq);
153 break;
154
155 case SPAPR_VTPM_INIT_CRQ_COMPLETE_RESULT:
156 trace_tpm_spapr_do_crq_crq_complete_result();
157 memset(&local_crq, 0, sizeof(local_crq));
158 local_crq.valid = SPAPR_VTPM_VALID_INIT_CRQ_COMMAND;
159 local_crq.msg = SPAPR_VTPM_INIT_CRQ_COMPLETE_RESULT;
160 spapr_tpm_send_crq(dev, &local_crq);
161 break;
162 }
163
164 break;
165 case SPAPR_VTPM_VALID_COMMAND: /* Payloads */
166 switch (msg) {
167 case SPAPR_VTPM_TPM_COMMAND:
168 trace_tpm_spapr_do_crq_tpm_command();
169 if (s->state == SPAPR_VTPM_STATE_EXECUTION) {
170 return H_BUSY;
171 }
172 memcpy(crq, crq_data, sizeof(*crq));
173
174 rc = tpm_spapr_process_cmd(s, be32_to_cpu(crq->data));
175
176 if (rc == H_SUCCESS) {
177 crq->valid = be16_to_cpu(0);
178 } else {
179 local_crq.valid = SPAPR_VTPM_MSG_RESULT;
180 local_crq.msg = SPAPR_VTPM_VTPM_ERROR;
181 local_crq.len = cpu_to_be16(0);
182 local_crq.data = cpu_to_be32(SPAPR_VTPM_ERR_COPY_IN_FAILED);
183 spapr_tpm_send_crq(dev, &local_crq);
184 }
185 break;
186
187 case SPAPR_VTPM_GET_RTCE_BUFFER_SIZE:
188 trace_tpm_spapr_do_crq_tpm_get_rtce_buffer_size(s->be_buffer_size);
189 local_crq.valid = SPAPR_VTPM_VALID_COMMAND;
190 local_crq.msg = SPAPR_VTPM_GET_RTCE_BUFFER_SIZE |
191 SPAPR_VTPM_MSG_RESULT;
192 local_crq.len = cpu_to_be16(s->be_buffer_size);
193 spapr_tpm_send_crq(dev, &local_crq);
194 break;
195
196 case SPAPR_VTPM_GET_VERSION:
197 local_crq.valid = SPAPR_VTPM_VALID_COMMAND;
198 local_crq.msg = SPAPR_VTPM_GET_VERSION | SPAPR_VTPM_MSG_RESULT;
199 local_crq.len = cpu_to_be16(0);
200 switch (s->be_tpm_version) {
201 case TPM_VERSION_1_2:
202 local_crq.data = cpu_to_be32(1);
203 break;
204 case TPM_VERSION_2_0:
205 local_crq.data = cpu_to_be32(2);
206 break;
207 default:
208 g_assert_not_reached();
209 break;
210 }
211 trace_tpm_spapr_do_crq_get_version(be32_to_cpu(local_crq.data));
212 spapr_tpm_send_crq(dev, &local_crq);
213 break;
214
215 case SPAPR_VTPM_PREPARE_TO_SUSPEND:
216 trace_tpm_spapr_do_crq_prepare_to_suspend();
217 local_crq.valid = SPAPR_VTPM_VALID_COMMAND;
218 local_crq.msg = SPAPR_VTPM_PREPARE_TO_SUSPEND |
219 SPAPR_VTPM_MSG_RESULT;
220 spapr_tpm_send_crq(dev, &local_crq);
221 break;
222
223 default:
224 trace_tpm_spapr_do_crq_unknown_msg_type(crq->msg);
225 }
226 break;
227 default:
228 trace_tpm_spapr_do_crq_unknown_crq(valid, msg);
229 };
230
231 return H_SUCCESS;
232 }
233
/*
 * Backend completion callback: the TPM backend has finished the command
 * and the response has overwritten the request in s->buffer.
 *
 * DMA-writes the response to the guest address remembered in s->crq.data
 * and posts a result CRQ.  While a migration is being finalized, delivery
 * is deferred to .post_load (DMA would dirty guest memory); only the
 * response length is recorded in s->numbytes.
 *
 * NOTE(review): the backend status 'ret' is ignored here — presumably
 * the response buffer already encodes any TPM-level error; confirm.
 */
static void tpm_spapr_request_completed(TPMIf *ti, int ret)
{
    SpaprTpmState *s = VIO_SPAPR_VTPM(ti);
    TpmCrq *crq = &s->crq;
    uint32_t len;
    int rc;

    s->state = SPAPR_VTPM_STATE_COMPLETION;

    /* a max. of be_buffer_size bytes can be transported */
    len = MIN(tpm_cmd_get_size(s->buffer), s->be_buffer_size);

    if (runstate_check(RUN_STATE_FINISH_MIGRATE)) {
        trace_tpm_spapr_caught_response(len);
        /* defer delivery of response until .post_load */
        s->numbytes = len;
        return;
    }

    rc = spapr_vio_dma_write(&s->vdev, be32_to_cpu(crq->data),
                             s->buffer, len);

    if (trace_event_get_state_backends(TRACE_TPM_SPAPR_SHOW_BUFFER)) {
        tpm_util_show_buffer(s->buffer, len, "From TPM");
    }

    /* build the result CRQ in place over the stored request CRQ */
    crq->valid = SPAPR_VTPM_MSG_RESULT;
    if (rc == H_SUCCESS) {
        crq->msg = SPAPR_VTPM_TPM_COMMAND | SPAPR_VTPM_MSG_RESULT;
        crq->len = cpu_to_be16(len);
    } else {
        error_report("%s: DMA write failure", __func__);
        crq->msg = SPAPR_VTPM_VTPM_ERROR;
        crq->len = cpu_to_be16(0);
        crq->data = cpu_to_be32(SPAPR_VTPM_ERR_COPY_OUT_FAILED);
    }

    rc = spapr_tpm_send_crq(&s->vdev, crq);
    if (rc) {
        error_report("%s: Error sending response", __func__);
    }
}
276
/* Start up the backend TPM with the negotiated buffer size. */
static int tpm_spapr_do_startup_tpm(SpaprTpmState *s, size_t buffersize)
{
    return tpm_backend_startup_tpm(s->be_driver, buffersize);
}
281
282 static const char *tpm_spapr_get_dt_compatible(SpaprVioDevice *dev)
283 {
284 SpaprTpmState *s = VIO_SPAPR_VTPM(dev);
285
286 switch (s->be_tpm_version) {
287 case TPM_VERSION_1_2:
288 return "IBM,vtpm";
289 case TPM_VERSION_2_0:
290 return "IBM,vtpm20";
291 default:
292 g_assert_not_reached();
293 }
294 }
295
/*
 * Device reset: clear command-tracking state, re-query the backend's
 * TPM version and buffer size, then reset and start up the backend.
 * Note the ordering: the buffer size must be negotiated before
 * startup is issued with it.
 */
static void tpm_spapr_reset(SpaprVioDevice *dev)
{
    SpaprTpmState *s = VIO_SPAPR_VTPM(dev);

    s->state = SPAPR_VTPM_STATE_NONE;
    s->numbytes = 0;

    s->be_tpm_version = tpm_backend_get_tpm_version(s->be_driver);

    /* never exceed our fixed 4 KiB request/response buffer */
    s->be_buffer_size = MIN(tpm_backend_get_buffer_size(s->be_driver),
                            TPM_SPAPR_BUFFER_MAX);

    tpm_backend_reset(s->be_driver);
    tpm_spapr_do_startup_tpm(s, s->be_buffer_size);
}
311
312 static enum TPMVersion tpm_spapr_get_version(TPMIf *ti)
313 {
314 SpaprTpmState *s = VIO_SPAPR_VTPM(ti);
315
316 if (tpm_backend_had_startup_error(s->be_driver)) {
317 return TPM_VERSION_UNSPEC;
318 }
319
320 return tpm_backend_get_tpm_version(s->be_driver);
321 }
322
323 /* persistent state handling */
324
/*
 * VMState .pre_save hook: wait for any in-flight backend command to
 * finish so the response is captured in s->buffer before migration.
 */
static int tpm_spapr_pre_save(void *opaque)
{
    SpaprTpmState *s = opaque;

    tpm_backend_finish_sync(s->be_driver);
    /*
     * we cannot deliver the results to the VM since DMA would touch VM memory
     */

    return 0;
}
336
337 static int tpm_spapr_post_load(void *opaque, int version_id)
338 {
339 SpaprTpmState *s = opaque;
340
341 if (s->numbytes) {
342 trace_tpm_spapr_post_load();
343 /* deliver the results to the VM via DMA */
344 tpm_spapr_request_completed(TPM_IF(s), 0);
345 s->numbytes = 0;
346 }
347
348 return 0;
349 }
350
/*
 * Migration description: state byte, any undelivered response bytes
 * (buffer is sized by numbytes on the wire), and the request CRQ's DMA
 * handle needed to deliver that response after load.
 */
static const VMStateDescription vmstate_spapr_vtpm = {
    .name = "tpm-spapr",
    .pre_save = tpm_spapr_pre_save,
    .post_load = tpm_spapr_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_SPAPR_VIO(vdev, SpaprTpmState),

        VMSTATE_UINT8(state, SpaprTpmState),
        VMSTATE_UINT32(numbytes, SpaprTpmState),
        VMSTATE_VBUFFER_UINT32(buffer, SpaprTpmState, 0, NULL, numbytes),
        /* remember DMA address */
        VMSTATE_UINT32(crq.data, SpaprTpmState),
        VMSTATE_END_OF_LIST(),
    }
};
366
/* qdev properties: VIO address/IRQ plus the mandatory 'tpmdev' backend */
static Property tpm_spapr_properties[] = {
    DEFINE_SPAPR_PROPERTIES(SpaprTpmState, vdev),
    DEFINE_PROP_TPMBE("tpmdev", SpaprTpmState, be_driver),
    DEFINE_PROP_END_OF_LIST(),
};
372
/*
 * Realize: enforce the single-TPM-per-machine rule, hook up the CRQ
 * handler, require a backend, and allocate the request/response buffer.
 *
 * NOTE(review): tpm_find() returning NULL here presumably indicates an
 * ambiguous lookup, i.e. more than one TPM device — confirm against
 * tpm_find()'s implementation.  The buffer is never freed here; there
 * is no unrealize hook visible in this file.
 */
static void tpm_spapr_realizefn(SpaprVioDevice *dev, Error **errp)
{
    SpaprTpmState *s = VIO_SPAPR_VTPM(dev);

    if (!tpm_find()) {
        error_setg(errp, "at most one TPM device is permitted");
        return;
    }

    dev->crq.SendFunc = tpm_spapr_do_crq;

    if (!s->be_driver) {
        error_setg(errp, "'tpmdev' property is required");
        return;
    }
    /* fixed-size buffer; be_buffer_size is clamped to this on reset */
    s->buffer = g_malloc(TPM_SPAPR_BUFFER_MAX);
}
390
391 static void tpm_spapr_class_init(ObjectClass *klass, void *data)
392 {
393 DeviceClass *dc = DEVICE_CLASS(klass);
394 SpaprVioDeviceClass *k = VIO_SPAPR_DEVICE_CLASS(klass);
395 TPMIfClass *tc = TPM_IF_CLASS(klass);
396
397 k->realize = tpm_spapr_realizefn;
398 k->reset = tpm_spapr_reset;
399 k->dt_name = "vtpm";
400 k->dt_type = "IBM,vtpm";
401 k->get_dt_compatible = tpm_spapr_get_dt_compatible;
402 k->signal_mask = 0x00000001;
403 set_bit(DEVICE_CATEGORY_MISC, dc->categories);
404 device_class_set_props(dc, tpm_spapr_properties);
405 k->rtce_window_size = 0x10000000;
406 dc->vmsd = &vmstate_spapr_vtpm;
407
408 tc->model = TPM_MODEL_TPM_SPAPR;
409 tc->get_version = tpm_spapr_get_version;
410 tc->request_completed = tpm_spapr_request_completed;
411 }
412
/* QOM type: a VIO device that also implements the TPM interface */
static const TypeInfo tpm_spapr_info = {
    .name = TYPE_TPM_SPAPR,
    .parent = TYPE_VIO_SPAPR_DEVICE,
    .instance_size = sizeof(SpaprTpmState),
    .class_init = tpm_spapr_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_TPM_IF },
        { }
    }
};
423
/* Register the vTPM device type with QOM at startup. */
static void tpm_spapr_register_types(void)
{
    type_register_static(&tpm_spapr_info);
}

type_init(tpm_spapr_register_types)