Merge remote-tracking branch 'remotes/philmd-gitlab/tags/renesas-20201027' into staging
[qemu.git] / hw / ppc / spapr_nvdimm.c
1 /*
2 * QEMU PAPR Storage Class Memory Interfaces
3 *
4 * Copyright (c) 2019-2020, IBM Corporation.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24 #include "qemu/osdep.h"
25 #include "qapi/error.h"
26 #include "hw/ppc/spapr_drc.h"
27 #include "hw/ppc/spapr_nvdimm.h"
28 #include "hw/mem/nvdimm.h"
29 #include "qemu/nvdimm-utils.h"
30 #include "qemu/option.h"
31 #include "hw/ppc/fdt.h"
32 #include "qemu/range.h"
33 #include "sysemu/sysemu.h"
34 #include "hw/ppc/spapr_numa.h"
35
36 bool spapr_nvdimm_validate(HotplugHandler *hotplug_dev, NVDIMMDevice *nvdimm,
37 uint64_t size, Error **errp)
38 {
39 const MachineClass *mc = MACHINE_GET_CLASS(hotplug_dev);
40 const MachineState *ms = MACHINE(hotplug_dev);
41 const char *nvdimm_opt = qemu_opt_get(qemu_get_machine_opts(), "nvdimm");
42 g_autofree char *uuidstr = NULL;
43 QemuUUID uuid;
44 int ret;
45
46 if (!mc->nvdimm_supported) {
47 error_setg(errp, "NVDIMM hotplug not supported for this machine");
48 return false;
49 }
50
51 /*
52 * NVDIMM support went live in 5.1 without considering that, in
53 * other archs, the user needs to enable NVDIMM support with the
54 * 'nvdimm' machine option and the default behavior is NVDIMM
55 * support disabled. It is too late to roll back to the standard
56 * behavior without breaking 5.1 guests. What we can do is to
57 * ensure that, if the user sets nvdimm=off, we error out
58 * regardless of being 5.1 or newer.
59 */
60 if (!ms->nvdimms_state->is_enabled && nvdimm_opt) {
61 error_setg(errp, "nvdimm device found but 'nvdimm=off' was set");
62 return false;
63 }
64
65 if (object_property_get_int(OBJECT(nvdimm), NVDIMM_LABEL_SIZE_PROP,
66 &error_abort) == 0) {
67 error_setg(errp, "PAPR requires NVDIMM devices to have label-size set");
68 return false;
69 }
70
71 if (size % SPAPR_MINIMUM_SCM_BLOCK_SIZE) {
72 error_setg(errp, "PAPR requires NVDIMM memory size (excluding label)"
73 " to be a multiple of %" PRIu64 "MB",
74 SPAPR_MINIMUM_SCM_BLOCK_SIZE / MiB);
75 return false;
76 }
77
78 uuidstr = object_property_get_str(OBJECT(nvdimm), NVDIMM_UUID_PROP,
79 &error_abort);
80 ret = qemu_uuid_parse(uuidstr, &uuid);
81 g_assert(!ret);
82
83 if (qemu_uuid_is_null(&uuid)) {
84 error_setg(errp, "NVDIMM device requires the uuid to be set");
85 return false;
86 }
87
88 return true;
89 }
90
91
92 void spapr_add_nvdimm(DeviceState *dev, uint64_t slot, Error **errp)
93 {
94 SpaprDrc *drc;
95 bool hotplugged = spapr_drc_hotplugged(dev);
96
97 drc = spapr_drc_by_id(TYPE_SPAPR_DRC_PMEM, slot);
98 g_assert(drc);
99
100 if (!spapr_drc_attach(drc, dev, errp)) {
101 return;
102 }
103
104 if (hotplugged) {
105 spapr_hotplug_req_add_by_index(drc);
106 }
107 }
108
109 void spapr_create_nvdimm_dr_connectors(SpaprMachineState *spapr)
110 {
111 MachineState *machine = MACHINE(spapr);
112 int i;
113
114 for (i = 0; i < machine->ram_slots; i++) {
115 spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_PMEM, i);
116 }
117 }
118
119
/*
 * Add an "ibm,pmemory@<drc-index>" child node for @nvdimm under
 * @parent_offset in @fdt, carrying the properties PAPR defines for
 * storage class memory devices.
 *
 * Returns the fdt offset of the new child node; libfdt errors are
 * handled by the _FDT() macro.
 */
static int spapr_dt_nvdimm(SpaprMachineState *spapr, void *fdt,
                           int parent_offset, NVDIMMDevice *nvdimm)
{
    int child_offset;
    char *buf;
    SpaprDrc *drc;
    uint32_t drc_idx;
    /* NUMA node, slot and size come from the generic PC-DIMM properties */
    uint32_t node = object_property_get_uint(OBJECT(nvdimm), PC_DIMM_NODE_PROP,
                                             &error_abort);
    uint64_t slot = object_property_get_uint(OBJECT(nvdimm), PC_DIMM_SLOT_PROP,
                                             &error_abort);
    uint64_t lsize = nvdimm->label_size;
    uint64_t size = object_property_get_int(OBJECT(nvdimm), PC_DIMM_SIZE_PROP,
                                            NULL);

    /* PMEM DRCs are created one per slot, so the slot identifies the DRC */
    drc = spapr_drc_by_id(TYPE_SPAPR_DRC_PMEM, slot);
    g_assert(drc);

    drc_idx = spapr_drc_index(drc);

    /* The unit address in the node name matches the "reg" property below */
    buf = g_strdup_printf("ibm,pmemory@%x", drc_idx);
    child_offset = fdt_add_subnode(fdt, parent_offset, buf);
    g_free(buf);

    _FDT(child_offset);

    _FDT((fdt_setprop_cell(fdt, child_offset, "reg", drc_idx)));
    _FDT((fdt_setprop_string(fdt, child_offset, "compatible", "ibm,pmemory")));
    _FDT((fdt_setprop_string(fdt, child_offset, "device_type", "ibm,pmemory")));

    /* Emit the NUMA associativity arrays for this device's node */
    spapr_numa_write_associativity_dt(spapr, fdt, child_offset, node);

    /* The guest-visible unit GUID is the NVDIMM's uuid property */
    buf = qemu_uuid_unparse_strdup(&nvdimm->uuid);
    _FDT((fdt_setprop_string(fdt, child_offset, "ibm,unit-guid", buf)));
    g_free(buf);

    _FDT((fdt_setprop_cell(fdt, child_offset, "ibm,my-drc-index", drc_idx)));

    /* Memory is advertised in SCM blocks; the label area in bytes */
    _FDT((fdt_setprop_u64(fdt, child_offset, "ibm,block-size",
                          SPAPR_MINIMUM_SCM_BLOCK_SIZE)));
    _FDT((fdt_setprop_u64(fdt, child_offset, "ibm,number-of-blocks",
                          size / SPAPR_MINIMUM_SCM_BLOCK_SIZE)));
    _FDT((fdt_setprop_cell(fdt, child_offset, "ibm,metadata-size", lsize)));

    _FDT((fdt_setprop_string(fdt, child_offset, "ibm,pmem-application",
                             "operating-system")));
    /* Empty property: flushes are required (presence, not value, matters) */
    _FDT(fdt_setprop(fdt, child_offset, "ibm,cache-flush-required", NULL, 0));

    return child_offset;
}
170
171 int spapr_pmem_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr,
172 void *fdt, int *fdt_start_offset, Error **errp)
173 {
174 NVDIMMDevice *nvdimm = NVDIMM(drc->dev);
175
176 *fdt_start_offset = spapr_dt_nvdimm(spapr, fdt, 0, nvdimm);
177
178 return 0;
179 }
180
181 void spapr_dt_persistent_memory(SpaprMachineState *spapr, void *fdt)
182 {
183 int offset = fdt_subnode_offset(fdt, 0, "persistent-memory");
184 GSList *iter, *nvdimms = nvdimm_get_device_list();
185
186 if (offset < 0) {
187 offset = fdt_add_subnode(fdt, 0, "persistent-memory");
188 _FDT(offset);
189 _FDT((fdt_setprop_cell(fdt, offset, "#address-cells", 0x1)));
190 _FDT((fdt_setprop_cell(fdt, offset, "#size-cells", 0x0)));
191 _FDT((fdt_setprop_string(fdt, offset, "device_type",
192 "ibm,persistent-memory")));
193 }
194
195 /* Create DT entries for cold plugged NVDIMM devices */
196 for (iter = nvdimms; iter; iter = iter->next) {
197 NVDIMMDevice *nvdimm = iter->data;
198
199 spapr_dt_nvdimm(spapr, fdt, offset, nvdimm);
200 }
201 g_slist_free(nvdimms);
202
203 return;
204 }
205
206 static target_ulong h_scm_read_metadata(PowerPCCPU *cpu,
207 SpaprMachineState *spapr,
208 target_ulong opcode,
209 target_ulong *args)
210 {
211 uint32_t drc_index = args[0];
212 uint64_t offset = args[1];
213 uint64_t len = args[2];
214 SpaprDrc *drc = spapr_drc_by_index(drc_index);
215 NVDIMMDevice *nvdimm;
216 NVDIMMClass *ddc;
217 uint64_t data = 0;
218 uint8_t buf[8] = { 0 };
219
220 if (!drc || !drc->dev ||
221 spapr_drc_type(drc) != SPAPR_DR_CONNECTOR_TYPE_PMEM) {
222 return H_PARAMETER;
223 }
224
225 if (len != 1 && len != 2 &&
226 len != 4 && len != 8) {
227 return H_P3;
228 }
229
230 nvdimm = NVDIMM(drc->dev);
231 if ((offset + len < offset) ||
232 (nvdimm->label_size < len + offset)) {
233 return H_P2;
234 }
235
236 ddc = NVDIMM_GET_CLASS(nvdimm);
237 ddc->read_label_data(nvdimm, buf, len, offset);
238
239 switch (len) {
240 case 1:
241 data = ldub_p(buf);
242 break;
243 case 2:
244 data = lduw_be_p(buf);
245 break;
246 case 4:
247 data = ldl_be_p(buf);
248 break;
249 case 8:
250 data = ldq_be_p(buf);
251 break;
252 default:
253 g_assert_not_reached();
254 }
255
256 args[0] = data;
257
258 return H_SUCCESS;
259 }
260
261 static target_ulong h_scm_write_metadata(PowerPCCPU *cpu,
262 SpaprMachineState *spapr,
263 target_ulong opcode,
264 target_ulong *args)
265 {
266 uint32_t drc_index = args[0];
267 uint64_t offset = args[1];
268 uint64_t data = args[2];
269 uint64_t len = args[3];
270 SpaprDrc *drc = spapr_drc_by_index(drc_index);
271 NVDIMMDevice *nvdimm;
272 NVDIMMClass *ddc;
273 uint8_t buf[8] = { 0 };
274
275 if (!drc || !drc->dev ||
276 spapr_drc_type(drc) != SPAPR_DR_CONNECTOR_TYPE_PMEM) {
277 return H_PARAMETER;
278 }
279
280 if (len != 1 && len != 2 &&
281 len != 4 && len != 8) {
282 return H_P4;
283 }
284
285 nvdimm = NVDIMM(drc->dev);
286 if ((offset + len < offset) ||
287 (nvdimm->label_size < len + offset)) {
288 return H_P2;
289 }
290
291 switch (len) {
292 case 1:
293 if (data & 0xffffffffffffff00) {
294 return H_P2;
295 }
296 stb_p(buf, data);
297 break;
298 case 2:
299 if (data & 0xffffffffffff0000) {
300 return H_P2;
301 }
302 stw_be_p(buf, data);
303 break;
304 case 4:
305 if (data & 0xffffffff00000000) {
306 return H_P2;
307 }
308 stl_be_p(buf, data);
309 break;
310 case 8:
311 stq_be_p(buf, data);
312 break;
313 default:
314 g_assert_not_reached();
315 }
316
317 ddc = NVDIMM_GET_CLASS(nvdimm);
318 ddc->write_label_data(nvdimm, buf, len, offset);
319
320 return H_SUCCESS;
321 }
322
323 static target_ulong h_scm_bind_mem(PowerPCCPU *cpu, SpaprMachineState *spapr,
324 target_ulong opcode, target_ulong *args)
325 {
326 uint32_t drc_index = args[0];
327 uint64_t starting_idx = args[1];
328 uint64_t no_of_scm_blocks_to_bind = args[2];
329 uint64_t target_logical_mem_addr = args[3];
330 uint64_t continue_token = args[4];
331 uint64_t size;
332 uint64_t total_no_of_scm_blocks;
333 SpaprDrc *drc = spapr_drc_by_index(drc_index);
334 hwaddr addr;
335 NVDIMMDevice *nvdimm;
336
337 if (!drc || !drc->dev ||
338 spapr_drc_type(drc) != SPAPR_DR_CONNECTOR_TYPE_PMEM) {
339 return H_PARAMETER;
340 }
341
342 /*
343 * Currently continue token should be zero qemu has already bound
344 * everything and this hcall doesnt return H_BUSY.
345 */
346 if (continue_token > 0) {
347 return H_P5;
348 }
349
350 /* Currently qemu assigns the address. */
351 if (target_logical_mem_addr != 0xffffffffffffffff) {
352 return H_OVERLAP;
353 }
354
355 nvdimm = NVDIMM(drc->dev);
356
357 size = object_property_get_uint(OBJECT(nvdimm),
358 PC_DIMM_SIZE_PROP, &error_abort);
359
360 total_no_of_scm_blocks = size / SPAPR_MINIMUM_SCM_BLOCK_SIZE;
361
362 if (starting_idx > total_no_of_scm_blocks) {
363 return H_P2;
364 }
365
366 if (((starting_idx + no_of_scm_blocks_to_bind) < starting_idx) ||
367 ((starting_idx + no_of_scm_blocks_to_bind) > total_no_of_scm_blocks)) {
368 return H_P3;
369 }
370
371 addr = object_property_get_uint(OBJECT(nvdimm),
372 PC_DIMM_ADDR_PROP, &error_abort);
373
374 addr += starting_idx * SPAPR_MINIMUM_SCM_BLOCK_SIZE;
375
376 /* Already bound, Return target logical address in R5 */
377 args[1] = addr;
378 args[2] = no_of_scm_blocks_to_bind;
379
380 return H_SUCCESS;
381 }
382
383 static target_ulong h_scm_unbind_mem(PowerPCCPU *cpu, SpaprMachineState *spapr,
384 target_ulong opcode, target_ulong *args)
385 {
386 uint32_t drc_index = args[0];
387 uint64_t starting_scm_logical_addr = args[1];
388 uint64_t no_of_scm_blocks_to_unbind = args[2];
389 uint64_t continue_token = args[3];
390 uint64_t size_to_unbind;
391 Range blockrange = range_empty;
392 Range nvdimmrange = range_empty;
393 SpaprDrc *drc = spapr_drc_by_index(drc_index);
394 NVDIMMDevice *nvdimm;
395 uint64_t size, addr;
396
397 if (!drc || !drc->dev ||
398 spapr_drc_type(drc) != SPAPR_DR_CONNECTOR_TYPE_PMEM) {
399 return H_PARAMETER;
400 }
401
402 /* continue_token should be zero as this hcall doesn't return H_BUSY. */
403 if (continue_token > 0) {
404 return H_P4;
405 }
406
407 /* Check if starting_scm_logical_addr is block aligned */
408 if (!QEMU_IS_ALIGNED(starting_scm_logical_addr,
409 SPAPR_MINIMUM_SCM_BLOCK_SIZE)) {
410 return H_P2;
411 }
412
413 size_to_unbind = no_of_scm_blocks_to_unbind * SPAPR_MINIMUM_SCM_BLOCK_SIZE;
414 if (no_of_scm_blocks_to_unbind == 0 || no_of_scm_blocks_to_unbind !=
415 size_to_unbind / SPAPR_MINIMUM_SCM_BLOCK_SIZE) {
416 return H_P3;
417 }
418
419 nvdimm = NVDIMM(drc->dev);
420 size = object_property_get_int(OBJECT(nvdimm), PC_DIMM_SIZE_PROP,
421 &error_abort);
422 addr = object_property_get_int(OBJECT(nvdimm), PC_DIMM_ADDR_PROP,
423 &error_abort);
424
425 range_init_nofail(&nvdimmrange, addr, size);
426 range_init_nofail(&blockrange, starting_scm_logical_addr, size_to_unbind);
427
428 if (!range_contains_range(&nvdimmrange, &blockrange)) {
429 return H_P3;
430 }
431
432 args[1] = no_of_scm_blocks_to_unbind;
433
434 /* let unplug take care of actual unbind */
435 return H_SUCCESS;
436 }
437
438 #define H_UNBIND_SCOPE_ALL 0x1
439 #define H_UNBIND_SCOPE_DRC 0x2
440
441 static target_ulong h_scm_unbind_all(PowerPCCPU *cpu, SpaprMachineState *spapr,
442 target_ulong opcode, target_ulong *args)
443 {
444 uint64_t target_scope = args[0];
445 uint32_t drc_index = args[1];
446 uint64_t continue_token = args[2];
447 NVDIMMDevice *nvdimm;
448 uint64_t size;
449 uint64_t no_of_scm_blocks_unbound = 0;
450
451 /* continue_token should be zero as this hcall doesn't return H_BUSY. */
452 if (continue_token > 0) {
453 return H_P4;
454 }
455
456 if (target_scope == H_UNBIND_SCOPE_DRC) {
457 SpaprDrc *drc = spapr_drc_by_index(drc_index);
458
459 if (!drc || !drc->dev ||
460 spapr_drc_type(drc) != SPAPR_DR_CONNECTOR_TYPE_PMEM) {
461 return H_P2;
462 }
463
464 nvdimm = NVDIMM(drc->dev);
465 size = object_property_get_int(OBJECT(nvdimm), PC_DIMM_SIZE_PROP,
466 &error_abort);
467
468 no_of_scm_blocks_unbound = size / SPAPR_MINIMUM_SCM_BLOCK_SIZE;
469 } else if (target_scope == H_UNBIND_SCOPE_ALL) {
470 GSList *list, *nvdimms;
471
472 nvdimms = nvdimm_get_device_list();
473 for (list = nvdimms; list; list = list->next) {
474 nvdimm = list->data;
475 size = object_property_get_int(OBJECT(nvdimm), PC_DIMM_SIZE_PROP,
476 &error_abort);
477
478 no_of_scm_blocks_unbound += size / SPAPR_MINIMUM_SCM_BLOCK_SIZE;
479 }
480 g_slist_free(nvdimms);
481 } else {
482 return H_PARAMETER;
483 }
484
485 args[1] = no_of_scm_blocks_unbound;
486
487 /* let unplug take care of actual unbind */
488 return H_SUCCESS;
489 }
490
/*
 * Register the PAPR storage-class-memory hypercalls with the sPAPR
 * hcall dispatcher.  Runs once at startup via type_init().
 */
static void spapr_scm_register_types(void)
{
    /* qemu/scm specific hcalls */
    spapr_register_hypercall(H_SCM_READ_METADATA, h_scm_read_metadata);
    spapr_register_hypercall(H_SCM_WRITE_METADATA, h_scm_write_metadata);
    spapr_register_hypercall(H_SCM_BIND_MEM, h_scm_bind_mem);
    spapr_register_hypercall(H_SCM_UNBIND_MEM, h_scm_unbind_mem);
    spapr_register_hypercall(H_SCM_UNBIND_ALL, h_scm_unbind_all);
}

type_init(spapr_scm_register_types)