spapr: QOM'ify pseries machine
[qemu.git] hw/ppc/spapr.c
1 /*
2 * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
3 *
4 * Copyright (c) 2004-2007 Fabrice Bellard
5 * Copyright (c) 2007 Jocelyn Mayer
6 * Copyright (c) 2010 David Gibson, IBM Corporation.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this software and associated documentation files (the "Software"), to deal
10 * in the Software without restriction, including without limitation the rights
11 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 * copies of the Software, and to permit persons to whom the Software is
13 * furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
24 * THE SOFTWARE.
25 *
26 */
27 #include "sysemu/sysemu.h"
28 #include "hw/hw.h"
29 #include "elf.h"
30 #include "net/net.h"
31 #include "sysemu/blockdev.h"
32 #include "sysemu/cpus.h"
33 #include "sysemu/kvm.h"
34 #include "kvm_ppc.h"
35 #include "mmu-hash64.h"
36
37 #include "hw/boards.h"
38 #include "hw/ppc/ppc.h"
39 #include "hw/loader.h"
40
41 #include "hw/ppc/spapr.h"
42 #include "hw/ppc/spapr_vio.h"
43 #include "hw/pci-host/spapr.h"
44 #include "hw/ppc/xics.h"
45 #include "hw/pci/msi.h"
46
47 #include "hw/pci/pci.h"
48
49 #include "exec/address-spaces.h"
50 #include "hw/usb.h"
51 #include "qemu/config-file.h"
52 #include "qemu/error-report.h"
53
54 #include <libfdt.h>
55
56 /* SLOF memory layout:
57 *
58 * SLOF raw image loaded at 0, copies its romfs right below the flat
59 * device-tree, then positions SLOF itself 31M below that
60 *
61 * So we set FW_OVERHEAD to 40MB which should account for all of that
62 * and more
63 *
64 * We load our kernel at 4M, leaving space for SLOF initial image
65 */
66 #define FDT_MAX_SIZE 0x40000
67 #define RTAS_MAX_SIZE 0x10000
68 #define FW_MAX_SIZE 0x400000
69 #define FW_FILE_NAME "slof.bin"
70 #define FW_OVERHEAD 0x2800000
71 #define KERNEL_LOAD_ADDR FW_MAX_SIZE
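/*
 * Illustrative map of how the constants above are used further down in
 * ppc_spapr_init() (a reader's sketch; everything other than the #defines
 * is computed at run time from the RMA size):
 *
 *   0x0                                      SLOF raw image (FW_MAX_SIZE = 4M)
 *   KERNEL_LOAD_ADDR (4M)                    kernel, then initrd just above it
 *   load_limit = fdt_addr - FW_OVERHEAD      top of the kernel/initrd area
 *   fdt_addr   = rtas_addr - FDT_MAX_SIZE    flat device tree (256K)
 *   rtas_addr  = MIN(rma_size, 2G) - RTAS_MAX_SIZE   RTAS blob (64K)
 */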
72
73 #define MIN_RMA_SLOF 128UL
74
75 #define TIMEBASE_FREQ 512000000ULL
76
77 #define MAX_CPUS 256
78 #define XICS_IRQS 1024
79
80 #define PHANDLE_XICP 0x00001111
81
82 #define HTAB_SIZE(spapr) (1ULL << ((spapr)->htab_shift))
83
84 #define TYPE_SPAPR_MACHINE "spapr-machine"
85
86 sPAPREnvironment *spapr;
87
88 int spapr_allocate_irq(int hint, bool lsi)
89 {
90 int irq;
91
92 if (hint) {
93 irq = hint;
94 if (hint >= spapr->next_irq) {
95 spapr->next_irq = hint + 1;
96 }
97 /* FIXME: we should probably check for collisions somehow */
98 } else {
99 irq = spapr->next_irq++;
100 }
101
102 /* Configure irq type */
103 if (!xics_get_qirq(spapr->icp, irq)) {
104 return 0;
105 }
106
107 xics_set_irq_type(spapr->icp, irq, lsi);
108
109 return irq;
110 }
111
112 /*
113 * Allocate a block of consecutive IRQs; returns the number of the first.
114 * If msi==true, aligns the first IRQ number to num.
115 */
116 int spapr_allocate_irq_block(int num, bool lsi, bool msi)
117 {
118 int first = -1;
119 int i, hint = 0;
120
121 /*
122 * MSIMessage::data is used for storing the VIRQ, so
123 * it has to be aligned to num to support multiple
124 * MSI vectors. MSI-X is not affected by this.
125 * The hint is used for the first IRQ; the rest must
126 * be allocated contiguously.
127 */
128 if (msi) {
129 assert((num == 1) || (num == 2) || (num == 4) ||
130 (num == 8) || (num == 16) || (num == 32));
131 hint = (spapr->next_irq + num - 1) & ~(num - 1);
132 }
133
134 for (i = 0; i < num; ++i) {
135 int irq;
136
137 irq = spapr_allocate_irq(hint, lsi);
138 if (!irq) {
139 return -1;
140 }
141
142 if (i == 0) {
143 first = irq;
144 hint = 0;
145 }
146
147 /* If the above doesn't create a consecutive block then that's
148 * an internal bug */
149 assert(irq == (first + i));
150 }
151
152 return first;
153 }
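/*
 * Worked example of the MSI alignment in spapr_allocate_irq_block() above
 * (values are assumed, purely illustrative): with spapr->next_irq == 0x1205
 * and num == 8,
 *     hint = (0x1205 + 8 - 1) & ~(8 - 1) = 0x120c & ~0x7 = 0x1208
 * i.e. the usual round-up-to-a-power-of-two idiom, so the block of 8
 * consecutive IRQs starts on a multiple of 8 as multi-vector MSI requires.
 */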
154
155 static XICSState *try_create_xics(const char *type, int nr_servers,
156 int nr_irqs)
157 {
158 DeviceState *dev;
159
160 dev = qdev_create(NULL, type);
161 qdev_prop_set_uint32(dev, "nr_servers", nr_servers);
162 qdev_prop_set_uint32(dev, "nr_irqs", nr_irqs);
163 if (qdev_init(dev) < 0) {
164 return NULL;
165 }
166
167 return XICS_COMMON(dev);
168 }
169
170 static XICSState *xics_system_init(int nr_servers, int nr_irqs)
171 {
172 XICSState *icp = NULL;
173
174 if (kvm_enabled()) {
175 QemuOpts *machine_opts = qemu_get_machine_opts();
176 bool irqchip_allowed = qemu_opt_get_bool(machine_opts,
177 "kernel_irqchip", true);
178 bool irqchip_required = qemu_opt_get_bool(machine_opts,
179 "kernel_irqchip", false);
180 if (irqchip_allowed) {
181 icp = try_create_xics(TYPE_KVM_XICS, nr_servers, nr_irqs);
182 }
183
184 if (irqchip_required && !icp) {
185 perror("Failed to create in-kernel XICS");
186 abort();
187 }
188 }
189
190 if (!icp) {
191 icp = try_create_xics(TYPE_XICS, nr_servers, nr_irqs);
192 }
193
194 if (!icp) {
195 perror("Failed to create XICS");
196 abort();
197 }
198
199 return icp;
200 }
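/*
 * Behaviour of the two qemu_opt_get_bool() calls above, as implied by
 * their defaults (a summary, not additional policy):
 *   -machine kernel_irqchip=on   -> allowed && required: failure to create
 *                                   the in-kernel XICS is fatal
 *   -machine kernel_irqchip=off  -> neither: go straight to the emulated XICS
 *   (no option given)            -> try the in-kernel XICS, silently fall
 *                                   back to the emulated one
 */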
201
202 static int spapr_fixup_cpu_dt(void *fdt, sPAPREnvironment *spapr)
203 {
204 int ret = 0, offset;
205 CPUState *cpu;
206 char cpu_model[32];
207 int smt = kvmppc_smt_threads();
208 uint32_t pft_size_prop[] = {0, cpu_to_be32(spapr->htab_shift)};
209
210 CPU_FOREACH(cpu) {
211 DeviceClass *dc = DEVICE_GET_CLASS(cpu);
212 int index = ppc_get_vcpu_dt_id(POWERPC_CPU(cpu));
213 uint32_t associativity[] = {cpu_to_be32(0x5),
214 cpu_to_be32(0x0),
215 cpu_to_be32(0x0),
216 cpu_to_be32(0x0),
217 cpu_to_be32(cpu->numa_node),
218 cpu_to_be32(index)};
219
220 if ((index % smt) != 0) {
221 continue;
222 }
223
224 snprintf(cpu_model, 32, "/cpus/%s@%x", dc->fw_name,
225 index);
226
227 offset = fdt_path_offset(fdt, cpu_model);
228 if (offset < 0) {
229 return offset;
230 }
231
232 if (nb_numa_nodes > 1) {
233 ret = fdt_setprop(fdt, offset, "ibm,associativity", associativity,
234 sizeof(associativity));
235 if (ret < 0) {
236 return ret;
237 }
238 }
239
240 ret = fdt_setprop(fdt, offset, "ibm,pft-size",
241 pft_size_prop, sizeof(pft_size_prop));
242 if (ret < 0) {
243 return ret;
244 }
245 }
246 return ret;
247 }
248
249
250 static size_t create_page_sizes_prop(CPUPPCState *env, uint32_t *prop,
251 size_t maxsize)
252 {
253 size_t maxcells = maxsize / sizeof(uint32_t);
254 int i, j, count;
255 uint32_t *p = prop;
256
257 for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
258 struct ppc_one_seg_page_size *sps = &env->sps.sps[i];
259
260 if (!sps->page_shift) {
261 break;
262 }
263 for (count = 0; count < PPC_PAGE_SIZES_MAX_SZ; count++) {
264 if (sps->enc[count].page_shift == 0) {
265 break;
266 }
267 }
268 if ((p - prop) >= (maxcells - 3 - count * 2)) {
269 break;
270 }
271 *(p++) = cpu_to_be32(sps->page_shift);
272 *(p++) = cpu_to_be32(sps->slb_enc);
273 *(p++) = cpu_to_be32(count);
274 for (j = 0; j < count; j++) {
275 *(p++) = cpu_to_be32(sps->enc[j].page_shift);
276 *(p++) = cpu_to_be32(sps->enc[j].pte_enc);
277 }
278 }
279
280 return (p - prop) * sizeof(uint32_t);
281 }
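/*
 * Shape of the "ibm,segment-page-sizes" cells emitted above (a sketch; the
 * concrete numbers come from env->sps and vary by CPU model):
 *
 *   <base-page-shift slb-enc nr-encodings { page-shift pte-enc } * nr>
 *
 * e.g. a 4K base segment supporting only 4K pages might appear as
 * < 0x0c 0x000 0x1 0x0c 0x0 >, and a 16M base segment as
 * < 0x18 0x100 0x1 0x18 0x0 >.
 */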
282
283 #define _FDT(exp) \
284 do { \
285 int ret = (exp); \
286 if (ret < 0) { \
287 fprintf(stderr, "qemu: error creating device tree: %s: %s\n", \
288 #exp, fdt_strerror(ret)); \
289 exit(1); \
290 } \
291 } while (0)
292
293
294 static void *spapr_create_fdt_skel(hwaddr initrd_base,
295 hwaddr initrd_size,
296 hwaddr kernel_size,
297 bool little_endian,
298 const char *boot_device,
299 const char *kernel_cmdline,
300 uint32_t epow_irq)
301 {
302 void *fdt;
303 CPUState *cs;
304 uint32_t start_prop = cpu_to_be32(initrd_base);
305 uint32_t end_prop = cpu_to_be32(initrd_base + initrd_size);
306 char hypertas_prop[] = "hcall-pft\0hcall-term\0hcall-dabr\0hcall-interrupt"
307 "\0hcall-tce\0hcall-vio\0hcall-splpar\0hcall-bulk\0hcall-set-mode";
308 char qemu_hypertas_prop[] = "hcall-memop1";
309 uint32_t refpoints[] = {cpu_to_be32(0x4), cpu_to_be32(0x4)};
310 uint32_t interrupt_server_ranges_prop[] = {0, cpu_to_be32(smp_cpus)};
311 int i, smt = kvmppc_smt_threads();
312 unsigned char vec5[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x80};
313
314 fdt = g_malloc0(FDT_MAX_SIZE);
315 _FDT((fdt_create(fdt, FDT_MAX_SIZE)));
316
317 if (kernel_size) {
318 _FDT((fdt_add_reservemap_entry(fdt, KERNEL_LOAD_ADDR, kernel_size)));
319 }
320 if (initrd_size) {
321 _FDT((fdt_add_reservemap_entry(fdt, initrd_base, initrd_size)));
322 }
323 _FDT((fdt_finish_reservemap(fdt)));
324
325 /* Root node */
326 _FDT((fdt_begin_node(fdt, "")));
327 _FDT((fdt_property_string(fdt, "device_type", "chrp")));
328 _FDT((fdt_property_string(fdt, "model", "IBM pSeries (emulated by qemu)")));
329 _FDT((fdt_property_string(fdt, "compatible", "qemu,pseries")));
330
331 _FDT((fdt_property_cell(fdt, "#address-cells", 0x2)));
332 _FDT((fdt_property_cell(fdt, "#size-cells", 0x2)));
333
334 /* /chosen */
335 _FDT((fdt_begin_node(fdt, "chosen")));
336
337 /* Set Form1_affinity */
338 _FDT((fdt_property(fdt, "ibm,architecture-vec-5", vec5, sizeof(vec5))));
339
340 _FDT((fdt_property_string(fdt, "bootargs", kernel_cmdline)));
341 _FDT((fdt_property(fdt, "linux,initrd-start",
342 &start_prop, sizeof(start_prop))));
343 _FDT((fdt_property(fdt, "linux,initrd-end",
344 &end_prop, sizeof(end_prop))));
345 if (kernel_size) {
346 uint64_t kprop[2] = { cpu_to_be64(KERNEL_LOAD_ADDR),
347 cpu_to_be64(kernel_size) };
348
349 _FDT((fdt_property(fdt, "qemu,boot-kernel", &kprop, sizeof(kprop))));
350 if (little_endian) {
351 _FDT((fdt_property(fdt, "qemu,boot-kernel-le", NULL, 0)));
352 }
353 }
354 if (boot_device) {
355 _FDT((fdt_property_string(fdt, "qemu,boot-device", boot_device)));
356 }
357 _FDT((fdt_property_cell(fdt, "qemu,graphic-width", graphic_width)));
358 _FDT((fdt_property_cell(fdt, "qemu,graphic-height", graphic_height)));
359 _FDT((fdt_property_cell(fdt, "qemu,graphic-depth", graphic_depth)));
360
361 _FDT((fdt_end_node(fdt)));
362
363 /* cpus */
364 _FDT((fdt_begin_node(fdt, "cpus")));
365
366 _FDT((fdt_property_cell(fdt, "#address-cells", 0x1)));
367 _FDT((fdt_property_cell(fdt, "#size-cells", 0x0)));
368
369 CPU_FOREACH(cs) {
370 PowerPCCPU *cpu = POWERPC_CPU(cs);
371 CPUPPCState *env = &cpu->env;
372 DeviceClass *dc = DEVICE_GET_CLASS(cs);
373 PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs);
374 int index = ppc_get_vcpu_dt_id(cpu);
375 uint32_t servers_prop[smp_threads];
376 uint32_t gservers_prop[smp_threads * 2];
377 char *nodename;
378 uint32_t segs[] = {cpu_to_be32(28), cpu_to_be32(40),
379 0xffffffff, 0xffffffff};
380 uint32_t tbfreq = kvm_enabled() ? kvmppc_get_tbfreq() : TIMEBASE_FREQ;
381 uint32_t cpufreq = kvm_enabled() ? kvmppc_get_clockfreq() : 1000000000;
382 uint32_t page_sizes_prop[64];
383 size_t page_sizes_prop_size;
384
385 if ((index % smt) != 0) {
386 continue;
387 }
388
389 nodename = g_strdup_printf("%s@%x", dc->fw_name, index);
390
391 _FDT((fdt_begin_node(fdt, nodename)));
392
393 g_free(nodename);
394
395 _FDT((fdt_property_cell(fdt, "reg", index)));
396 _FDT((fdt_property_string(fdt, "device_type", "cpu")));
397
398 _FDT((fdt_property_cell(fdt, "cpu-version", env->spr[SPR_PVR])));
399 _FDT((fdt_property_cell(fdt, "d-cache-block-size",
400 env->dcache_line_size)));
401 _FDT((fdt_property_cell(fdt, "d-cache-line-size",
402 env->dcache_line_size)));
403 _FDT((fdt_property_cell(fdt, "i-cache-block-size",
404 env->icache_line_size)));
405 _FDT((fdt_property_cell(fdt, "i-cache-line-size",
406 env->icache_line_size)));
407
408 if (pcc->l1_dcache_size) {
409 _FDT((fdt_property_cell(fdt, "d-cache-size", pcc->l1_dcache_size)));
410 } else {
411 fprintf(stderr, "Warning: Unknown L1 dcache size for cpu\n");
412 }
413 if (pcc->l1_icache_size) {
414 _FDT((fdt_property_cell(fdt, "i-cache-size", pcc->l1_icache_size)));
415 } else {
416 fprintf(stderr, "Warning: Unknown L1 icache size for cpu\n");
417 }
418
419 _FDT((fdt_property_cell(fdt, "timebase-frequency", tbfreq)));
420 _FDT((fdt_property_cell(fdt, "clock-frequency", cpufreq)));
421 _FDT((fdt_property_cell(fdt, "ibm,slb-size", env->slb_nr)));
422 _FDT((fdt_property_string(fdt, "status", "okay")));
423 _FDT((fdt_property(fdt, "64-bit", NULL, 0)));
424
425 /* Build interrupt servers and gservers properties */
426 for (i = 0; i < smp_threads; i++) {
427 servers_prop[i] = cpu_to_be32(index + i);
428 /* Hack, direct the group queues back to cpu 0 */
429 gservers_prop[i*2] = cpu_to_be32(index + i);
430 gservers_prop[i*2 + 1] = 0;
431 }
432 _FDT((fdt_property(fdt, "ibm,ppc-interrupt-server#s",
433 servers_prop, sizeof(servers_prop))));
434 _FDT((fdt_property(fdt, "ibm,ppc-interrupt-gserver#s",
435 gservers_prop, sizeof(gservers_prop))));
436
437 if (env->spr_cb[SPR_PURR].oea_read) {
438 _FDT((fdt_property(fdt, "ibm,purr", NULL, 0)));
439 }
440
441 if (env->mmu_model & POWERPC_MMU_1TSEG) {
442 _FDT((fdt_property(fdt, "ibm,processor-segment-sizes",
443 segs, sizeof(segs))));
444 }
445
446 /* Advertise VMX/VSX (vector extensions) if available
447 * 0 / no property == no vector extensions
448 * 1 == VMX / Altivec available
449 * 2 == VSX available */
450 if (env->insns_flags & PPC_ALTIVEC) {
451 uint32_t vmx = (env->insns_flags2 & PPC2_VSX) ? 2 : 1;
452
453 _FDT((fdt_property_cell(fdt, "ibm,vmx", vmx)));
454 }
455
456 /* Advertise DFP (Decimal Floating Point) if available
457 * 0 / no property == no DFP
458 * 1 == DFP available */
459 if (env->insns_flags2 & PPC2_DFP) {
460 _FDT((fdt_property_cell(fdt, "ibm,dfp", 1)));
461 }
462
463 page_sizes_prop_size = create_page_sizes_prop(env, page_sizes_prop,
464 sizeof(page_sizes_prop));
465 if (page_sizes_prop_size) {
466 _FDT((fdt_property(fdt, "ibm,segment-page-sizes",
467 page_sizes_prop, page_sizes_prop_size)));
468 }
469
470 _FDT((fdt_end_node(fdt)));
471 }
472
473 _FDT((fdt_end_node(fdt)));
474
475 /* RTAS */
476 _FDT((fdt_begin_node(fdt, "rtas")));
477
478 _FDT((fdt_property(fdt, "ibm,hypertas-functions", hypertas_prop,
479 sizeof(hypertas_prop))));
480 _FDT((fdt_property(fdt, "qemu,hypertas-functions", qemu_hypertas_prop,
481 sizeof(qemu_hypertas_prop))));
482
483 _FDT((fdt_property(fdt, "ibm,associativity-reference-points",
484 refpoints, sizeof(refpoints))));
485
486 _FDT((fdt_property_cell(fdt, "rtas-error-log-max", RTAS_ERROR_LOG_MAX)));
487
488 _FDT((fdt_end_node(fdt)));
489
490 /* interrupt controller */
491 _FDT((fdt_begin_node(fdt, "interrupt-controller")));
492
493 _FDT((fdt_property_string(fdt, "device_type",
494 "PowerPC-External-Interrupt-Presentation")));
495 _FDT((fdt_property_string(fdt, "compatible", "IBM,ppc-xicp")));
496 _FDT((fdt_property(fdt, "interrupt-controller", NULL, 0)));
497 _FDT((fdt_property(fdt, "ibm,interrupt-server-ranges",
498 interrupt_server_ranges_prop,
499 sizeof(interrupt_server_ranges_prop))));
500 _FDT((fdt_property_cell(fdt, "#interrupt-cells", 2)));
501 _FDT((fdt_property_cell(fdt, "linux,phandle", PHANDLE_XICP)));
502 _FDT((fdt_property_cell(fdt, "phandle", PHANDLE_XICP)));
503
504 _FDT((fdt_end_node(fdt)));
505
506 /* vdevice */
507 _FDT((fdt_begin_node(fdt, "vdevice")));
508
509 _FDT((fdt_property_string(fdt, "device_type", "vdevice")));
510 _FDT((fdt_property_string(fdt, "compatible", "IBM,vdevice")));
511 _FDT((fdt_property_cell(fdt, "#address-cells", 0x1)));
512 _FDT((fdt_property_cell(fdt, "#size-cells", 0x0)));
513 _FDT((fdt_property_cell(fdt, "#interrupt-cells", 0x2)));
514 _FDT((fdt_property(fdt, "interrupt-controller", NULL, 0)));
515
516 _FDT((fdt_end_node(fdt)));
517
518 /* event-sources */
519 spapr_events_fdt_skel(fdt, epow_irq);
520
521 _FDT((fdt_end_node(fdt))); /* close root node */
522 _FDT((fdt_finish(fdt)));
523
524 return fdt;
525 }
526
527 static int spapr_populate_memory(sPAPREnvironment *spapr, void *fdt)
528 {
529 uint32_t associativity[] = {cpu_to_be32(0x4), cpu_to_be32(0x0),
530 cpu_to_be32(0x0), cpu_to_be32(0x0),
531 cpu_to_be32(0x0)};
532 char mem_name[32];
533 hwaddr node0_size, mem_start, node_size;
534 uint64_t mem_reg_property[2];
535 int i, off;
536
537 /* memory node(s) */
538 if (nb_numa_nodes > 1 && node_mem[0] < ram_size) {
539 node0_size = node_mem[0];
540 } else {
541 node0_size = ram_size;
542 }
543
544 /* RMA */
545 mem_reg_property[0] = 0;
546 mem_reg_property[1] = cpu_to_be64(spapr->rma_size);
547 off = fdt_add_subnode(fdt, 0, "memory@0");
548 _FDT(off);
549 _FDT((fdt_setprop_string(fdt, off, "device_type", "memory")));
550 _FDT((fdt_setprop(fdt, off, "reg", mem_reg_property,
551 sizeof(mem_reg_property))));
552 _FDT((fdt_setprop(fdt, off, "ibm,associativity", associativity,
553 sizeof(associativity))));
554
555 /* RAM: Node 0 */
556 if (node0_size > spapr->rma_size) {
557 mem_reg_property[0] = cpu_to_be64(spapr->rma_size);
558 mem_reg_property[1] = cpu_to_be64(node0_size - spapr->rma_size);
559
560 sprintf(mem_name, "memory@" TARGET_FMT_lx, spapr->rma_size);
561 off = fdt_add_subnode(fdt, 0, mem_name);
562 _FDT(off);
563 _FDT((fdt_setprop_string(fdt, off, "device_type", "memory")));
564 _FDT((fdt_setprop(fdt, off, "reg", mem_reg_property,
565 sizeof(mem_reg_property))));
566 _FDT((fdt_setprop(fdt, off, "ibm,associativity", associativity,
567 sizeof(associativity))));
568 }
569
570 /* RAM: Node 1 and beyond */
571 mem_start = node0_size;
572 for (i = 1; i < nb_numa_nodes; i++) {
573 mem_reg_property[0] = cpu_to_be64(mem_start);
574 if (mem_start >= ram_size) {
575 node_size = 0;
576 } else {
577 node_size = node_mem[i];
578 if (node_size > ram_size - mem_start) {
579 node_size = ram_size - mem_start;
580 }
581 }
582 mem_reg_property[1] = cpu_to_be64(node_size);
583 associativity[3] = associativity[4] = cpu_to_be32(i);
584 sprintf(mem_name, "memory@" TARGET_FMT_lx, mem_start);
585 off = fdt_add_subnode(fdt, 0, mem_name);
586 _FDT(off);
587 _FDT((fdt_setprop_string(fdt, off, "device_type", "memory")));
588 _FDT((fdt_setprop(fdt, off, "reg", mem_reg_property,
589 sizeof(mem_reg_property))));
590 _FDT((fdt_setprop(fdt, off, "ibm,associativity", associativity,
591 sizeof(associativity))));
592 mem_start += node_size;
593 }
594
595 return 0;
596 }
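/*
 * Example of the nodes generated above (a sketch assuming a 4G guest split
 * into two 2G NUMA nodes with a 256M RMA; "reg" abbreviated to (base, size)
 * pairs):
 *   memory@0         reg = (0x0,        0x10000000)   RMA, node 0
 *   memory@10000000  reg = (0x10000000, 0x70000000)   rest of node 0
 *   memory@80000000  reg = (0x80000000, 0x80000000)   node 1
 * each carrying an "ibm,associativity" property whose trailing cells name
 * the NUMA node.
 */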
597
598 static void spapr_finalize_fdt(sPAPREnvironment *spapr,
599 hwaddr fdt_addr,
600 hwaddr rtas_addr,
601 hwaddr rtas_size)
602 {
603 int ret;
604 void *fdt;
605 sPAPRPHBState *phb;
606
607 fdt = g_malloc(FDT_MAX_SIZE);
608
609 /* open out the base tree into a temp buffer for the final tweaks */
610 _FDT((fdt_open_into(spapr->fdt_skel, fdt, FDT_MAX_SIZE)));
611
612 ret = spapr_populate_memory(spapr, fdt);
613 if (ret < 0) {
614 fprintf(stderr, "couldn't setup memory nodes in fdt\n");
615 exit(1);
616 }
617
618 ret = spapr_populate_vdevice(spapr->vio_bus, fdt);
619 if (ret < 0) {
620 fprintf(stderr, "couldn't setup vio devices in fdt\n");
621 exit(1);
622 }
623
624 QLIST_FOREACH(phb, &spapr->phbs, list) {
625 ret = spapr_populate_pci_dt(phb, PHANDLE_XICP, fdt);
626 }
627
628 if (ret < 0) {
629 fprintf(stderr, "couldn't setup PCI devices in fdt\n");
630 exit(1);
631 }
632
633 /* RTAS */
634 ret = spapr_rtas_device_tree_setup(fdt, rtas_addr, rtas_size);
635 if (ret < 0) {
636 fprintf(stderr, "Couldn't set up RTAS device tree properties\n");
637 }
638
639 /* Advertise NUMA via ibm,associativity */
640 ret = spapr_fixup_cpu_dt(fdt, spapr);
641 if (ret < 0) {
642 fprintf(stderr, "Couldn't finalize CPU device tree properties\n");
643 }
644
645 if (!spapr->has_graphics) {
646 spapr_populate_chosen_stdout(fdt, spapr->vio_bus);
647 }
648
649 _FDT((fdt_pack(fdt)));
650
651 if (fdt_totalsize(fdt) > FDT_MAX_SIZE) {
652 hw_error("FDT too big! 0x%x bytes (max is 0x%x)\n",
653 fdt_totalsize(fdt), FDT_MAX_SIZE);
654 exit(1);
655 }
656
657 cpu_physical_memory_write(fdt_addr, fdt, fdt_totalsize(fdt));
658
659 g_free(fdt);
660 }
661
662 static uint64_t translate_kernel_address(void *opaque, uint64_t addr)
663 {
664 return (addr & 0x0fffffff) + KERNEL_LOAD_ADDR;
665 }
666
667 static void emulate_spapr_hypercall(PowerPCCPU *cpu)
668 {
669 CPUPPCState *env = &cpu->env;
670
671 if (msr_pr) {
672 hcall_dprintf("Hypercall made with MSR[PR]=1\n");
673 env->gpr[3] = H_PRIVILEGE;
674 } else {
675 env->gpr[3] = spapr_hypercall(cpu, env->gpr[3], &env->gpr[4]);
676 }
677 }
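/*
 * Calling convention, as implied by the code above: the hypercall number
 * arrives in GPR3, its arguments start at GPR4, and the return code is
 * written back to GPR3, matching the sPAPR hcall ABI.
 */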
678
679 static void spapr_reset_htab(sPAPREnvironment *spapr)
680 {
681 long shift;
682
683 /* Allocate the hash page table, sized by spapr->htab_shift, which
684 * is chosen at machine init to scale with the size of guest RAM
685 * (see ppc_spapr_init()) */
686
687 shift = kvmppc_reset_htab(spapr->htab_shift);
688
689 if (shift > 0) {
690 /* Kernel handles htab, we don't need to allocate one */
691 spapr->htab_shift = shift;
692 kvmppc_kern_htab = true;
693 } else {
694 if (!spapr->htab) {
695 /* Allocate an htab if we don't yet have one */
696 spapr->htab = qemu_memalign(HTAB_SIZE(spapr), HTAB_SIZE(spapr));
697 }
698
699 /* And clear it */
700 memset(spapr->htab, 0, HTAB_SIZE(spapr));
701 }
702
703 /* Update the RMA size if necessary */
704 if (spapr->vrma_adjust) {
705 hwaddr node0_size = (nb_numa_nodes > 1) ? node_mem[0] : ram_size;
706 spapr->rma_size = kvmppc_rma_size(node0_size, spapr->htab_shift);
707 }
708 }
709
710 static void ppc_spapr_reset(void)
711 {
712 PowerPCCPU *first_ppc_cpu;
713
714 /* Reset the hash table & recalc the RMA */
715 spapr_reset_htab(spapr);
716
717 qemu_devices_reset();
718
719 /* Load the fdt */
720 spapr_finalize_fdt(spapr, spapr->fdt_addr, spapr->rtas_addr,
721 spapr->rtas_size);
722
723 /* Set up the entry state */
724 first_ppc_cpu = POWERPC_CPU(first_cpu);
725 first_ppc_cpu->env.gpr[3] = spapr->fdt_addr;
726 first_ppc_cpu->env.gpr[5] = 0;
727 first_cpu->halted = 0;
728 first_ppc_cpu->env.nip = spapr->entry_point;
729
730 }
731
732 static void spapr_cpu_reset(void *opaque)
733 {
734 PowerPCCPU *cpu = opaque;
735 CPUState *cs = CPU(cpu);
736 CPUPPCState *env = &cpu->env;
737
738 cpu_reset(cs);
739
740 /* All CPUs start halted. CPU0 is unhalted from the machine level
741 * reset code and the rest are explicitly started up by the guest
742 * using an RTAS call */
743 cs->halted = 1;
744
745 env->spr[SPR_HIOR] = 0;
746
747 env->external_htab = (uint8_t *)spapr->htab;
748 if (kvm_enabled() && !env->external_htab) {
749 /*
750 * HV KVM, set external_htab to 1 so our ppc_hash64_load_hpte*
751 * functions do the right thing.
752 */
753 env->external_htab = (void *)1;
754 }
755 env->htab_base = -1;
756 /*
757 * htab_mask is the mask used to normalize hash value to PTEG index.
758 * htab_shift is log2 of hash table size.
759 * We have 8 HPTEs per group, and each HPTE is 16 bytes,
760 * i.e. 128 bytes per PTEG.
761 */
762 env->htab_mask = (1ULL << ((spapr)->htab_shift - 7)) - 1;
763 env->spr[SPR_SDR1] = (target_ulong)(uintptr_t)spapr->htab |
764 (spapr->htab_shift - 18);
765 }
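/*
 * Worked example for the htab_mask/SDR1 setup above (a sketch assuming a
 * 16MB hash table, i.e. htab_shift == 24):
 *   htab_mask          = (1ULL << (24 - 7)) - 1 = 0x1ffff  (128K PTEGs)
 *   SDR1 HTABSIZE bits = 24 - 18 = 6
 * since a PTEG is 8 HPTEs * 16 bytes = 128 bytes (hence the "- 7") and 18
 * is the minimum architected hash table shift (256KB).
 */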
766
767 static void spapr_create_nvram(sPAPREnvironment *spapr)
768 {
769 DeviceState *dev = qdev_create(&spapr->vio_bus->bus, "spapr-nvram");
770 DriveInfo *dinfo = drive_get(IF_PFLASH, 0, 0);
771
772 if (dinfo) {
773 qdev_prop_set_drive_nofail(dev, "drive", dinfo->bdrv);
774 }
775
776 qdev_init_nofail(dev);
777
778 spapr->nvram = (struct sPAPRNVRAM *)dev;
779 }
780
781 /* Returns whether we want to use VGA or not */
782 static int spapr_vga_init(PCIBus *pci_bus)
783 {
784 switch (vga_interface_type) {
785 case VGA_NONE:
786 return false;
787 case VGA_DEVICE:
788 return true;
789 case VGA_STD:
790 return pci_vga_init(pci_bus) != NULL;
791 default:
792 fprintf(stderr, "This vga model is not supported, "
793 "currently it only supports -vga std\n");
794 exit(1);
795 }
796 }
797
798 static const VMStateDescription vmstate_spapr = {
799 .name = "spapr",
800 .version_id = 1,
801 .minimum_version_id = 1,
802 .minimum_version_id_old = 1,
803 .fields = (VMStateField []) {
804 VMSTATE_UINT32(next_irq, sPAPREnvironment),
805
806 /* RTC offset */
807 VMSTATE_UINT64(rtc_offset, sPAPREnvironment),
808
809 VMSTATE_END_OF_LIST()
810 },
811 };
812
813 #define HPTE(_table, _i) (void *)(((uint64_t *)(_table)) + ((_i) * 2))
814 #define HPTE_VALID(_hpte) (tswap64(*((uint64_t *)(_hpte))) & HPTE64_V_VALID)
815 #define HPTE_DIRTY(_hpte) (tswap64(*((uint64_t *)(_hpte))) & HPTE64_V_HPTE_DIRTY)
816 #define CLEAN_HPTE(_hpte) ((*(uint64_t *)(_hpte)) &= tswap64(~HPTE64_V_HPTE_DIRTY))
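/* What the helpers above index into (assumed layout, consistent with
 * HASH_PTE_SIZE_64 == 16): the htab is an array of 16-byte HPTEs, each a
 * pair of big-endian 64-bit doublewords, so HPTE(table, i) is just
 * &((uint64_t *)(table))[2 * i]; the VALID and software DIRTY bits both
 * live in the first doubleword. */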
817
818 static int htab_save_setup(QEMUFile *f, void *opaque)
819 {
820 sPAPREnvironment *spapr = opaque;
821
822 /* "Iteration" header */
823 qemu_put_be32(f, spapr->htab_shift);
824
825 if (spapr->htab) {
826 spapr->htab_save_index = 0;
827 spapr->htab_first_pass = true;
828 } else {
829 assert(kvm_enabled());
830
831 spapr->htab_fd = kvmppc_get_htab_fd(false);
832 if (spapr->htab_fd < 0) {
833 fprintf(stderr, "Unable to open fd for reading hash table from KVM: %s\n",
834 strerror(errno));
835 return -1;
836 }
837 }
838
839
840 return 0;
841 }
842
843 static void htab_save_first_pass(QEMUFile *f, sPAPREnvironment *spapr,
844 int64_t max_ns)
845 {
846 int htabslots = HTAB_SIZE(spapr) / HASH_PTE_SIZE_64;
847 int index = spapr->htab_save_index;
848 int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
849
850 assert(spapr->htab_first_pass);
851
852 do {
853 int chunkstart;
854
855 /* Consume invalid HPTEs */
856 while ((index < htabslots)
857 && !HPTE_VALID(HPTE(spapr->htab, index))) {
858 CLEAN_HPTE(HPTE(spapr->htab, index));
859 index++;
860 }
861
862 /* Consume valid HPTEs */
863 chunkstart = index;
864 while ((index < htabslots)
865 && HPTE_VALID(HPTE(spapr->htab, index))) {
866 CLEAN_HPTE(HPTE(spapr->htab, index));
867 index++;
868 }
869
870 if (index > chunkstart) {
871 int n_valid = index - chunkstart;
872
873 qemu_put_be32(f, chunkstart);
874 qemu_put_be16(f, n_valid);
875 qemu_put_be16(f, 0);
876 qemu_put_buffer(f, HPTE(spapr->htab, chunkstart),
877 HASH_PTE_SIZE_64 * n_valid);
878
879 if ((qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) > max_ns) {
880 break;
881 }
882 }
883 } while ((index < htabslots) && !qemu_file_rate_limit(f));
884
885 if (index >= htabslots) {
886 assert(index == htabslots);
887 index = 0;
888 spapr->htab_first_pass = false;
889 }
890 spapr->htab_save_index = index;
891 }
892
893 static int htab_save_later_pass(QEMUFile *f, sPAPREnvironment *spapr,
894 int64_t max_ns)
895 {
896 bool final = max_ns < 0;
897 int htabslots = HTAB_SIZE(spapr) / HASH_PTE_SIZE_64;
898 int examined = 0, sent = 0;
899 int index = spapr->htab_save_index;
900 int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
901
902 assert(!spapr->htab_first_pass);
903
904 do {
905 int chunkstart, invalidstart;
906
907 /* Consume non-dirty HPTEs */
908 while ((index < htabslots)
909 && !HPTE_DIRTY(HPTE(spapr->htab, index))) {
910 index++;
911 examined++;
912 }
913
914 chunkstart = index;
915 /* Consume valid dirty HPTEs */
916 while ((index < htabslots)
917 && HPTE_DIRTY(HPTE(spapr->htab, index))
918 && HPTE_VALID(HPTE(spapr->htab, index))) {
919 CLEAN_HPTE(HPTE(spapr->htab, index));
920 index++;
921 examined++;
922 }
923
924 invalidstart = index;
925 /* Consume invalid dirty HPTEs */
926 while ((index < htabslots)
927 && HPTE_DIRTY(HPTE(spapr->htab, index))
928 && !HPTE_VALID(HPTE(spapr->htab, index))) {
929 CLEAN_HPTE(HPTE(spapr->htab, index));
930 index++;
931 examined++;
932 }
933
934 if (index > chunkstart) {
935 int n_valid = invalidstart - chunkstart;
936 int n_invalid = index - invalidstart;
937
938 qemu_put_be32(f, chunkstart);
939 qemu_put_be16(f, n_valid);
940 qemu_put_be16(f, n_invalid);
941 qemu_put_buffer(f, HPTE(spapr->htab, chunkstart),
942 HASH_PTE_SIZE_64 * n_valid);
943 sent += index - chunkstart;
944
945 if (!final && (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) > max_ns) {
946 break;
947 }
948 }
949
950 if (examined >= htabslots) {
951 break;
952 }
953
954 if (index >= htabslots) {
955 assert(index == htabslots);
956 index = 0;
957 }
958 } while ((examined < htabslots) && (!qemu_file_rate_limit(f) || final));
959
960 if (index >= htabslots) {
961 assert(index == htabslots);
962 index = 0;
963 }
964
965 spapr->htab_save_index = index;
966
967 return (examined >= htabslots) && (sent == 0) ? 1 : 0;
968 }
969
970 #define MAX_ITERATION_NS 5000000 /* 5 ms */
971 #define MAX_KVM_BUF_SIZE 2048
972
973 static int htab_save_iterate(QEMUFile *f, void *opaque)
974 {
975 sPAPREnvironment *spapr = opaque;
976 int rc = 0;
977
978 /* Iteration header */
979 qemu_put_be32(f, 0);
980
981 if (!spapr->htab) {
982 assert(kvm_enabled());
983
984 rc = kvmppc_save_htab(f, spapr->htab_fd,
985 MAX_KVM_BUF_SIZE, MAX_ITERATION_NS);
986 if (rc < 0) {
987 return rc;
988 }
989 } else if (spapr->htab_first_pass) {
990 htab_save_first_pass(f, spapr, MAX_ITERATION_NS);
991 } else {
992 rc = htab_save_later_pass(f, spapr, MAX_ITERATION_NS);
993 }
994
995 /* End marker */
996 qemu_put_be32(f, 0);
997 qemu_put_be16(f, 0);
998 qemu_put_be16(f, 0);
999
1000 return rc;
1001 }
1002
1003 static int htab_save_complete(QEMUFile *f, void *opaque)
1004 {
1005 sPAPREnvironment *spapr = opaque;
1006
1007 /* Iteration header */
1008 qemu_put_be32(f, 0);
1009
1010 if (!spapr->htab) {
1011 int rc;
1012
1013 assert(kvm_enabled());
1014
1015 rc = kvmppc_save_htab(f, spapr->htab_fd, MAX_KVM_BUF_SIZE, -1);
1016 if (rc < 0) {
1017 return rc;
1018 }
1019 close(spapr->htab_fd);
1020 spapr->htab_fd = -1;
1021 } else {
1022 htab_save_later_pass(f, spapr, -1);
1023 }
1024
1025 /* End marker */
1026 qemu_put_be32(f, 0);
1027 qemu_put_be16(f, 0);
1028 qemu_put_be16(f, 0);
1029
1030 return 0;
1031 }
1032
1033 static int htab_load(QEMUFile *f, void *opaque, int version_id)
1034 {
1035 sPAPREnvironment *spapr = opaque;
1036 uint32_t section_hdr;
1037 int fd = -1;
1038
1039 if (version_id < 1 || version_id > 1) {
1040 fprintf(stderr, "htab_load() bad version\n");
1041 return -EINVAL;
1042 }
1043
1044 section_hdr = qemu_get_be32(f);
1045
1046 if (section_hdr) {
1047 /* First section, just the hash shift */
1048 if (spapr->htab_shift != section_hdr) {
1049 return -EINVAL;
1050 }
1051 return 0;
1052 }
1053
1054 if (!spapr->htab) {
1055 assert(kvm_enabled());
1056
1057 fd = kvmppc_get_htab_fd(true);
1058 if (fd < 0) {
1059 fprintf(stderr, "Unable to open fd to restore KVM hash table: %s\n",
1060 strerror(errno));
1061 }
1062 }
1063
1064 while (true) {
1065 uint32_t index;
1066 uint16_t n_valid, n_invalid;
1067
1068 index = qemu_get_be32(f);
1069 n_valid = qemu_get_be16(f);
1070 n_invalid = qemu_get_be16(f);
1071
1072 if ((index == 0) && (n_valid == 0) && (n_invalid == 0)) {
1073 /* End of Stream */
1074 break;
1075 }
1076
1077 if ((index + n_valid + n_invalid) >
1078 (HTAB_SIZE(spapr) / HASH_PTE_SIZE_64)) {
1079 /* Bad index in stream */
1080 fprintf(stderr, "htab_load() bad index %d (%hd+%hd entries) "
1081 "in htab stream (htab_shift=%d)\n", index, n_valid, n_invalid,
1082 spapr->htab_shift);
1083 return -EINVAL;
1084 }
1085
1086 if (spapr->htab) {
1087 if (n_valid) {
1088 qemu_get_buffer(f, HPTE(spapr->htab, index),
1089 HASH_PTE_SIZE_64 * n_valid);
1090 }
1091 if (n_invalid) {
1092 memset(HPTE(spapr->htab, index + n_valid), 0,
1093 HASH_PTE_SIZE_64 * n_invalid);
1094 }
1095 } else {
1096 int rc;
1097
1098 assert(fd >= 0);
1099
1100 rc = kvmppc_load_htab_chunk(f, fd, index, n_valid, n_invalid);
1101 if (rc < 0) {
1102 return rc;
1103 }
1104 }
1105 }
1106
1107 if (!spapr->htab) {
1108 assert(fd >= 0);
1109 close(fd);
1110 }
1111
1112 return 0;
1113 }
1114
1115 static SaveVMHandlers savevm_htab_handlers = {
1116 .save_live_setup = htab_save_setup,
1117 .save_live_iterate = htab_save_iterate,
1118 .save_live_complete = htab_save_complete,
1119 .load_state = htab_load,
1120 };
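/*
 * Stream layout produced and consumed by the handlers above (a reader's
 * summary of what the code writes, not a separate spec):
 *   setup:     be32 htab_shift                          (non-zero)
 *   iterate:   be32 0, then zero or more chunks of
 *              be32 index, be16 n_valid, be16 n_invalid,
 *              n_valid * HASH_PTE_SIZE_64 bytes of HPTEs
 *   end mark:  be32 0, be16 0, be16 0
 * htab_load() keys off the non-zero first word to recognise the setup
 * section and otherwise reads chunks until the end marker.
 */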
1121
1122 /* pSeries LPAR / sPAPR hardware init */
1123 static void ppc_spapr_init(QEMUMachineInitArgs *args)
1124 {
1125 ram_addr_t ram_size = args->ram_size;
1126 const char *cpu_model = args->cpu_model;
1127 const char *kernel_filename = args->kernel_filename;
1128 const char *kernel_cmdline = args->kernel_cmdline;
1129 const char *initrd_filename = args->initrd_filename;
1130 const char *boot_device = args->boot_order;
1131 PowerPCCPU *cpu;
1132 CPUPPCState *env;
1133 PCIHostState *phb;
1134 int i;
1135 MemoryRegion *sysmem = get_system_memory();
1136 MemoryRegion *ram = g_new(MemoryRegion, 1);
1137 hwaddr rma_alloc_size;
1138 hwaddr node0_size = (nb_numa_nodes > 1) ? node_mem[0] : ram_size;
1139 uint32_t initrd_base = 0;
1140 long kernel_size = 0, initrd_size = 0;
1141 long load_limit, rtas_limit, fw_size;
1142 bool kernel_le = false;
1143 char *filename;
1144
1145 msi_supported = true;
1146
1147 spapr = g_malloc0(sizeof(*spapr));
1148 QLIST_INIT(&spapr->phbs);
1149
1150 cpu_ppc_hypercall = emulate_spapr_hypercall;
1151
1152 /* Allocate RMA if necessary */
1153 rma_alloc_size = kvmppc_alloc_rma("ppc_spapr.rma", sysmem);
1154
1155 if (rma_alloc_size == -1) {
1156 hw_error("qemu: Unable to create RMA\n");
1157 exit(1);
1158 }
1159
1160 if (rma_alloc_size && (rma_alloc_size < node0_size)) {
1161 spapr->rma_size = rma_alloc_size;
1162 } else {
1163 spapr->rma_size = node0_size;
1164
1165 /* With KVM, we don't actually know whether KVM supports an
1166 * unbounded RMA (PR KVM) or is limited by the hash table size
1167 * (HV KVM using VRMA), so we always assume the latter
1168 *
1169 * In that case, we also limit the initial allocations for RTAS
1170 * etc... to 256M since we have no way to know what the VRMA size
1171 * is going to be, as it depends on the size of the hash table,
1172 * which isn't determined yet.
1173 */
1174 if (kvm_enabled()) {
1175 spapr->vrma_adjust = 1;
1176 spapr->rma_size = MIN(spapr->rma_size, 0x10000000);
1177 }
1178 }
1179
1180 if (spapr->rma_size > node0_size) {
1181 fprintf(stderr, "Error: Numa node 0 has to span the RMA (%#08"HWADDR_PRIx")\n",
1182 spapr->rma_size);
1183 exit(1);
1184 }
1185
1186 /* We place the device tree and RTAS just below either the top of the RMA,
1187 * or just below 2GB, whichever is lower, so that they can be
1188 * processed with 32-bit real mode code if necessary */
1189 rtas_limit = MIN(spapr->rma_size, 0x80000000);
1190 spapr->rtas_addr = rtas_limit - RTAS_MAX_SIZE;
1191 spapr->fdt_addr = spapr->rtas_addr - FDT_MAX_SIZE;
1192 load_limit = spapr->fdt_addr - FW_OVERHEAD;
1193
1194 /* We aim for a hash table of size 1/128 the size of RAM. The
1195 * normal rule of thumb is 1/64 the size of RAM, but that's much
1196 * more than needed for the Linux guests we support. */
1197 spapr->htab_shift = 18; /* Minimum architected size */
1198 while (spapr->htab_shift <= 46) {
1199 if ((1ULL << (spapr->htab_shift + 7)) >= ram_size) {
1200 break;
1201 }
1202 spapr->htab_shift++;
1203 }
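    /* Worked example of the loop above (a sketch): for a 2G guest,
     * ram_size == 1ULL << 31, so the loop stops at htab_shift == 24
     * (1ULL << (24 + 7) == 2G), giving a 16MB hash table, i.e. 1/128
     * of RAM as the comment describes. */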
1204
1205 /* Set up Interrupt Controller before we create the VCPUs */
1206 spapr->icp = xics_system_init(smp_cpus * kvmppc_smt_threads() / smp_threads,
1207 XICS_IRQS);
1208 spapr->next_irq = XICS_IRQ_BASE;
1209
1210 /* init CPUs */
1211 if (cpu_model == NULL) {
1212 cpu_model = kvm_enabled() ? "host" : "POWER7";
1213 }
1214 for (i = 0; i < smp_cpus; i++) {
1215 cpu = cpu_ppc_init(cpu_model);
1216 if (cpu == NULL) {
1217 fprintf(stderr, "Unable to find PowerPC CPU definition\n");
1218 exit(1);
1219 }
1220 env = &cpu->env;
1221
1222 /* Set time-base frequency to 512 MHz */
1223 cpu_ppc_tb_init(env, TIMEBASE_FREQ);
1224
1225 /* PAPR always has exception vectors in RAM not ROM. To ensure this,
1226 * MSR[IP] should never be set.
1227 */
1228 env->msr_mask &= ~(1 << 6);
1229
1230 /* Tell KVM that we're in PAPR mode */
1231 if (kvm_enabled()) {
1232 kvmppc_set_papr(cpu);
1233 }
1234
1235 xics_cpu_setup(spapr->icp, cpu);
1236
1237 qemu_register_reset(spapr_cpu_reset, cpu);
1238 }
1239
1240 /* allocate RAM */
1241 spapr->ram_limit = ram_size;
1242 if (spapr->ram_limit > rma_alloc_size) {
1243 ram_addr_t nonrma_base = rma_alloc_size;
1244 ram_addr_t nonrma_size = spapr->ram_limit - rma_alloc_size;
1245
1246 memory_region_init_ram(ram, NULL, "ppc_spapr.ram", nonrma_size);
1247 vmstate_register_ram_global(ram);
1248 memory_region_add_subregion(sysmem, nonrma_base, ram);
1249 }
1250
1251 filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, "spapr-rtas.bin");
1252 spapr->rtas_size = load_image_targphys(filename, spapr->rtas_addr,
1253 rtas_limit - spapr->rtas_addr);
1254 if (spapr->rtas_size < 0) {
1255 hw_error("qemu: could not load LPAR rtas '%s'\n", filename);
1256 exit(1);
1257 }
1258 if (spapr->rtas_size > RTAS_MAX_SIZE) {
1259 hw_error("RTAS too big! 0x%lx bytes (max is 0x%x)\n",
1260 spapr->rtas_size, RTAS_MAX_SIZE);
1261 exit(1);
1262 }
1263 g_free(filename);
1264
1265 /* Set up EPOW events infrastructure */
1266 spapr_events_init(spapr);
1267
1268 /* Set up VIO bus */
1269 spapr->vio_bus = spapr_vio_bus_init();
1270
1271 for (i = 0; i < MAX_SERIAL_PORTS; i++) {
1272 if (serial_hds[i]) {
1273 spapr_vty_create(spapr->vio_bus, serial_hds[i]);
1274 }
1275 }
1276
1277 /* We always have at least the nvram device on VIO */
1278 spapr_create_nvram(spapr);
1279
1280 /* Set up PCI */
1281 spapr_pci_msi_init(spapr, SPAPR_PCI_MSI_WINDOW);
1282 spapr_pci_rtas_init();
1283
1284 phb = spapr_create_phb(spapr, 0);
1285
1286 for (i = 0; i < nb_nics; i++) {
1287 NICInfo *nd = &nd_table[i];
1288
1289 if (!nd->model) {
1290 nd->model = g_strdup("ibmveth");
1291 }
1292
1293 if (strcmp(nd->model, "ibmveth") == 0) {
1294 spapr_vlan_create(spapr->vio_bus, nd);
1295 } else {
1296 pci_nic_init_nofail(&nd_table[i], phb->bus, nd->model, NULL);
1297 }
1298 }
1299
1300 for (i = 0; i <= drive_get_max_bus(IF_SCSI); i++) {
1301 spapr_vscsi_create(spapr->vio_bus);
1302 }
1303
1304 /* Graphics */
1305 if (spapr_vga_init(phb->bus)) {
1306 spapr->has_graphics = true;
1307 }
1308
1309 if (usb_enabled(spapr->has_graphics)) {
1310 pci_create_simple(phb->bus, -1, "pci-ohci");
1311 if (spapr->has_graphics) {
1312 usbdevice_create("keyboard");
1313 usbdevice_create("mouse");
1314 }
1315 }
1316
1317 if (spapr->rma_size < (MIN_RMA_SLOF << 20)) {
1318 fprintf(stderr, "qemu: pSeries SLOF firmware requires >= "
1319 "%ldM guest RMA (Real Mode Area memory)\n", MIN_RMA_SLOF);
1320 exit(1);
1321 }
1322
1323 if (kernel_filename) {
1324 uint64_t lowaddr = 0;
1325
1326 kernel_size = load_elf(kernel_filename, translate_kernel_address, NULL,
1327 NULL, &lowaddr, NULL, 1, ELF_MACHINE, 0);
1328 if (kernel_size == ELF_LOAD_WRONG_ENDIAN) {
1329 kernel_size = load_elf(kernel_filename,
1330 translate_kernel_address, NULL,
1331 NULL, &lowaddr, NULL, 0, ELF_MACHINE, 0);
1332 kernel_le = kernel_size > 0;
1333 }
1334 if (kernel_size < 0) {
1335 fprintf(stderr, "qemu: error loading %s: %s\n",
1336 kernel_filename, load_elf_strerror(kernel_size));
1337 exit(1);
1338 }
1339
1340 /* load initrd */
1341 if (initrd_filename) {
1342 /* Try to locate the initrd in the gap between the kernel
1343 * and the firmware. Add a bit of space just in case
1344 */
1345 initrd_base = (KERNEL_LOAD_ADDR + kernel_size + 0x1ffff) & ~0xffff;
1346 initrd_size = load_image_targphys(initrd_filename, initrd_base,
1347 load_limit - initrd_base);
1348 if (initrd_size < 0) {
1349 fprintf(stderr, "qemu: could not load initial ram disk '%s'\n",
1350 initrd_filename);
1351 exit(1);
1352 }
1353 } else {
1354 initrd_base = 0;
1355 initrd_size = 0;
1356 }
1357 }
1358
1359 if (bios_name == NULL) {
1360 bios_name = FW_FILE_NAME;
1361 }
1362 filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name);
1363 fw_size = load_image_targphys(filename, 0, FW_MAX_SIZE);
1364 if (fw_size < 0) {
1365 hw_error("qemu: could not load LPAR firmware '%s'\n", filename);
1366 exit(1);
1367 }
1368 g_free(filename);
1369
1370 spapr->entry_point = 0x100;
1371
1372 vmstate_register(NULL, 0, &vmstate_spapr, spapr);
1373 register_savevm_live(NULL, "spapr/htab", -1, 1,
1374 &savevm_htab_handlers, spapr);
1375
1376 /* Prepare the device tree */
1377 spapr->fdt_skel = spapr_create_fdt_skel(initrd_base, initrd_size,
1378 kernel_size, kernel_le,
1379 boot_device, kernel_cmdline,
1380 spapr->epow_irq);
1381 assert(spapr->fdt_skel != NULL);
1382 }
1383
1384 static int spapr_kvm_type(const char *vm_type)
1385 {
1386 if (!vm_type) {
1387 return 0;
1388 }
1389
1390 if (!strcmp(vm_type, "HV")) {
1391 return 1;
1392 }
1393
1394 if (!strcmp(vm_type, "PR")) {
1395 return 2;
1396 }
1397
1398 error_report("Unknown kvm-type specified '%s'", vm_type);
1399 exit(1);
1400 }
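/*
 * Usage note (an assumption based on the values above, not stated in this
 * file): the number returned here is handed to KVM as the VM type when the
 * VM is created, where 1 and 2 select the kernel's HV and PR flavours, so
 * e.g. "-machine pseries,kvm-type=HV" forces HV KVM.
 */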
1401
1402 static QEMUMachine spapr_machine = {
1403 .name = "pseries",
1404 .desc = "pSeries Logical Partition (PAPR compliant)",
1405 .is_default = 1,
1406 .init = ppc_spapr_init,
1407 .reset = ppc_spapr_reset,
1408 .block_default_type = IF_SCSI,
1409 .max_cpus = MAX_CPUS,
1410 .no_parallel = 1,
1411 .default_boot_order = NULL,
1412 .kvm_type = spapr_kvm_type,
1413 };
1414
1415 static void spapr_machine_class_init(ObjectClass *oc, void *data)
1416 {
1417 MachineClass *mc = MACHINE_CLASS(oc);
1418
1419 mc->qemu_machine = data;
1420 }
1421
1422 static const TypeInfo spapr_machine_info = {
1423 .name = TYPE_SPAPR_MACHINE,
1424 .parent = TYPE_MACHINE,
1425 .class_init = spapr_machine_class_init,
1426 .class_data = &spapr_machine,
1427 };
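/*
 * Sketch (hypothetical, not part of this file) of how the wrapper above
 * could be reused for another pseries variant: provide a second
 * QEMUMachine and register one more TypeInfo pointing at it, e.g.
 *
 *   static QEMUMachine spapr_machine_foo = {
 *       .name = "pseries-foo",
 *       .init = ppc_spapr_init,
 *       ...
 *   };
 *   static const TypeInfo spapr_machine_foo_info = {
 *       .name       = "pseries-foo-machine",
 *       .parent     = TYPE_MACHINE,
 *       .class_init = spapr_machine_class_init,
 *       .class_data = &spapr_machine_foo,
 *   };
 *
 * spapr_machine_class_init() then hooks the struct into the MachineClass
 * via mc->qemu_machine exactly as it does for the default "pseries"
 * machine.
 */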
1428
1429 static void spapr_machine_register_types(void)
1430 {
1431 type_register_static(&spapr_machine_info);
1432 }
1433
1434 type_init(spapr_machine_register_types)