/*
 * NUMA parameter parsing routines
 *
 * Copyright (c) 2014 Fujitsu Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "sysemu/numa.h"
#include "exec/cpu-common.h" /* for RAM_ADDR_FMT */
#include "exec/ramlist.h"
#include "qemu/bitmap.h"
#include "qom/cpu.h"
#include "qemu/error-report.h"
#include "qapi-visit.h"
#include "qapi/opts-visitor.h"
#include "hw/boards.h"
#include "sysemu/hostmem.h"
#include "qmp-commands.h"
#include "hw/mem/pc-dimm.h"
#include "qemu/option.h"
#include "qemu/config-file.h"

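/*
 * Command-line interface handled here (illustrative examples only):
 *   -numa node,nodeid=0,cpus=0-3,mem=512M
 *   -numa node,nodeid=1,cpus=4-7,memdev=ram1
 * The keys are validated against the NumaOptions QAPI type by an
 * OptsVisitor in parse_numa() below.
 */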
QemuOptsList qemu_numa_opts = {
    .name = "numa",
    .implied_opt_name = "type",
    .head = QTAILQ_HEAD_INITIALIZER(qemu_numa_opts.head),
    .desc = { { 0 } } /* validated with OptsVisitor */
};

static int have_memdevs = -1;
static int max_numa_nodeid; /* Highest specified NUMA node ID, plus one.
                             * For all nodes, nodeid < max_numa_nodeid
                             */
int nb_numa_nodes;
NodeInfo numa_info[MAX_NODES];

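/*
 * Record that the [addr, addr + size) range of guest RAM belongs to
 * NUMA @node, by prepending it to that node's address range list.
 */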
void numa_set_mem_node_id(ram_addr_t addr, uint64_t size, uint32_t node)
{
    struct numa_addr_range *range;

    /*
     * Memory-less nodes can come here with 0 size, in which case
     * there is nothing to do.
     */
    if (!size) {
        return;
    }

    range = g_malloc0(sizeof(*range));
    range->mem_start = addr;
    range->mem_end = addr + size - 1;
    QLIST_INSERT_HEAD(&numa_info[node].addr, range, entry);
}

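/*
 * Drop the range previously recorded for @node that exactly matches
 * [addr, addr + size); partially overlapping ranges are left alone.
 */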
void numa_unset_mem_node_id(ram_addr_t addr, uint64_t size, uint32_t node)
{
    struct numa_addr_range *range, *next;

    QLIST_FOREACH_SAFE(range, &numa_info[node].addr, entry, next) {
        if (addr == range->mem_start && (addr + size - 1) == range->mem_end) {
            QLIST_REMOVE(range, entry);
            g_free(range);
            return;
        }
    }
}

static void numa_set_mem_ranges(void)
{
    int i;
    ram_addr_t mem_start = 0;

    /*
     * Deduce the start address of each node and use it to populate
     * the address range list in numa_info for each node.
     */
    for (i = 0; i < nb_numa_nodes; i++) {
        numa_set_mem_node_id(mem_start, numa_info[i].node_mem, i);
        mem_start += numa_info[i].node_mem;
    }
}

/*
 * Check if @addr falls under NUMA @node.
 */
static bool numa_addr_belongs_to_node(ram_addr_t addr, uint32_t node)
{
    struct numa_addr_range *range;

    QLIST_FOREACH(range, &numa_info[node].addr, entry) {
        if (addr >= range->mem_start && addr <= range->mem_end) {
            return true;
        }
    }
    return false;
}

/*
 * Given an address, return the index of the NUMA node to which the
 * address belongs.
 */
uint32_t numa_get_node(ram_addr_t addr, Error **errp)
{
    uint32_t i;

    /* For non-NUMA configurations, check if the addr falls under node 0 */
    if (!nb_numa_nodes) {
        if (numa_addr_belongs_to_node(addr, 0)) {
            return 0;
        }
    }

    for (i = 0; i < nb_numa_nodes; i++) {
        if (numa_addr_belongs_to_node(addr, i)) {
            return i;
        }
    }

    error_setg(errp, "Address 0x" RAM_ADDR_FMT " doesn't belong to any "
               "NUMA node", addr);
    return -1;
}

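/*
 * Parse one -numa node,... option group into numa_info[].  Note the
 * legacy mem= format: a suffix-less value such as mem=128 historically
 * means mebibytes, hence the shift by 20 bits below.
 */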
static void numa_node_parse(NumaNodeOptions *node, QemuOpts *opts, Error **errp)
{
    uint16_t nodenr;
    uint16List *cpus = NULL;

    if (node->has_nodeid) {
        nodenr = node->nodeid;
    } else {
        nodenr = nb_numa_nodes;
    }

    if (nodenr >= MAX_NODES) {
        error_setg(errp, "Max number of NUMA nodes reached: %"
                   PRIu16 "", nodenr);
        return;
    }

    if (numa_info[nodenr].present) {
        error_setg(errp, "Duplicate NUMA nodeid: %" PRIu16, nodenr);
        return;
    }

    for (cpus = node->cpus; cpus; cpus = cpus->next) {
        if (cpus->value >= max_cpus) {
            error_setg(errp,
                       "CPU index (%" PRIu16 ")"
                       " should be smaller than maxcpus (%d)",
                       cpus->value, max_cpus);
            return;
        }
        bitmap_set(numa_info[nodenr].node_cpu, cpus->value, 1);
    }

    if (node->has_mem && node->has_memdev) {
        error_setg(errp, "cannot specify both mem= and memdev=");
        return;
    }

    if (have_memdevs == -1) {
        have_memdevs = node->has_memdev;
    }
    if (node->has_memdev != have_memdevs) {
        error_setg(errp, "memdev option must be specified for either "
                   "all or no nodes");
        return;
    }

    if (node->has_mem) {
        uint64_t mem_size = node->mem;
        const char *mem_str = qemu_opt_get(opts, "mem");
        /* Fix up legacy suffix-less format: a plain number means MiB */
        if (g_ascii_isdigit(mem_str[strlen(mem_str) - 1])) {
            mem_size <<= 20;
        }
        numa_info[nodenr].node_mem = mem_size;
    }
    if (node->has_memdev) {
        Object *o;
        o = object_resolve_path_type(node->memdev, TYPE_MEMORY_BACKEND, NULL);
        if (!o) {
            error_setg(errp, "memdev=%s is ambiguous", node->memdev);
            return;
        }

        object_ref(o);
        numa_info[nodenr].node_mem = object_property_get_int(o, "size", NULL);
        numa_info[nodenr].node_memdev = MEMORY_BACKEND(o);
    }
    numa_info[nodenr].present = true;
    max_numa_nodeid = MAX(max_numa_nodeid, nodenr + 1);
}

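/*
 * qemu_opts_foreach() callback: convert one -numa option group into a
 * NumaOptions QAPI object and dispatch on its type.
 */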
static int parse_numa(void *opaque, QemuOpts *opts, Error **errp)
{
    NumaOptions *object = NULL;
    Error *err = NULL;

    {
        Visitor *v = opts_visitor_new(opts);
        visit_type_NumaOptions(v, NULL, &object, &err);
        visit_free(v);
    }

    if (err) {
        goto end;
    }

    switch (object->type) {
    case NUMA_OPTIONS_TYPE_NODE:
        numa_node_parse(&object->u.node, opts, &err);
        if (err) {
            goto end;
        }
        nb_numa_nodes++;
        break;
    default:
        abort();
    }

end:
    qapi_free_NumaOptions(object);
    if (err) {
        error_report_err(err);
        return -1;
    }

    return 0;
}

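/*
 * Render the bits set in @cpus as a space-separated string, e.g.
 * "0 1 5".  The caller owns (and must g_free()) the result.
 */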
static char *enumerate_cpus(unsigned long *cpus, int max_cpus)
{
    int cpu;
    bool first = true;
    GString *s = g_string_new(NULL);

    for (cpu = find_first_bit(cpus, max_cpus);
         cpu < max_cpus;
         cpu = find_next_bit(cpus, max_cpus, cpu + 1)) {
        g_string_append_printf(s, "%s%d", first ? "" : " ", cpu);
        first = false;
    }
    return g_string_free(s, FALSE);
}

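/*
 * Reject configurations that place a CPU in more than one node, and
 * warn when some CPUs below maxcpus are not covered by any node.
 */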
static void validate_numa_cpus(void)
{
    int i;
    unsigned long *seen_cpus = bitmap_new(max_cpus);

    for (i = 0; i < nb_numa_nodes; i++) {
        if (bitmap_intersects(seen_cpus, numa_info[i].node_cpu, max_cpus)) {
            bitmap_and(seen_cpus, seen_cpus,
                       numa_info[i].node_cpu, max_cpus);
            error_report("CPU(s) present in multiple NUMA nodes: %s",
                         enumerate_cpus(seen_cpus, max_cpus));
            g_free(seen_cpus);
            exit(EXIT_FAILURE);
        }
        bitmap_or(seen_cpus, seen_cpus,
                  numa_info[i].node_cpu, max_cpus);
    }

    if (!bitmap_full(seen_cpus, max_cpus)) {
        char *msg;
        bitmap_complement(seen_cpus, seen_cpus, max_cpus);
        msg = enumerate_cpus(seen_cpus, max_cpus);
        error_report("warning: CPU(s) not present in any NUMA nodes: %s", msg);
        error_report("warning: All CPU(s) up to maxcpus should be described "
                     "in NUMA config");
        g_free(msg);
    }
    g_free(seen_cpus);
}

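/*
 * Main entry point, called once at startup: parse all -numa options,
 * apply the default memory split and default CPU-to-node assignment,
 * then sanity-check the result.
 */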
void parse_numa_opts(MachineClass *mc)
{
    int i;

    for (i = 0; i < MAX_NODES; i++) {
        numa_info[i].node_cpu = bitmap_new(max_cpus);
    }

    if (qemu_opts_foreach(qemu_find_opts("numa"), parse_numa, NULL, NULL)) {
        exit(1);
    }

    assert(max_numa_nodeid <= MAX_NODES);

    /* No support for sparse NUMA node IDs yet: */
    for (i = max_numa_nodeid - 1; i >= 0; i--) {
        /* Report large node IDs first, to make mistakes easier to spot */
        if (!numa_info[i].present) {
            error_report("numa: Node ID missing: %d", i);
            exit(1);
        }
    }

    /* This must always be true if all nodes are present: */
    assert(nb_numa_nodes == max_numa_nodeid);

    if (nb_numa_nodes > 0) {
        uint64_t numa_total;

        if (nb_numa_nodes > MAX_NODES) {
            nb_numa_nodes = MAX_NODES;
        }

        /* If no memory size is given for any node, assume the default case
         * and distribute the available memory equally across all nodes
         */
        for (i = 0; i < nb_numa_nodes; i++) {
            if (numa_info[i].node_mem != 0) {
                break;
            }
        }
        if (i == nb_numa_nodes) {
            uint64_t usedmem = 0;

            /* On Linux, each node's border has to be 8MB aligned;
             * the final node gets the rest.
             */
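            /* Worked example (illustrative): with ram_size = 4 GiB and
             * three nodes, 4096 MiB / 3 rounds down to 1360 MiB after
             * 8 MiB alignment, so nodes 0 and 1 get 1360 MiB each and
             * node 2 gets the remaining 1376 MiB.
             */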
            for (i = 0; i < nb_numa_nodes - 1; i++) {
                numa_info[i].node_mem = (ram_size / nb_numa_nodes) &
                                        ~((1ULL << 23) - 1);
                usedmem += numa_info[i].node_mem;
            }
            numa_info[i].node_mem = ram_size - usedmem;
        }

        numa_total = 0;
        for (i = 0; i < nb_numa_nodes; i++) {
            numa_total += numa_info[i].node_mem;
        }
        if (numa_total != ram_size) {
            error_report("total memory for NUMA nodes (0x%" PRIx64 ")"
                         " should equal RAM size (0x" RAM_ADDR_FMT ")",
                         numa_total, ram_size);
            exit(1);
        }

        for (i = 0; i < nb_numa_nodes; i++) {
            QLIST_INIT(&numa_info[i].addr);
        }

        numa_set_mem_ranges();

        for (i = 0; i < nb_numa_nodes; i++) {
            if (!bitmap_empty(numa_info[i].node_cpu, max_cpus)) {
                break;
            }
        }
        /* Historically VCPUs were assigned in round-robin order to NUMA
         * nodes.  However, this can cause trouble for guests that do not
         * handle cores/threads of a multicore CPU appearing on different
         * nodes.  So allow boards to override the default distribution
         * rule and group VCPUs by socket, so that VCPUs from the same
         * socket end up on the same node.
         */
        if (i == nb_numa_nodes) {
            for (i = 0; i < max_cpus; i++) {
                unsigned node_id = i % nb_numa_nodes;
                if (mc->cpu_index_to_socket_id) {
                    node_id = mc->cpu_index_to_socket_id(i) % nb_numa_nodes;
                }

                set_bit(i, numa_info[node_id].node_cpu);
            }
        }

        validate_numa_cpus();
    } else {
        numa_set_mem_node_id(0, ram_size, 0);
    }
}

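/*
 * Once CPUs have been created, copy the node assignment into each
 * CPUState's numa_node field.
 */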
void numa_post_machine_init(void)
{
    CPUState *cpu;
    int i;

    CPU_FOREACH(cpu) {
        for (i = 0; i < nb_numa_nodes; i++) {
            assert(cpu->cpu_index < max_cpus);
            if (test_bit(cpu->cpu_index, numa_info[i].node_cpu)) {
                cpu->numa_node = i;
            }
        }
    }
}

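/*
 * Non-NUMA RAM allocation: back the region with -mem-path when given
 * (Linux only); on allocation failure without -mem-prealloc, fall back
 * to an ordinary RAM region.
 */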
static void allocate_system_memory_nonnuma(MemoryRegion *mr, Object *owner,
                                           const char *name,
                                           uint64_t ram_size)
{
    if (mem_path) {
#ifdef __linux__
        Error *err = NULL;
        memory_region_init_ram_from_file(mr, owner, name, ram_size, false,
                                         mem_path, &err);
        if (err) {
            error_report_err(err);
            if (mem_prealloc) {
                exit(1);
            }

            /* Legacy behavior: if allocation failed, fall back to
             * regular RAM allocation.
             */
            memory_region_init_ram(mr, owner, name, ram_size, &error_fatal);
        }
#else
        fprintf(stderr, "-mem-path not supported on this host\n");
        exit(1);
#endif
    } else {
        memory_region_init_ram(mr, owner, name, ram_size, &error_fatal);
    }
    vmstate_register_ram_global(mr);
}

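/*
 * Compose guest system memory: with per-node memdevs, map each
 * backend's region as a subregion at the node's start address;
 * otherwise delegate to the non-NUMA path above.
 */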
void memory_region_allocate_system_memory(MemoryRegion *mr, Object *owner,
                                          const char *name,
                                          uint64_t ram_size)
{
    uint64_t addr = 0;
    int i;

    if (nb_numa_nodes == 0 || !have_memdevs) {
        allocate_system_memory_nonnuma(mr, owner, name, ram_size);
        return;
    }

    memory_region_init(mr, owner, name, ram_size);
    for (i = 0; i < MAX_NODES; i++) {
        uint64_t size = numa_info[i].node_mem;
        HostMemoryBackend *backend = numa_info[i].node_memdev;
        if (!backend) {
            continue;
        }
        MemoryRegion *seg = host_memory_backend_get_memory(backend,
                                                           &error_fatal);

        if (memory_region_is_mapped(seg)) {
            char *path = object_get_canonical_path_component(OBJECT(backend));
            error_report("memory backend %s is used multiple times. Each "
                         "-numa option must use a different memdev value.",
                         path);
            exit(1);
        }

        host_memory_backend_set_mapped(backend, true);
        memory_region_add_subregion(mr, addr, seg);
        vmstate_register_ram_global(seg);
        addr += size;
    }
}

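/*
 * Add memory hotplugged via DIMM devices to the per-node totals.
 */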
static void numa_stat_memory_devices(uint64_t node_mem[])
{
    MemoryDeviceInfoList *info_list = NULL;
    MemoryDeviceInfoList **prev = &info_list;
    MemoryDeviceInfoList *info;

    qmp_pc_dimm_device_list(qdev_get_machine(), &prev);
    for (info = info_list; info; info = info->next) {
        MemoryDeviceInfo *value = info->value;

        if (value) {
            switch (value->type) {
            case MEMORY_DEVICE_INFO_KIND_DIMM:
                node_mem[value->u.dimm.data->node] += value->u.dimm.data->size;
                break;
            default:
                break;
            }
        }
    }
    qapi_free_MemoryDeviceInfoList(info_list);
}

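/*
 * Report per-node memory totals (static node RAM plus hotplugged
 * DIMMs), as used by e.g. the "info numa" monitor command.
 */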
void query_numa_node_mem(uint64_t node_mem[])
{
    int i;

    if (nb_numa_nodes <= 0) {
        return;
    }

    numa_stat_memory_devices(node_mem);
    for (i = 0; i < nb_numa_nodes; i++) {
        node_mem[i] += numa_info[i].node_mem;
    }
}

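/*
 * object_child_foreach() callback: append one MemdevList entry per
 * memory backend object found under the objects root.
 */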
static int query_memdev(Object *obj, void *opaque)
{
    MemdevList **list = opaque;
    MemdevList *m = NULL;

    if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) {
        m = g_malloc0(sizeof(*m));

        m->value = g_malloc0(sizeof(*m->value));

        m->value->id = object_property_get_str(obj, "id", NULL);
        m->value->has_id = !!m->value->id;

        m->value->size = object_property_get_int(obj, "size",
                                                 &error_abort);
        m->value->merge = object_property_get_bool(obj, "merge",
                                                   &error_abort);
        m->value->dump = object_property_get_bool(obj, "dump",
                                                  &error_abort);
        m->value->prealloc = object_property_get_bool(obj,
                                                      "prealloc",
                                                      &error_abort);
        m->value->policy = object_property_get_enum(obj,
                                                    "policy",
                                                    "HostMemPolicy",
                                                    &error_abort);
        object_property_get_uint16List(obj, "host-nodes",
                                       &m->value->host_nodes,
                                       &error_abort);

        m->next = *list;
        *list = m;
    }

    return 0;
}

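/*
 * Implements the query-memdev QMP command.  Illustrative exchange
 * (field values are made up):
 *   -> { "execute": "query-memdev" }
 *   <- { "return": [ { "id": "ram1", "size": 1073741824, ... } ] }
 */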
MemdevList *qmp_query_memdev(Error **errp)
{
    Object *obj = object_get_objects_root();
    MemdevList *list = NULL;

    object_child_foreach(obj, query_memdev, &list);
    return list;
}

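/*
 * Map a CPU index to its NUMA node.  Returns nb_numa_nodes when the
 * CPU is not assigned to any node, so callers must range-check.
 */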
int numa_get_node_for_cpu(int idx)
{
    int i;

    assert(idx < max_cpus);

    for (i = 0; i < nb_numa_nodes; i++) {
        if (test_bit(idx, numa_info[i].node_cpu)) {
            break;
        }
    }
    return i;
}

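/*
 * RAM block notifiers (see exec/ramlist.h): fire the registered
 * callbacks whenever a RAM block is added or removed.
 */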
void ram_block_notifier_add(RAMBlockNotifier *n)
{
    QLIST_INSERT_HEAD(&ram_list.ramblock_notifiers, n, next);
}

void ram_block_notifier_remove(RAMBlockNotifier *n)
{
    QLIST_REMOVE(n, next);
}

void ram_block_notify_add(void *host, size_t size)
{
    RAMBlockNotifier *notifier;

    QLIST_FOREACH(notifier, &ram_list.ramblock_notifiers, next) {
        notifier->ram_block_added(notifier, host, size);
    }
}

void ram_block_notify_remove(void *host, size_t size)
{
    RAMBlockNotifier *notifier;

    QLIST_FOREACH(notifier, &ram_list.ramblock_notifiers, next) {
        notifier->ram_block_removed(notifier, host, size);
    }
}