docker: Add EXTRA_CONFIGURE_OPTS
[qemu.git] / exec.c
1 /*
2 * Virtual page mapping
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20 #include "qapi/error.h"
21 #ifndef _WIN32
22 #include <sys/mman.h>
23 #endif
24
25 #include "qemu/cutils.h"
26 #include "cpu.h"
27 #include "exec/exec-all.h"
28 #include "tcg.h"
29 #include "hw/qdev-core.h"
30 #if !defined(CONFIG_USER_ONLY)
31 #include "hw/boards.h"
32 #include "hw/xen/xen.h"
33 #endif
34 #include "sysemu/kvm.h"
35 #include "sysemu/sysemu.h"
36 #include "qemu/timer.h"
37 #include "qemu/config-file.h"
38 #include "qemu/error-report.h"
39 #if defined(CONFIG_USER_ONLY)
40 #include <qemu.h>
41 #else /* !CONFIG_USER_ONLY */
42 #include "hw/hw.h"
43 #include "exec/memory.h"
44 #include "exec/ioport.h"
45 #include "sysemu/dma.h"
46 #include "exec/address-spaces.h"
47 #include "sysemu/xen-mapcache.h"
48 #include "trace.h"
49 #endif
50 #include "exec/cpu-all.h"
51 #include "qemu/rcu_queue.h"
52 #include "qemu/main-loop.h"
53 #include "translate-all.h"
54 #include "sysemu/replay.h"
55
56 #include "exec/memory-internal.h"
57 #include "exec/ram_addr.h"
58 #include "exec/log.h"
59
60 #include "migration/vmstate.h"
61
62 #include "qemu/range.h"
63 #ifndef _WIN32
64 #include "qemu/mmap-alloc.h"
65 #endif
66
67 //#define DEBUG_SUBPAGE
68
69 #if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

/* Root MemoryRegions of the two system address spaces; set up in
 * memory_map_init(). */
static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

/* Special dispatch targets: ROM writes, dirty-tracking writes, and
 * accesses that hit no MemoryRegion at all. */
MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAMBlock flag bits: */

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)
95 #endif
96
/* Global list of all CPUs, in creation order; walked with CPU_FOREACH(). */
struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting. */
int use_icount;
105
106 #if !defined(CONFIG_USER_ONLY)
107
typedef struct PhysPageEntry PhysPageEntry;

/* One entry of the physical-page radix tree, packed into 32 bits. */
struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

/* Sentinel "no child node" value: all 26 ptr bits set. */
#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables. */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

/* Number of radix-tree levels needed to cover ADDR_SPACE_BITS. */
#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

/* One radix-tree node: an array of P_L2_SIZE entries. */
typedef PhysPageEntry Node[P_L2_SIZE];

/* Backing storage for one generation of the dispatch radix tree; the
 * rcu_head allows delayed reclaim when a new generation is published. */
typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;        /* sections currently in use */
    unsigned sections_nb_alloc;  /* allocated capacity of @sections */
    unsigned nodes_nb;           /* nodes currently in use */
    unsigned nodes_nb_alloc;     /* allocated capacity of @nodes */
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    /* Most-recently-used section, a one-entry lookup cache. */
    MemoryRegionSection *mru_section;
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

/* Byte offset of @addr within its target page. */
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
/* Container for mappings at sub-page granularity: maps each byte
 * offset within one page to a section index. */
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

/* Well-known, always-present indices in PhysPageMap::sections. */
#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};
184
185 #endif
186
187 #if !defined(CONFIG_USER_ONLY)
188
189 static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
190 {
191 if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
192 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
193 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
194 map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
195 }
196 }
197
198 static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
199 {
200 unsigned i;
201 uint32_t ret;
202 PhysPageEntry e;
203 PhysPageEntry *p;
204
205 ret = map->nodes_nb++;
206 p = map->nodes[ret];
207 assert(ret != PHYS_MAP_NODE_NIL);
208 assert(ret != map->nodes_nb_alloc);
209
210 e.skip = leaf ? 0 : 1;
211 e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
212 for (i = 0; i < P_L2_SIZE; ++i) {
213 memcpy(&p[i], &e, sizeof(e));
214 }
215 return ret;
216 }
217
/* Recursively map [*index, *index + *nb) pages to section number @leaf
 * in the subtree rooted at @lp.  @level is the current tree level
 * (0 = bottom).  *index and *nb are advanced in place as pages are
 * consumed, so callers up the recursion continue with the remainder. */
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    /* Number of pages covered by one entry at this level. */
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    /* Lazily allocate the child node this entry points at. */
    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            /* The range fully covers this entry: make it a direct leaf
             * pointing at @leaf, skipping all lower levels. */
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            /* Partial coverage: descend one level. */
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}
243
/* Map @nb pages starting at page index @index to section number @leaf
 * in the dispatch radix tree of @d. */
static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
253
254 /* Compact a non leaf page entry. Simply detect that the entry has a single child,
255 * and update our entry so we can skip it and go directly to the destination.
256 */
257 static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
258 {
259 unsigned valid_ptr = P_L2_SIZE;
260 int valid = 0;
261 PhysPageEntry *p;
262 int i;
263
264 if (lp->ptr == PHYS_MAP_NODE_NIL) {
265 return;
266 }
267
268 p = nodes[lp->ptr];
269 for (i = 0; i < P_L2_SIZE; i++) {
270 if (p[i].ptr == PHYS_MAP_NODE_NIL) {
271 continue;
272 }
273
274 valid_ptr = i;
275 valid++;
276 if (p[i].skip) {
277 phys_page_compact(&p[i], nodes, compacted);
278 }
279 }
280
281 /* We can only compress if there's only one child. */
282 if (valid != 1) {
283 return;
284 }
285
286 assert(valid_ptr < P_L2_SIZE);
287
288 /* Don't compress if it won't fit in the # of bits we have. */
289 if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
290 return;
291 }
292
293 lp->ptr = p[valid_ptr].ptr;
294 if (!p[valid_ptr].skip) {
295 /* If our only child is a leaf, make this a leaf. */
296 /* By design, we should have made this node a leaf to begin with so we
297 * should never reach here.
298 * But since it's so simple to handle this, let's do it just in case we
299 * change this rule.
300 */
301 lp->skip = 0;
302 } else {
303 lp->skip += p[valid_ptr].skip;
304 }
305 }
306
/* Compact the whole dispatch tree of @d after it has been fully built. */
static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    /* Scratch bitmap handed to phys_page_compact().  NOTE(review): it is
     * never initialized or consulted there, so it looks vestigial. */
    DECLARE_BITMAP(compacted, nodes_nb);

    /* skip == 0 in the root means the tree is a single leaf: nothing
     * to compact. */
    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}
315
316 static inline bool section_covers_addr(const MemoryRegionSection *section,
317 hwaddr addr)
318 {
319 /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
320 * the section must cover the entire address space.
321 */
322 return section->size.hi ||
323 range_covers_byte(section->offset_within_address_space,
324 section->size.lo, addr);
325 }
326
/* Walk the radix tree rooted at @lp looking up @addr.  Returns the
 * matching MemoryRegionSection, or the unassigned section if the
 * address is not mapped. */
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    /* Descend from the top: each entry's skip says how many levels it
     * jumps over; a leaf has skip == 0 and terminates the loop. */
    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            /* Missing subtree: nothing mapped at this address. */
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    /* A compacted leaf may nominally cover more than its section does;
     * double-check the section really contains @addr. */
    if (section_covers_addr(&sections[lp.ptr], addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}
348
349 bool memory_region_is_unassigned(MemoryRegion *mr)
350 {
351 return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
352 && mr != &io_mem_watch;
353 }
354
/* Called from RCU critical section */
/* Find the MemoryRegionSection covering @addr in dispatch @d, using a
 * one-entry MRU cache before falling back to the radix-tree walk.
 * When @resolve_subpage is set, sub-page containers are resolved to the
 * section for the exact in-page offset. */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section = atomic_read(&d->mru_section);
    subpage_t *subpage;
    bool update;

    /* Fast path: the cached section, unless it is the unassigned
     * placeholder or does not cover @addr. */
    if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
        section_covers_addr(section, addr)) {
        update = false;
    } else {
        /* Slow path: full tree walk; remember to refresh the cache. */
        section = phys_page_find(d->phys_map, addr, d->map.nodes,
                                 d->map.sections);
        update = true;
    }
    if (resolve_subpage && section->mr->subpage) {
        /* Sub-page granularity: index the per-offset section table. */
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    if (update) {
        atomic_set(&d->mru_section, section);
    }
    return section;
}
381
/* Called from RCU critical section */
/* Translate @addr within dispatch @d: returns the covering section and
 * stores the offset within its MemoryRegion into *xlat.  *plen is
 * clamped to the section size for RAM only — see the comment below for
 * why MMIO accesses are deliberately not clamped here. */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}
417
/* Called from RCU critical section */
/* Translate @addr in @as to a target MemoryRegion and offset (*xlat),
 * following IOMMU indirections until a non-IOMMU region is reached.
 * *plen is clamped so the access stays within one translated unit.
 * Accesses denied by IOMMU permissions land in io_mem_unassigned. */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    /* Loop: an IOMMU's target address space may itself contain IOMMUs. */
    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        /* Combine the translated page address with the in-page offset. */
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        /* Clamp the access to the end of the IOMMU page. */
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            /* Access not permitted: route it to "unassigned". */
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        /* Under Xen, limit direct RAM accesses to one target page
         * (presumably for the map cache — see sysemu/xen-mapcache.h). */
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}
456
/* Called from RCU critical section */
/* Page-level translation of @addr for TLB filling, using the CPU's
 * cached dispatch pointer for address space @asidx.  Sub-pages are not
 * resolved (resolve_subpage == false) and IOMMU regions are not
 * supported on this path (see the assertion). */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;

    section = address_space_translate_internal(d, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
470 #endif
471
472 #if !defined(CONFIG_USER_ONLY)
473
/* vmstate .post_load hook for vmstate_cpu_common. */
static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    /* Any TLB contents carried over from before migration are stale. */
    tlb_flush(cpu, 1);

    return 0;
}
485
486 static int cpu_common_pre_load(void *opaque)
487 {
488 CPUState *cpu = opaque;
489
490 cpu->exception_index = -1;
491
492 return 0;
493 }
494
495 static bool cpu_common_exception_index_needed(void *opaque)
496 {
497 CPUState *cpu = opaque;
498
499 return tcg_enabled() && cpu->exception_index != -1;
500 }
501
/* Migration subsection for CPUState::exception_index; only sent when
 * cpu_common_exception_index_needed() returns true. */
static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
512
513 static bool cpu_common_crash_occurred_needed(void *opaque)
514 {
515 CPUState *cpu = opaque;
516
517 return cpu->crash_occurred;
518 }
519
/* Migration subsection for CPUState::crash_occurred; only sent when
 * cpu_common_crash_occurred_needed() returns true. */
static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
530
/* Migration description for architecture-independent CPU state:
 * halted and interrupt_request, plus the optional subsections for
 * exception_index and crash_occurred. */
const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};
548
549 #endif
550
551 CPUState *qemu_get_cpu(int index)
552 {
553 CPUState *cpu;
554
555 CPU_FOREACH(cpu) {
556 if (cpu->cpu_index == index) {
557 return cpu;
558 }
559 }
560
561 return NULL;
562 }
563
564 #if !defined(CONFIG_USER_ONLY)
/* Attach AddressSpace @as to @cpu as address-space number @asidx,
 * allocating the per-CPU CPUAddressSpace array on first use.  Under
 * TCG, tcg_commit is registered as the commit listener for @as. */
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
{
    CPUAddressSpace *newas;

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    if (asidx == 0) {
        /* address space 0 gets the convenience alias */
        cpu->as = as;
    }

    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
    }

    newas = &cpu->cpu_ases[asidx];
    newas->cpu = cpu;
    newas->as = as;
    if (tcg_enabled()) {
        newas->tcg_as_listener.commit = tcg_commit;
        memory_listener_register(&newas->tcg_as_listener, as);
    }
}
592
593 AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
594 {
595 /* Return the AddressSpace corresponding to the specified index */
596 return cpu->cpu_ases[asidx].as;
597 }
598 #endif
599
600 #ifndef CONFIG_USER_ONLY
601 static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);
602
603 static int cpu_get_free_index(Error **errp)
604 {
605 int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);
606
607 if (cpu >= MAX_CPUMASK_BITS) {
608 error_setg(errp, "Trying to use more CPUs than max of %d",
609 MAX_CPUMASK_BITS);
610 return -1;
611 }
612
613 bitmap_set(cpu_index_map, cpu, 1);
614 return cpu;
615 }
616
/* Return @cpu's cpu_index to the global allocation bitmap. */
static void cpu_release_index(CPUState *cpu)
{
    bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
}
621 #else
622
623 static int cpu_get_free_index(Error **errp)
624 {
625 CPUState *some_cpu;
626 int cpu_index = 0;
627
628 CPU_FOREACH(some_cpu) {
629 cpu_index++;
630 }
631 return cpu_index;
632 }
633
/* User-mode: indices are never recycled, so releasing is a no-op. */
static void cpu_release_index(CPUState *cpu)
{
    return;
}
638 #endif
639
/* Undo cpu_exec_init(): unlink @cpu from the global list, release its
 * cpu_index and unregister its vmstate.  Safe to call when the index
 * was never allocated or has already been freed. */
void cpu_exec_exit(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    if (cpu->cpu_index == -1) {
        /* cpu_index was never allocated by this @cpu or was already freed. */
#if defined(CONFIG_USER_ONLY)
        cpu_list_unlock();
#endif
        return;
    }

    QTAILQ_REMOVE(&cpus, cpu, node);
    cpu_release_index(cpu);
    /* Mark the index as freed so a second call is a no-op. */
    cpu->cpu_index = -1;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif

    if (cc->vmsd != NULL) {
        vmstate_unregister(NULL, cc->vmsd, cpu);
    }
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
    }
}
669
/* Register a new @cpu: allocate its cpu_index, link it into the global
 * CPU list, and (softmmu only) create its "memory" property and
 * register its vmstate.  On failure *@errp is set and the CPU is not
 * added to the list. */
void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    Error *local_err = NULL;

    cpu->as = NULL;
    cpu->num_ases = 0;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();

    /* This is a softmmu CPU object, so create a property for it
     * so users can wire up its memory. (This can't go in qom/cpu.c
     * because that file is compiled only once for both user-mode
     * and system builds.) The default if no link is set up is to use
     * the system address space.
     */
    object_property_add_link(OBJECT(cpu), "memory", TYPE_MEMORY_REGION,
                             (Object **)&cpu->memory,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);
    cpu->memory = system_memory;
    object_ref(OBJECT(cpu->memory));
#endif

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu->cpu_index = cpu_get_free_index(&local_err);
    if (local_err) {
        error_propagate(errp, local_err);
#if defined(CONFIG_USER_ONLY)
        cpu_list_unlock();
#endif
        return;
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    (void) cc;  /* cc is only used on the softmmu path below */
    cpu_list_unlock();
#else
    /* Register common and per-class vmstate unless the device model
     * provides its own (qdev_get_vmsd() non-NULL). */
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
    }
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu);
    }
#endif
}
720
#if defined(CONFIG_USER_ONLY)
/* Invalidate any translated block containing guest pc @pc. */
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
/* Invalidate any translated block containing guest pc @pc.  Softmmu:
 * translate pc to a physical address first; if the page is unmapped
 * (phys == -1) there is nothing to invalidate. */
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    MemTxAttrs attrs;
    hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif
738
739 #if defined(CONFIG_USER_ONLY)
/* User-mode emulation: watchpoints are not supported.  These stubs keep
 * callers linking; insert/remove report -ENOSYS, the removal helpers
 * do nothing. */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)

{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
760 #else
/* Add a watchpoint covering [addr, addr + len) with BP_* @flags.
 * On success stores the new watchpoint in *@watchpoint (if non-NULL)
 * and returns 0; returns -EINVAL for an empty or wrapping range. */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    /* Drop the TLB entry covering @addr so the watchpoint is noticed. */
    tlb_flush_page(cpu, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
792
793 /* Remove a specific watchpoint. */
794 int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
795 int flags)
796 {
797 CPUWatchpoint *wp;
798
799 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
800 if (addr == wp->vaddr && len == wp->len
801 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
802 cpu_watchpoint_remove_by_ref(cpu, wp);
803 return 0;
804 }
805 }
806 return -ENOENT;
807 }
808
809 /* Remove a specific watchpoint by reference. */
810 void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
811 {
812 QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);
813
814 tlb_flush_page(cpu, watchpoint->vaddr);
815
816 g_free(watchpoint);
817 }
818
819 /* Remove all matching watchpoints. */
820 void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
821 {
822 CPUWatchpoint *wp, *next;
823
824 QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
825 if (wp->flags & mask) {
826 cpu_watchpoint_remove_by_ref(cpu, wp);
827 }
828 }
829 }
830
831 /* Return true if this watchpoint address matches the specified
832 * access (ie the address range covered by the watchpoint overlaps
833 * partially or completely with the address range covered by the
834 * access).
835 */
836 static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
837 vaddr addr,
838 vaddr len)
839 {
840 /* We know the lengths are non-zero, but a little caution is
841 * required to avoid errors in the case where the range ends
842 * exactly at the top of the address space and so addr + len
843 * wraps round to zero.
844 */
845 vaddr wpend = wp->vaddr + wp->len - 1;
846 vaddr addrend = addr + len - 1;
847
848 return !(addr > wpend || wp->vaddr > addrend);
849 }
850
851 #endif
852
853 /* Add a breakpoint. */
854 int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
855 CPUBreakpoint **breakpoint)
856 {
857 CPUBreakpoint *bp;
858
859 bp = g_malloc(sizeof(*bp));
860
861 bp->pc = pc;
862 bp->flags = flags;
863
864 /* keep all GDB-injected breakpoints in front */
865 if (flags & BP_GDB) {
866 QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
867 } else {
868 QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
869 }
870
871 breakpoint_invalidate(cpu, pc);
872
873 if (breakpoint) {
874 *breakpoint = bp;
875 }
876 return 0;
877 }
878
879 /* Remove a specific breakpoint. */
880 int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
881 {
882 CPUBreakpoint *bp;
883
884 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
885 if (bp->pc == pc && bp->flags == flags) {
886 cpu_breakpoint_remove_by_ref(cpu, bp);
887 return 0;
888 }
889 }
890 return -ENOENT;
891 }
892
893 /* Remove a specific breakpoint by reference. */
894 void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
895 {
896 QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);
897
898 breakpoint_invalidate(cpu, breakpoint->pc);
899
900 g_free(breakpoint);
901 }
902
903 /* Remove all matching breakpoints. */
904 void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
905 {
906 CPUBreakpoint *bp, *next;
907
908 QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
909 if (bp->flags & mask) {
910 cpu_breakpoint_remove_by_ref(cpu, bp);
911 }
912 }
913 }
914
915 /* enable or disable single step mode. EXCP_DEBUG is returned by the
916 CPU loop after each instruction */
917 void cpu_single_step(CPUState *cpu, int enabled)
918 {
919 if (cpu->singlestep_enabled != enabled) {
920 cpu->singlestep_enabled = enabled;
921 if (kvm_enabled()) {
922 kvm_update_guest_debug(cpu, 0);
923 } else {
924 /* must flush all the translated code to avoid inconsistencies */
925 /* XXX: only flush what is necessary */
926 tb_flush(cpu);
927 }
928 }
929 }
930
/* Report a fatal error: print the formatted message and CPU state to
 * stderr (and to the QEMU log if it is a separate file), then abort().
 * Never returns. */
void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);  /* second copy for the log-file pass below */
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    replay_finish();
#if defined(CONFIG_USER_ONLY)
    {
        /* Restore default SIGABRT handling so the abort() below
         * terminates the process. */
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
963
964 #if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
/* Find the RAMBlock containing ram_addr_t @addr, consulting an MRU
 * cache first.  Aborts if @addr lies in no block. */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *                  rcu_read_lock()
     *                  read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *                  rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}
1003
/* Reset TLB dirty tracking on every CPU for the guest RAM range
 * [start, start + length).  The whole range must lie within a single
 * RAMBlock (see the assertion below). */
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    /* Expand the range to whole target pages. */
    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    /* Convert to the host address that TLB entries are keyed on. */
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}
1023
/* Note: start and end must be within the same ram block. */
/* Atomically test and clear the dirty bits of @client for the range
 * [start, start + length).  Returns true if any page in the range was
 * dirty; when running under TCG, also re-arms TLB dirty tracking. */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    bool dirty = false;

    if (length == 0) {
        return false;
    }

    /* Page-number bounds of the range (end is exclusive). */
    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    while (page < end) {
        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        /* Process at most one DirtyMemoryBlocks chunk per iteration. */
        unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);

        dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
                                              offset, num);
        page += num;
    }

    rcu_read_unlock();

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}
1062
/* Called from RCU critical section */
/* Compute the iotlb value for a TLB entry covering @vaddr:
 *  - for RAM sections: the ram_addr of the page, OR'd with the
 *    NOTDIRTY or ROM special-section index;
 *  - otherwise: the section's index in the dispatch map plus @xlat.
 * If a watchpoint overlaps the page, the WATCH section is used instead
 * and *address gets TLB_MMIO so the access takes the slow path. */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM. */
        iotlb = memory_region_get_ram_addr(section->mr) + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines. */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
1105 #endif /* defined(CONFIG_USER_ONLY) */
1106
1107 #if !defined(CONFIG_USER_ONLY)
1108
1109 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
1110 uint16_t section);
1111 static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
1112
/* Allocator used for guest RAM; defaults to anonymous mmap. */
static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}
1125
/* Append a copy of @section to @map's section array, growing it as
 * needed, and return the new index.  Takes a reference on the
 * MemoryRegion (released in phys_section_destroy()). */
static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}
1144
1145 static void phys_section_destroy(MemoryRegion *mr)
1146 {
1147 bool have_sub_page = mr->subpage;
1148
1149 memory_region_unref(mr);
1150
1151 if (have_sub_page) {
1152 subpage_t *subpage = container_of(mr, subpage_t, iomem);
1153 object_unref(OBJECT(&subpage->iomem));
1154 g_free(subpage);
1155 }
1156 }
1157
1158 static void phys_sections_free(PhysPageMap *map)
1159 {
1160 while (map->sections_nb > 0) {
1161 MemoryRegionSection *section = &map->sections[--map->sections_nb];
1162 phys_section_destroy(section->mr);
1163 }
1164 g_free(map->sections);
1165 g_free(map->nodes);
1166 }
1167
/* Register a sub-page-sized @section in dispatch @d: the target page
 * containing it gets (or reuses) a subpage_t container, and the section
 * is entered into that container's per-byte slot table. */
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    /* Start of the target page that contains this section. */
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    /* Section describing the whole container page, used if we must
     * create a new subpage below. */
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    /* The page must either already host a subpage or still be unassigned. */
    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        /* First sub-page section on this page: create the container and
         * map the page to it. */
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    /* Enter the section over its byte range within the page. */
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}
1197
1198
1199 static void register_multipage(AddressSpaceDispatch *d,
1200 MemoryRegionSection *section)
1201 {
1202 hwaddr start_addr = section->offset_within_address_space;
1203 uint16_t section_index = phys_section_add(&d->map, section);
1204 uint64_t num_pages = int128_get64(int128_rshift(section->size,
1205 TARGET_PAGE_BITS));
1206
1207 assert(num_pages);
1208 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
1209 }
1210
/* MemoryListener region_add hook: split @section into an unaligned head,
 * a run of whole pages, and an unaligned tail, registering each piece as
 * either a subpage or a multipage mapping in the new dispatch. */
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    /* Unaligned head: register the part up to the next page boundary
     * as a subpage. */
    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    /* Consume the remainder: short or unaligned chunks go to subpages,
     * page-aligned whole-page runs to register_multipage. */
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            /* Round the size down to a whole number of pages. */
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}
1243
/* Flush KVM's coalesced-MMIO ring buffer, if that accelerator is in use. */
void qemu_flush_coalesced_mmio_buffer(void)
{
    /* QEMU coding style requires braces even for single-statement bodies. */
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}
1249
/* Acquire the mutex protecting the global RAM block list. */
void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}
1254
/* Release the mutex protecting the global RAM block list. */
void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}
1259
1260 #ifdef __linux__
/*
 * Allocate guest RAM backed by a file (e.g. hugetlbfs), for -mem-path.
 *
 * @block:  the RAMBlock being set up; its fd and mr->align are updated
 * @memory: requested size in bytes, rounded up to the backing page size
 * @path:   an existing file, a file to create, or a directory in which a
 *          temporary backing file is created (and immediately unlinked)
 * @errp:   set on failure
 *
 * Returns the mapped host address, or NULL on failure.  A file this
 * function created itself is unlinked again on the error path.
 */
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path,
                            Error **errp)
{
    bool unlink_on_error = false;
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd = -1;
    int64_t page_size;

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        error_setg(errp,
                   "host lacks kvm mmu notifiers, -mem-path unsupported");
        return NULL;
    }

    for (;;) {
        fd = open(path, O_RDWR);
        if (fd >= 0) {
            /* @path names an existing file, use it */
            break;
        }
        if (errno == ENOENT) {
            /* @path names a file that doesn't exist, create it */
            fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
            if (fd >= 0) {
                unlink_on_error = true;
                break;
            }
        } else if (errno == EISDIR) {
            /* @path names a directory, create a file there */
            /* Make name safe to use with mkstemp by replacing '/' with '_'. */
            sanitized_name = g_strdup(memory_region_name(block->mr));
            for (c = sanitized_name; *c != '\0'; c++) {
                if (*c == '/') {
                    *c = '_';
                }
            }

            filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                                       sanitized_name);
            g_free(sanitized_name);

            fd = mkstemp(filename);
            if (fd >= 0) {
                /* The fd keeps the file alive; no need for the name. */
                unlink(filename);
                g_free(filename);
                break;
            }
            g_free(filename);
        }
        if (errno != EEXIST && errno != EINTR) {
            error_setg_errno(errp, errno,
                             "can't open backing store %s for guest RAM",
                             path);
            goto error;
        }
        /*
         * Try again on EINTR and EEXIST.  The latter happens when
         * something else creates the file between our two open().
         */
    }

    /* Align the mapping to the backing file's page size (e.g. huge pages). */
    page_size = qemu_fd_getpagesize(fd);
    block->mr->align = MAX(page_size, QEMU_VMALLOC_ALIGN);

    if (memory < page_size) {
        error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
                   "or larger than page size 0x%" PRIx64,
                   memory, page_size);
        goto error;
    }

    memory = ROUND_UP(memory, page_size);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory)) {
        perror("ftruncate");
    }

    area = qemu_ram_mmap(fd, memory, block->mr->align,
                         block->flags & RAM_SHARED);
    if (area == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "unable to map backing store for guest RAM");
        goto error;
    }

    if (mem_prealloc) {
        os_mem_prealloc(fd, area, memory);
    }

    block->fd = fd;
    return area;

error:
    if (unlink_on_error) {
        unlink(path);
    }
    if (fd != -1) {
        close(fd);
    }
    return NULL;
}
1373 #endif
1374
/* Called with the ramlist lock held.
 *
 * Find the start of the smallest gap between existing RAM blocks that can
 * hold @size bytes (best-fit); aborts if no gap is large enough.
 */
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
        return 0;
    }

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        /* End of this block; find the closest block starting after it. */
        end = block->offset + block->max_length;

        QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        /* Keep the tightest gap that still fits the request. */
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}
1411
1412 ram_addr_t last_ram_offset(void)
1413 {
1414 RAMBlock *block;
1415 ram_addr_t last = 0;
1416
1417 rcu_read_lock();
1418 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1419 last = MAX(last, block->offset + block->max_length);
1420 }
1421 rcu_read_unlock();
1422 return last;
1423 }
1424
1425 static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1426 {
1427 int ret;
1428
1429 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
1430 if (!machine_dump_guest_core(current_machine)) {
1431 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1432 if (ret) {
1433 perror("qemu_madvise");
1434 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1435 "but dump_guest_core=off specified\n");
1436 }
1437 }
1438 }
1439
/* Return the id string of @rb (set via qemu_ram_set_idstr()). */
const char *qemu_ram_get_idstr(RAMBlock *rb)
{
    return rb->idstr;
}
1444
/* Called with iothread lock held. */
/* Set @new_block's id string to "<dev path>/<name>" (or just @name when
 * there is no device path) and abort on a duplicate id. */
void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
{
    RAMBlock *block;

    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        /* Prefix with the owning device's qdev path, if it has one. */
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* Ids must be unique; duplicates would make blocks indistinguishable. */
    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (block != new_block &&
            !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    rcu_read_unlock();
}
1473
1474 /* Called with iothread lock held. */
1475 void qemu_ram_unset_idstr(RAMBlock *block)
1476 {
1477 /* FIXME: arch_init.c assumes that this is not called throughout
1478 * migration. Ignore the problem since hot-unplug during migration
1479 * does not work anyway.
1480 */
1481 if (block) {
1482 memset(block->idstr, 0, sizeof(block->idstr));
1483 }
1484 }
1485
1486 static int memory_try_enable_merging(void *addr, size_t len)
1487 {
1488 if (!machine_mem_merge(current_machine)) {
1489 /* disabled by the user */
1490 return 0;
1491 }
1492
1493 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1494 }
1495
/* Only legal before guest might have detected the memory size: e.g. on
 * incoming migration, or right after reset.
 *
 * As memory core doesn't know how is memory accessed, it is up to
 * resize callback to update device state and/or add assertions to detect
 * misuse, if necessary.
 *
 * Returns 0 on success, -EINVAL if the block is not resizeable or
 * @newsize exceeds its maximum length.
 */
int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
{
    assert(block);

    newsize = HOST_PAGE_ALIGN(newsize);

    if (block->used_length == newsize) {
        return 0;
    }

    if (!(block->flags & RAM_RESIZEABLE)) {
        error_setg_errno(errp, EINVAL,
                         "Length mismatch: %s: 0x" RAM_ADDR_FMT
                         " in != 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->used_length);
        return -EINVAL;
    }

    if (block->max_length < newsize) {
        error_setg_errno(errp, EINVAL,
                         "Length too large: %s: 0x" RAM_ADDR_FMT
                         " > 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->max_length);
        return -EINVAL;
    }

    /* Clear old dirty state, then mark the new range fully dirty so all
     * clients (VGA, migration, code) see the resized contents. */
    cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
    block->used_length = newsize;
    cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
                                        DIRTY_CLIENTS_ALL);
    memory_region_set_size(block->mr, newsize);
    if (block->resized) {
        /* Notify the owner so device state can follow the new size. */
        block->resized(block->idstr, newsize, block->host);
    }
    return 0;
}
1539
/* Called with ram_list.mutex held */
/* Grow the per-client dirty bitmap block arrays so they cover
 * @new_ram_size pages, publishing the new arrays via RCU. */
static void dirty_memory_extend(ram_addr_t old_ram_size,
                                ram_addr_t new_ram_size)
{
    ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
                                             DIRTY_MEMORY_BLOCK_SIZE);
    ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
                                             DIRTY_MEMORY_BLOCK_SIZE);
    int i;

    /* Only need to extend if block count increased */
    if (new_num_blocks <= old_num_blocks) {
        return;
    }

    for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
        DirtyMemoryBlocks *old_blocks;
        DirtyMemoryBlocks *new_blocks;
        int j;

        old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
        new_blocks = g_malloc(sizeof(*new_blocks) +
                              sizeof(new_blocks->blocks[0]) * new_num_blocks);

        /* Carry over the existing bitmap pointers... */
        if (old_num_blocks) {
            memcpy(new_blocks->blocks, old_blocks->blocks,
                   old_num_blocks * sizeof(old_blocks->blocks[0]));
        }

        /* ...and allocate fresh bitmaps for the newly covered range. */
        for (j = old_num_blocks; j < new_num_blocks; j++) {
            new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
        }

        atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);

        /* Free the old array only after a grace period. */
        if (old_blocks) {
            g_free_rcu(old_blocks, rcu);
        }
    }
}
1580
/* Insert @new_block into the global RAM list, allocating its host memory
 * first if it has none (via Xen or phys_mem_alloc).  Takes the ramlist
 * mutex; on failure sets @errp and leaves the block uninserted. */
static void ram_block_add(RAMBlock *new_block, Error **errp)
{
    RAMBlock *block;
    RAMBlock *last_block = NULL;
    ram_addr_t old_ram_size, new_ram_size;
    Error *err = NULL;

    old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    qemu_mutex_lock_ramlist();
    new_block->offset = find_ram_offset(new_block->max_length);

    if (!new_block->host) {
        if (xen_enabled()) {
            xen_ram_alloc(new_block->offset, new_block->max_length,
                          new_block->mr, &err);
            if (err) {
                error_propagate(errp, err);
                qemu_mutex_unlock_ramlist();
                return;
            }
        } else {
            new_block->host = phys_mem_alloc(new_block->max_length,
                                             &new_block->mr->align);
            if (!new_block->host) {
                error_setg_errno(errp, errno,
                                 "cannot set up guest memory '%s'",
                                 memory_region_name(new_block->mr));
                qemu_mutex_unlock_ramlist();
                return;
            }
            memory_try_enable_merging(new_block->host, new_block->max_length);
        }
    }

    /* Grow the dirty-tracking structures if the address space grew. */
    new_ram_size = MAX(old_ram_size,
              (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
    if (new_ram_size > old_ram_size) {
        migration_bitmap_extend(old_ram_size, new_ram_size);
        dirty_memory_extend(old_ram_size, new_ram_size);
    }
    /* Keep the list sorted from biggest to smallest block.  Unlike QTAILQ,
     * QLIST (which has an RCU-friendly variant) does not have insertion at
     * tail, so save the last element in last_block.
     */
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        last_block = block;
        if (block->max_length < new_block->max_length) {
            break;
        }
    }
    if (block) {
        QLIST_INSERT_BEFORE_RCU(block, new_block, next);
    } else if (last_block) {
        QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
    } else { /* list is empty */
        QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    /* Write list before version */
    smp_wmb();
    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    /* New memory starts fully dirty for every dirty-memory client. */
    cpu_physical_memory_set_dirty_range(new_block->offset,
                                        new_block->used_length,
                                        DIRTY_CLIENTS_ALL);

    if (new_block->host) {
        qemu_ram_setup_dump(new_block->host, new_block->max_length);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
        if (kvm_enabled()) {
            kvm_setup_guest_memory(new_block->host, new_block->max_length);
        }
    }
}
1659
1660 #ifdef __linux__
/* Allocate a file-backed RAMBlock of @size bytes under @mem_path
 * (see file_ram_alloc()).  Not supported under Xen or with accelerators
 * that install a custom phys_mem_alloc.  Returns NULL and sets @errp
 * on failure. */
RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                   bool share, const char *mem_path,
                                   Error **errp)
{
    RAMBlock *new_block;
    Error *local_err = NULL;

    if (xen_enabled()) {
        error_setg(errp, "-mem-path not supported with Xen");
        return NULL;
    }

    if (phys_mem_alloc != qemu_anon_ram_alloc) {
        /*
         * file_ram_alloc() needs to allocate just like
         * phys_mem_alloc, but we haven't bothered to provide
         * a hook there.
         */
        error_setg(errp,
                   "-mem-path not supported with this accelerator");
        return NULL;
    }

    size = HOST_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->used_length = size;
    new_block->max_length = size;
    new_block->flags = share ? RAM_SHARED : 0;
    new_block->host = file_ram_alloc(new_block, size,
                                     mem_path, errp);
    if (!new_block->host) {
        g_free(new_block);
        return NULL;
    }

    ram_block_add(new_block, &local_err);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return NULL;
    }
    return new_block;
}
1705 #endif
1706
/* Common worker for the qemu_ram_alloc* family.
 *
 * @size:       initial (used) length, host-page aligned here
 * @max_size:   maximum length; must be >= @size
 * @resized:    callback invoked on qemu_ram_resize(), or NULL
 * @host:       caller-provided backing memory (RAM_PREALLOC), or NULL to
 *              let ram_block_add() allocate it
 * @resizeable: whether the block may later be resized
 *
 * Returns the new RAMBlock, or NULL with @errp set on failure.
 */
static
RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
                                  void (*resized)(const char*,
                                                  uint64_t length,
                                                  void *host),
                                  void *host, bool resizeable,
                                  MemoryRegion *mr, Error **errp)
{
    RAMBlock *new_block;
    Error *local_err = NULL;

    size = HOST_PAGE_ALIGN(size);
    max_size = HOST_PAGE_ALIGN(max_size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->resized = resized;
    new_block->used_length = size;
    new_block->max_length = max_size;
    assert(max_size >= size);
    new_block->fd = -1;
    new_block->host = host;
    if (host) {
        new_block->flags |= RAM_PREALLOC;
    }
    if (resizeable) {
        new_block->flags |= RAM_RESIZEABLE;
    }
    ram_block_add(new_block, &local_err);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return NULL;
    }
    return new_block;
}
1742
/* Allocate a fixed-size RAMBlock backed by caller-provided memory @host. */
RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
}
1748
/* Allocate a fixed-size, anonymously backed RAMBlock. */
RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
}
1753
/* Allocate a resizeable RAMBlock: initially @size bytes used, growable up
 * to @maxsz; @resized is called on each qemu_ram_resize(). */
RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
                                     void (*resized)(const char*,
                                                     uint64_t length,
                                                     void *host),
                                     MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
}
1762
/* Release a RAMBlock and its host backing.  Invoked through call_rcu()
 * from qemu_ram_free(). */
static void reclaim_ramblock(RAMBlock *block)
{
    if (block->flags & RAM_PREALLOC) {
        ; /* backing memory is owned by the caller; nothing to free */
    } else if (xen_enabled()) {
        xen_invalidate_map_cache_entry(block->host);
#ifndef _WIN32
    } else if (block->fd >= 0) {
        /* file-backed (e.g. -mem-path): unmap and close the backing fd */
        qemu_ram_munmap(block->host, block->max_length);
        close(block->fd);
#endif
    } else {
        qemu_anon_ram_free(block->host, block->max_length);
    }
    g_free(block);
}
1779
/* Unlink @block from the RAM list and schedule its reclamation via RCU.
 * A NULL @block is a no-op. */
void qemu_ram_free(RAMBlock *block)
{
    if (!block) {
        return;
    }

    qemu_mutex_lock_ramlist();
    QLIST_REMOVE_RCU(block, next);
    ram_list.mru_block = NULL;
    /* Write list before version */
    smp_wmb();
    ram_list.version++;
    /* Defer the actual free until RCU readers are done with the list. */
    call_rcu(block, reclaim_ramblock, rcu);
    qemu_mutex_unlock_ramlist();
}
1795
1796 #ifndef _WIN32
/* Re-establish the host mapping of the guest RAM range [addr, addr+length)
 * at its original virtual address (MAP_FIXED), matching how the block was
 * originally allocated (file-backed vs anonymous).  Exits on failure. */
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->max_length) {
            vaddr = ramblock_ptr(block, offset);
            if (block->flags & RAM_PREALLOC) {
                ; /* caller-owned memory is never remapped here */
            } else if (xen_enabled()) {
                abort();
            } else {
                flags = MAP_FIXED;
                if (block->fd >= 0) {
                    flags |= (block->flags & RAM_SHARED ?
                              MAP_SHARED : MAP_PRIVATE);
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                } else {
                    /*
                     * Remap needs to match alloc.  Accelerators that
                     * set phys_mem_alloc never remap.  If they did,
                     * we'd need a remap hook here.
                     */
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);

                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                /* Re-apply the madvise settings lost by the remap. */
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
        }
    }
}
1843 #endif /* !_WIN32 */
1844
/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * This should not be used for general purpose DMA.  Use address_space_map
 * or address_space_rw instead.  For local memory (e.g. video ram) that the
 * device owns, use memory_region_get_ram_ptr.
 *
 * If @ram_block is NULL, @addr is taken to be a global ram_addr_t and the
 * containing block is looked up; otherwise @addr is an offset into
 * @ram_block.
 *
 * Called within RCU critical section.
 */
void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
{
    RAMBlock *block = ram_block;

    if (block == NULL) {
        block = qemu_get_ram_block(addr);
        /* Convert the global address into a block-relative offset. */
        addr -= block->offset;
    }

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        }

        block->host = xen_map_cache(block->offset, block->max_length, 1);
    }
    return ramblock_ptr(block, addr);
}
1874
/* Return a host pointer to guest's ram.  Similar to qemu_map_ram_ptr
 * but takes a size argument: on return *size is clipped to the bytes
 * actually available in the block starting at @addr.
 *
 * Called within RCU critical section.
 */
static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
                                 hwaddr *size)
{
    RAMBlock *block = ram_block;
    if (*size == 0) {
        return NULL;
    }

    if (block == NULL) {
        /* @addr is a global ram_addr_t; resolve it to a block + offset. */
        block = qemu_get_ram_block(addr);
        addr -= block->offset;
    }
    *size = MIN(*size, block->max_length - addr);

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map the requested area.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, *size, 1);
        }

        block->host = xen_map_cache(block->offset, block->max_length, 1);
    }

    return ramblock_ptr(block, addr);
}
1908
1909 /*
1910 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
1911 * in that RAMBlock.
1912 *
1913 * ptr: Host pointer to look up
1914 * round_offset: If true round the result offset down to a page boundary
1915 * *ram_addr: set to result ram_addr
1916 * *offset: set to result offset within the RAMBlock
1917 *
1918 * Returns: RAMBlock (or NULL if not found)
1919 *
1920 * By the time this function returns, the returned pointer is not protected
1921 * by RCU anymore. If the caller is not within an RCU critical section and
1922 * does not hold the iothread lock, it must have other means of protecting the
1923 * pointer, such as a reference to the region that includes the incoming
1924 * ram_addr_t.
1925 */
1926 RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
1927 ram_addr_t *offset)
1928 {
1929 RAMBlock *block;
1930 uint8_t *host = ptr;
1931
1932 if (xen_enabled()) {
1933 ram_addr_t ram_addr;
1934 rcu_read_lock();
1935 ram_addr = xen_ram_addr_from_mapcache(ptr);
1936 block = qemu_get_ram_block(ram_addr);
1937 if (block) {
1938 *offset = (host - block->host);
1939 }
1940 rcu_read_unlock();
1941 return block;
1942 }
1943
1944 rcu_read_lock();
1945 block = atomic_rcu_read(&ram_list.mru_block);
1946 if (block && block->host && host - block->host < block->max_length) {
1947 goto found;
1948 }
1949
1950 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1951 /* This case append when the block is not mapped. */
1952 if (block->host == NULL) {
1953 continue;
1954 }
1955 if (host - block->host < block->max_length) {
1956 goto found;
1957 }
1958 }
1959
1960 rcu_read_unlock();
1961 return NULL;
1962
1963 found:
1964 *offset = (host - block->host);
1965 if (round_offset) {
1966 *offset &= TARGET_PAGE_MASK;
1967 }
1968 rcu_read_unlock();
1969 return block;
1970 }
1971
1972 /*
1973 * Finds the named RAMBlock
1974 *
1975 * name: The name of RAMBlock to find
1976 *
1977 * Returns: RAMBlock (or NULL if not found)
1978 */
1979 RAMBlock *qemu_ram_block_by_name(const char *name)
1980 {
1981 RAMBlock *block;
1982
1983 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1984 if (!strcmp(name, block->idstr)) {
1985 return block;
1986 }
1987 }
1988
1989 return NULL;
1990 }
1991
1992 /* Some of the softmmu routines need to translate from a host pointer
1993 (typically a TLB entry) back to a ram offset. */
1994 ram_addr_t qemu_ram_addr_from_host(void *ptr)
1995 {
1996 RAMBlock *block;
1997 ram_addr_t offset;
1998
1999 block = qemu_ram_block_from_host(ptr, false, &offset);
2000 if (!block) {
2001 return RAM_ADDR_INVALID;
2002 }
2003
2004 return block->offset + offset;
2005 }
2006
/* Called within RCU critical section. */
/* Write handler for the "notdirty" region: writes to pages that are clean
 * for DIRTY_MEMORY_CODE land here so cached TBs can be invalidated before
 * the store is performed on the underlying RAM. */
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
    }
    /* Perform the actual store on the RAM backing. */
    switch (size) {
    case 1:
        stb_p(qemu_map_ram_ptr(NULL, ram_addr), val);
        break;
    case 2:
        stw_p(qemu_map_ram_ptr(NULL, ram_addr), val);
        break;
    case 4:
        stl_p(qemu_map_ram_ptr(NULL, ram_addr), val);
        break;
    default:
        abort();
    }
    /* Set both VGA and migration bits for simplicity and to remove
     * the notdirty callback faster.
     */
    cpu_physical_memory_set_dirty_range(ram_addr, size,
                                        DIRTY_CLIENTS_NOCODE);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
    }
}
2038
/* The notdirty region only accepts writes; reads are rejected. */
static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)
{
    return is_write;
}
2044
/* Ops for io_mem_notdirty: write-only (see notdirty_mem_accepts). */
static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
2050
/* Generate a debug exception if a watchpoint has been hit.
 * @offset is the page offset of the access, @len its size in bytes,
 * @flags is BP_MEM_READ or BP_MEM_WRITE. */
static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
{
    CPUState *cpu = current_cpu;
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUArchState *env = cpu->env_ptr;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    uint32_t cpu_flags;

    if (cpu->watchpoint_hit) {
        /* We re-entered the check after replacing the TB.  Now raise
         * the debug interrupt so that is will trigger after the
         * current instruction. */
        cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
        return;
    }
    /* Reconstruct the full virtual address of the access. */
    vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, len)
            && (wp->flags & flags)) {
            if (flags == BP_MEM_READ) {
                wp->flags |= BP_WATCHPOINT_HIT_READ;
            } else {
                wp->flags |= BP_WATCHPOINT_HIT_WRITE;
            }
            wp->hitaddr = vaddr;
            wp->hitattrs = attrs;
            if (!cpu->watchpoint_hit) {
                /* Give the CPU class a chance to veto (e.g. unmatched
                 * architectural conditions). */
                if (wp->flags & BP_CPU &&
                    !cc->debug_check_watchpoint(cpu, wp)) {
                    wp->flags &= ~BP_WATCHPOINT_HIT;
                    continue;
                }
                cpu->watchpoint_hit = wp;
                tb_check_watchpoint(cpu);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    cpu->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(cpu);
                } else {
                    /* Retranslate the current TB so it stops after this
                     * instruction, then restart execution. */
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(cpu, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
2102
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  Only 1, 2 and 4 byte accesses are handled; anything
   else aborts. */
static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
                                  unsigned size, MemTxAttrs attrs)
{
    MemTxResult res;
    uint64_t data;
    int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
    AddressSpace *as = current_cpu->cpu_ases[asidx].as;

    /* May longjmp out via cpu_loop_exit() if a watchpoint fires. */
    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
    switch (size) {
    case 1:
        data = address_space_ldub(as, addr, attrs, &res);
        break;
    case 2:
        data = address_space_lduw(as, addr, attrs, &res);
        break;
    case 4:
        data = address_space_ldl(as, addr, attrs, &res);
        break;
    default: abort();
    }
    *pdata = data;
    return res;
}
2130
/* Write counterpart of watch_mem_read(): check for a watchpoint hit, then
 * forward the store to the CPU's address space. */
static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
                                   uint64_t val, unsigned size,
                                   MemTxAttrs attrs)
{
    MemTxResult res;
    int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
    AddressSpace *as = current_cpu->cpu_ases[asidx].as;

    /* May longjmp out via cpu_loop_exit() if a watchpoint fires. */
    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
    switch (size) {
    case 1:
        address_space_stb(as, addr, val, attrs, &res);
        break;
    case 2:
        address_space_stw(as, addr, val, attrs, &res);
        break;
    case 4:
        address_space_stl(as, addr, val, attrs, &res);
        break;
    default: abort();
    }
    return res;
}
2154
/* Ops for io_mem_watch: pages containing watchpoints are routed here. */
static const MemoryRegionOps watch_mem_ops = {
    .read_with_attrs = watch_mem_read,
    .write_with_attrs = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
2160
/* Read from a subpage container: forward the access to the container's
 * address space at base + addr, then extract the value from the bounce
 * buffer at the requested width. */
static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
                                unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];
    MemTxResult res;

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    res = address_space_read(subpage->as, addr + subpage->base,
                             attrs, buf, len);
    if (res) {
        return res;
    }
    switch (len) {
    case 1:
        *data = ldub_p(buf);
        return MEMTX_OK;
    case 2:
        *data = lduw_p(buf);
        return MEMTX_OK;
    case 4:
        *data = ldl_p(buf);
        return MEMTX_OK;
    case 8:
        *data = ldq_p(buf);
        return MEMTX_OK;
    default:
        abort();
    }
}
2194
/* Write to a subpage container: serialise @value into a bounce buffer at
 * the requested width, then forward it to the container's address space
 * at base + addr. */
static MemTxResult subpage_write(void *opaque, hwaddr addr,
                                 uint64_t value, unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    switch (len) {
    case 1:
        stb_p(buf, value);
        break;
    case 2:
        stw_p(buf, value);
        break;
    case 4:
        stl_p(buf, value);
        break;
    case 8:
        stq_p(buf, value);
        break;
    default:
        abort();
    }
    return address_space_write(subpage->as, addr + subpage->base,
                               attrs, buf, len);
}
2225
2226 static bool subpage_accepts(void *opaque, hwaddr addr,
2227 unsigned len, bool is_write)
2228 {
2229 subpage_t *subpage = opaque;
2230 #if defined(DEBUG_SUBPAGE)
2231 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
2232 __func__, subpage, is_write ? 'w' : 'r', len, addr);
2233 #endif
2234
2235 return address_space_access_valid(subpage->as, addr + subpage->base,
2236 len, is_write);
2237 }
2238
/* Ops for subpage container regions: accesses of 1-8 bytes are forwarded
 * to the registered sub-sections via bounce buffers. */
static const MemoryRegionOps subpage_ops = {
    .read_with_attrs = subpage_read,
    .write_with_attrs = subpage_write,
    .impl.min_access_size = 1,
    .impl.max_access_size = 8,
    .valid.min_access_size = 1,
    .valid.max_access_size = 8,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
2249
2250 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2251 uint16_t section)
2252 {
2253 int idx, eidx;
2254
2255 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2256 return -1;
2257 idx = SUBPAGE_IDX(start);
2258 eidx = SUBPAGE_IDX(end);
2259 #if defined(DEBUG_SUBPAGE)
2260 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2261 __func__, mmio, start, end, idx, eidx, section);
2262 #endif
2263 for (; idx <= eidx; idx++) {
2264 mmio->sub_section[idx] = section;
2265 }
2266
2267 return 0;
2268 }
2269
/* Allocate and initialise a subpage container covering the target page at
 * @base in address space @as; every slot starts out unassigned. */
static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->as = as;
    mmio->base = base;
    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          NULL, TARGET_PAGE_SIZE);
    /* Mark the region so register_subpage()/phys_section_destroy() can
     * recognise it as a subpage container. */
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);

    return mmio;
}
2289
2290 static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2291 MemoryRegion *mr)
2292 {
2293 assert(as);
2294 MemoryRegionSection section = {
2295 .address_space = as,
2296 .mr = mr,
2297 .offset_within_address_space = 0,
2298 .offset_within_region = 0,
2299 .size = int128_2_64(),
2300 };
2301
2302 return phys_section_add(map, &section);
2303 }
2304
2305 MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
2306 {
2307 int asidx = cpu_asidx_from_attrs(cpu, attrs);
2308 CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
2309 AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
2310 MemoryRegionSection *sections = d->map.sections;
2311
2312 return sections[index & ~TARGET_PAGE_MASK].mr;
2313 }
2314
/* Create the four special catch-all I/O regions, each spanning the full
 * 64-bit range with no opaque state; they back the dummy phys sections
 * registered by mem_begin(). */
static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
                          NULL, UINT64_MAX);
}
2325
/* MemoryListener .begin hook: start building a fresh dispatch table for
 * the address space; mem_add() fills it in and mem_commit() publishes it. */
static void mem_begin(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
    uint16_t n;

    /* The first four sections must be added in exactly this order so that
     * their indices match the PHYS_SECTION_* constants; the asserts
     * enforce that. */
    n = dummy_section(&d->map, as, &io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&d->map, as, &io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&d->map, as, &io_mem_rom);
    assert(n == PHYS_SECTION_ROM);
    n = dummy_section(&d->map, as, &io_mem_watch);
    assert(n == PHYS_SECTION_WATCH);

    /* Empty radix tree root; mem_add() populates it. */
    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
    d->as = as;
    as->next_dispatch = d;
}
2345
/* RCU reclaim callback: free a retired dispatch table and its sections. */
static void address_space_dispatch_free(AddressSpaceDispatch *d)
{
    phys_sections_free(&d->map);
    g_free(d);
}
2351
/* MemoryListener .commit hook: publish the dispatch table built since
 * mem_begin() and retire the previous one.  The old table is freed only
 * after an RCU grace period because readers under rcu_read_lock() may
 * still be walking it. */
static void mem_commit(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *cur = as->dispatch;
    AddressSpaceDispatch *next = as->next_dispatch;

    /* Compact the freshly built table before making it visible. */
    phys_page_compact_all(next, next->map.nodes_nb);

    atomic_rcu_set(&as->dispatch, next);
    if (cur) {
        call_rcu(cur, address_space_dispatch_free, rcu);
    }
}
2365
/* MemoryListener .commit hook for a TCG CPU's address space: refresh the
 * CPU's cached dispatch pointer and flush its TLB after a memory map
 * change. */
static void tcg_commit(MemoryListener *listener)
{
    CPUAddressSpace *cpuas;
    AddressSpaceDispatch *d;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
    cpu_reloading_memory_map();
    /* The CPU and TLB are protected by the iothread lock.
     * We reload the dispatch pointer now because cpu_reloading_memory_map()
     * may have split the RCU critical section.
     */
    d = atomic_rcu_read(&cpuas->as->dispatch);
    cpuas->memory_dispatch = d;
    tlb_flush(cpuas->cpu, 1);
}
2383
/* Hook @as into the memory listener machinery so that topology changes
 * are mirrored into its dispatch table via mem_begin()/mem_add()/
 * mem_commit(); registration also triggers an initial rebuild. */
void address_space_init_dispatch(AddressSpace *as)
{
    as->dispatch = NULL;
    as->dispatch_listener = (MemoryListener) {
        .begin = mem_begin,
        .commit = mem_commit,
        .region_add = mem_add,
        .region_nop = mem_add,
        .priority = 0,
    };
    memory_listener_register(&as->dispatch_listener, as);
}
2396
/* Stop mirroring memory topology changes into @as's dispatch table. */
void address_space_unregister(AddressSpace *as)
{
    memory_listener_unregister(&as->dispatch_listener);
}
2401
/* Detach @as's dispatch table and schedule it for freeing after an RCU
 * grace period, since concurrent readers may still hold a reference. */
void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    atomic_rcu_set(&as->dispatch, NULL);
    if (d) {
        call_rcu(d, address_space_dispatch_free, rcu);
    }
}
2411
/* Allocate and initialize the global "system" memory and "I/O" address
 * spaces: system memory spans the full 64-bit range, the I/O space covers
 * the traditional 64K port range backed by unassigned_io_ops. */
static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));

    memory_region_init(system_memory, NULL, "system", UINT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
                          65536);
    address_space_init(&address_space_io, system_io, "I/O");
}
2424
/* Accessor for the global system memory region created by memory_map_init(). */
MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

/* Accessor for the global I/O-port region created by memory_map_init(). */
MemoryRegion *get_system_io(void)
{
    return system_io;
}
2434
2435 #endif /* !defined(CONFIG_USER_ONLY) */
2436
2437 /* physical memory access (slow version, mainly for debug) */
2438 #if defined(CONFIG_USER_ONLY)
2439 int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
2440 uint8_t *buf, int len, int is_write)
2441 {
2442 int l, flags;
2443 target_ulong page;
2444 void * p;
2445
2446 while (len > 0) {
2447 page = addr & TARGET_PAGE_MASK;
2448 l = (page + TARGET_PAGE_SIZE) - addr;
2449 if (l > len)
2450 l = len;
2451 flags = page_get_flags(page);
2452 if (!(flags & PAGE_VALID))
2453 return -1;
2454 if (is_write) {
2455 if (!(flags & PAGE_WRITE))
2456 return -1;
2457 /* XXX: this code should not depend on lock_user */
2458 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2459 return -1;
2460 memcpy(p, buf, l);
2461 unlock_user(p, addr, l);
2462 } else {
2463 if (!(flags & PAGE_READ))
2464 return -1;
2465 /* XXX: this code should not depend on lock_user */
2466 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2467 return -1;
2468 memcpy(buf, p, l);
2469 unlock_user(p, addr, 0);
2470 }
2471 len -= l;
2472 buf += l;
2473 addr += l;
2474 }
2475 return 0;
2476 }
2477
2478 #else
2479
/* After a direct write of @length bytes at offset @addr within RAM-backed
 * region @mr: invalidate any translated blocks over the range (when code
 * dirty tracking is active) and update the remaining dirty bitmaps per
 * the region's dirty log mask. */
static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
                                     hwaddr length)
{
    uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    /* Convert region offset to a global ram_addr_t. */
    addr += memory_region_get_ram_addr(mr);

    /* No early return if dirty_log_mask is or becomes 0, because
     * cpu_physical_memory_set_dirty_range will still call
     * xen_modified_memory.
     */
    if (dirty_log_mask) {
        dirty_log_mask =
            cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
    }
    if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_range(addr, addr + length);
        /* CODE is handled by the TB invalidation above, not the bitmap. */
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
    }
    cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
}
2500
2501 static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
2502 {
2503 unsigned access_size_max = mr->ops->valid.max_access_size;
2504
2505 /* Regions are assumed to support 1-4 byte accesses unless
2506 otherwise specified. */
2507 if (access_size_max == 0) {
2508 access_size_max = 4;
2509 }
2510
2511 /* Bound the maximum access by the alignment of the address. */
2512 if (!mr->ops->impl.unaligned) {
2513 unsigned align_size_max = addr & -addr;
2514 if (align_size_max != 0 && align_size_max < access_size_max) {
2515 access_size_max = align_size_max;
2516 }
2517 }
2518
2519 /* Don't attempt accesses larger than the maximum. */
2520 if (l > access_size_max) {
2521 l = access_size_max;
2522 }
2523 l = pow2floor(l);
2524
2525 return l;
2526 }
2527
/* Prepare to dispatch an MMIO access to @mr: take the iothread lock if
 * the region requires global locking and the caller does not already
 * hold it, and flush the coalesced MMIO buffer if the region demands it.
 * Returns true iff the caller must release the iothread lock afterwards. */
static bool prepare_mmio_access(MemoryRegion *mr)
{
    bool unlocked = !qemu_mutex_iothread_locked();
    bool release_lock = false;

    if (unlocked && mr->global_locking) {
        qemu_mutex_lock_iothread();
        unlocked = false;
        release_lock = true;
    }
    if (mr->flush_coalesced_mmio) {
        /* Flushing coalesced MMIO happens under the iothread lock; take
         * it temporarily if we still don't hold it. */
        if (unlocked) {
            qemu_mutex_lock_iothread();
        }
        qemu_flush_coalesced_mmio_buffer();
        if (unlocked) {
            qemu_mutex_unlock_iothread();
        }
    }

    return release_lock;
}
2550
/* Write @len bytes from @buf to guest address @addr, continuing from an
 * already-translated first fragment (@addr1, @l within @mr).  MMIO
 * fragments are dispatched in the widest accesses the region accepts;
 * direct RAM fragments are memcpy'd and marked dirty.  Accumulates and
 * returns the combined MemTxResult.
 *
 * Called within RCU critical section.  */
static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
                                                MemTxAttrs attrs,
                                                const uint8_t *buf,
                                                int len, hwaddr addr1,
                                                hwaddr l, MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, true)) {
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            /* XXX: could force current_cpu to NULL to avoid
               potential bugs */
            switch (l) {
            case 8:
                /* 64 bit write access */
                val = ldq_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 8,
                                                       attrs);
                break;
            case 4:
                /* 32 bit write access */
                val = ldl_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 4,
                                                       attrs);
                break;
            case 2:
                /* 16 bit write access */
                val = lduw_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 2,
                                                       attrs);
                break;
            case 1:
                /* 8 bit write access */
                val = ldub_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 1,
                                                       attrs);
                break;
            default:
                abort();
            }
        } else {
            /* RAM case */
            ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(mr, addr1, l);
        }

        /* Drop the iothread lock taken for an MMIO fragment before
         * moving on to the next fragment. */
        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        /* Translate the next fragment. */
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
    }

    return result;
}
2623
2624 MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2625 const uint8_t *buf, int len)
2626 {
2627 hwaddr l;
2628 hwaddr addr1;
2629 MemoryRegion *mr;
2630 MemTxResult result = MEMTX_OK;
2631
2632 if (len > 0) {
2633 rcu_read_lock();
2634 l = len;
2635 mr = address_space_translate(as, addr, &addr1, &l, true);
2636 result = address_space_write_continue(as, addr, attrs, buf, len,
2637 addr1, l, mr);
2638 rcu_read_unlock();
2639 }
2640
2641 return result;
2642 }
2643
/* Read @len bytes into @buf from guest address @addr, continuing from an
 * already-translated first fragment (@addr1, @l within @mr).  MMIO
 * fragments are dispatched in the widest accesses the region accepts;
 * direct RAM fragments are memcpy'd.  Accumulates and returns the
 * combined MemTxResult.
 *
 * Called within RCU critical section.  */
MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
                                        MemTxAttrs attrs, uint8_t *buf,
                                        int len, hwaddr addr1, hwaddr l,
                                        MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, false)) {
            /* I/O case */
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            switch (l) {
            case 8:
                /* 64 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 8,
                                                      attrs);
                stq_p(buf, val);
                break;
            case 4:
                /* 32 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 4,
                                                      attrs);
                stl_p(buf, val);
                break;
            case 2:
                /* 16 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 2,
                                                      attrs);
                stw_p(buf, val);
                break;
            case 1:
                /* 8 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 1,
                                                      attrs);
                stb_p(buf, val);
                break;
            default:
                abort();
            }
        } else {
            /* RAM case */
            ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
            memcpy(buf, ptr, l);
        }

        /* Drop the iothread lock taken for an MMIO fragment before
         * moving on to the next fragment. */
        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        /* Translate the next fragment. */
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
    }

    return result;
}
2713
2714 MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
2715 MemTxAttrs attrs, uint8_t *buf, int len)
2716 {
2717 hwaddr l;
2718 hwaddr addr1;
2719 MemoryRegion *mr;
2720 MemTxResult result = MEMTX_OK;
2721
2722 if (len > 0) {
2723 rcu_read_lock();
2724 l = len;
2725 mr = address_space_translate(as, addr, &addr1, &l, false);
2726 result = address_space_read_continue(as, addr, attrs, buf, len,
2727 addr1, l, mr);
2728 rcu_read_unlock();
2729 }
2730
2731 return result;
2732 }
2733
2734 MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2735 uint8_t *buf, int len, bool is_write)
2736 {
2737 if (is_write) {
2738 return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
2739 } else {
2740 return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
2741 }
2742 }
2743
/* Legacy helper: read or write guest physical memory through the global
 * system memory address space with unspecified transaction attributes;
 * the MemTxResult is discarded. */
void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
                     buf, len, is_write);
}
2750
/* Operation selector for cpu_physical_memory_write_rom_internal(). */
enum write_rom_type {
    WRITE_DATA,    /* copy the buffer into the RAM/ROM backing and mark dirty */
    FLUSH_CACHE,   /* only flush the host instruction cache over the range */
};
2755
/* Walk [addr, addr + len) in @as: for each fragment backed by RAM or
 * ROMD, either copy data from @buf (WRITE_DATA) or flush the host
 * instruction cache over it (FLUSH_CACHE).  Writes go straight to the
 * host backing pointer, so read-only (ROM) regions are written too.
 * Fragments that are neither RAM nor ROMD are skipped. */
static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            /* Not RAM/ROMD: nothing to do, just advance past it. */
            l = memory_access_size(mr, l, addr1);
        } else {
            /* ROM/RAM case */
            ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
            switch (type) {
            case WRITE_DATA:
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(mr, addr1, l);
                break;
            case FLUSH_CACHE:
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
                break;
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
    rcu_read_unlock();
}
2791
/* Used for ROM loading: can write into both RAM and ROM, bypassing
 * read-only protection of the target region. */
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len)
{
    cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
}
2798
2799 void cpu_flush_icache_range(hwaddr start, int len)
2800 {
2801 /*
2802 * This function should do the same thing as an icache flush that was
2803 * triggered from within the guest. For TCG we are always cache coherent,
2804 * so there is no need to flush anything. For KVM / Xen we need to flush
2805 * the host's instruction cache at least.
2806 */
2807 if (tcg_enabled()) {
2808 return;
2809 }
2810
2811 cpu_physical_memory_write_rom_internal(&address_space_memory,
2812 start, NULL, len, FLUSH_CACHE);
2813 }
2814
/* Staging buffer used by address_space_map() when the target is not
 * directly accessible RAM: data is bounced through this buffer and, for
 * write mappings, written back in address_space_unmap().  There is a
 * single global instance, so only one mapping may use it at a time. */
typedef struct {
    MemoryRegion *mr;  /* region being mapped (holds a reference) */
    void *buffer;      /* page-aligned staging memory */
    hwaddr addr;       /* guest address the mapping covers */
    hwaddr len;        /* length of the staged data */
    bool in_use;       /* claimed flag, accessed with atomics */
} BounceBuffer;

static BounceBuffer bounce;
2824
/* A caller waiting for the bounce buffer to become free; its bottom half
 * is scheduled from cpu_notify_map_clients_locked(). */
typedef struct MapClient {
    QEMUBH *bh;
    QLIST_ENTRY(MapClient) link;
} MapClient;

/* Protects map_client_list below. */
QemuMutex map_client_list_lock;
static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
2833
/* Remove @client from map_client_list and free it.  Caller must hold
 * map_client_list_lock. */
static void cpu_unregister_map_client_do(MapClient *client)
{
    QLIST_REMOVE(client, link);
    g_free(client);
}
2839
2840 static void cpu_notify_map_clients_locked(void)
2841 {
2842 MapClient *client;
2843
2844 while (!QLIST_EMPTY(&map_client_list)) {
2845 client = QLIST_FIRST(&map_client_list);
2846 qemu_bh_schedule(client->bh);
2847 cpu_unregister_map_client_do(client);
2848 }
2849 }
2850
/* Register @bh to be scheduled when the bounce buffer is released so the
 * caller can retry address_space_map().  If the buffer is currently
 * free, all pending clients (including this one) are notified right
 * away. */
void cpu_register_map_client(QEMUBH *bh)
{
    MapClient *client = g_malloc(sizeof(*client));

    qemu_mutex_lock(&map_client_list_lock);
    client->bh = bh;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    if (!atomic_read(&bounce.in_use)) {
        cpu_notify_map_clients_locked();
    }
    qemu_mutex_unlock(&map_client_list_lock);
}
2863
/* One-time global initialization of the memory subsystem: RAM list lock,
 * the special I/O regions, the system memory/IO address spaces, and the
 * map-client lock.  io_mem_init() must run before memory_map_init(),
 * which builds the first dispatch tables referencing those regions (see
 * mem_begin()). */
void cpu_exec_init_all(void)
{
    qemu_mutex_init(&ram_list.mutex);
    io_mem_init();
    memory_map_init();
    qemu_mutex_init(&map_client_list_lock);
}
2871
2872 void cpu_unregister_map_client(QEMUBH *bh)
2873 {
2874 MapClient *client;
2875
2876 qemu_mutex_lock(&map_client_list_lock);
2877 QLIST_FOREACH(client, &map_client_list, link) {
2878 if (client->bh == bh) {
2879 cpu_unregister_map_client_do(client);
2880 break;
2881 }
2882 }
2883 qemu_mutex_unlock(&map_client_list_lock);
2884 }
2885
/* Take the list lock and wake every registered map client. */
static void cpu_notify_map_clients(void)
{
    qemu_mutex_lock(&map_client_list_lock);
    cpu_notify_map_clients_locked();
    qemu_mutex_unlock(&map_client_list_lock);
}
2892
2893 bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2894 {
2895 MemoryRegion *mr;
2896 hwaddr l, xlat;
2897
2898 rcu_read_lock();
2899 while (len > 0) {
2900 l = len;
2901 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2902 if (!memory_access_is_direct(mr, is_write)) {
2903 l = memory_access_size(mr, l, addr);
2904 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
2905 return false;
2906 }
2907 }
2908
2909 len -= l;
2910 addr += l;
2911 }
2912 rcu_read_unlock();
2913 return true;
2914 }
2915
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    void *ptr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    rcu_read_lock();
    mr = address_space_translate(as, addr, &xlat, &l, is_write);

    if (!memory_access_is_direct(mr, is_write)) {
        /* Not directly accessible RAM: fall back to the single global
         * bounce buffer.  If it is already claimed, fail; the caller can
         * wait for it via cpu_register_map_client(). */
        if (atomic_xchg(&bounce.in_use, true)) {
            rcu_read_unlock();
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            /* Pre-fill the buffer for a read mapping; a write mapping is
             * written back in address_space_unmap(). */
            address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
                               bounce.buffer, l);
        }

        rcu_read_unlock();
        *plen = l;
        return bounce.buffer;
    }


    base = xlat;

    /* Extend the mapping for as long as translation stays contiguous
     * within the same MemoryRegion. */
    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    /* Reference dropped in address_space_unmap(). */
    memory_region_ref(mr);
    *plen = done;
    ptr = qemu_ram_ptr_length(mr->ram_block, base, plen);
    rcu_read_unlock();

    return ptr;
}
2989
/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        /* Direct RAM mapping: mark the written range dirty and drop the
         * region reference taken in address_space_map(). */
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = memory_region_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            invalidate_and_set_dirty(mr, addr1, access_len);
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    /* Bounce-buffer mapping: write back what the caller produced, then
     * release the buffer and wake anyone waiting to map. */
    if (is_write) {
        address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
                            bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    /* Clear in_use with a barrier before notifying waiters. */
    atomic_mb_set(&bounce.in_use, false);
    cpu_notify_map_clients();
}
3022
/* Convenience wrappers for address_space_map()/address_space_unmap() on
 * the global system memory address space. */
void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}
3035
/* warning: addr must be aligned */
/* Load a 32-bit value from guest physical @addr in the requested byte
 * order.  Direct RAM is read through the host pointer with the matching
 * ldl_*_p helper; anything else goes through the MMIO dispatch path, and
 * the result is byte-swapped when the requested endianness differs from
 * the target's. */
static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}
3091
/* 32-bit physical loads in native, little and big endian byte order. */
uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}
3112
/* Legacy 32-bit loads: unspecified attributes, transaction result dropped. */
uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
3127
/* warning: addr must be aligned */
/* Load a 64-bit value from guest physical @addr in the requested byte
 * order; same structure as address_space_ldl_internal() above. */
static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}
3184
/* 64-bit physical loads in native, little and big endian byte order. */
uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}
3205
/* Legacy 64-bit loads: unspecified attributes, transaction result dropped. */
uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
3220
3221 /* XXX: optimize */
3222 uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
3223 MemTxAttrs attrs, MemTxResult *result)
3224 {
3225 uint8_t val;
3226 MemTxResult r;
3227
3228 r = address_space_rw(as, addr, attrs, &val, 1, 0);
3229 if (result) {
3230 *result = r;
3231 }
3232 return val;
3233 }
3234
/* Legacy byte load: unspecified attributes, transaction result dropped. */
uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
3239
/* warning: addr must be aligned */
/* Load a 16-bit value from guest physical @addr in the requested byte
 * order; same structure as address_space_ldl_internal() above. */
static inline uint32_t address_space_lduw_internal(AddressSpace *as,
                                                   hwaddr addr,
                                                   MemTxAttrs attrs,
                                                   MemTxResult *result,
                                                   enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}
3297
/* 16-bit physical loads in native, little and big endian byte order. */
uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_BIG_ENDIAN);
}
3318
/* Legacy 16-bit loads: unspecified attributes, transaction result dropped. */
uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
3333
/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
                                MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    uint8_t dirty_log_mask;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        stl_p(ptr, val);

        /* Record all requested dirty bits except CODE: skipping CODE is
         * what keeps the translated blocks over this page alive. */
        dirty_log_mask = memory_region_get_dirty_log_mask(mr);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
        cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
                                            4, dirty_log_mask);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}
3373
/* Legacy wrapper: 32-bit store without dirty marking or TB invalidation,
 * unspecified attributes, transaction result dropped. */
void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
3378
/* warning: addr must be aligned */
/* Store a 32-bit value to guest physical @addr in the requested byte
 * order.  Direct RAM is written through the host pointer with the
 * matching stl_*_p helper and then marked dirty; anything else goes
 * through the MMIO dispatch path, byte-swapping @val first when the
 * requested endianness differs from the target's. */
static inline void address_space_stl_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 4);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}
3434
/* 32-bit physical stores in native, little and big endian byte order. */
void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}
3455
/* Legacy 32-bit stores: unspecified attributes, transaction result dropped. */
void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
3470
3471 /* XXX: optimize */
3472 void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3473 MemTxAttrs attrs, MemTxResult *result)
3474 {
3475 uint8_t v = val;
3476 MemTxResult r;
3477
3478 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3479 if (result) {
3480 *result = r;
3481 }
3482 }
3483
/*
 * Legacy convenience wrapper: single-byte store with unspecified
 * transaction attributes and no error reporting.
 */
void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
3488
/*
 * Store a 16-bit value to a guest physical address in the requested
 * device byte order.
 *
 * warning: addr must be aligned.
 *
 * @as:     address space to write into
 * @addr:   guest physical address of the 16-bit slot
 * @val:    value to store (only the low 16 bits are used)
 * @attrs:  memory transaction attributes for the access
 * @result: if non-NULL, receives the MemTxResult of the access
 * @endian: byte order to store in (native/little/big)
 */
static inline void address_space_stw_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;   /* bytes needed; translate may shrink this at a boundary */
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    /* Hold the RCU read lock across translation and the access itself. */
    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
        /* MMIO case: dispatch through the region's write callback. */
        release_lock |= prepare_mmio_access(mr);

        /*
         * The dispatch path takes the value in the target's native byte
         * order, so swap when the requested endianness differs from it.
         */
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
    } else {
        /* RAM case: write directly through the host mapping. */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        /* Keep dirty tracking (migration, TCG) aware of this write. */
        invalidate_and_set_dirty(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    /* Drop the iothread lock only if prepare_mmio_access() took it. */
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}
3543
/*
 * Store a 16-bit value at @addr in @as using the target CPU's native
 * byte order.  The transaction status is returned through *@result
 * when @result is non-NULL.
 */
void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}
3550
/*
 * Store a 16-bit value at @addr in @as in little-endian byte order.
 * The transaction status is returned through *@result when @result
 * is non-NULL.
 */
void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}
3557
/*
 * Store a 16-bit value at @addr in @as in big-endian byte order.
 * The transaction status is returned through *@result when @result
 * is non-NULL.
 */
void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}
3564
3565 void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)</