exec: hide mr->ram_addr from qemu_get_ram_ptr users
1 /*
2 * Virtual page mapping
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20 #include "qapi/error.h"
21 #ifndef _WIN32
22 #include <sys/mman.h>
23 #endif
24
25 #include "qemu/cutils.h"
26 #include "cpu.h"
27 #include "exec/exec-all.h"
28 #include "tcg.h"
29 #include "hw/qdev-core.h"
30 #if !defined(CONFIG_USER_ONLY)
31 #include "hw/boards.h"
32 #include "hw/xen/xen.h"
33 #endif
34 #include "sysemu/kvm.h"
35 #include "sysemu/sysemu.h"
36 #include "qemu/timer.h"
37 #include "qemu/config-file.h"
38 #include "qemu/error-report.h"
39 #if defined(CONFIG_USER_ONLY)
40 #include <qemu.h>
41 #else /* !CONFIG_USER_ONLY */
42 #include "hw/hw.h"
43 #include "exec/memory.h"
44 #include "exec/ioport.h"
45 #include "sysemu/dma.h"
46 #include "exec/address-spaces.h"
47 #include "sysemu/xen-mapcache.h"
48 #include "trace.h"
49 #endif
50 #include "exec/cpu-all.h"
51 #include "qemu/rcu_queue.h"
52 #include "qemu/main-loop.h"
53 #include "translate-all.h"
54 #include "sysemu/replay.h"
55
56 #include "exec/memory-internal.h"
57 #include "exec/ram_addr.h"
58 #include "exec/log.h"
59
60 #include "qemu/range.h"
61 #ifndef _WIN32
62 #include "qemu/mmap-alloc.h"
63 #endif
64
65 //#define DEBUG_SUBPAGE
66
67 #if !defined(CONFIG_USER_ONLY)
68 /* ram_list is read under rcu_read_lock()/rcu_read_unlock(). Writes
69 * are protected by the ramlist lock.
70 */
71 RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
72
73 static MemoryRegion *system_memory;
74 static MemoryRegion *system_io;
75
76 AddressSpace address_space_io;
77 AddressSpace address_space_memory;
78
79 MemoryRegion io_mem_rom, io_mem_notdirty;
80 static MemoryRegion io_mem_unassigned;
81
82 /* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
83 #define RAM_PREALLOC (1 << 0)
84
85 /* RAM is mmap-ed with MAP_SHARED */
86 #define RAM_SHARED (1 << 1)
87
88 /* Only a portion of RAM (used_length) is actually used and migrated.
89 * This used_length can change across reboots.
90 */
91 #define RAM_RESIZEABLE (1 << 2)
92
93 #endif
94
95 struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
96 /* current CPU in the current thread. It is only valid inside
97 cpu_exec() */
98 __thread CPUState *current_cpu;
99 /* 0 = Do not count executed instructions.
100 1 = Precise instruction counting.
101 2 = Adaptive rate instruction counting. */
102 int use_icount;
103
104 #if !defined(CONFIG_USER_ONLY)
105
106 typedef struct PhysPageEntry PhysPageEntry;
107
108 struct PhysPageEntry {
110 /* How many bits to skip to the next level (in units of L2_SIZE). 0 for a leaf. */
110 uint32_t skip : 6;
111 /* index into phys_sections (!skip) or phys_map_nodes (skip) */
112 uint32_t ptr : 26;
113 };
114
115 #define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
116
117 /* Size of the L2 (and L3, etc) page tables. */
118 #define ADDR_SPACE_BITS 64
119
120 #define P_L2_BITS 9
121 #define P_L2_SIZE (1 << P_L2_BITS)
122
123 #define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
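/* A quick worked example (assuming the common TARGET_PAGE_BITS of 12): the
 * map must resolve 64 - 12 = 52 bits of page index in P_L2_BITS = 9 bit
 * chunks, so P_L2_LEVELS = ((64 - 12 - 1) / 9) + 1 = 6 levels of
 * P_L2_SIZE = 512 entries each.
 */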
124
125 typedef PhysPageEntry Node[P_L2_SIZE];
126
127 typedef struct PhysPageMap {
128 struct rcu_head rcu;
129
130 unsigned sections_nb;
131 unsigned sections_nb_alloc;
132 unsigned nodes_nb;
133 unsigned nodes_nb_alloc;
134 Node *nodes;
135 MemoryRegionSection *sections;
136 } PhysPageMap;
137
138 struct AddressSpaceDispatch {
139 struct rcu_head rcu;
140
141 MemoryRegionSection *mru_section;
142 /* This is a multi-level map on the physical address space.
143 * The bottom level has pointers to MemoryRegionSections.
144 */
145 PhysPageEntry phys_map;
146 PhysPageMap map;
147 AddressSpace *as;
148 };
149
150 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
151 typedef struct subpage_t {
152 MemoryRegion iomem;
153 AddressSpace *as;
154 hwaddr base;
155 uint16_t sub_section[TARGET_PAGE_SIZE];
156 } subpage_t;
157
158 #define PHYS_SECTION_UNASSIGNED 0
159 #define PHYS_SECTION_NOTDIRTY 1
160 #define PHYS_SECTION_ROM 2
161 #define PHYS_SECTION_WATCH 3
162
163 static void io_mem_init(void);
164 static void memory_map_init(void);
165 static void tcg_commit(MemoryListener *listener);
166
167 static MemoryRegion io_mem_watch;
168
169 /**
170 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
171 * @cpu: the CPU whose AddressSpace this is
172 * @as: the AddressSpace itself
173 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
174 * @tcg_as_listener: listener for tracking changes to the AddressSpace
175 */
176 struct CPUAddressSpace {
177 CPUState *cpu;
178 AddressSpace *as;
179 struct AddressSpaceDispatch *memory_dispatch;
180 MemoryListener tcg_as_listener;
181 };
182
183 #endif
184
185 #if !defined(CONFIG_USER_ONLY)
186
187 static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
188 {
189 if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
190 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
191 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
192 map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
193 }
194 }
195
196 static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
197 {
198 unsigned i;
199 uint32_t ret;
200 PhysPageEntry e;
201 PhysPageEntry *p;
202
203 ret = map->nodes_nb++;
204 p = map->nodes[ret];
205 assert(ret != PHYS_MAP_NODE_NIL);
206 assert(ret != map->nodes_nb_alloc);
207
208 e.skip = leaf ? 0 : 1;
209 e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
210 for (i = 0; i < P_L2_SIZE; ++i) {
211 memcpy(&p[i], &e, sizeof(e));
212 }
213 return ret;
214 }
215
216 static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
217 hwaddr *index, hwaddr *nb, uint16_t leaf,
218 int level)
219 {
220 PhysPageEntry *p;
221 hwaddr step = (hwaddr)1 << (level * P_L2_BITS);
222
223 if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
224 lp->ptr = phys_map_node_alloc(map, level == 0);
225 }
226 p = map->nodes[lp->ptr];
227 lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];
228
229 while (*nb && lp < &p[P_L2_SIZE]) {
230 if ((*index & (step - 1)) == 0 && *nb >= step) {
231 lp->skip = 0;
232 lp->ptr = leaf;
233 *index += step;
234 *nb -= step;
235 } else {
236 phys_page_set_level(map, lp, index, nb, leaf, level - 1);
237 }
238 ++lp;
239 }
240 }
241
242 static void phys_page_set(AddressSpaceDispatch *d,
243 hwaddr index, hwaddr nb,
244 uint16_t leaf)
245 {
246 /* Wildly overreserve - it doesn't matter much. */
247 phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);
248
249 phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
250 }
251
252 /* Compact a non-leaf page entry. Simply detect that the entry has a single child,
253 * and update our entry so we can skip it and go directly to the destination.
254 */
255 static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
256 {
257 unsigned valid_ptr = P_L2_SIZE;
258 int valid = 0;
259 PhysPageEntry *p;
260 int i;
261
262 if (lp->ptr == PHYS_MAP_NODE_NIL) {
263 return;
264 }
265
266 p = nodes[lp->ptr];
267 for (i = 0; i < P_L2_SIZE; i++) {
268 if (p[i].ptr == PHYS_MAP_NODE_NIL) {
269 continue;
270 }
271
272 valid_ptr = i;
273 valid++;
274 if (p[i].skip) {
275 phys_page_compact(&p[i], nodes, compacted);
276 }
277 }
278
279 /* We can only compress if there's only one child. */
280 if (valid != 1) {
281 return;
282 }
283
284 assert(valid_ptr < P_L2_SIZE);
285
286 /* Don't compress if it won't fit in the # of bits we have. */
287 if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
288 return;
289 }
290
291 lp->ptr = p[valid_ptr].ptr;
292 if (!p[valid_ptr].skip) {
293 /* If our only child is a leaf, make this a leaf. */
294 /* By design, we should have made this node a leaf to begin with so we
295 * should never reach here.
296 * But since it's so simple to handle this, let's do it just in case we
297 * change this rule.
298 */
299 lp->skip = 0;
300 } else {
301 lp->skip += p[valid_ptr].skip;
302 }
303 }
304
305 static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
306 {
307 DECLARE_BITMAP(compacted, nodes_nb);
308
309 if (d->phys_map.skip) {
310 phys_page_compact(&d->phys_map, d->map.nodes, compacted);
311 }
312 }
313
314 static inline bool section_covers_addr(const MemoryRegionSection *section,
315 hwaddr addr)
316 {
317 /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
318 * the section must cover the entire address space.
319 */
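/* For example, the only section with size.hi != 0 is one spanning the whole
 * 2^64 space (size.hi == 1, size.lo == 0); range_covers_byte() on size.lo
 * alone would then wrongly report a miss for every address, hence the
 * explicit size.hi check below.
 */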
320 return section->size.hi ||
321 range_covers_byte(section->offset_within_address_space,
322 section->size.lo, addr);
323 }
324
325 static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
326 Node *nodes, MemoryRegionSection *sections)
327 {
328 PhysPageEntry *p;
329 hwaddr index = addr >> TARGET_PAGE_BITS;
330 int i;
331
332 for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
333 if (lp.ptr == PHYS_MAP_NODE_NIL) {
334 return &sections[PHYS_SECTION_UNASSIGNED];
335 }
336 p = nodes[lp.ptr];
337 lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
338 }
339
340 if (section_covers_addr(&sections[lp.ptr], addr)) {
341 return &sections[lp.ptr];
342 } else {
343 return &sections[PHYS_SECTION_UNASSIGNED];
344 }
345 }
346
347 bool memory_region_is_unassigned(MemoryRegion *mr)
348 {
349 return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
350 && mr != &io_mem_watch;
351 }
352
353 /* Called from RCU critical section */
354 static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
355 hwaddr addr,
356 bool resolve_subpage)
357 {
358 MemoryRegionSection *section = atomic_read(&d->mru_section);
359 subpage_t *subpage;
360 bool update;
361
362 if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
363 section_covers_addr(section, addr)) {
364 update = false;
365 } else {
366 section = phys_page_find(d->phys_map, addr, d->map.nodes,
367 d->map.sections);
368 update = true;
369 }
370 if (resolve_subpage && section->mr->subpage) {
371 subpage = container_of(section->mr, subpage_t, iomem);
372 section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
373 }
374 if (update) {
375 atomic_set(&d->mru_section, section);
376 }
377 return section;
378 }
379
380 /* Called from RCU critical section */
381 static MemoryRegionSection *
382 address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
383 hwaddr *plen, bool resolve_subpage)
384 {
385 MemoryRegionSection *section;
386 MemoryRegion *mr;
387 Int128 diff;
388
389 section = address_space_lookup_region(d, addr, resolve_subpage);
390 /* Compute offset within MemoryRegionSection */
391 addr -= section->offset_within_address_space;
392
393 /* Compute offset within MemoryRegion */
394 *xlat = addr + section->offset_within_region;
395
396 mr = section->mr;
397
398 /* MMIO registers can be expected to perform full-width accesses based only
399 * on their address, without considering adjacent registers that could
400 * decode to completely different MemoryRegions. When such registers
401 * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
402 * regions overlap wildly. For this reason we cannot clamp the accesses
403 * here.
404 *
405 * If the length is small (as is the case for address_space_ldl/stl),
406 * everything works fine. If the incoming length is large, however,
407 * the caller really has to do the clamping through memory_access_size.
408 */
409 if (memory_region_is_ram(mr)) {
410 diff = int128_sub(section->size, int128_make64(addr));
411 *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
412 }
413 return section;
414 }
415
416 /* Called from RCU critical section */
417 MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
418 hwaddr *xlat, hwaddr *plen,
419 bool is_write)
420 {
421 IOMMUTLBEntry iotlb;
422 MemoryRegionSection *section;
423 MemoryRegion *mr;
424
425 for (;;) {
426 AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
427 section = address_space_translate_internal(d, addr, &addr, plen, true);
428 mr = section->mr;
429
430 if (!mr->iommu_ops) {
431 break;
432 }
433
434 iotlb = mr->iommu_ops->translate(mr, addr, is_write);
435 addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
436 | (addr & iotlb.addr_mask));
437 *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
438 if (!(iotlb.perm & (1 << is_write))) {
439 mr = &io_mem_unassigned;
440 break;
441 }
442
443 as = iotlb.target_as;
444 }
445
446 if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
447 hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
448 *plen = MIN(page, *plen);
449 }
450
451 *xlat = addr;
452 return mr;
453 }
454
455 /* Called from RCU critical section */
456 MemoryRegionSection *
457 address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
458 hwaddr *xlat, hwaddr *plen)
459 {
460 MemoryRegionSection *section;
461 AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;
462
463 section = address_space_translate_internal(d, addr, xlat, plen, false);
464
465 assert(!section->mr->iommu_ops);
466 return section;
467 }
468 #endif
469
470 #if !defined(CONFIG_USER_ONLY)
471
472 static int cpu_common_post_load(void *opaque, int version_id)
473 {
474 CPUState *cpu = opaque;
475
476 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
477 version_id is increased. */
478 cpu->interrupt_request &= ~0x01;
479 tlb_flush(cpu, 1);
480
481 return 0;
482 }
483
484 static int cpu_common_pre_load(void *opaque)
485 {
486 CPUState *cpu = opaque;
487
488 cpu->exception_index = -1;
489
490 return 0;
491 }
492
493 static bool cpu_common_exception_index_needed(void *opaque)
494 {
495 CPUState *cpu = opaque;
496
497 return tcg_enabled() && cpu->exception_index != -1;
498 }
499
500 static const VMStateDescription vmstate_cpu_common_exception_index = {
501 .name = "cpu_common/exception_index",
502 .version_id = 1,
503 .minimum_version_id = 1,
504 .needed = cpu_common_exception_index_needed,
505 .fields = (VMStateField[]) {
506 VMSTATE_INT32(exception_index, CPUState),
507 VMSTATE_END_OF_LIST()
508 }
509 };
510
511 static bool cpu_common_crash_occurred_needed(void *opaque)
512 {
513 CPUState *cpu = opaque;
514
515 return cpu->crash_occurred;
516 }
517
518 static const VMStateDescription vmstate_cpu_common_crash_occurred = {
519 .name = "cpu_common/crash_occurred",
520 .version_id = 1,
521 .minimum_version_id = 1,
522 .needed = cpu_common_crash_occurred_needed,
523 .fields = (VMStateField[]) {
524 VMSTATE_BOOL(crash_occurred, CPUState),
525 VMSTATE_END_OF_LIST()
526 }
527 };
528
529 const VMStateDescription vmstate_cpu_common = {
530 .name = "cpu_common",
531 .version_id = 1,
532 .minimum_version_id = 1,
533 .pre_load = cpu_common_pre_load,
534 .post_load = cpu_common_post_load,
535 .fields = (VMStateField[]) {
536 VMSTATE_UINT32(halted, CPUState),
537 VMSTATE_UINT32(interrupt_request, CPUState),
538 VMSTATE_END_OF_LIST()
539 },
540 .subsections = (const VMStateDescription*[]) {
541 &vmstate_cpu_common_exception_index,
542 &vmstate_cpu_common_crash_occurred,
543 NULL
544 }
545 };
546
547 #endif
548
549 CPUState *qemu_get_cpu(int index)
550 {
551 CPUState *cpu;
552
553 CPU_FOREACH(cpu) {
554 if (cpu->cpu_index == index) {
555 return cpu;
556 }
557 }
558
559 return NULL;
560 }
561
562 #if !defined(CONFIG_USER_ONLY)
563 void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
564 {
565 CPUAddressSpace *newas;
566
567 /* Target code should have set num_ases before calling us */
568 assert(asidx < cpu->num_ases);
569
570 if (asidx == 0) {
571 /* address space 0 gets the convenience alias */
572 cpu->as = as;
573 }
574
575 /* KVM cannot currently support multiple address spaces. */
576 assert(asidx == 0 || !kvm_enabled());
577
578 if (!cpu->cpu_ases) {
579 cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
580 }
581
582 newas = &cpu->cpu_ases[asidx];
583 newas->cpu = cpu;
584 newas->as = as;
585 if (tcg_enabled()) {
586 newas->tcg_as_listener.commit = tcg_commit;
587 memory_listener_register(&newas->tcg_as_listener, as);
588 }
589 }
590
591 AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
592 {
593 /* Return the AddressSpace corresponding to the specified index */
594 return cpu->cpu_ases[asidx].as;
595 }
596 #endif
597
598 #ifndef CONFIG_USER_ONLY
599 static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);
600
601 static int cpu_get_free_index(Error **errp)
602 {
603 int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);
604
605 if (cpu >= MAX_CPUMASK_BITS) {
606 error_setg(errp, "Trying to use more CPUs than max of %d",
607 MAX_CPUMASK_BITS);
608 return -1;
609 }
610
611 bitmap_set(cpu_index_map, cpu, 1);
612 return cpu;
613 }
614
615 void cpu_exec_exit(CPUState *cpu)
616 {
617 if (cpu->cpu_index == -1) {
618 /* cpu_index was never allocated by this @cpu or was already freed. */
619 return;
620 }
621
622 bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
623 cpu->cpu_index = -1;
624 }
625 #else
626
627 static int cpu_get_free_index(Error **errp)
628 {
629 CPUState *some_cpu;
630 int cpu_index = 0;
631
632 CPU_FOREACH(some_cpu) {
633 cpu_index++;
634 }
635 return cpu_index;
636 }
637
638 void cpu_exec_exit(CPUState *cpu)
639 {
640 }
641 #endif
642
643 void cpu_exec_init(CPUState *cpu, Error **errp)
644 {
645 CPUClass *cc = CPU_GET_CLASS(cpu);
646 Error *local_err = NULL;
647
648 cpu->as = NULL;
649 cpu->num_ases = 0;
650
651 #ifndef CONFIG_USER_ONLY
652 cpu->thread_id = qemu_get_thread_id();
653
654 /* This is a softmmu CPU object, so create a property for it
655 * so users can wire up its memory. (This can't go in qom/cpu.c
656 * because that file is compiled only once for both user-mode
657 * and system builds.) The default if no link is set up is to use
658 * the system address space.
659 */
660 object_property_add_link(OBJECT(cpu), "memory", TYPE_MEMORY_REGION,
661 (Object **)&cpu->memory,
662 qdev_prop_allow_set_link_before_realize,
663 OBJ_PROP_LINK_UNREF_ON_RELEASE,
664 &error_abort);
665 cpu->memory = system_memory;
666 object_ref(OBJECT(cpu->memory));
667 #endif
668
669 #if defined(CONFIG_USER_ONLY)
670 cpu_list_lock();
671 #endif
672 cpu->cpu_index = cpu_get_free_index(&local_err);
673 if (local_err) {
674 error_propagate(errp, local_err);
675 #if defined(CONFIG_USER_ONLY)
676 cpu_list_unlock();
677 #endif
678 return;
679 }
680 QTAILQ_INSERT_TAIL(&cpus, cpu, node);
681 #if defined(CONFIG_USER_ONLY)
682 (void) cc;
683 cpu_list_unlock();
684 #else
685 if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
686 vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
687 }
688 if (cc->vmsd != NULL) {
689 vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu);
690 }
691 #endif
692 }
693
694 #if defined(CONFIG_USER_ONLY)
695 static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
696 {
697 tb_invalidate_phys_page_range(pc, pc + 1, 0);
698 }
699 #else
700 static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
701 {
702 MemTxAttrs attrs;
703 hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
704 int asidx = cpu_asidx_from_attrs(cpu, attrs);
705 if (phys != -1) {
706 tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
707 phys | (pc & ~TARGET_PAGE_MASK));
708 }
709 }
710 #endif
711
712 #if defined(CONFIG_USER_ONLY)
713 void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
714
715 {
716 }
717
718 int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
719 int flags)
720 {
721 return -ENOSYS;
722 }
723
724 void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
725 {
726 }
727
728 int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
729 int flags, CPUWatchpoint **watchpoint)
730 {
731 return -ENOSYS;
732 }
733 #else
734 /* Add a watchpoint. */
735 int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
736 int flags, CPUWatchpoint **watchpoint)
737 {
738 CPUWatchpoint *wp;
739
740 /* forbid ranges which are empty or run off the end of the address space */
741 if (len == 0 || (addr + len - 1) < addr) {
742 error_report("tried to set invalid watchpoint at %"
743 VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
744 return -EINVAL;
745 }
746 wp = g_malloc(sizeof(*wp));
747
748 wp->vaddr = addr;
749 wp->len = len;
750 wp->flags = flags;
751
752 /* keep all GDB-injected watchpoints in front */
753 if (flags & BP_GDB) {
754 QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
755 } else {
756 QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
757 }
758
759 tlb_flush_page(cpu, addr);
760
761 if (watchpoint)
762 *watchpoint = wp;
763 return 0;
764 }
765
766 /* Remove a specific watchpoint. */
767 int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
768 int flags)
769 {
770 CPUWatchpoint *wp;
771
772 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
773 if (addr == wp->vaddr && len == wp->len
774 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
775 cpu_watchpoint_remove_by_ref(cpu, wp);
776 return 0;
777 }
778 }
779 return -ENOENT;
780 }
781
782 /* Remove a specific watchpoint by reference. */
783 void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
784 {
785 QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);
786
787 tlb_flush_page(cpu, watchpoint->vaddr);
788
789 g_free(watchpoint);
790 }
791
792 /* Remove all matching watchpoints. */
793 void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
794 {
795 CPUWatchpoint *wp, *next;
796
797 QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
798 if (wp->flags & mask) {
799 cpu_watchpoint_remove_by_ref(cpu, wp);
800 }
801 }
802 }
803
804 /* Return true if this watchpoint address matches the specified
805 * access (i.e. the address range covered by the watchpoint overlaps
806 * partially or completely with the address range covered by the
807 * access).
808 */
809 static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
810 vaddr addr,
811 vaddr len)
812 {
813 /* We know the lengths are non-zero, but a little caution is
814 * required to avoid errors in the case where the range ends
815 * exactly at the top of the address space and so addr + len
816 * wraps round to zero.
817 */
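/* Concrete case: a watchpoint covering the last page of the address space has
 * wp->vaddr + wp->len == 0 after wraparound, while wpend computed this way is
 * the all-ones address; comparing against vaddr + len directly would
 * therefore spuriously report no overlap.
 */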
818 vaddr wpend = wp->vaddr + wp->len - 1;
819 vaddr addrend = addr + len - 1;
820
821 return !(addr > wpend || wp->vaddr > addrend);
822 }
823
824 #endif
825
826 /* Add a breakpoint. */
827 int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
828 CPUBreakpoint **breakpoint)
829 {
830 CPUBreakpoint *bp;
831
832 bp = g_malloc(sizeof(*bp));
833
834 bp->pc = pc;
835 bp->flags = flags;
836
837 /* keep all GDB-injected breakpoints in front */
838 if (flags & BP_GDB) {
839 QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
840 } else {
841 QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
842 }
843
844 breakpoint_invalidate(cpu, pc);
845
846 if (breakpoint) {
847 *breakpoint = bp;
848 }
849 return 0;
850 }
851
852 /* Remove a specific breakpoint. */
853 int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
854 {
855 CPUBreakpoint *bp;
856
857 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
858 if (bp->pc == pc && bp->flags == flags) {
859 cpu_breakpoint_remove_by_ref(cpu, bp);
860 return 0;
861 }
862 }
863 return -ENOENT;
864 }
865
866 /* Remove a specific breakpoint by reference. */
867 void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
868 {
869 QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);
870
871 breakpoint_invalidate(cpu, breakpoint->pc);
872
873 g_free(breakpoint);
874 }
875
876 /* Remove all matching breakpoints. */
877 void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
878 {
879 CPUBreakpoint *bp, *next;
880
881 QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
882 if (bp->flags & mask) {
883 cpu_breakpoint_remove_by_ref(cpu, bp);
884 }
885 }
886 }
887
888 /* enable or disable single step mode. EXCP_DEBUG is returned by the
889 CPU loop after each instruction */
890 void cpu_single_step(CPUState *cpu, int enabled)
891 {
892 if (cpu->singlestep_enabled != enabled) {
893 cpu->singlestep_enabled = enabled;
894 if (kvm_enabled()) {
895 kvm_update_guest_debug(cpu, 0);
896 } else {
897 /* must flush all the translated code to avoid inconsistencies */
898 /* XXX: only flush what is necessary */
899 tb_flush(cpu);
900 }
901 }
902 }
903
904 void cpu_abort(CPUState *cpu, const char *fmt, ...)
905 {
906 va_list ap;
907 va_list ap2;
908
909 va_start(ap, fmt);
910 va_copy(ap2, ap);
911 fprintf(stderr, "qemu: fatal: ");
912 vfprintf(stderr, fmt, ap);
913 fprintf(stderr, "\n");
914 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
915 if (qemu_log_separate()) {
916 qemu_log("qemu: fatal: ");
917 qemu_log_vprintf(fmt, ap2);
918 qemu_log("\n");
919 log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
920 qemu_log_flush();
921 qemu_log_close();
922 }
923 va_end(ap2);
924 va_end(ap);
925 replay_finish();
926 #if defined(CONFIG_USER_ONLY)
927 {
928 struct sigaction act;
929 sigfillset(&act.sa_mask);
930 act.sa_handler = SIG_DFL;
931 sigaction(SIGABRT, &act, NULL);
932 }
933 #endif
934 abort();
935 }
936
937 #if !defined(CONFIG_USER_ONLY)
938 /* Called from RCU critical section */
939 static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
940 {
941 RAMBlock *block;
942
943 block = atomic_rcu_read(&ram_list.mru_block);
944 if (block && addr - block->offset < block->max_length) {
945 return block;
946 }
947 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
948 if (addr - block->offset < block->max_length) {
949 goto found;
950 }
951 }
952
953 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
954 abort();
955
956 found:
957 /* It is safe to write mru_block outside the iothread lock. This
958 * is what happens:
959 *
960 * mru_block = xxx
961 * rcu_read_unlock()
962 * xxx removed from list
963 * rcu_read_lock()
964 * read mru_block
965 * mru_block = NULL;
966 * call_rcu(reclaim_ramblock, xxx);
967 * rcu_read_unlock()
968 *
969 * atomic_rcu_set is not needed here. The block was already published
970 * when it was placed into the list. Here we're just making an extra
971 * copy of the pointer.
972 */
973 ram_list.mru_block = block;
974 return block;
975 }
976
977 static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
978 {
979 CPUState *cpu;
980 ram_addr_t start1;
981 RAMBlock *block;
982 ram_addr_t end;
983
984 end = TARGET_PAGE_ALIGN(start + length);
985 start &= TARGET_PAGE_MASK;
986
987 rcu_read_lock();
988 block = qemu_get_ram_block(start);
989 assert(block == qemu_get_ram_block(end - 1));
990 start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
991 CPU_FOREACH(cpu) {
992 tlb_reset_dirty(cpu, start1, length);
993 }
994 rcu_read_unlock();
995 }
996
997 /* Note: start and end must be within the same ram block. */
998 bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
999 ram_addr_t length,
1000 unsigned client)
1001 {
1002 DirtyMemoryBlocks *blocks;
1003 unsigned long end, page;
1004 bool dirty = false;
1005
1006 if (length == 0) {
1007 return false;
1008 }
1009
1010 end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
1011 page = start >> TARGET_PAGE_BITS;
1012
1013 rcu_read_lock();
1014
1015 blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
1016
1017 while (page < end) {
1018 unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
1019 unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
1020 unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);
1021
1022 dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
1023 offset, num);
1024 page += num;
1025 }
1026
1027 rcu_read_unlock();
1028
1029 if (dirty && tcg_enabled()) {
1030 tlb_reset_dirty_range_all(start, length);
1031 }
1032
1033 return dirty;
1034 }
1035
1036 /* Called from RCU critical section */
1037 hwaddr memory_region_section_get_iotlb(CPUState *cpu,
1038 MemoryRegionSection *section,
1039 target_ulong vaddr,
1040 hwaddr paddr, hwaddr xlat,
1041 int prot,
1042 target_ulong *address)
1043 {
1044 hwaddr iotlb;
1045 CPUWatchpoint *wp;
1046
1047 if (memory_region_is_ram(section->mr)) {
1048 /* Normal RAM. */
1049 iotlb = memory_region_get_ram_addr(section->mr) + xlat;
1050 if (!section->readonly) {
1051 iotlb |= PHYS_SECTION_NOTDIRTY;
1052 } else {
1053 iotlb |= PHYS_SECTION_ROM;
1054 }
1055 } else {
1056 AddressSpaceDispatch *d;
1057
1058 d = atomic_rcu_read(&section->address_space->dispatch);
1059 iotlb = section - d->map.sections;
1060 iotlb += xlat;
1061 }
1062
1063 /* Make accesses to pages with watchpoints go via the
1064 watchpoint trap routines. */
1065 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
1066 if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
1067 /* Avoid trapping reads of pages with a write breakpoint. */
1068 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
1069 iotlb = PHYS_SECTION_WATCH + paddr;
1070 *address |= TLB_MMIO;
1071 break;
1072 }
1073 }
1074 }
1075
1076 return iotlb;
1077 }
1078 #endif /* !defined(CONFIG_USER_ONLY) */
1079
1080 #if !defined(CONFIG_USER_ONLY)
1081
1082 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
1083 uint16_t section);
1084 static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
1085
1086 static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
1087 qemu_anon_ram_alloc;
1088
1089 /*
1090 * Set a custom physical guest memory allocator.
1091 * Accelerators with unusual needs may need this. Hopefully, we can
1092 * get rid of it eventually.
1093 */
1094 void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
1095 {
1096 phys_mem_alloc = alloc;
1097 }
1098
1099 static uint16_t phys_section_add(PhysPageMap *map,
1100 MemoryRegionSection *section)
1101 {
1102 /* The physical section number is ORed with a page-aligned
1103 * pointer to produce the iotlb entries. Thus it should
1104 * never overflow into the page-aligned value.
1105 */
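/* For example, with 4 KiB target pages the section index must stay below
 * 4096; the assert below enforces this so that ORing an index into a
 * page-aligned address can never disturb the address bits.
 */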
1106 assert(map->sections_nb < TARGET_PAGE_SIZE);
1107
1108 if (map->sections_nb == map->sections_nb_alloc) {
1109 map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
1110 map->sections = g_renew(MemoryRegionSection, map->sections,
1111 map->sections_nb_alloc);
1112 }
1113 map->sections[map->sections_nb] = *section;
1114 memory_region_ref(section->mr);
1115 return map->sections_nb++;
1116 }
1117
1118 static void phys_section_destroy(MemoryRegion *mr)
1119 {
1120 bool have_sub_page = mr->subpage;
1121
1122 memory_region_unref(mr);
1123
1124 if (have_sub_page) {
1125 subpage_t *subpage = container_of(mr, subpage_t, iomem);
1126 object_unref(OBJECT(&subpage->iomem));
1127 g_free(subpage);
1128 }
1129 }
1130
1131 static void phys_sections_free(PhysPageMap *map)
1132 {
1133 while (map->sections_nb > 0) {
1134 MemoryRegionSection *section = &map->sections[--map->sections_nb];
1135 phys_section_destroy(section->mr);
1136 }
1137 g_free(map->sections);
1138 g_free(map->nodes);
1139 }
1140
1141 static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
1142 {
1143 subpage_t *subpage;
1144 hwaddr base = section->offset_within_address_space
1145 & TARGET_PAGE_MASK;
1146 MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
1147 d->map.nodes, d->map.sections);
1148 MemoryRegionSection subsection = {
1149 .offset_within_address_space = base,
1150 .size = int128_make64(TARGET_PAGE_SIZE),
1151 };
1152 hwaddr start, end;
1153
1154 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
1155
1156 if (!(existing->mr->subpage)) {
1157 subpage = subpage_init(d->as, base);
1158 subsection.address_space = d->as;
1159 subsection.mr = &subpage->iomem;
1160 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
1161 phys_section_add(&d->map, &subsection));
1162 } else {
1163 subpage = container_of(existing->mr, subpage_t, iomem);
1164 }
1165 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
1166 end = start + int128_get64(section->size) - 1;
1167 subpage_register(subpage, start, end,
1168 phys_section_add(&d->map, section));
1169 }
1170
1171
1172 static void register_multipage(AddressSpaceDispatch *d,
1173 MemoryRegionSection *section)
1174 {
1175 hwaddr start_addr = section->offset_within_address_space;
1176 uint16_t section_index = phys_section_add(&d->map, section);
1177 uint64_t num_pages = int128_get64(int128_rshift(section->size,
1178 TARGET_PAGE_BITS));
1179
1180 assert(num_pages);
1181 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
1182 }
1183
1184 static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
1185 {
1186 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
1187 AddressSpaceDispatch *d = as->next_dispatch;
1188 MemoryRegionSection now = *section, remain = *section;
1189 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
1190
1191 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1192 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1193 - now.offset_within_address_space;
1194
1195 now.size = int128_min(int128_make64(left), now.size);
1196 register_subpage(d, &now);
1197 } else {
1198 now.size = int128_zero();
1199 }
1200 while (int128_ne(remain.size, now.size)) {
1201 remain.size = int128_sub(remain.size, now.size);
1202 remain.offset_within_address_space += int128_get64(now.size);
1203 remain.offset_within_region += int128_get64(now.size);
1204 now = remain;
1205 if (int128_lt(remain.size, page_size)) {
1206 register_subpage(d, &now);
1207 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
1208 now.size = page_size;
1209 register_subpage(d, &now);
1210 } else {
1211 now.size = int128_and(now.size, int128_neg(page_size));
1212 register_multipage(d, &now);
1213 }
1214 }
1215 }
1216
1217 void qemu_flush_coalesced_mmio_buffer(void)
1218 {
1219 if (kvm_enabled())
1220 kvm_flush_coalesced_mmio_buffer();
1221 }
1222
1223 void qemu_mutex_lock_ramlist(void)
1224 {
1225 qemu_mutex_lock(&ram_list.mutex);
1226 }
1227
1228 void qemu_mutex_unlock_ramlist(void)
1229 {
1230 qemu_mutex_unlock(&ram_list.mutex);
1231 }
1232
1233 #ifdef __linux__
1234 static void *file_ram_alloc(RAMBlock *block,
1235 ram_addr_t memory,
1236 const char *path,
1237 Error **errp)
1238 {
1239 bool unlink_on_error = false;
1240 char *filename;
1241 char *sanitized_name;
1242 char *c;
1243 void *area;
1244 int fd = -1;
1245 int64_t page_size;
1246
1247 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1248 error_setg(errp,
1249 "host lacks kvm mmu notifiers, -mem-path unsupported");
1250 return NULL;
1251 }
1252
1253 for (;;) {
1254 fd = open(path, O_RDWR);
1255 if (fd >= 0) {
1256 /* @path names an existing file, use it */
1257 break;
1258 }
1259 if (errno == ENOENT) {
1260 /* @path names a file that doesn't exist, create it */
1261 fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
1262 if (fd >= 0) {
1263 unlink_on_error = true;
1264 break;
1265 }
1266 } else if (errno == EISDIR) {
1267 /* @path names a directory, create a file there */
1268 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1269 sanitized_name = g_strdup(memory_region_name(block->mr));
1270 for (c = sanitized_name; *c != '\0'; c++) {
1271 if (*c == '/') {
1272 *c = '_';
1273 }
1274 }
1275
1276 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1277 sanitized_name);
1278 g_free(sanitized_name);
1279
1280 fd = mkstemp(filename);
1281 if (fd >= 0) {
1282 unlink(filename);
1283 g_free(filename);
1284 break;
1285 }
1286 g_free(filename);
1287 }
1288 if (errno != EEXIST && errno != EINTR) {
1289 error_setg_errno(errp, errno,
1290 "can't open backing store %s for guest RAM",
1291 path);
1292 goto error;
1293 }
1294 /*
1295 * Try again on EINTR and EEXIST. The latter happens when
1296 * something else creates the file between our two open() calls.
1297 */
1298 }
1299
1300 page_size = qemu_fd_getpagesize(fd);
1301 block->mr->align = MAX(page_size, QEMU_VMALLOC_ALIGN);
1302
1303 if (memory < page_size) {
1304 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1305 "or larger than page size 0x%" PRIx64,
1306 memory, page_size);
1307 goto error;
1308 }
1309
1310 memory = ROUND_UP(memory, page_size);
1311
1312 /*
1313 * ftruncate is not supported by hugetlbfs in older
1314 * hosts, so don't bother bailing out on errors.
1315 * If anything goes wrong with it under other filesystems,
1316 * mmap will fail.
1317 */
1318 if (ftruncate(fd, memory)) {
1319 perror("ftruncate");
1320 }
1321
1322 area = qemu_ram_mmap(fd, memory, block->mr->align,
1323 block->flags & RAM_SHARED);
1324 if (area == MAP_FAILED) {
1325 error_setg_errno(errp, errno,
1326 "unable to map backing store for guest RAM");
1327 goto error;
1328 }
1329
1330 if (mem_prealloc) {
1331 os_mem_prealloc(fd, area, memory);
1332 }
1333
1334 block->fd = fd;
1335 return area;
1336
1337 error:
1338 if (unlink_on_error) {
1339 unlink(path);
1340 }
1341 if (fd != -1) {
1342 close(fd);
1343 }
1344 return NULL;
1345 }
1346 #endif
1347
1348 /* Called with the ramlist lock held. */
1349 static ram_addr_t find_ram_offset(ram_addr_t size)
1350 {
1351 RAMBlock *block, *next_block;
1352 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
1353
1354 assert(size != 0); /* it would hand out the same offset multiple times */
1355
1356 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
1357 return 0;
1358 }
1359
1360 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1361 ram_addr_t end, next = RAM_ADDR_MAX;
1362
1363 end = block->offset + block->max_length;
1364
1365 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
1366 if (next_block->offset >= end) {
1367 next = MIN(next, next_block->offset);
1368 }
1369 }
1370 if (next - end >= size && next - end < mingap) {
1371 offset = end;
1372 mingap = next - end;
1373 }
1374 }
1375
1376 if (offset == RAM_ADDR_MAX) {
1377 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1378 (uint64_t)size);
1379 abort();
1380 }
1381
1382 return offset;
1383 }
1384
1385 ram_addr_t last_ram_offset(void)
1386 {
1387 RAMBlock *block;
1388 ram_addr_t last = 0;
1389
1390 rcu_read_lock();
1391 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1392 last = MAX(last, block->offset + block->max_length);
1393 }
1394 rcu_read_unlock();
1395 return last;
1396 }
1397
1398 static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1399 {
1400 int ret;
1401
1402 /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core */
1403 if (!machine_dump_guest_core(current_machine)) {
1404 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1405 if (ret) {
1406 perror("qemu_madvise");
1407 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1408 "but dump_guest_core=off specified\n");
1409 }
1410 }
1411 }
1412
1413 const char *qemu_ram_get_idstr(RAMBlock *rb)
1414 {
1415 return rb->idstr;
1416 }
1417
1418 /* Called with iothread lock held. */
1419 void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
1420 {
1421 RAMBlock *block;
1422
1423 assert(new_block);
1424 assert(!new_block->idstr[0]);
1425
1426 if (dev) {
1427 char *id = qdev_get_dev_path(dev);
1428 if (id) {
1429 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
1430 g_free(id);
1431 }
1432 }
1433 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1434
1435 rcu_read_lock();
1436 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1437 if (block != new_block &&
1438 !strcmp(block->idstr, new_block->idstr)) {
1439 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1440 new_block->idstr);
1441 abort();
1442 }
1443 }
1444 rcu_read_unlock();
1445 }
1446
1447 /* Called with iothread lock held. */
1448 void qemu_ram_unset_idstr(RAMBlock *block)
1449 {
1450 /* FIXME: arch_init.c assumes that this is not called throughout
1451 * migration. Ignore the problem since hot-unplug during migration
1452 * does not work anyway.
1453 */
1454 if (block) {
1455 memset(block->idstr, 0, sizeof(block->idstr));
1456 }
1457 }
1458
1459 static int memory_try_enable_merging(void *addr, size_t len)
1460 {
1461 if (!machine_mem_merge(current_machine)) {
1462 /* disabled by the user */
1463 return 0;
1464 }
1465
1466 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1467 }
1468
1469 /* Only legal before the guest might have detected the memory size: e.g. on
1470 * incoming migration, or right after reset.
1471 *
1472 * Since the memory core doesn't know how the memory is accessed, it is up to
1473 * the resize callback to update device state and/or add assertions to detect
1474 * misuse, if necessary.
1475 */
1476 int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
1477 {
1478 assert(block);
1479
1480 newsize = HOST_PAGE_ALIGN(newsize);
1481
1482 if (block->used_length == newsize) {
1483 return 0;
1484 }
1485
1486 if (!(block->flags & RAM_RESIZEABLE)) {
1487 error_setg_errno(errp, EINVAL,
1488 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1489 " in != 0x" RAM_ADDR_FMT, block->idstr,
1490 newsize, block->used_length);
1491 return -EINVAL;
1492 }
1493
1494 if (block->max_length < newsize) {
1495 error_setg_errno(errp, EINVAL,
1496 "Length too large: %s: 0x" RAM_ADDR_FMT
1497 " > 0x" RAM_ADDR_FMT, block->idstr,
1498 newsize, block->max_length);
1499 return -EINVAL;
1500 }
1501
1502 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1503 block->used_length = newsize;
1504 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1505 DIRTY_CLIENTS_ALL);
1506 memory_region_set_size(block->mr, newsize);
1507 if (block->resized) {
1508 block->resized(block->idstr, newsize, block->host);
1509 }
1510 return 0;
1511 }
1512
1513 /* Called with ram_list.mutex held */
1514 static void dirty_memory_extend(ram_addr_t old_ram_size,
1515 ram_addr_t new_ram_size)
1516 {
1517 ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
1518 DIRTY_MEMORY_BLOCK_SIZE);
1519 ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
1520 DIRTY_MEMORY_BLOCK_SIZE);
1521 int i;
1522
1523 /* Only need to extend if block count increased */
1524 if (new_num_blocks <= old_num_blocks) {
1525 return;
1526 }
1527
1528 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1529 DirtyMemoryBlocks *old_blocks;
1530 DirtyMemoryBlocks *new_blocks;
1531 int j;
1532
1533 old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
1534 new_blocks = g_malloc(sizeof(*new_blocks) +
1535 sizeof(new_blocks->blocks[0]) * new_num_blocks);
1536
1537 if (old_num_blocks) {
1538 memcpy(new_blocks->blocks, old_blocks->blocks,
1539 old_num_blocks * sizeof(old_blocks->blocks[0]));
1540 }
1541
1542 for (j = old_num_blocks; j < new_num_blocks; j++) {
1543 new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
1544 }
1545
1546 atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);
1547
1548 if (old_blocks) {
1549 g_free_rcu(old_blocks, rcu);
1550 }
1551 }
1552 }
1553
1554 static void ram_block_add(RAMBlock *new_block, Error **errp)
1555 {
1556 RAMBlock *block;
1557 RAMBlock *last_block = NULL;
1558 ram_addr_t old_ram_size, new_ram_size;
1559 Error *err = NULL;
1560
1561 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1562
1563 qemu_mutex_lock_ramlist();
1564 new_block->offset = find_ram_offset(new_block->max_length);
1565
1566 if (!new_block->host) {
1567 if (xen_enabled()) {
1568 xen_ram_alloc(new_block->offset, new_block->max_length,
1569 new_block->mr, &err);
1570 if (err) {
1571 error_propagate(errp, err);
1572 qemu_mutex_unlock_ramlist();
1573 return;
1574 }
1575 } else {
1576 new_block->host = phys_mem_alloc(new_block->max_length,
1577 &new_block->mr->align);
1578 if (!new_block->host) {
1579 error_setg_errno(errp, errno,
1580 "cannot set up guest memory '%s'",
1581 memory_region_name(new_block->mr));
1582 qemu_mutex_unlock_ramlist();
1583 return;
1584 }
1585 memory_try_enable_merging(new_block->host, new_block->max_length);
1586 }
1587 }
1588
1589 new_ram_size = MAX(old_ram_size,
1590 (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
1591 if (new_ram_size > old_ram_size) {
1592 migration_bitmap_extend(old_ram_size, new_ram_size);
1593 dirty_memory_extend(old_ram_size, new_ram_size);
1594 }
1595 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1596 * QLIST (which has an RCU-friendly variant) does not have insertion at
1597 * tail, so save the last element in last_block.
1598 */
1599 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1600 last_block = block;
1601 if (block->max_length < new_block->max_length) {
1602 break;
1603 }
1604 }
1605 if (block) {
1606 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
1607 } else if (last_block) {
1608 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
1609 } else { /* list is empty */
1610 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
1611 }
1612 ram_list.mru_block = NULL;
1613
1614 /* Write list before version */
1615 smp_wmb();
1616 ram_list.version++;
1617 qemu_mutex_unlock_ramlist();
1618
1619 cpu_physical_memory_set_dirty_range(new_block->offset,
1620 new_block->used_length,
1621 DIRTY_CLIENTS_ALL);
1622
1623 if (new_block->host) {
1624 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1625 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1626 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1627 if (kvm_enabled()) {
1628 kvm_setup_guest_memory(new_block->host, new_block->max_length);
1629 }
1630 }
1631 }
1632
1633 #ifdef __linux__
1634 RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
1635 bool share, const char *mem_path,
1636 Error **errp)
1637 {
1638 RAMBlock *new_block;
1639 Error *local_err = NULL;
1640
1641 if (xen_enabled()) {
1642 error_setg(errp, "-mem-path not supported with Xen");
1643 return NULL;
1644 }
1645
1646 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1647 /*
1648 * file_ram_alloc() needs to allocate just like
1649 * phys_mem_alloc, but we haven't bothered to provide
1650 * a hook there.
1651 */
1652 error_setg(errp,
1653 "-mem-path not supported with this accelerator");
1654 return NULL;
1655 }
1656
1657 size = HOST_PAGE_ALIGN(size);
1658 new_block = g_malloc0(sizeof(*new_block));
1659 new_block->mr = mr;
1660 new_block->used_length = size;
1661 new_block->max_length = size;
1662 new_block->flags = share ? RAM_SHARED : 0;
1663 new_block->host = file_ram_alloc(new_block, size,
1664 mem_path, errp);
1665 if (!new_block->host) {
1666 g_free(new_block);
1667 return NULL;
1668 }
1669
1670 ram_block_add(new_block, &local_err);
1671 if (local_err) {
1672 g_free(new_block);
1673 error_propagate(errp, local_err);
1674 return NULL;
1675 }
1676 return new_block;
1677 }
1678 #endif
1679
1680 static
1681 RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1682 void (*resized)(const char*,
1683 uint64_t length,
1684 void *host),
1685 void *host, bool resizeable,
1686 MemoryRegion *mr, Error **errp)
1687 {
1688 RAMBlock *new_block;
1689 Error *local_err = NULL;
1690
1691 size = HOST_PAGE_ALIGN(size);
1692 max_size = HOST_PAGE_ALIGN(max_size);
1693 new_block = g_malloc0(sizeof(*new_block));
1694 new_block->mr = mr;
1695 new_block->resized = resized;
1696 new_block->used_length = size;
1697 new_block->max_length = max_size;
1698 assert(max_size >= size);
1699 new_block->fd = -1;
1700 new_block->host = host;
1701 if (host) {
1702 new_block->flags |= RAM_PREALLOC;
1703 }
1704 if (resizeable) {
1705 new_block->flags |= RAM_RESIZEABLE;
1706 }
1707 ram_block_add(new_block, &local_err);
1708 if (local_err) {
1709 g_free(new_block);
1710 error_propagate(errp, local_err);
1711 return NULL;
1712 }
1713 return new_block;
1714 }
1715
1716 RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1717 MemoryRegion *mr, Error **errp)
1718 {
1719 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1720 }
1721
1722 RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
1723 {
1724 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1725 }
1726
1727 RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
1728 void (*resized)(const char*,
1729 uint64_t length,
1730 void *host),
1731 MemoryRegion *mr, Error **errp)
1732 {
1733 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
1734 }
1735
1736 static void reclaim_ramblock(RAMBlock *block)
1737 {
1738 if (block->flags & RAM_PREALLOC) {
1739 ;
1740 } else if (xen_enabled()) {
1741 xen_invalidate_map_cache_entry(block->host);
1742 #ifndef _WIN32
1743 } else if (block->fd >= 0) {
1744 qemu_ram_munmap(block->host, block->max_length);
1745 close(block->fd);
1746 #endif
1747 } else {
1748 qemu_anon_ram_free(block->host, block->max_length);
1749 }
1750 g_free(block);
1751 }
1752
1753 void qemu_ram_free(RAMBlock *block)
1754 {
1755 if (!block) {
1756 return;
1757 }
1758
1759 qemu_mutex_lock_ramlist();
1760 QLIST_REMOVE_RCU(block, next);
1761 ram_list.mru_block = NULL;
1762 /* Write list before version */
1763 smp_wmb();
1764 ram_list.version++;
1765 call_rcu(block, reclaim_ramblock, rcu);
1766 qemu_mutex_unlock_ramlist();
1767 }
1768
1769 #ifndef _WIN32
1770 void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1771 {
1772 RAMBlock *block;
1773 ram_addr_t offset;
1774 int flags;
1775 void *area, *vaddr;
1776
1777 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1778 offset = addr - block->offset;
1779 if (offset < block->max_length) {
1780 vaddr = ramblock_ptr(block, offset);
1781 if (block->flags & RAM_PREALLOC) {
1782 ;
1783 } else if (xen_enabled()) {
1784 abort();
1785 } else {
1786 flags = MAP_FIXED;
1787 if (block->fd >= 0) {
1788 flags |= (block->flags & RAM_SHARED ?
1789 MAP_SHARED : MAP_PRIVATE);
1790 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1791 flags, block->fd, offset);
1792 } else {
1793 /*
1794 * Remap needs to match alloc. Accelerators that
1795 * set phys_mem_alloc never remap. If they did,
1796 * we'd need a remap hook here.
1797 */
1798 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1799
1800 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1801 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1802 flags, -1, 0);
1803 }
1804 if (area != vaddr) {
1805 fprintf(stderr, "Could not remap addr: "
1806 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
1807 length, addr);
1808 exit(1);
1809 }
1810 memory_try_enable_merging(vaddr, length);
1811 qemu_ram_setup_dump(vaddr, length);
1812 }
1813 }
1814 }
1815 }
1816 #endif /* !_WIN32 */
1817
1818 /* Return a host pointer to ram allocated with qemu_ram_alloc.
1819 * This should not be used for general purpose DMA. Use address_space_map
1820 * or address_space_rw instead. For local memory (e.g. video ram) that the
1821 * device owns, use memory_region_get_ram_ptr.
1822 *
1823 * Called within RCU critical section.
1824 */
1825 void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
1826 {
1827 RAMBlock *block = ram_block;
1828
1829 if (block == NULL) {
1830 block = qemu_get_ram_block(addr);
1831 addr -= block->offset;
1832 }
1833
1834 if (xen_enabled() && block->host == NULL) {
1835 /* We need to check if the requested address is in the RAM
1836 * because we don't want to map the entire memory in QEMU.
1837 * In that case just map until the end of the page.
1838 */
1839 if (block->offset == 0) {
1840 return xen_map_cache(addr, 0, 0);
1841 }
1842
1843 block->host = xen_map_cache(block->offset, block->max_length, 1);
1844 }
1845 return ramblock_ptr(block, addr);
1846 }
1847
1848 /* Return a host pointer to guest's ram. Similar to qemu_map_ram_ptr
1849 * but takes a size argument.
1850 *
1851 * Called within RCU critical section.
1852 */
1853 static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
1854 hwaddr *size)
1855 {
1856 RAMBlock *block = ram_block;
1857 if (*size == 0) {
1858 return NULL;
1859 }
1860
1861 if (block == NULL) {
1862 block = qemu_get_ram_block(addr);
1863 addr -= block->offset;
1864 }
1865 *size = MIN(*size, block->max_length - addr);
1866
1867 if (xen_enabled() && block->host == NULL) {
1868 /* We need to check if the requested address is in the RAM
1869 * because we don't want to map the entire memory in QEMU.
1870 * In that case just map the requested area.
1871 */
1872 if (block->offset == 0) {
1873 return xen_map_cache(addr, *size, 1);
1874 }
1875
1876 block->host = xen_map_cache(block->offset, block->max_length, 1);
1877 }
1878
1879 return ramblock_ptr(block, addr);
1880 }
1881
1882 /*
1883 * Translates a host ptr back to a RAMBlock and an offset within that
1884 * RAMBlock.
1885 *
1886 * ptr: Host pointer to look up
1887 * round_offset: If true round the result offset down to a page boundary
1888 * *offset: set to result offset within the RAMBlock
1889 * (the ram_addr, if needed, can be recovered as block->offset + *offset)
1890 *
1891 * Returns: RAMBlock (or NULL if not found)
1892 *
1893 * By the time this function returns, the returned pointer is not protected
1894 * by RCU anymore. If the caller is not within an RCU critical section and
1895 * does not hold the iothread lock, it must have other means of protecting the
1896 * pointer, such as a reference to the region that includes the incoming
1897 * ram_addr_t.
1898 */
1899 RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
1900 ram_addr_t *offset)
1901 {
1902 RAMBlock *block;
1903 uint8_t *host = ptr;
1904
1905 if (xen_enabled()) {
1906 ram_addr_t ram_addr;
1907 rcu_read_lock();
1908 ram_addr = xen_ram_addr_from_mapcache(ptr);
1909 block = qemu_get_ram_block(ram_addr);
1910 if (block) {
1911 *offset = (host - block->host);
1912 }
1913 rcu_read_unlock();
1914 return block;
1915 }
1916
1917 rcu_read_lock();
1918 block = atomic_rcu_read(&ram_list.mru_block);
1919 if (block && block->host && host - block->host < block->max_length) {
1920 goto found;
1921 }
1922
1923 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1924 /* This case occurs when the block is not mapped. */
1925 if (block->host == NULL) {
1926 continue;
1927 }
1928 if (host - block->host < block->max_length) {
1929 goto found;
1930 }
1931 }
1932
1933 rcu_read_unlock();
1934 return NULL;
1935
1936 found:
1937 *offset = (host - block->host);
1938 if (round_offset) {
1939 *offset &= TARGET_PAGE_MASK;
1940 }
1941 rcu_read_unlock();
1942 return block;
1943 }
1944
1945 /*
1946 * Finds the named RAMBlock
1947 *
1948 * name: The name of RAMBlock to find
1949 *
1950 * Returns: RAMBlock (or NULL if not found)
1951 */
1952 RAMBlock *qemu_ram_block_by_name(const char *name)
1953 {
1954 RAMBlock *block;
1955
1956 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1957 if (!strcmp(name, block->idstr)) {
1958 return block;
1959 }
1960 }
1961
1962 return NULL;
1963 }
1964
1965 /* Some of the softmmu routines need to translate from a host pointer
1966 (typically a TLB entry) back to a ram offset. */
1967 ram_addr_t qemu_ram_addr_from_host(void *ptr)
1968 {
1969 RAMBlock *block;
1970 ram_addr_t offset;
1971
1972 block = qemu_ram_block_from_host(ptr, false, &offset);
1973 if (!block) {
1974 return RAM_ADDR_INVALID;
1975 }
1976
1977 return block->offset + offset;
1978 }
1979
1980 /* Called within RCU critical section. */
1981 static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
1982 uint64_t val, unsigned size)
1983 {
1984 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
1985 tb_invalidate_phys_page_fast(ram_addr, size);
1986 }
1987 switch (size) {
1988 case 1:
1989 stb_p(qemu_map_ram_ptr(NULL, ram_addr), val);
1990 break;
1991 case 2:
1992 stw_p(qemu_map_ram_ptr(NULL, ram_addr), val);
1993 break;
1994 case 4:
1995 stl_p(qemu_map_ram_ptr(NULL, ram_addr), val);
1996 break;
1997 default:
1998 abort();
1999 }
2000 /* Set both VGA and migration bits for simplicity and to remove
2001 * the notdirty callback faster.
2002 */
2003 cpu_physical_memory_set_dirty_range(ram_addr, size,
2004 DIRTY_CLIENTS_NOCODE);
2005 /* we remove the notdirty callback only if the code has been
2006 flushed */
2007 if (!cpu_physical_memory_is_clean(ram_addr)) {
2008 tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
2009 }
2010 }
2011
2012 static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
2013 unsigned size, bool is_write)
2014 {
2015 return is_write;
2016 }
2017
2018 static const MemoryRegionOps notdirty_mem_ops = {
2019 .write = notdirty_mem_write,
2020 .valid.accepts = notdirty_mem_accepts,
2021 .endianness = DEVICE_NATIVE_ENDIAN,
2022 };
2023
2024 /* Generate a debug exception if a watchpoint has been hit. */
2025 static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
2026 {
2027 CPUState *cpu = current_cpu;
2028 CPUClass *cc = CPU_GET_CLASS(cpu);
2029 CPUArchState *env = cpu->env_ptr;
2030 target_ulong pc, cs_base;
2031 target_ulong vaddr;
2032 CPUWatchpoint *wp;
2033 uint32_t cpu_flags;
2034
2035 if (cpu->watchpoint_hit) {
2036 /* We re-entered the check after replacing the TB. Now raise
2037 * the debug interrupt so that it will trigger after the
2038 * current instruction. */
2039 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
2040 return;
2041 }
2042 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2043 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
2044 if (cpu_watchpoint_address_matches(wp, vaddr, len)
2045 && (wp->flags & flags)) {
2046 if (flags == BP_MEM_READ) {
2047 wp->flags |= BP_WATCHPOINT_HIT_READ;
2048 } else {
2049 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
2050 }
2051 wp->hitaddr = vaddr;
2052 wp->hitattrs = attrs;
2053 if (!cpu->watchpoint_hit) {
2054 if (wp->flags & BP_CPU &&
2055 !cc->debug_check_watchpoint(cpu, wp)) {
2056 wp->flags &= ~BP_WATCHPOINT_HIT;
2057 continue;
2058 }
2059 cpu->watchpoint_hit = wp;
2060 tb_check_watchpoint(cpu);
2061 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2062 cpu->exception_index = EXCP_DEBUG;
2063 cpu_loop_exit(cpu);
2064 } else {
2065 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2066 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
2067 cpu_resume_from_signal(cpu, NULL);
2068 }
2069 }
2070 } else {
2071 wp->flags &= ~BP_WATCHPOINT_HIT;
2072 }
2073 }
2074 }
2075
2076 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2077 so these check for a hit then pass through to the normal out-of-line
2078 phys routines. */
2079 static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
2080 unsigned size, MemTxAttrs attrs)
2081 {
2082 MemTxResult res;
2083 uint64_t data;
2084 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2085 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
2086
2087 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
2088 switch (size) {
2089 case 1:
2090 data = address_space_ldub(as, addr, attrs, &res);
2091 break;
2092 case 2:
2093 data = address_space_lduw(as, addr, attrs, &res);
2094 break;
2095 case 4:
2096 data = address_space_ldl(as, addr, attrs, &res);
2097 break;
2098 default: abort();
2099 }
2100 *pdata = data;
2101 return res;
2102 }
2103
2104 static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
2105 uint64_t val, unsigned size,
2106 MemTxAttrs attrs)
2107 {
2108 MemTxResult res;
2109 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2110 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
2111
2112 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
2113 switch (size) {
2114 case 1:
2115 address_space_stb(as, addr, val, attrs, &res);
2116 break;
2117 case 2:
2118 address_space_stw(as, addr, val, attrs, &res);
2119 break;
2120 case 4:
2121 address_space_stl(as, addr, val, attrs, &res);
2122 break;
2123 default: abort();
2124 }
2125 return res;
2126 }
2127
2128 static const MemoryRegionOps watch_mem_ops = {
2129 .read_with_attrs = watch_mem_read,
2130 .write_with_attrs = watch_mem_write,
2131 .endianness = DEVICE_NATIVE_ENDIAN,
2132 };
2133
2134 static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
2135 unsigned len, MemTxAttrs attrs)
2136 {
2137 subpage_t *subpage = opaque;
2138 uint8_t buf[8];
2139 MemTxResult res;
2140
2141 #if defined(DEBUG_SUBPAGE)
2142 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
2143 subpage, len, addr);
2144 #endif
2145 res = address_space_read(subpage->as, addr + subpage->base,
2146 attrs, buf, len);
2147 if (res) {
2148 return res;
2149 }
2150 switch (len) {
2151 case 1:
2152 *data = ldub_p(buf);
2153 return MEMTX_OK;
2154 case 2:
2155 *data = lduw_p(buf);
2156 return MEMTX_OK;
2157 case 4:
2158 *data = ldl_p(buf);
2159 return MEMTX_OK;
2160 case 8:
2161 *data = ldq_p(buf);
2162 return MEMTX_OK;
2163 default:
2164 abort();
2165 }
2166 }
2167
2168 static MemTxResult subpage_write(void *opaque, hwaddr addr,
2169 uint64_t value, unsigned len, MemTxAttrs attrs)
2170 {
2171 subpage_t *subpage = opaque;
2172 uint8_t buf[8];
2173
2174 #if defined(DEBUG_SUBPAGE)
2175 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
2176 " value %"PRIx64"\n",
2177 __func__, subpage, len, addr, value);
2178 #endif
2179 switch (len) {
2180 case 1:
2181 stb_p(buf, value);
2182 break;
2183 case 2:
2184 stw_p(buf, value);
2185 break;
2186 case 4:
2187 stl_p(buf, value);
2188 break;
2189 case 8:
2190 stq_p(buf, value);
2191 break;
2192 default:
2193 abort();
2194 }
2195 return address_space_write(subpage->as, addr + subpage->base,
2196 attrs, buf, len);
2197 }
2198
2199 static bool subpage_accepts(void *opaque, hwaddr addr,
2200 unsigned len, bool is_write)
2201 {
2202 subpage_t *subpage = opaque;
2203 #if defined(DEBUG_SUBPAGE)
2204 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
2205 __func__, subpage, is_write ? 'w' : 'r', len, addr);
2206 #endif
2207
2208 return address_space_access_valid(subpage->as, addr + subpage->base,
2209 len, is_write);
2210 }
2211
2212 static const MemoryRegionOps subpage_ops = {
2213 .read_with_attrs = subpage_read,
2214 .write_with_attrs = subpage_write,
2215 .impl.min_access_size = 1,
2216 .impl.max_access_size = 8,
2217 .valid.min_access_size = 1,
2218 .valid.max_access_size = 8,
2219 .valid.accepts = subpage_accepts,
2220 .endianness = DEVICE_NATIVE_ENDIAN,
2221 };
2222
2223 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2224 uint16_t section)
2225 {
2226 int idx, eidx;
2227
2228 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2229 return -1;
2230 idx = SUBPAGE_IDX(start);
2231 eidx = SUBPAGE_IDX(end);
2232 #if defined(DEBUG_SUBPAGE)
2233 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2234 __func__, mmio, start, end, idx, eidx, section);
2235 #endif
2236 for (; idx <= eidx; idx++) {
2237 mmio->sub_section[idx] = section;
2238 }
2239
2240 return 0;
2241 }
2242
2243 static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
2244 {
2245 subpage_t *mmio;
2246
2247 mmio = g_malloc0(sizeof(subpage_t));
2248
2249 mmio->as = as;
2250 mmio->base = base;
2251 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
2252 NULL, TARGET_PAGE_SIZE);
2253 mmio->iomem.subpage = true;
2254 #if defined(DEBUG_SUBPAGE)
2255 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2256 mmio, base, TARGET_PAGE_SIZE);
2257 #endif
2258 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
2259
2260 return mmio;
2261 }
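/*
 * Sketch with hypothetical values, mirroring how the dispatch code earlier
 * in this file uses the two helpers above: a subpage covering one target
 * page, with bytes 0x0-0xff backed by section 5 and the rest left at
 * PHYS_SECTION_UNASSIGNED by subpage_init() itself.
 *
 *     subpage_t *sp = subpage_init(as, base & TARGET_PAGE_MASK);
 *     subpage_register(sp, 0x0, 0xff, 5);
 */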
2262
2263 static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2264 MemoryRegion *mr)
2265 {
2266 assert(as);
2267 MemoryRegionSection section = {
2268 .address_space = as,
2269 .mr = mr,
2270 .offset_within_address_space = 0,
2271 .offset_within_region = 0,
2272 .size = int128_2_64(),
2273 };
2274
2275 return phys_section_add(map, &section);
2276 }
2277
2278 MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
2279 {
2280 int asidx = cpu_asidx_from_attrs(cpu, attrs);
2281 CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
2282 AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
2283 MemoryRegionSection *sections = d->map.sections;
2284
2285 return sections[index & ~TARGET_PAGE_MASK].mr;
2286 }
2287
2288 static void io_mem_init(void)
2289 {
2290 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
2291 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
2292 NULL, UINT64_MAX);
2293 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
2294 NULL, UINT64_MAX);
2295 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
2296 NULL, UINT64_MAX);
2297 }
2298
2299 static void mem_begin(MemoryListener *listener)
2300 {
2301 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
2302 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2303 uint16_t n;
2304
2305 n = dummy_section(&d->map, as, &io_mem_unassigned);
2306 assert(n == PHYS_SECTION_UNASSIGNED);
2307 n = dummy_section(&d->map, as, &io_mem_notdirty);
2308 assert(n == PHYS_SECTION_NOTDIRTY);
2309 n = dummy_section(&d->map, as, &io_mem_rom);
2310 assert(n == PHYS_SECTION_ROM);
2311 n = dummy_section(&d->map, as, &io_mem_watch);
2312 assert(n == PHYS_SECTION_WATCH);
2313
2314 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
2315 d->as = as;
2316 as->next_dispatch = d;
2317 }
2318
2319 static void address_space_dispatch_free(AddressSpaceDispatch *d)
2320 {
2321 phys_sections_free(&d->map);
2322 g_free(d);
2323 }
2324
2325 static void mem_commit(MemoryListener *listener)
2326 {
2327 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
2328 AddressSpaceDispatch *cur = as->dispatch;
2329 AddressSpaceDispatch *next = as->next_dispatch;
2330
2331 phys_page_compact_all(next, next->map.nodes_nb);
2332
2333 atomic_rcu_set(&as->dispatch, next);
2334 if (cur) {
2335 call_rcu(cur, address_space_dispatch_free, rcu);
2336 }
2337 }
2338
2339 static void tcg_commit(MemoryListener *listener)
2340 {
2341 CPUAddressSpace *cpuas;
2342 AddressSpaceDispatch *d;
2343
2344 /* since each CPU stores ram addresses in its TLB cache, we must
2345 reset the modified entries */
2346 cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
2347 cpu_reloading_memory_map();
2348 /* The CPU and TLB are protected by the iothread lock.
2349 * We reload the dispatch pointer now because cpu_reloading_memory_map()
2350 * may have split the RCU critical section.
2351 */
2352 d = atomic_rcu_read(&cpuas->as->dispatch);
2353 cpuas->memory_dispatch = d;
2354 tlb_flush(cpuas->cpu, 1);
2355 }
2356
2357 void address_space_init_dispatch(AddressSpace *as)
2358 {
2359 as->dispatch = NULL;
2360 as->dispatch_listener = (MemoryListener) {
2361 .begin = mem_begin,
2362 .commit = mem_commit,
2363 .region_add = mem_add,
2364 .region_nop = mem_add,
2365 .priority = 0,
2366 };
2367 memory_listener_register(&as->dispatch_listener, as);
2368 }
2369
2370 void address_space_unregister(AddressSpace *as)
2371 {
2372 memory_listener_unregister(&as->dispatch_listener);
2373 }
2374
2375 void address_space_destroy_dispatch(AddressSpace *as)
2376 {
2377 AddressSpaceDispatch *d = as->dispatch;
2378
2379 atomic_rcu_set(&as->dispatch, NULL);
2380 if (d) {
2381 call_rcu(d, address_space_dispatch_free, rcu);
2382 }
2383 }
2384
2385 static void memory_map_init(void)
2386 {
2387 system_memory = g_malloc(sizeof(*system_memory));
2388
2389 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
2390 address_space_init(&address_space_memory, system_memory, "memory");
2391
2392 system_io = g_malloc(sizeof(*system_io));
2393 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2394 65536);
2395 address_space_init(&address_space_io, system_io, "I/O");
2396 }
2397
2398 MemoryRegion *get_system_memory(void)
2399 {
2400 return system_memory;
2401 }
2402
2403 MemoryRegion *get_system_io(void)
2404 {
2405 return system_io;
2406 }
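/*
 * Sketch of how board code typically plugs RAM into the system address
 * space built above (hypothetical region name and size; errors reported
 * via &error_fatal):
 *
 *     MemoryRegion *ram = g_new(MemoryRegion, 1);
 *
 *     memory_region_init_ram(ram, NULL, "board.ram", 128 * 1024 * 1024,
 *                            &error_fatal);
 *     memory_region_add_subregion(get_system_memory(), 0, ram);
 */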
2407
2408 #endif /* !defined(CONFIG_USER_ONLY) */
2409
2410 /* physical memory access (slow version, mainly for debug) */
2411 #if defined(CONFIG_USER_ONLY)
2412 int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
2413 uint8_t *buf, int len, int is_write)
2414 {
2415 int l, flags;
2416 target_ulong page;
2417 void * p;
2418
2419 while (len > 0) {
2420 page = addr & TARGET_PAGE_MASK;
2421 l = (page + TARGET_PAGE_SIZE) - addr;
2422 if (l > len)
2423 l = len;
2424 flags = page_get_flags(page);
2425 if (!(flags & PAGE_VALID))
2426 return -1;
2427 if (is_write) {
2428 if (!(flags & PAGE_WRITE))
2429 return -1;
2430 /* XXX: this code should not depend on lock_user */
2431 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2432 return -1;
2433 memcpy(p, buf, l);
2434 unlock_user(p, addr, l);
2435 } else {
2436 if (!(flags & PAGE_READ))
2437 return -1;
2438 /* XXX: this code should not depend on lock_user */
2439 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2440 return -1;
2441 memcpy(buf, p, l);
2442 unlock_user(p, addr, 0);
2443 }
2444 len -= l;
2445 buf += l;
2446 addr += l;
2447 }
2448 return 0;
2449 }
2450
2451 #else
2452
2453 static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
2454 hwaddr length)
2455 {
2456 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
2457 addr += memory_region_get_ram_addr(mr);
2458
2459 /* No early return if dirty_log_mask is or becomes 0, because
2460 * cpu_physical_memory_set_dirty_range will still call
2461 * xen_modified_memory.
2462 */
2463 if (dirty_log_mask) {
2464 dirty_log_mask =
2465 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
2466 }
2467 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
2468 tb_invalidate_phys_range(addr, addr + length);
2469 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2470 }
2471 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
2472 }
2473
2474 static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
2475 {
2476 unsigned access_size_max = mr->ops->valid.max_access_size;
2477
2478 /* Regions are assumed to support 1-4 byte accesses unless
2479 otherwise specified. */
2480 if (access_size_max == 0) {
2481 access_size_max = 4;
2482 }
2483
2484 /* Bound the maximum access by the alignment of the address. */
2485 if (!mr->ops->impl.unaligned) {
2486 unsigned align_size_max = addr & -addr;
2487 if (align_size_max != 0 && align_size_max < access_size_max) {
2488 access_size_max = align_size_max;
2489 }
2490 }
2491
2492 /* Don't attempt accesses larger than the maximum. */
2493 if (l > access_size_max) {
2494 l = access_size_max;
2495 }
2496 l = pow2floor(l);
2497
2498 return l;
2499 }
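/*
 * Worked example, assuming the defaults above (valid.max_access_size == 0,
 * no unaligned support): an 8-byte request at an address that is only
 * 2-byte aligned is first clamped to 4, then to the alignment, so
 * memory_access_size(mr, 8, 0x1002) returns 2.
 */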
2500
2501 static bool prepare_mmio_access(MemoryRegion *mr)
2502 {
2503 bool unlocked = !qemu_mutex_iothread_locked();
2504 bool release_lock = false;
2505
2506 if (unlocked && mr->global_locking) {
2507 qemu_mutex_lock_iothread();
2508 unlocked = false;
2509 release_lock = true;
2510 }
2511 if (mr->flush_coalesced_mmio) {
2512 if (unlocked) {
2513 qemu_mutex_lock_iothread();
2514 }
2515 qemu_flush_coalesced_mmio_buffer();
2516 if (unlocked) {
2517 qemu_mutex_unlock_iothread();
2518 }
2519 }
2520
2521 return release_lock;
2522 }
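/*
 * Callers follow the pattern used by the read/write loops further down:
 * take the big lock for MMIO when needed, remember whether we took it, and
 * drop it once the access has been dispatched.
 *
 *     bool release_lock = false;
 *     ...
 *     release_lock |= prepare_mmio_access(mr);
 *     ...
 *     if (release_lock) {
 *         qemu_mutex_unlock_iothread();
 *     }
 */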
2523
2524 /* Called within RCU critical section. */
2525 static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
2526 MemTxAttrs attrs,
2527 const uint8_t *buf,
2528 int len, hwaddr addr1,
2529 hwaddr l, MemoryRegion *mr)
2530 {
2531 uint8_t *ptr;
2532 uint64_t val;
2533 MemTxResult result = MEMTX_OK;
2534 bool release_lock = false;
2535
2536 for (;;) {
2537 if (!memory_access_is_direct(mr, true)) {
2538 release_lock |= prepare_mmio_access(mr);
2539 l = memory_access_size(mr, l, addr1);
2540 /* XXX: could force current_cpu to NULL to avoid
2541 potential bugs */
2542 switch (l) {
2543 case 8:
2544 /* 64 bit write access */
2545 val = ldq_p(buf);
2546 result |= memory_region_dispatch_write(mr, addr1, val, 8,
2547 attrs);
2548 break;
2549 case 4:
2550 /* 32 bit write access */
2551 val = ldl_p(buf);
2552 result |= memory_region_dispatch_write(mr, addr1, val, 4,
2553 attrs);
2554 break;
2555 case 2:
2556 /* 16 bit write access */
2557 val = lduw_p(buf);
2558 result |= memory_region_dispatch_write(mr, addr1, val, 2,
2559 attrs);
2560 break;
2561 case 1:
2562 /* 8 bit write access */
2563 val = ldub_p(buf);
2564 result |= memory_region_dispatch_write(mr, addr1, val, 1,
2565 attrs);
2566 break;
2567 default:
2568 abort();
2569 }
2570 } else {
2571 /* RAM case */
2572 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
2573 memcpy(ptr, buf, l);
2574 invalidate_and_set_dirty(mr, addr1, l);
2575 }
2576
2577 if (release_lock) {
2578 qemu_mutex_unlock_iothread();
2579 release_lock = false;
2580 }
2581
2582 len -= l;
2583 buf += l;
2584 addr += l;
2585
2586 if (!len) {
2587 break;
2588 }
2589
2590 l = len;
2591 mr = address_space_translate(as, addr, &addr1, &l, true);
2592 }
2593
2594 return result;
2595 }
2596
2597 MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2598 const uint8_t *buf, int len)
2599 {
2600 hwaddr l;
2601 hwaddr addr1;
2602 MemoryRegion *mr;
2603 MemTxResult result = MEMTX_OK;
2604
2605 if (len > 0) {
2606 rcu_read_lock();
2607 l = len;
2608 mr = address_space_translate(as, addr, &addr1, &l, true);
2609 result = address_space_write_continue(as, addr, attrs, buf, len,
2610 addr1, l, mr);
2611 rcu_read_unlock();
2612 }
2613
2614 return result;
2615 }
2616
2617 /* Called within RCU critical section. */
2618 MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
2619 MemTxAttrs attrs, uint8_t *buf,
2620 int len, hwaddr addr1, hwaddr l,
2621 MemoryRegion *mr)
2622 {
2623 uint8_t *ptr;
2624 uint64_t val;
2625 MemTxResult result = MEMTX_OK;
2626 bool release_lock = false;
2627
2628 for (;;) {
2629 if (!memory_access_is_direct(mr, false)) {
2630 /* I/O case */
2631 release_lock |= prepare_mmio_access(mr);
2632 l = memory_access_size(mr, l, addr1);
2633 switch (l) {
2634 case 8:
2635 /* 64 bit read access */
2636 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
2637 attrs);
2638 stq_p(buf, val);
2639 break;
2640 case 4:
2641 /* 32 bit read access */
2642 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
2643 attrs);
2644 stl_p(buf, val);
2645 break;
2646 case 2:
2647 /* 16 bit read access */
2648 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
2649 attrs);
2650 stw_p(buf, val);
2651 break;
2652 case 1:
2653 /* 8 bit read access */
2654 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
2655 attrs);
2656 stb_p(buf, val);
2657 break;
2658 default:
2659 abort();
2660 }
2661 } else {
2662 /* RAM case */
2663 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
2664 memcpy(buf, ptr, l);
2665 }
2666
2667 if (release_lock) {
2668 qemu_mutex_unlock_iothread();
2669 release_lock = false;
2670 }
2671
2672 len -= l;
2673 buf += l;
2674 addr += l;
2675
2676 if (!len) {
2677 break;
2678 }
2679
2680 l = len;
2681 mr = address_space_translate(as, addr, &addr1, &l, false);
2682 }
2683
2684 return result;
2685 }
2686
2687 MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
2688 MemTxAttrs attrs, uint8_t *buf, int len)
2689 {
2690 hwaddr l;
2691 hwaddr addr1;
2692 MemoryRegion *mr;
2693 MemTxResult result = MEMTX_OK;
2694
2695 if (len > 0) {
2696 rcu_read_lock();
2697 l = len;
2698 mr = address_space_translate(as, addr, &addr1, &l, false);
2699 result = address_space_read_continue(as, addr, attrs, buf, len,
2700 addr1, l, mr);
2701 rcu_read_unlock();
2702 }
2703
2704 return result;
2705 }
2706
2707 MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2708 uint8_t *buf, int len, bool is_write)
2709 {
2710 if (is_write) {
2711 return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
2712 } else {
2713 return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
2714 }
2715 }
2716
2717 void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
2718 int len, int is_write)
2719 {
2720 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
2721 buf, len, is_write);
2722 }
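/*
 * Example (sketch): reading four bytes of guest-physical memory at a
 * hypothetical address `gpa` into a local buffer.
 *
 *     uint8_t buf[4];
 *     cpu_physical_memory_rw(gpa, buf, sizeof(buf), 0);
 */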
2723
2724 enum write_rom_type {
2725 WRITE_DATA,
2726 FLUSH_CACHE,
2727 };
2728
2729 static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
2730 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
2731 {
2732 hwaddr l;
2733 uint8_t *ptr;
2734 hwaddr addr1;
2735 MemoryRegion *mr;
2736
2737 rcu_read_lock();
2738 while (len > 0) {
2739 l = len;
2740 mr = address_space_translate(as, addr, &addr1, &l, true);
2741
2742 if (!(memory_region_is_ram(mr) ||
2743 memory_region_is_romd(mr))) {
2744 l = memory_access_size(mr, l, addr1);
2745 } else {
2746 /* ROM/RAM case */
2747 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
2748 switch (type) {
2749 case WRITE_DATA:
2750 memcpy(ptr, buf, l);
2751 invalidate_and_set_dirty(mr, addr1, l);
2752 break;
2753 case FLUSH_CACHE:
2754 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2755 break;
2756 }
2757 }
2758 len -= l;
2759 buf += l;
2760 addr += l;
2761 }
2762 rcu_read_unlock();
2763 }
2764
2765 /* Used for ROM loading; can write to both RAM and ROM. */
2766 void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
2767 const uint8_t *buf, int len)
2768 {
2769 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
2770 }
2771
2772 void cpu_flush_icache_range(hwaddr start, int len)
2773 {
2774 /*
2775 * This function should do the same thing as an icache flush that was
2776 * triggered from within the guest. For TCG we are always cache coherent,
2777 * so there is no need to flush anything. For KVM / Xen we need to flush
2778 * the host's instruction cache at least.
2779 */
2780 if (tcg_enabled()) {
2781 return;
2782 }
2783
2784 cpu_physical_memory_write_rom_internal(&address_space_memory,
2785 start, NULL, len, FLUSH_CACHE);
2786 }
2787
2788 typedef struct {
2789 MemoryRegion *mr;
2790 void *buffer;
2791 hwaddr addr;
2792 hwaddr len;
2793 bool in_use;
2794 } BounceBuffer;
2795
2796 static BounceBuffer bounce;
2797
2798 typedef struct MapClient {
2799 QEMUBH *bh;
2800 QLIST_ENTRY(MapClient) link;
2801 } MapClient;
2802
2803 QemuMutex map_client_list_lock;
2804 static QLIST_HEAD(map_client_list, MapClient) map_client_list
2805 = QLIST_HEAD_INITIALIZER(map_client_list);
2806
2807 static void cpu_unregister_map_client_do(MapClient *client)
2808 {
2809 QLIST_REMOVE(client, link);
2810 g_free(client);
2811 }
2812
2813 static void cpu_notify_map_clients_locked(void)
2814 {
2815 MapClient *client;
2816
2817 while (!QLIST_EMPTY(&map_client_list)) {
2818 client = QLIST_FIRST(&map_client_list);
2819 qemu_bh_schedule(client->bh);
2820 cpu_unregister_map_client_do(client);
2821 }
2822 }
2823
2824 void cpu_register_map_client(QEMUBH *bh)
2825 {
2826 MapClient *client = g_malloc(sizeof(*client));
2827
2828 qemu_mutex_lock(&map_client_list_lock);
2829 client->bh = bh;
2830 QLIST_INSERT_HEAD(&map_client_list, client, link);
2831 if (!atomic_read(&bounce.in_use)) {
2832 cpu_notify_map_clients_locked();
2833 }
2834 qemu_mutex_unlock(&map_client_list_lock);
2835 }
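/*
 * Typical use (sketch, with a hypothetical `retry_dma_bh` bottom half and
 * `opaque` state): when address_space_map() returns NULL because the single
 * bounce buffer is busy, register a callback and retry the mapping from it.
 *
 *     QEMUBH *bh = qemu_bh_new(retry_dma_bh, opaque);
 *     cpu_register_map_client(bh);
 */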
2836
2837 void cpu_exec_init_all(void)
2838 {
2839 qemu_mutex_init(&ram_list.mutex);
2840 io_mem_init();
2841 memory_map_init();
2842 qemu_mutex_init(&map_client_list_lock);
2843 }
2844
2845 void cpu_unregister_map_client(QEMUBH *bh)
2846 {
2847 MapClient *client;
2848
2849 qemu_mutex_lock(&map_client_list_lock);
2850 QLIST_FOREACH(client, &map_client_list, link) {
2851 if (client->bh == bh) {
2852 cpu_unregister_map_client_do(client);
2853 break;
2854 }
2855 }
2856 qemu_mutex_unlock(&map_client_list_lock);
2857 }
2858
2859 static void cpu_notify_map_clients(void)
2860 {
2861 qemu_mutex_lock(&map_client_list_lock);
2862 cpu_notify_map_clients_locked();
2863 qemu_mutex_unlock(&map_client_list_lock);
2864 }
2865
2866 bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2867 {
2868 MemoryRegion *mr;
2869 hwaddr l, xlat;
2870
2871 rcu_read_lock();
2872 while (len > 0) {
2873 l = len;
2874 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2875 if (!memory_access_is_direct(mr, is_write)) {
2876 l = memory_access_size(mr, l, addr);
2877 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
     /* Don't leak the RCU read lock on the early-return path. */
     rcu_read_unlock();
2878 return false;
2879 }
2880 }
2881
2882 len -= l;
2883 addr += l;
2884 }
2885 rcu_read_unlock();
2886 return true;
2887 }
2888
2889 /* Map a physical memory region into a host virtual address.
2890 * May map a subset of the requested range, given by and returned in *plen.
2891 * May return NULL if resources needed to perform the mapping are exhausted.
2892 * Use only for reads OR writes - not for read-modify-write operations.
2893 * Use cpu_register_map_client() to know when retrying the map operation is
2894 * likely to succeed.
2895 */
2896 void *address_space_map(AddressSpace *as,
2897 hwaddr addr,
2898 hwaddr *plen,
2899 bool is_write)
2900 {
2901 hwaddr len = *plen;
2902 hwaddr done = 0;
2903 hwaddr l, xlat, base;
2904 MemoryRegion *mr, *this_mr;
2905 void *ptr;
2906
2907 if (len == 0) {
2908 return NULL;
2909 }
2910
2911 l = len;
2912 rcu_read_lock();
2913 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2914
2915 if (!memory_access_is_direct(mr, is_write)) {
2916 if (atomic_xchg(&bounce.in_use, true)) {
2917 rcu_read_unlock();
2918 return NULL;
2919 }
2920 /* Avoid unbounded allocations */
2921 l = MIN(l, TARGET_PAGE_SIZE);
2922 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
2923 bounce.addr = addr;
2924 bounce.len = l;
2925
2926 memory_region_ref(mr);
2927 bounce.mr = mr;
2928 if (!is_write) {
2929 address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
2930 bounce.buffer, l);
2931 }
2932
2933 rcu_read_unlock();
2934 *plen = l;
2935 return bounce.buffer;
2936 }
2937
2938 base = xlat;
2939
2940 for (;;) {
2941 len -= l;
2942 addr += l;
2943 done += l;
2944 if (len == 0) {
2945 break;
2946 }
2947
2948 l = len;
2949 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2950 if (this_mr != mr || xlat != base + done) {
2951 break;
2952 }
2953 }
2954
2955 memory_region_ref(mr);
2956 *plen = done;
2957 ptr = qemu_ram_ptr_length(mr->ram_block, base, plen);
2958 rcu_read_unlock();
2959
2960 return ptr;
2961 }
2962
2963 /* Unmaps a memory region previously mapped by address_space_map().
2964 * Will also mark the memory as dirty if is_write == 1. access_len gives
2965 * the amount of memory that was actually read or written by the caller.
2966 */
2967 void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2968 int is_write, hwaddr access_len)
2969 {
2970 if (buffer != bounce.buffer) {
2971 MemoryRegion *mr;
2972 ram_addr_t addr1;
2973
2974 mr = memory_region_from_host(buffer, &addr1);
2975 assert(mr != NULL);
2976 if (is_write) {
2977 invalidate_and_set_dirty(mr, addr1, access_len);
2978 }
2979 if (xen_enabled()) {
2980 xen_invalidate_map_cache_entry(buffer);
2981 }
2982 memory_region_unref(mr);
2983 return;
2984 }
2985 if (is_write) {
2986 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
2987 bounce.buffer, access_len);
2988 }
2989 qemu_vfree(bounce.buffer);
2990 bounce.buffer = NULL;
2991 memory_region_unref(bounce.mr);
2992 atomic_mb_set(&bounce.in_use, false);
2993 cpu_notify_map_clients();
2994 }
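/*
 * Sketch of the map/access/unmap pattern the two functions above implement
 * (hypothetical guest-physical address `gpa` and length `len`; error
 * handling elided):
 *
 *     hwaddr plen = len;
 *     void *p = address_space_map(&address_space_memory, gpa, &plen, true);
 *
 *     if (p) {
 *         memset(p, 0, plen);    // direct access to guest RAM
 *         address_space_unmap(&address_space_memory, p, plen, true, plen);
 *     } else {
 *         // resources exhausted: cpu_register_map_client() and retry later
 *     }
 */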
2995
2996 void *cpu_physical_memory_map(hwaddr addr,
2997 hwaddr *plen,
2998 int is_write)
2999 {
3000 return address_space_map(&address_space_memory, addr, plen, is_write);
3001 }
3002
3003 void cpu_physical_memory_unmap(void *buffer, hwaddr len,
3004 int is_write, hwaddr access_len)
3005 {
3006 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
3007 }
3008
3009 /* warning: addr must be aligned */
3010 static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
3011 MemTxAttrs attrs,
3012 MemTxResult *result,
3013 enum device_endian endian)
3014 {
3015 uint8_t *ptr;
3016 uint64_t val;
3017 MemoryRegion *mr;
3018 hwaddr l = 4;
3019 hwaddr addr1;
3020 MemTxResult r;
3021 bool release_lock = false;
3022
3023 rcu_read_lock();
3024 mr = address_space_translate(as, addr, &addr1, &l, false);
3025 if (l < 4 || !memory_access_is_direct(mr, false)) {
3026 release_lock |= prepare_mmio_access(mr);
3027
3028 /* I/O case */
3029 r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
3030 #if defined(TARGET_WORDS_BIGENDIAN)
3031 if (endian == DEVICE_LITTLE_ENDIAN) {
3032 val = bswap32(val);
3033 }
3034 #else
3035 if (endian == DEVICE_BIG_ENDIAN) {
3036 val = bswap32(val);
3037 }
3038 #endif
3039 } else {
3040 /* RAM case */
3041 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
3042 switch (endian) {
3043 case DEVICE_LITTLE_ENDIAN:
3044 val = ldl_le_p(ptr);
3045 break;
3046 case DEVICE_BIG_ENDIAN:
3047 val = ldl_be_p(ptr);
3048 break;
3049 default:
3050 val = ldl_p(ptr);
3051 break;
3052 }
3053 r = MEMTX_OK;
3054 }
3055 if (result) {
3056 *result = r;
3057 }
3058 if (release_lock) {
3059 qemu_mutex_unlock_iothread();
3060 }
3061 rcu_read_unlock();
3062 return val;
3063 }
3064
3065 uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
3066 MemTxAttrs attrs, MemTxResult *result)
3067 {
3068 return address_space_ldl_internal(as, addr, attrs, result,
3069 DEVICE_NATIVE_ENDIAN);
3070 }
3071
3072 uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
3073 MemTxAttrs attrs, MemTxResult *result)
3074 {
3075 return address_space_ldl_internal(as, addr, attrs, result,
3076 DEVICE_LITTLE_ENDIAN);
3077 }
3078
3079 uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
3080 MemTxAttrs attrs, MemTxResult *result)
3081 {
3082 return address_space_ldl_internal(as, addr, attrs, result,
3083 DEVICE_BIG_ENDIAN);
3084 }
3085
3086 uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
3087 {
3088 return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3089 }
3090
3091 uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
3092 {
3093 return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3094 }
3095
3096 uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
3097 {
3098 return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3099 }
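/*
 * Example (sketch): the ldl*_phys wrappers above ignore the transaction
 * result; a caller that cares about bus errors uses the address_space_*
 * variants directly (`as` and `gpa` are hypothetical here).
 *
 *     MemTxResult res;
 *     uint32_t v = address_space_ldl(as, gpa, MEMTXATTRS_UNSPECIFIED, &res);
 *     if (res != MEMTX_OK) {
 *         // handle the failed read
 *     }
 */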
3100
3101 /* warning: addr must be aligned */
3102 static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
3103 MemTxAttrs attrs,
3104 MemTxResult *result,
3105 enum device_endian endian)
3106 {
3107 uint8_t *ptr;
3108 uint64_t val;
3109 MemoryRegion *mr;
3110 hwaddr l = 8;
3111 hwaddr addr1;
3112 MemTxResult r;
3113 bool release_lock = false;
3114
3115 rcu_read_lock();
3116 mr = address_space_translate(as, addr, &addr1, &l,
3117 false);
3118 if (l < 8 || !memory_access_is_direct(mr, false)) {
3119 release_lock |= prepare_mmio_access(mr);
3120
3121 /* I/O case */
3122 r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
3123 #if defined(TARGET_WORDS_BIGENDIAN)
3124 if (endian == DEVICE_LITTLE_ENDIAN) {
3125 val = bswap64(val);
3126 }
3127 #else
3128 if (endian == DEVICE_BIG_ENDIAN) {
3129 val = bswap64(val);
3130 }
3131 #endif
3132 } else {
3133 /* RAM case */
3134 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
3135 switch (endian) {
3136 case DEVICE_LITTLE_ENDIAN:
3137 val = ldq_le_p(ptr);
3138 break;
3139 case DEVICE_BIG_ENDIAN:
3140 val = ldq_be_p(ptr);
3141 break;
3142 default:
3143 val = ldq_p(ptr);
3144 break;
3145 }
3146 r = MEMTX_OK;
3147 }
3148 if (result) {
3149 *result = r;
3150 }
3151 if (release_lock) {
3152 qemu_mutex_unlock_iothread();
3153 }
3154 rcu_read_unlock();
3155 return val;
3156 }
3157
3158 uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
3159 MemTxAttrs attrs, MemTxResult *result)
3160 {
3161 return address_space_ldq_internal(as, addr, attrs, result,
3162 DEVICE_NATIVE_ENDIAN);
3163 }
3164
3165 uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
3166 MemTxAttrs attrs, MemTxResult *result)
3167 {
3168 return address_space_ldq_internal(as, addr, attrs, result,
3169 DEVICE_LITTLE_ENDIAN);
3170 }
3171
3172 uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
3173 MemTxAttrs attrs, MemTxResult *result)
3174 {
3175 return address_space_ldq_internal(as, addr, attrs, result,
3176 DEVICE_BIG_ENDIAN);
3177 }
3178
3179 uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
3180 {
3181 return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3182 }
3183
3184 uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
3185 {
3186 return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3187 }
3188
3189 uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
3190 {
3191 return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3192 }
3193
3194 /* XXX: optimize */
3195 uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
3196 MemTxAttrs attrs, MemTxResult *result)
3197 {
3198 uint8_t val;
3199 MemTxResult r;
3200
3201 r = address_space_rw(as, addr, attrs, &val, 1, 0);
3202 if (result) {
3203 *result = r;
3204 }
3205 return val;
3206 }
3207
3208 uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
3209 {
3210 return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3211 }
3212
3213 /* warning: addr must be aligned */
3214 static inline uint32_t address_space_lduw_internal(AddressSpace *as,
3215 hwaddr addr,
3216 MemTxAttrs attrs,
3217 MemTxResult *result,
3218 enum device_endian endian)
3219 {
3220 uint8_t *ptr;
3221 uint64_t val;
3222 MemoryRegion *mr;
3223 hwaddr l = 2;
3224 hwaddr addr1;
3225 MemTxResult r;
3226 bool release_lock = false;
3227
3228 rcu_read_lock();
3229 mr = address_space_translate(as, addr, &addr1, &l,
3230 false);
3231 if (l < 2 || !memory_access_is_direct(mr, false)) {
3232 release_lock |= prepare_mmio_access(mr);
3233
3234 /* I/O case */
3235 r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
3236 #if defined(TARGET_WORDS_BIGENDIAN)
3237 if (endian == DEVICE_LITTLE_ENDIAN) {
3238 val = bswap16(val);
3239 }
3240 #else
3241 if (endian == DEVICE_BIG_ENDIAN) {
3242 val = bswap16(val);
3243 }
3244 #endif
3245 } else {
3246 /* RAM case */
3247 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
3248 switch (endian) {
3249 case DEVICE_LITTLE_ENDIAN:
3250 val = lduw_le_p(ptr);
3251 break;
3252 case DEVICE_BIG_ENDIAN:
3253 val = lduw_be_p(ptr);
3254 break;
3255 default:
3256 val = lduw_p(ptr);
3257 break;
3258 }
3259 r = MEMTX_OK;
3260 }
3261 if (result) {
3262 *result = r;
3263 }
3264 if (release_lock) {
3265 qemu_mutex_unlock_iothread();
3266 }
3267 rcu_read_unlock();
3268 return val;
3269 }
3270
3271 uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
3272 MemTxAttrs attrs, MemTxResult *result)
3273 {
3274 return address_space_lduw_internal(as, addr, attrs, result,
3275 DEVICE_NATIVE_ENDIAN);
3276 }
3277
3278 uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
3279 MemTxAttrs attrs, MemTxResult *result)
3280 {
3281 return address_space_lduw_internal(as, addr, attrs, result,
3282 DEVICE_LITTLE_ENDIAN);
3283 }
3284
3285 uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
3286 MemTxAttrs attrs, MemTxResult *result)
3287 {
3288 return address_space_lduw_internal(as, addr, attrs, result,
3289 DEVICE_BIG_ENDIAN);
3290 }
3291
3292 uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
3293 {
3294 return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3295 }
3296
3297 uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
3298 {
3299 return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3300 }
3301
3302 uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
3303 {
3304 return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3305 }
3306
3307 /* warning: addr must be aligned. The RAM page is not marked as dirty
3308 and the code inside is not invalidated. It is useful if the dirty
3309 bits are used to track modified PTEs */
3310 void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
3311 MemTxAttrs attrs, MemTxResult *result)
3312 {
3313 uint8_t *ptr;
3314 MemoryRegion *mr;
3315 hwaddr l = 4;
3316 hwaddr addr1;
3317 MemTxResult r;
3318 uint8_t dirty_log_mask;
3319 bool release_lock = false;
3320
3321 rcu_read_lock();
3322 mr = address_space_translate(as, addr, &addr1, &l,
3323 true);
3324 if (l < 4 || !memory_access_is_direct(mr, true)) {
3325 release_lock |= prepare_mmio_access(mr);
3326
3327 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
3328 } else {
3329 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
3330 stl_p(ptr, val);
3331
3332 dirty_log_mask = memory_region_get_dirty_log_mask(mr);
3333 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
3334 cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
3335 4, dirty_log_mask);
3336 r = MEMTX_OK;
3337 }
3338 if (result) {
3339 *result = r;
3340 }
3341 if (release_lock) {
3342 qemu_mutex_unlock_iothread();
3343 }
3344 rcu_read_unlock();
3345 }
3346
3347 void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
3348 {
3349 address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3350 }
3351
3352 /* warning: addr must be aligned */
3353 static inline void address_space_stl_internal(AddressSpace *as,
3354 hwaddr addr, uint32_t val,
3355 MemTxAttrs attrs,
3356 MemTxResult *result,
3357 enum device_endian endian)
3358 {
3359 uint8_t *ptr;
3360 MemoryRegion *mr;
3361 hwaddr l = 4;
3362 hwaddr addr1;
3363 MemTxResult r;
3364 bool release_lock = false;
3365
3366 rcu_read_lock();
3367 mr = address_space_translate(as, addr, &addr1, &l,
3368 true);
3369 if (l < 4 || !memory_access_is_direct(mr, true)) {
3370 release_lock |= prepare_mmio_access(mr);
3371
3372 #if defined(TARGET_WORDS_BIGENDIAN)
3373 if (endian == DEVICE_LITTLE_ENDIAN) {
3374 val = bswap32(val);
3375 }
3376 #else
3377 if (endian == DEVICE_BIG_ENDIAN) {
3378 val = bswap32(val);
3379 }
3380 #endif
3381 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
3382 } else {
3383 /* RAM case */
3384 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
3385 switch (endian) {
3386 case DEVICE_LITTLE_ENDIAN:
3387 stl_le_p(ptr, val);
3388 break;
3389 case DEVICE_BIG_ENDIAN:
3390 stl_be_p(ptr, val);
3391 break;
3392 default:
3393 stl_p(ptr, val);
3394 break;
3395 }
3396 invalidate_and_set_dirty(mr, addr1, 4);
3397 r = MEMTX_OK;
3398 }
3399 if (result) {
3400 *result = r;
3401 }
3402 if (release_lock) {
3403 qemu_mutex_unlock_iothread();
3404 }
3405 rcu_read_unlock();
3406 }
3407
3408 void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3409 MemTxAttrs attrs, MemTxResult *result)
3410 {
3411 address_space_stl_internal(as, addr, val, attrs, result,
3412 DEVICE_NATIVE_ENDIAN);
3413 }
3414
3415 void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3416 MemTxAttrs attrs, MemTxResult *result)
3417 {
3418 address_space_stl_internal(as, addr, val, attrs, result,
3419 DEVICE_LITTLE_ENDIAN);
3420 }
3421
3422 void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3423 MemTxAttrs attrs, MemTxResult *result)
3424 {
3425 address_space_stl_internal(as, addr, val, attrs, result,
3426 DEVICE_BIG_ENDIAN);
3427 }
3428
3429 void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3430 {
3431 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3432 }
3433
3434 void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3435 {
3436 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3437 }
3438
3439 void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3440 {
3441 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3442 }
3443
3444 /* XXX: optimize */
3445 void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3446 MemTxAttrs attrs, MemTxResult *result)
3447 {
3448 uint8_t v = val;
3449 MemTxResult r;
3450
3451 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3452 if (result) {
3453 *result = r;
3454 }
3455 }
3456
3457 void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3458 {
3459 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3460 }
3461
3462 /* warning: addr must be aligned */
3463 static inline void address_space_stw_internal(AddressSpace *as,
3464 hwaddr addr, uint32_t val,
3465 MemTxAttrs attrs,
3466 MemTxResult *result,
3467 enum device_endian endian)
3468 {
3469 uint8_t *ptr;
3470 MemoryRegion *mr;
3471 hwaddr l = 2;
3472 hwaddr addr1;
3473 MemTxResult r;
3474 bool release_lock = false;
3475
3476 rcu_read_lock();
3477 mr = address_space_translate(as, addr, &addr1, &l, true);
3478 if (l < 2 || !memory_access_is_direct(mr, true)) {
3479 release_lock |= prepare_mmio_access(mr);
3480
3481 #if defined(TARGET_WORDS_BIGENDIAN)
3482 if (endian == DEVICE_LITTLE_ENDIAN) {
3483 val = bswap16(val);
3484 }
3485 #else
3486 if (endian == DEVICE_BIG_ENDIAN) {
3487 val = bswap16(val);
3488 }
3489 #endif
3490 r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
3491 } else {
3492 /* RAM case */
3493 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
3494 switch (endian) {
3495 case DEVICE_LITTLE_ENDIAN:
3496 stw_le_p(ptr, val);
3497 break;
3498 case DEVICE_BIG_ENDIAN:
3499 stw_be_p(ptr, val);
3500 break;
3501 default:
3502 stw_p(ptr, val);
3503 break;
3504 }
3505 invalidate_and_set_dirty(mr, addr1, 2);
3506 r = MEMTX_OK;
3507 }
3508 if (result) {
3509 *result = r;
3510 }
3511 if (release_lock) {
3512 qemu_mutex_unlock_iothread();
3513 }
3514 rcu_read_unlock();
3515 }
3516
3517 void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
3518 MemTxAttrs attrs, MemTxResult *result)
3519 {
3520 address_space_stw_internal(as, addr, val, attrs, result,
3521 DEVICE_NATIVE_ENDIAN);
3522 }
3523
3524 void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
3525 MemTxAttrs attrs, MemTxResult *result)
3526 {
3527 address_space_stw_internal(as, addr, val, attrs, result,
3528 DEVICE_LITTLE_ENDIAN);
3529 }
3530
3531 void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
3532 MemTxAttrs attrs, MemTxResult *result)
3533 {
3534 address_space_stw_internal(as, addr, val, attrs, result,
3535 DEVICE_BIG_ENDIAN);
3536 }
3537
3538 void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3539 {
3540 address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3541 }
3542
3543 void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3544 {
3545 address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3546 }
3547
3548 void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3549 {
3550 address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3551 }
3552
3553 /* XXX: optimize */
3554 void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
3555 MemTxAttrs attrs, MemTxResult *result)
3556 {
3557 MemTxResult r;
3558 val = tswap64(val);
3559 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3560 if (result) {
3561 *result = r;
3562 }<