/* qemu: include/exec/cpu_ldst.h (softmmu load/store access macros) */
1 /*
2 * Software MMU support
3 *
4 * This library is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU Lesser General Public
6 * License as published by the Free Software Foundation; either
7 * version 2 of the License, or (at your option) any later version.
8 *
9 * This library is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * Lesser General Public License for more details.
13 *
14 * You should have received a copy of the GNU Lesser General Public
15 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
16 *
17 */
18
19 /*
20 * Generate inline load/store functions for all MMU modes (typically
21 * at least _user and _kernel) as well as _data versions, for all data
22 * sizes.
23 *
24 * Used by target op helpers.
25 *
26 * MMU mode suffixes are defined in target cpu.h.
27 */
28 #ifndef CPU_LDST_H
29 #define CPU_LDST_H
30
#if defined(CONFIG_USER_ONLY)
/* All direct uses of g2h and h2g need to go away for usermode softmmu.  */
/* g2h: translate a guest virtual address to a host pointer by adding the
 * fixed GUEST_BASE displacement of the linear user-mode mapping.  */
#define g2h(x) ((void *)((unsigned long)(target_ulong)(x) + GUEST_BASE))

#if HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS
/* Host addresses can never fall outside the guest address space,
 * so the validity check degenerates to a constant.  */
#define h2g_valid(x) 1
#else
/* h2g_valid: true iff host address x maps back into the guest virtual
 * address space (and below RESERVED_VA when a reserved region is set).
 * Uses a GCC statement expression.  */
#define h2g_valid(x) ({ \
    unsigned long __guest = (unsigned long)(x) - GUEST_BASE; \
    (__guest < (1ul << TARGET_VIRT_ADDR_SPACE_BITS)) && \
    (!RESERVED_VA || (__guest < RESERVED_VA)); \
})
#endif

/* h2g_nocheck: host-to-guest translation without any range checking;
 * result is truncated to abi_ulong.  */
#define h2g_nocheck(x) ({ \
    unsigned long __ret = (unsigned long)(x) - GUEST_BASE; \
    (abi_ulong)__ret; \
})

/* h2g: checked host-to-guest translation; aborts if out of range.  */
#define h2g(x) ({ \
    /* Check if given address fits target address space */ \
    assert(h2g_valid(x)); \
    h2g_nocheck(x); \
})

/* In user mode, load/store addresses go straight through g2h.  */
#define saddr(x) g2h(x)
#define laddr(x) g2h(x)

#else /* !CONFIG_USER_ONLY */
/* NOTE: we use double casts if pointers and target_ulong have
   different sizes */
#define saddr(x) (uint8_t *)(intptr_t)(x)
#define laddr(x) (uint8_t *)(intptr_t)(x)
#endif
65
/* "_raw" accessors: direct, unchecked access through a (translated) host
 * address.  They delegate to the byte-order-aware ld*_p/st*_p primitives,
 * with the address filtered through laddr()/saddr() above.  */
#define ldub_raw(p) ldub_p(laddr((p)))
#define ldsb_raw(p) ldsb_p(laddr((p)))
#define lduw_raw(p) lduw_p(laddr((p)))
#define ldsw_raw(p) ldsw_p(laddr((p)))
#define ldl_raw(p) ldl_p(laddr((p)))
#define ldq_raw(p) ldq_p(laddr((p)))
#define ldfl_raw(p) ldfl_p(laddr((p)))
#define ldfq_raw(p) ldfq_p(laddr((p)))
#define stb_raw(p, v) stb_p(saddr((p)), v)
#define stw_raw(p, v) stw_p(saddr((p)), v)
#define stl_raw(p, v) stl_p(saddr((p)), v)
#define stq_raw(p, v) stq_p(saddr((p)), v)
#define stfl_raw(p, v) stfl_p(saddr((p)), v)
#define stfq_raw(p, v) stfq_p(saddr((p)), v)
80
81
#if defined(CONFIG_USER_ONLY)

/* if user mode, no other memory access functions */
#define ldub(p) ldub_raw(p)
#define ldsb(p) ldsb_raw(p)
#define lduw(p) lduw_raw(p)
#define ldsw(p) ldsw_raw(p)
#define ldl(p) ldl_raw(p)
#define ldq(p) ldq_raw(p)
#define ldfl(p) ldfl_raw(p)
#define ldfq(p) ldfq_raw(p)
#define stb(p, v) stb_raw(p, v)
#define stw(p, v) stw_raw(p, v)
#define stl(p, v) stl_raw(p, v)
#define stq(p, v) stq_raw(p, v)
#define stfl(p, v) stfl_raw(p, v)
#define stfq(p, v) stfq_raw(p, v)

/* Code-fetch accessors: the env1 argument is unused in user mode,
 * kept only so callers are source-compatible with the softmmu variants.  */
#define cpu_ldub_code(env1, p) ldub_raw(p)
#define cpu_ldsb_code(env1, p) ldsb_raw(p)
#define cpu_lduw_code(env1, p) lduw_raw(p)
#define cpu_ldsw_code(env1, p) ldsw_raw(p)
#define cpu_ldl_code(env1, p) ldl_raw(p)
#define cpu_ldq_code(env1, p) ldq_raw(p)

/* Data accessors: env is likewise ignored in user mode.  */
#define cpu_ldub_data(env, addr) ldub_raw(addr)
#define cpu_lduw_data(env, addr) lduw_raw(addr)
#define cpu_ldsw_data(env, addr) ldsw_raw(addr)
#define cpu_ldl_data(env, addr) ldl_raw(addr)
#define cpu_ldq_data(env, addr) ldq_raw(addr)

#define cpu_stb_data(env, addr, data) stb_raw(addr, data)
#define cpu_stw_data(env, addr, data) stw_raw(addr, data)
#define cpu_stl_data(env, addr, data) stl_raw(addr, data)
#define cpu_stq_data(env, addr, data) stq_raw(addr, data)

/* Kernel-mode accessors collapse to raw access in user mode (there is
 * only one MMU mode).  */
#define cpu_ldub_kernel(env, addr) ldub_raw(addr)
#define cpu_lduw_kernel(env, addr) lduw_raw(addr)
#define cpu_ldsw_kernel(env, addr) ldsw_raw(addr)
#define cpu_ldl_kernel(env, addr) ldl_raw(addr)
#define cpu_ldq_kernel(env, addr) ldq_raw(addr)

#define cpu_stb_kernel(env, addr, data) stb_raw(addr, data)
#define cpu_stw_kernel(env, addr, data) stw_raw(addr, data)
#define cpu_stl_kernel(env, addr, data) stl_raw(addr, data)
#define cpu_stq_kernel(env, addr, data) stq_raw(addr, data)
/* Legacy env-less kernel-mode accessors (user mode): plain raw access.  */
#define ldub_kernel(p) ldub_raw(p)
#define ldsb_kernel(p) ldsb_raw(p)
#define lduw_kernel(p) lduw_raw(p)
#define ldsw_kernel(p) ldsw_raw(p)
#define ldl_kernel(p) ldl_raw(p)
#define ldq_kernel(p) ldq_raw(p)
#define ldfl_kernel(p) ldfl_raw(p)
#define ldfq_kernel(p) ldfq_raw(p)
#define stb_kernel(p, v) stb_raw(p, v)
#define stw_kernel(p, v) stw_raw(p, v)
#define stl_kernel(p, v) stl_raw(p, v)
#define stq_kernel(p, v) stq_raw(p, v)
#define stfl_kernel(p, v) stfl_raw(p, v)
/* Fix: parameter was misspelled "vt" while the expansion used "v",
 * making any use of stfq_kernel() fail to compile.  */
#define stfq_kernel(p, v) stfq_raw(p, v)
143
/* NOTE(review): these are byte-identical redefinitions of the
 * cpu_*_data macros defined earlier in this same #if branch.
 * Identical redefinition is legal C (C11 6.10.3p2), so this is
 * harmless, but the block is redundant — candidate for removal.  */
#define cpu_ldub_data(env, addr) ldub_raw(addr)
#define cpu_lduw_data(env, addr) lduw_raw(addr)
#define cpu_ldl_data(env, addr) ldl_raw(addr)

#define cpu_stb_data(env, addr, data) stb_raw(addr, data)
#define cpu_stw_data(env, addr, data) stw_raw(addr, data)
#define cpu_stl_data(env, addr, data) stl_raw(addr, data)
151
#else

/* XXX: find something cleaner.
 * Furthermore, this is false for 64 bits targets
 */
/* "ldul" (load unsigned long) aliases map onto the 32-bit ldl
 * accessors for each MMU-mode suffix — hence the XXX above about
 * 64-bit targets.  */
#define ldul_user ldl_user
#define ldul_kernel ldl_kernel
#define ldul_hypv ldl_hypv
#define ldul_executive ldl_executive
#define ldul_supervisor ldl_supervisor

/* The memory helpers for tcg-generated code need tcg_target_long etc. */
#include "tcg.h"
165
/* Out-of-line softmmu helpers, called from TCG-generated code on the
 * slow path.  Each takes the CPU state, the guest virtual address and
 * the MMU index to use for the lookup.  The _cmmu variants are the
 * code-access (instruction fetch) counterparts of the _mmu data
 * accessors.  */
uint8_t helper_ldb_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
uint16_t helper_ldw_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
uint32_t helper_ldl_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
uint64_t helper_ldq_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);

void helper_stb_mmu(CPUArchState *env, target_ulong addr,
                    uint8_t val, int mmu_idx);
void helper_stw_mmu(CPUArchState *env, target_ulong addr,
                    uint16_t val, int mmu_idx);
void helper_stl_mmu(CPUArchState *env, target_ulong addr,
                    uint32_t val, int mmu_idx);
void helper_stq_mmu(CPUArchState *env, target_ulong addr,
                    uint64_t val, int mmu_idx);

uint8_t helper_ldb_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
uint16_t helper_ldw_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
uint32_t helper_ldl_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
/* Instantiate the inline accessors for each fixed MMU mode by repeatedly
 * including the template with CPU_MMU_INDEX/MEMSUFFIX/DATA_SIZE set.
 * The per-target cpu.h supplies the MMU_MODEn_SUFFIX names; modes 2-5
 * are only generated when NB_MMU_MODES is large enough.  */
#define CPU_MMU_INDEX 0
#define MEMSUFFIX MMU_MODE0_SUFFIX
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX

#define CPU_MMU_INDEX 1
#define MEMSUFFIX MMU_MODE1_SUFFIX
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX

#if (NB_MMU_MODES >= 3)

#define CPU_MMU_INDEX 2
#define MEMSUFFIX MMU_MODE2_SUFFIX
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX
#endif /* (NB_MMU_MODES >= 3) */

#if (NB_MMU_MODES >= 4)

#define CPU_MMU_INDEX 3
#define MEMSUFFIX MMU_MODE3_SUFFIX
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX
#endif /* (NB_MMU_MODES >= 4) */

#if (NB_MMU_MODES >= 5)

#define CPU_MMU_INDEX 4
#define MEMSUFFIX MMU_MODE4_SUFFIX
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX
#endif /* (NB_MMU_MODES >= 5) */

#if (NB_MMU_MODES >= 6)

#define CPU_MMU_INDEX 5
#define MEMSUFFIX MMU_MODE5_SUFFIX
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX
#endif /* (NB_MMU_MODES >= 6) */

#if (NB_MMU_MODES > 6)
#error "NB_MMU_MODES > 6 is not supported for now"
#endif /* (NB_MMU_MODES > 6) */
296
/* These accesses are slower (the MMU index is computed at runtime via
 * cpu_mmu_index(env)); they must be as rare as possible.  */
#define CPU_MMU_INDEX (cpu_mmu_index(env))
#define MEMSUFFIX _data
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX

/* Plain ldX/stX are aliases for the _data accessors generated above.  */
#define ldub(p) ldub_data(p)
#define ldsb(p) ldsb_data(p)
#define lduw(p) lduw_data(p)
#define ldsw(p) ldsw_data(p)
#define ldl(p) ldl_data(p)
#define ldq(p) ldq_data(p)

#define stb(p, v) stb_data(p, v)
#define stw(p, v) stw_data(p, v)
#define stl(p, v) stl_data(p, v)
#define stq(p, v) stq_data(p, v)

/* Instruction-fetch (_code) variants: SOFTMMU_CODE_ACCESS makes the
 * template generate code-access lookups instead of data accesses.  */
#define CPU_MMU_INDEX (cpu_mmu_index(env))
#define MEMSUFFIX _code
#define SOFTMMU_CODE_ACCESS

#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"

#undef CPU_MMU_INDEX
#undef MEMSUFFIX
#undef SOFTMMU_CODE_ACCESS
345
346 /**
347 * tlb_vaddr_to_host:
348 * @env: CPUArchState
349 * @addr: guest virtual address to look up
350 * @access_type: 0 for read, 1 for write, 2 for execute
351 * @mmu_idx: MMU index to use for lookup
352 *
353 * Look up the specified guest virtual index in the TCG softmmu TLB.
354 * If the TLB contains a host virtual address suitable for direct RAM
355 * access, then return it. Otherwise (TLB miss, TLB entry is for an
356 * I/O access, etc) return NULL.
357 *
358 * This is the equivalent of the initial fast-path code used by
359 * TCG backends for guest load and store accesses.
360 */
361 static inline void *tlb_vaddr_to_host(CPUArchState *env, target_ulong addr,
362 int access_type, int mmu_idx)
363 {
364 int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
365 CPUTLBEntry *tlbentry = &env->tlb_table[mmu_idx][index];
366 target_ulong tlb_addr;
367 uintptr_t haddr;
368
369 switch (access_type) {
370 case 0:
371 tlb_addr = tlbentry->addr_read;
372 break;
373 case 1:
374 tlb_addr = tlbentry->addr_write;
375 break;
376 case 2:
377 tlb_addr = tlbentry->addr_code;
378 break;
379 default:
380 g_assert_not_reached();
381 }
382
383 if ((addr & TARGET_PAGE_MASK)
384 != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
385 /* TLB entry is for a different page */
386 return NULL;
387 }
388
389 if (tlb_addr & ~TARGET_PAGE_MASK) {
390 /* IO access */
391 return NULL;
392 }
393
394 haddr = addr + env->tlb_table[mmu_idx][index].addend;
395 return (void *)haddr;
396 }
397
398 #endif /* defined(CONFIG_USER_ONLY) */
399
400 #endif /* CPU_LDST_H */