/*
 *  Software MMU support
 *
 * Generate helpers used by TCG for qemu_ld/st ops and code load
 * functions.
 *
 * Included from target op helpers and exec.c.
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#if DATA_SIZE == 8
#define SUFFIX q
#define LSUFFIX q
#define SDATA_TYPE  int64_t
#define DATA_TYPE  uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define LSUFFIX l
#define SDATA_TYPE  int32_t
#define DATA_TYPE  uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define LSUFFIX uw
#define SDATA_TYPE  int16_t
#define DATA_TYPE  uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define LSUFFIX ub
#define SDATA_TYPE  int8_t
#define DATA_TYPE  uint8_t
#else
#error unsupported data size
#endif
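
/* This template is included once per access size: the includer (cputlb.c
 * in QEMU) defines MMUSUFFIX and DATA_SIZE before each inclusion, along
 * the lines of this illustrative sketch:
 *
 *   #define MMUSUFFIX _mmu
 *   #define DATA_SIZE 4
 *   #include "softmmu_template.h"
 *
 * Everything defined here is #undef'd at the bottom of the file, so the
 * next inclusion starts from a clean slate.
 */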


/* For the benefit of TCG generated code, we want to avoid the complication
   of ABI-specific return type promotion and always return a value extended
   to the register size of the host.  This is tcg_target_ulong, except in
   the case of a 32-bit host and 64-bit data, and for that we always have
   uint64_t.  Don't bother with this widened value for SOFTMMU_CODE_ACCESS.  */
#if defined(SOFTMMU_CODE_ACCESS) || DATA_SIZE == 8
# define WORD_TYPE  DATA_TYPE
# define USUFFIX    SUFFIX
#else
# define WORD_TYPE  tcg_target_ulong
# define USUFFIX    glue(u, SUFFIX)
# define SSUFFIX    glue(s, SUFFIX)
#endif

#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE MMU_INST_FETCH
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE MMU_DATA_LOAD
#define ADDR_READ addr_read
#endif

#if DATA_SIZE == 8
# define BSWAP(X)  bswap64(X)
#elif DATA_SIZE == 4
# define BSWAP(X)  bswap32(X)
#elif DATA_SIZE == 2
# define BSWAP(X)  bswap16(X)
#else
# define BSWAP(X)  (X)
#endif

#if DATA_SIZE == 1
# define helper_le_ld_name  glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
# define helper_be_ld_name  helper_le_ld_name
# define helper_le_lds_name glue(glue(helper_ret_ld, SSUFFIX), MMUSUFFIX)
# define helper_be_lds_name helper_le_lds_name
# define helper_le_st_name  glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)
# define helper_be_st_name  helper_le_st_name
#else
# define helper_le_ld_name  glue(glue(helper_le_ld, USUFFIX), MMUSUFFIX)
# define helper_be_ld_name  glue(glue(helper_be_ld, USUFFIX), MMUSUFFIX)
# define helper_le_lds_name glue(glue(helper_le_ld, SSUFFIX), MMUSUFFIX)
# define helper_be_lds_name glue(glue(helper_be_ld, SSUFFIX), MMUSUFFIX)
# define helper_le_st_name  glue(glue(helper_le_st, SUFFIX), MMUSUFFIX)
# define helper_be_st_name  glue(glue(helper_be_st, SUFFIX), MMUSUFFIX)
#endif
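
/* Example expansion, assuming the includer defined MMUSUFFIX as _mmu:
 * with DATA_SIZE == 4, helper_le_ld_name expands to helper_le_ldul_mmu
 * and helper_le_st_name to helper_le_stl_mmu.  Byte accesses have no
 * endianness, so for DATA_SIZE == 1 both flavours alias a single
 * helper_ret_* symbol.
 */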

#ifndef SOFTMMU_CODE_ACCESS
static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
                                              size_t mmu_idx, size_t index,
                                              target_ulong addr,
                                              uintptr_t retaddr)
{
    CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
    return io_readx(env, iotlbentry, addr, retaddr, DATA_SIZE);
}
#endif

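/* Slow-path load helper, called from TCG-generated code when the inline
 * TLB check misses.  It redoes the TLB lookup (probing the victim TLB
 * and refilling via tlb_fill() if necessary), then takes one of three
 * paths: an IO access, a load spanning two pages, or a direct RAM load.
 */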
WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;
    DATA_TYPE res;

    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                     mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        res = glue(io_read, SUFFIX)(env, mmu_idx, index, addr, retaddr);
        res = TGT_LE(res);
        return res;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2;
        unsigned shift;
    do_unaligned_access:
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        res1 = helper_le_ld_name(env, addr1, oi, retaddr);
        res2 = helper_le_ld_name(env, addr2, oi, retaddr);
        shift = (addr & (DATA_SIZE - 1)) * 8;

        /* Little-endian combine.  */
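        /* e.g. for DATA_SIZE == 4 and addr % 4 == 1, shift == 8:
           res1 >> 8 keeps the bytes at addr..addr+2 and res2 << 24
           supplies the byte at addr+3.  */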
        res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
        return res;
    }

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
    res = glue(glue(ld, LSUFFIX), _p)((uint8_t *)haddr);
#else
    res = glue(glue(ld, LSUFFIX), _le_p)((uint8_t *)haddr);
#endif
    return res;
}

#if DATA_SIZE > 1
WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;
    DATA_TYPE res;

    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                     mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        res = glue(io_read, SUFFIX)(env, mmu_idx, index, addr, retaddr);
        res = TGT_BE(res);
        return res;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2;
        unsigned shift;
    do_unaligned_access:
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        res1 = helper_be_ld_name(env, addr1, oi, retaddr);
        res2 = helper_be_ld_name(env, addr2, oi, retaddr);
        shift = (addr & (DATA_SIZE - 1)) * 8;

        /* Big-endian combine.  */
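        /* Mirror image of the little-endian case: for DATA_SIZE == 4 and
           addr % 4 == 1, res1 << 8 keeps the bytes at addr..addr+2 and
           res2 >> 24 supplies the byte at addr+3.  */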
        res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
        return res;
    }

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    res = glue(glue(ld, LSUFFIX), _be_p)((uint8_t *)haddr);
    return res;
}
#endif /* DATA_SIZE > 1 */

#ifndef SOFTMMU_CODE_ACCESS

/* Provide signed versions of the load routines as well.  We can of course
   avoid this for 64-bit data, or for 32-bit data on a 32-bit host.  */
#if DATA_SIZE * 8 < TCG_TARGET_REG_BITS
WORD_TYPE helper_le_lds_name(CPUArchState *env, target_ulong addr,
                             TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (SDATA_TYPE)helper_le_ld_name(env, addr, oi, retaddr);
}

# if DATA_SIZE > 1
WORD_TYPE helper_be_lds_name(CPUArchState *env, target_ulong addr,
                             TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (SDATA_TYPE)helper_be_ld_name(env, addr, oi, retaddr);
}
# endif
#endif

static inline void glue(io_write, SUFFIX)(CPUArchState *env,
                                          size_t mmu_idx, size_t index,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          uintptr_t retaddr)
{
    CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
    io_writex(env, iotlbentry, val, addr, retaddr, DATA_SIZE);
}

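/* Slow-path store helper, the mirror of the loads above: redo the TLB
 * lookup against addr_write, then take the IO, page-spanning, or direct
 * RAM path.
 */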
void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;

    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }

        /* ??? Note that the io helpers always write data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        val = TGT_LE(val);
        glue(io_write, SUFFIX)(env, mmu_idx, index, val, addr, retaddr);
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        int i, index2;
        target_ulong page2, tlb_addr2;
    do_unaligned_access:
        /* Ensure the second page is in the TLB.  Note that the first page
           is already guaranteed to be filled, and that the second page
           cannot evict the first.  */
        page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
        index2 = (page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write;
        if (page2 != (tlb_addr2 & (TARGET_PAGE_MASK | TLB_INVALID_MASK))
            && !VICTIM_TLB_HIT(addr_write, page2)) {
            tlb_fill(ENV_GET_CPU(env), page2, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }

        /* XXX: not efficient, but simple.  */
        /* This loop must go in the forward direction to avoid issues
           with self-modifying code in 64-bit Windows.  */
        for (i = 0; i < DATA_SIZE; ++i) {
            /* Little-endian extract.  */
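            /* i == 0 stores the least significant byte of val at addr,
               matching the layout an aligned little-endian store of the
               whole value would produce.  */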
            uint8_t val8 = val >> (i * 8);
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            oi, retaddr);
        }
        return;
    }

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
    glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
#else
    glue(glue(st, SUFFIX), _le_p)((uint8_t *)haddr, val);
#endif
}

#if DATA_SIZE > 1
void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;

    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }

        /* ??? Note that the io helpers always write data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        val = TGT_BE(val);
        glue(io_write, SUFFIX)(env, mmu_idx, index, val, addr, retaddr);
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        int i, index2;
        target_ulong page2, tlb_addr2;
    do_unaligned_access:
        /* Ensure the second page is in the TLB.  Note that the first page
           is already guaranteed to be filled, and that the second page
           cannot evict the first.  */
        page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
        index2 = (page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write;
        if (page2 != (tlb_addr2 & (TARGET_PAGE_MASK | TLB_INVALID_MASK))
            && !VICTIM_TLB_HIT(addr_write, page2)) {
            tlb_fill(ENV_GET_CPU(env), page2, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }

        /* XXX: not efficient, but simple.  */
        /* This loop must go in the forward direction to avoid issues
           with self-modifying code.  */
        for (i = 0; i < DATA_SIZE; ++i) {
            /* Big-endian extract.  */
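            /* Most significant byte first: for DATA_SIZE == 4, i == 0
               extracts bits 31..24 of val and stores them at addr.  */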
            uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8));
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            oi, retaddr);
        }
        return;
    }

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
}
#endif /* DATA_SIZE > 1 */
#endif /* !defined(SOFTMMU_CODE_ACCESS) */

#undef READ_ACCESS_TYPE
#undef DATA_TYPE
#undef SUFFIX
#undef LSUFFIX
#undef DATA_SIZE
#undef ADDR_READ
#undef WORD_TYPE
#undef SDATA_TYPE
#undef USUFFIX
#undef SSUFFIX
#undef BSWAP
#undef helper_le_ld_name
#undef helper_be_ld_name
#undef helper_le_lds_name
#undef helper_be_lds_name
#undef helper_le_st_name
#undef helper_be_st_name