/*
 * Software MMU support
 *
 * Generate helpers used by TCG for qemu_ld/st ops and code load
 * functions.
 *
 * Included from target op helpers and exec.c.
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/timer.h"
#include "exec/address-spaces.h"
#include "exec/memory.h"

#define DATA_SIZE (1 << SHIFT)

#if DATA_SIZE == 8
#define SUFFIX q
#define LSUFFIX q
#define SDATA_TYPE int64_t
#define DATA_TYPE uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define LSUFFIX l
#define SDATA_TYPE int32_t
#define DATA_TYPE uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define LSUFFIX uw
#define SDATA_TYPE int16_t
#define DATA_TYPE uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define LSUFFIX ub
#define SDATA_TYPE int8_t
#define DATA_TYPE uint8_t
#else
#error unsupported data size
#endif

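/*
 * Usage sketch (illustrative, not part of the original header): this file is
 * a template that its includers instantiate once per access size, with SHIFT
 * selecting the width, e.g.
 *
 *     #define SHIFT 2
 *     #include "softmmu_template.h"   (generates the 32-bit, "l", helpers)
 */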
/* For the benefit of TCG generated code, we want to avoid the complication
   of ABI-specific return type promotion and always return a value extended
   to the register size of the host. This is tcg_target_long, except in the
   case of a 32-bit host and 64-bit data, and for that we always have
   uint64_t. Don't bother with this widened value for SOFTMMU_CODE_ACCESS. */
#if defined(SOFTMMU_CODE_ACCESS) || DATA_SIZE == 8
# define WORD_TYPE DATA_TYPE
# define USUFFIX SUFFIX
#else
# define WORD_TYPE tcg_target_ulong
# define USUFFIX glue(u, SUFFIX)
# define SSUFFIX glue(s, SUFFIX)
#endif

#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE MMU_INST_FETCH
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE MMU_DATA_LOAD
#define ADDR_READ addr_read
#endif

#if DATA_SIZE == 8
# define BSWAP(X) bswap64(X)
#elif DATA_SIZE == 4
# define BSWAP(X) bswap32(X)
#elif DATA_SIZE == 2
# define BSWAP(X) bswap16(X)
#else
# define BSWAP(X) (X)
#endif

#ifdef TARGET_WORDS_BIGENDIAN
# define TGT_BE(X) (X)
# define TGT_LE(X) BSWAP(X)
#else
# define TGT_BE(X) BSWAP(X)
# define TGT_LE(X) (X)
#endif

#if DATA_SIZE == 1
# define helper_le_ld_name  glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
# define helper_be_ld_name  helper_le_ld_name
# define helper_le_lds_name glue(glue(helper_ret_ld, SSUFFIX), MMUSUFFIX)
# define helper_be_lds_name helper_le_lds_name
# define helper_le_st_name  glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)
# define helper_be_st_name  helper_le_st_name
#else
# define helper_le_ld_name  glue(glue(helper_le_ld, USUFFIX), MMUSUFFIX)
# define helper_be_ld_name  glue(glue(helper_be_ld, USUFFIX), MMUSUFFIX)
# define helper_le_lds_name glue(glue(helper_le_ld, SSUFFIX), MMUSUFFIX)
# define helper_be_lds_name glue(glue(helper_be_ld, SSUFFIX), MMUSUFFIX)
# define helper_le_st_name  glue(glue(helper_le_st, SUFFIX), MMUSUFFIX)
# define helper_be_st_name  glue(glue(helper_be_st, SUFFIX), MMUSUFFIX)
#endif

#ifdef TARGET_WORDS_BIGENDIAN
# define helper_te_ld_name  helper_be_ld_name
# define helper_te_st_name  helper_be_st_name
#else
# define helper_te_ld_name  helper_le_ld_name
# define helper_te_st_name  helper_le_st_name
#endif

/* macro to check the victim tlb */
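/* 'ty' names the CPUTLBEntry comparator field to check against: addr_read,
 * addr_write or addr_code, depending on the kind of access being handled. */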
#define VICTIM_TLB_HIT(ty) \
({ \
    /* we are about to do a page table walk. our last hope is the \
     * victim tlb. try to refill from the victim tlb before walking the \
     * page table. */ \
    int vidx; \
    CPUIOTLBEntry tmpiotlb; \
    CPUTLBEntry tmptlb; \
    for (vidx = CPU_VTLB_SIZE-1; vidx >= 0; --vidx) { \
        if (env->tlb_v_table[mmu_idx][vidx].ty == (addr & TARGET_PAGE_MASK)) { \
            /* found entry in victim tlb, swap tlb and iotlb */ \
            tmptlb = env->tlb_table[mmu_idx][index]; \
            env->tlb_table[mmu_idx][index] = env->tlb_v_table[mmu_idx][vidx]; \
            env->tlb_v_table[mmu_idx][vidx] = tmptlb; \
            tmpiotlb = env->iotlb[mmu_idx][index]; \
            env->iotlb[mmu_idx][index] = env->iotlb_v[mmu_idx][vidx]; \
            env->iotlb_v[mmu_idx][vidx] = tmpiotlb; \
            break; \
        } \
    } \
    /* return true when there is a vtlb hit, i.e. vidx >= 0 */ \
    vidx >= 0; \
})

#ifndef SOFTMMU_CODE_ACCESS
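/* Complete a load that hit an I/O (i.e. non-RAM) TLB entry by dispatching it
 * through the memory API. */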
static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
                                              CPUIOTLBEntry *iotlbentry,
                                              target_ulong addr,
                                              uintptr_t retaddr)
{
    uint64_t val;
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu_can_do_io(cpu)) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    memory_region_dispatch_read(mr, physaddr, &val, 1 << SHIFT,
                                iotlbentry->attrs);
    return val;
}
#endif

#ifdef SOFTMMU_CODE_ACCESS
static __attribute__((unused))
#endif
WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    uintptr_t haddr;
    DATA_TYPE res;

    /* Adjust the given return address. */
    retaddr -= GETPC_ADJ;

    /* If the TLB entry is for a different page, reload and try again. */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if ((addr & (DATA_SIZE - 1)) != 0
            && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
            cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                                 mmu_idx, retaddr);
        }
        if (!VICTIM_TLB_HIT(ADDR_READ)) {
            tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                     mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    }

    /* Handle an IO access. */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        iotlbentry = &env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always read data in the target
           byte ordering. We should push the LE/BE request down into io. */
        res = glue(io_read, SUFFIX)(env, iotlbentry, addr, retaddr);
        res = TGT_LE(res);
        return res;
    }

    /* Handle slow unaligned access (it spans two pages or IO). */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2;
        unsigned shift;
    do_unaligned_access:
        if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
            cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                                 mmu_idx, retaddr);
        }
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        /* Note the adjustment at the beginning of the function.
           Undo that for the recursion. */
        res1 = helper_le_ld_name(env, addr1, oi, retaddr + GETPC_ADJ);
        res2 = helper_le_ld_name(env, addr2, oi, retaddr + GETPC_ADJ);
        shift = (addr & (DATA_SIZE - 1)) * 8;

        /* Little-endian combine. */
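        /* E.g. for a 4-byte load at addr % 4 == 1 (shift == 8): the low three
         * bytes of the result come from the top of res1 and the high byte
         * from the bottom of res2 (illustrative example). */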
        res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
        return res;
    }

    /* Handle aligned access or unaligned access in the same page. */
    if ((addr & (DATA_SIZE - 1)) != 0
        && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                             mmu_idx, retaddr);
    }

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
    res = glue(glue(ld, LSUFFIX), _p)((uint8_t *)haddr);
#else
    res = glue(glue(ld, LSUFFIX), _le_p)((uint8_t *)haddr);
#endif
    return res;
}

#if DATA_SIZE > 1
#ifdef SOFTMMU_CODE_ACCESS
static __attribute__((unused))
#endif
WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    uintptr_t haddr;
    DATA_TYPE res;

    /* Adjust the given return address. */
    retaddr -= GETPC_ADJ;

    /* If the TLB entry is for a different page, reload and try again. */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if ((addr & (DATA_SIZE - 1)) != 0
            && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
            cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                                 mmu_idx, retaddr);
        }
        if (!VICTIM_TLB_HIT(ADDR_READ)) {
            tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                     mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    }

    /* Handle an IO access. */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        iotlbentry = &env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always read data in the target
           byte ordering. We should push the LE/BE request down into io. */
        res = glue(io_read, SUFFIX)(env, iotlbentry, addr, retaddr);
        res = TGT_BE(res);
        return res;
    }

    /* Handle slow unaligned access (it spans two pages or IO). */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2;
        unsigned shift;
    do_unaligned_access:
        if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
            cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                                 mmu_idx, retaddr);
        }
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        /* Note the adjustment at the beginning of the function.
           Undo that for the recursion. */
        res1 = helper_be_ld_name(env, addr1, oi, retaddr + GETPC_ADJ);
        res2 = helper_be_ld_name(env, addr2, oi, retaddr + GETPC_ADJ);
        shift = (addr & (DATA_SIZE - 1)) * 8;

        /* Big-endian combine. */
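        /* E.g. for a 4-byte load at addr % 4 == 1 (shift == 8): the top three
         * bytes of the result come from the bottom of res1 and the low byte
         * from the top of res2 (illustrative example). */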
        res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
        return res;
    }

    /* Handle aligned access or unaligned access in the same page. */
    if ((addr & (DATA_SIZE - 1)) != 0
        && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                             mmu_idx, retaddr);
    }

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    res = glue(glue(ld, LSUFFIX), _be_p)((uint8_t *)haddr);
    return res;
}
#endif /* DATA_SIZE > 1 */

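/* Wrapper that builds a TCGMemOpIdx from a raw mmu_idx and forwards to the
 * target-endian load helper above, using the caller's return address. */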
DATA_TYPE
glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
                                         int mmu_idx)
{
    TCGMemOpIdx oi = make_memop_idx(SHIFT, mmu_idx);
    return helper_te_ld_name(env, addr, oi, GETRA());
}

#ifndef SOFTMMU_CODE_ACCESS

/* Provide signed versions of the load routines as well. We can of course
   avoid this for 64-bit data, or for 32-bit data on 32-bit host. */
#if DATA_SIZE * 8 < TCG_TARGET_REG_BITS
WORD_TYPE helper_le_lds_name(CPUArchState *env, target_ulong addr,
                             TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (SDATA_TYPE)helper_le_ld_name(env, addr, oi, retaddr);
}

# if DATA_SIZE > 1
WORD_TYPE helper_be_lds_name(CPUArchState *env, target_ulong addr,
                             TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (SDATA_TYPE)helper_be_ld_name(env, addr, oi, retaddr);
}
# endif
#endif

static inline void glue(io_write, SUFFIX)(CPUArchState *env,
                                          CPUIOTLBEntry *iotlbentry,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          uintptr_t retaddr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu_can_do_io(cpu)) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    cpu->mem_io_pc = retaddr;
    memory_region_dispatch_write(mr, physaddr, val, 1 << SHIFT,
                                 iotlbentry->attrs);
}

void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    uintptr_t haddr;

    /* Adjust the given return address. */
    retaddr -= GETPC_ADJ;

    /* If the TLB entry is for a different page, reload and try again. */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if ((addr & (DATA_SIZE - 1)) != 0
            && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
            cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                                 mmu_idx, retaddr);
        }
        if (!VICTIM_TLB_HIT(addr_write)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    }

    /* Handle an IO access. */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        iotlbentry = &env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always read data in the target
           byte ordering. We should push the LE/BE request down into io. */
        val = TGT_LE(val);
        glue(io_write, SUFFIX)(env, iotlbentry, val, addr, retaddr);
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO). */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        int i;
    do_unaligned_access:
        if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
            cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                                 mmu_idx, retaddr);
        }
        /* XXX: not efficient, but simple */
        /* Note: relies on the fact that tlb_fill() does not remove the
         * previous page from the TLB cache. */
        for (i = DATA_SIZE - 1; i >= 0; i--) {
            /* Little-endian extract. */
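            /* Byte i of the little-endian representation of val; it is
             * stored at addr + i just below. */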
            uint8_t val8 = val >> (i * 8);
            /* Note the adjustment at the beginning of the function.
               Undo that for the recursion. */
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            oi, retaddr + GETPC_ADJ);
        }
        return;
    }

    /* Handle aligned access or unaligned access in the same page. */
    if ((addr & (DATA_SIZE - 1)) != 0
        && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
    glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
#else
    glue(glue(st, SUFFIX), _le_p)((uint8_t *)haddr, val);
#endif
}

#if DATA_SIZE > 1
void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    uintptr_t haddr;

    /* Adjust the given return address. */
    retaddr -= GETPC_ADJ;

    /* If the TLB entry is for a different page, reload and try again. */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if ((addr & (DATA_SIZE - 1)) != 0
            && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
            cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                                 mmu_idx, retaddr);
        }
        if (!VICTIM_TLB_HIT(addr_write)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    }

    /* Handle an IO access. */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        iotlbentry = &env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always read data in the target
           byte ordering. We should push the LE/BE request down into io. */
        val = TGT_BE(val);
        glue(io_write, SUFFIX)(env, iotlbentry, val, addr, retaddr);
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO). */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        int i;
    do_unaligned_access:
        if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
            cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                                 mmu_idx, retaddr);
        }
        /* XXX: not efficient, but simple */
        /* Note: relies on the fact that tlb_fill() does not remove the
         * previous page from the TLB cache. */
        for (i = DATA_SIZE - 1; i >= 0; i--) {
            /* Big-endian extract. */
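            /* Byte i of the big-endian representation of val; it is
             * stored at addr + i just below. */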
            uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8));
            /* Note the adjustment at the beginning of the function.
               Undo that for the recursion. */
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            oi, retaddr + GETPC_ADJ);
        }
        return;
    }

    /* Handle aligned access or unaligned access in the same page. */
    if ((addr & (DATA_SIZE - 1)) != 0
        && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
}
#endif /* DATA_SIZE > 1 */

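/* As with the load wrapper above: build a TCGMemOpIdx from a raw mmu_idx and
 * forward to the target-endian store helper. */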
void
glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
                                         DATA_TYPE val, int mmu_idx)
{
    TCGMemOpIdx oi = make_memop_idx(SHIFT, mmu_idx);
    helper_te_st_name(env, addr, val, oi, GETRA());
}

#if DATA_SIZE == 1
/* Probe for whether the specified guest write access is permitted.
 * If it is not permitted then an exception will be taken in the same
 * way as if this were a real write access (and we will not return).
 * Otherwise the function will return, and there will be a valid
 * entry in the TLB for this access.
 */
void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
                 uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;

    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        /* TLB entry is for a different page */
        if (!VICTIM_TLB_HIT(addr_write)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
    }
}
#endif
#endif /* !defined(SOFTMMU_CODE_ACCESS) */

#undef READ_ACCESS_TYPE
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef LSUFFIX
#undef DATA_SIZE
#undef ADDR_READ
#undef WORD_TYPE
#undef SDATA_TYPE
#undef USUFFIX
#undef SSUFFIX
#undef BSWAP
#undef TGT_BE
#undef TGT_LE
#undef CPU_BE
#undef CPU_LE
#undef helper_le_ld_name
#undef helper_be_ld_name
#undef helper_le_lds_name
#undef helper_be_lds_name
#undef helper_le_st_name
#undef helper_be_st_name
#undef helper_te_ld_name
#undef helper_te_st_name