target-ppc/mmu-hash64.c
/*
 * PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 * Copyright (c) 2013 David Gibson, IBM Corporation
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "qemu/error-report.h"
#include "sysemu/kvm.h"
#include "kvm_ppc.h"
#include "mmu-hash64.h"
#include "exec/log.h"

//#define DEBUG_SLB

#ifdef DEBUG_SLB
# define LOG_SLB(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
#else
# define LOG_SLB(...) do { } while (0)
#endif

/*
 * Used to indicate that a CPU has its hash page table (HPT) managed
 * within the host kernel
 */
#define MMU_HASH64_KVM_MANAGED_HPT ((void *)-1)

/*
 * SLB handling
 */

static ppc_slb_t *slb_lookup(PowerPCCPU *cpu, target_ulong eaddr)
{
    CPUPPCState *env = &cpu->env;
    uint64_t esid_256M, esid_1T;
    int n;

    LOG_SLB("%s: eaddr " TARGET_FMT_lx "\n", __func__, eaddr);

    esid_256M = (eaddr & SEGMENT_MASK_256M) | SLB_ESID_V;
    esid_1T = (eaddr & SEGMENT_MASK_1T) | SLB_ESID_V;

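    /* An effective address can live in either a 256MB or a 1TB segment, so
     * build both candidate ESIDs and try them against every entry; folding
     * SLB_ESID_V into the search keys means the equality compare below can
     * only hit valid entries. */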
    for (n = 0; n < env->slb_nr; n++) {
        ppc_slb_t *slb = &env->slb[n];

        LOG_SLB("%s: slot %d %016" PRIx64 " %016"
                PRIx64 "\n", __func__, n, slb->esid, slb->vsid);
        /* We check for 1T matches on all MMUs here - if the MMU
         * doesn't have 1T segment support, we will have prevented 1T
         * entries from being inserted in the slbmte code. */
        if (((slb->esid == esid_256M) &&
             ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_256M))
            || ((slb->esid == esid_1T) &&
                ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_1T))) {
            return slb;
        }
    }

    return NULL;
}

void dump_slb(FILE *f, fprintf_function cpu_fprintf, PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    int i;
    uint64_t slbe, slbv;

    cpu_synchronize_state(CPU(cpu));

    cpu_fprintf(f, "SLB\tESID\t\t\tVSID\n");
    for (i = 0; i < env->slb_nr; i++) {
        slbe = env->slb[i].esid;
        slbv = env->slb[i].vsid;
        if (slbe == 0 && slbv == 0) {
            continue;
        }
        cpu_fprintf(f, "%d\t0x%016" PRIx64 "\t0x%016" PRIx64 "\n",
                    i, slbe, slbv);
    }
}

void helper_slbia(CPUPPCState *env)
{
    int n;

    /* XXX: Warning: slbia never invalidates the first segment */
    for (n = 1; n < env->slb_nr; n++) {
        ppc_slb_t *slb = &env->slb[n];

        if (slb->esid & SLB_ESID_V) {
            slb->esid &= ~SLB_ESID_V;
            /* XXX: given the fact that segment size is 256 MB or 1TB,
             * and we still don't have a tlb_flush_mask(env, n, mask)
             * in QEMU, we just invalidate all TLBs
             */
            env->tlb_need_flush = 1;
        }
    }
}

void helper_slbie(CPUPPCState *env, target_ulong addr)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    ppc_slb_t *slb;

    slb = slb_lookup(cpu, addr);
    if (!slb) {
        return;
    }

    if (slb->esid & SLB_ESID_V) {
        slb->esid &= ~SLB_ESID_V;

        /* XXX: given the fact that segment size is 256 MB or 1TB,
         * and we still don't have a tlb_flush_mask(env, n, mask)
         * in QEMU, we just invalidate all TLBs
         */
        env->tlb_need_flush = 1;
    }
}

int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot,
                  target_ulong esid, target_ulong vsid)
{
    CPUPPCState *env = &cpu->env;
    ppc_slb_t *slb = &env->slb[slot];
    const struct ppc_one_seg_page_size *sps = NULL;
    int i;

    if (slot >= env->slb_nr) {
        return -1; /* Bad slot number */
    }
    if (esid & ~(SLB_ESID_ESID | SLB_ESID_V)) {
        return -1; /* Reserved bits set */
    }
    if (vsid & (SLB_VSID_B & ~SLB_VSID_B_1T)) {
        return -1; /* Bad segment size */
    }
    if ((vsid & SLB_VSID_B) && !(env->mmu_model & POWERPC_MMU_1TSEG)) {
        return -1; /* 1T segment on MMU that doesn't support it */
    }

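    /* Find the segment page-size entry whose SLB encoding matches the
     * L||LP bits of the new VSID; an unrecognised encoding is rejected
     * below. */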
    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const struct ppc_one_seg_page_size *sps1 = &env->sps.sps[i];

        if (!sps1->page_shift) {
            break;
        }

        if ((vsid & SLB_VSID_LLP_MASK) == sps1->slb_enc) {
            sps = sps1;
            break;
        }
    }

    if (!sps) {
        error_report("Bad page size encoding in SLB store: slot "TARGET_FMT_lu
                     " esid 0x"TARGET_FMT_lx" vsid 0x"TARGET_FMT_lx,
                     slot, esid, vsid);
        return -1;
    }

    slb->esid = esid;
    slb->vsid = vsid;
    slb->sps = sps;

    LOG_SLB("%s: %d " TARGET_FMT_lx " - " TARGET_FMT_lx " => %016" PRIx64
            " %016" PRIx64 "\n", __func__, slot, esid, vsid,
            slb->esid, slb->vsid);

    return 0;
}

static int ppc_load_slb_esid(PowerPCCPU *cpu, target_ulong rb,
                             target_ulong *rt)
{
    CPUPPCState *env = &cpu->env;
    int slot = rb & 0xfff;
    ppc_slb_t *slb = &env->slb[slot];

    if (slot >= env->slb_nr) {
        return -1;
    }

    *rt = slb->esid;
    return 0;
}

static int ppc_load_slb_vsid(PowerPCCPU *cpu, target_ulong rb,
                             target_ulong *rt)
{
    CPUPPCState *env = &cpu->env;
    int slot = rb & 0xfff;
    ppc_slb_t *slb = &env->slb[slot];

    if (slot >= env->slb_nr) {
        return -1;
    }

    *rt = slb->vsid;
    return 0;
}

void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);

    if (ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs) < 0) {
        helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL);
    }
}

target_ulong helper_load_slb_esid(CPUPPCState *env, target_ulong rb)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    target_ulong rt = 0;

    if (ppc_load_slb_esid(cpu, rb, &rt) < 0) {
        helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL);
    }
    return rt;
}

target_ulong helper_load_slb_vsid(CPUPPCState *env, target_ulong rb)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    target_ulong rt = 0;

    if (ppc_load_slb_vsid(cpu, rb, &rt) < 0) {
        helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL);
    }
    return rt;
}

/*
 * 64-bit hash table MMU handling
 */
void ppc_hash64_set_sdr1(PowerPCCPU *cpu, target_ulong value,
                         Error **errp)
{
    CPUPPCState *env = &cpu->env;
    target_ulong htabsize = value & SDR_64_HTABSIZE;

    env->spr[SPR_SDR1] = value;
    if (htabsize > 28) {
        error_setg(errp,
                   "Invalid HTABSIZE 0x" TARGET_FMT_lx" stored in SDR1",
                   htabsize);
        htabsize = 28;
    }
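    /* HTABSIZE selects a hash table of 2^(18 + HTABSIZE) bytes; a PTEG is
     * 128 (2^7) bytes, so this mask covers the valid PTEG index bits. */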
    env->htab_mask = (1ULL << (htabsize + 18 - 7)) - 1;
    env->htab_base = value & SDR_64_HTABORG;
}

void ppc_hash64_set_external_hpt(PowerPCCPU *cpu, void *hpt, int shift,
                                 Error **errp)
{
    CPUPPCState *env = &cpu->env;
    Error *local_err = NULL;

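    /* A NULL hpt means the hash table is managed inside the host kernel
     * (KVM); PTE reads and writes then go through the kvmppc_hash64_*()
     * helpers rather than a QEMU-visible table. */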
    if (hpt) {
        env->external_htab = hpt;
    } else {
        env->external_htab = MMU_HASH64_KVM_MANAGED_HPT;
    }
    ppc_hash64_set_sdr1(cpu, (target_ulong)(uintptr_t)hpt | (shift - 18),
                        &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /* Not strictly necessary, but makes it clearer that an external
     * htab is in use when debugging */
    env->htab_base = -1;

    if (kvm_enabled()) {
        if (kvmppc_put_books_sregs(cpu) < 0) {
            error_setg(errp, "Unable to update SDR1 in KVM");
        }
    }
}

static int ppc_hash64_pte_prot(PowerPCCPU *cpu,
                               ppc_slb_t *slb, ppc_hash_pte64_t pte)
{
    CPUPPCState *env = &cpu->env;
    unsigned pp, key;
    /* Some pp bit combinations have undefined behaviour, so default
     * to no access in those cases */
    int prot = 0;

    key = !!(msr_pr ? (slb->vsid & SLB_VSID_KP)
             : (slb->vsid & SLB_VSID_KS));
    pp = (pte.pte1 & HPTE64_R_PP) | ((pte.pte1 & HPTE64_R_PP0) >> 61);

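    /* The storage key comes from SLB[KP] in problem state or SLB[KS] in
     * privileged state; together with the three PP bits it selects one of
     * the access-authority combinations below. */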
    if (key == 0) {
        switch (pp) {
        case 0x0:
        case 0x1:
        case 0x2:
            prot = PAGE_READ | PAGE_WRITE;
            break;

        case 0x3:
        case 0x6:
            prot = PAGE_READ;
            break;
        }
    } else {
        switch (pp) {
        case 0x0:
        case 0x6:
            prot = 0;
            break;

        case 0x1:
        case 0x3:
            prot = PAGE_READ;
            break;

        case 0x2:
            prot = PAGE_READ | PAGE_WRITE;
            break;
        }
    }

    /* No execute if either noexec or guarded bits set */
    if (!(pte.pte1 & HPTE64_R_N) || (pte.pte1 & HPTE64_R_G)
        || (slb->vsid & SLB_VSID_N)) {
        prot |= PAGE_EXEC;
    }

    return prot;
}

static int ppc_hash64_amr_prot(PowerPCCPU *cpu, ppc_hash_pte64_t pte)
{
    CPUPPCState *env = &cpu->env;
    int key, amrbits;
    int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

    /* Only recent MMUs implement Virtual Page Class Key Protection */
    if (!(env->mmu_model & POWERPC_MMU_AMR)) {
        return prot;
    }

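    /* Each class key selects a 2-bit field in the AMR, key 0 in the topmost
     * pair; the upper bit of the pair blocks stores and the lower bit
     * blocks loads. */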
    key = HPTE64_R_KEY(pte.pte1);
    amrbits = (env->spr[SPR_AMR] >> 2*(31 - key)) & 0x3;

    /* fprintf(stderr, "AMR protection: key=%d AMR=0x%" PRIx64 "\n", key, */
    /*         env->spr[SPR_AMR]); */

    /*
     * A store is permitted if the AMR bit is 0. Remove write
     * protection if it is set.
     */
    if (amrbits & 0x2) {
        prot &= ~PAGE_WRITE;
    }
    /*
     * A load is permitted if the AMR bit is 0. Remove read
     * protection if it is set.
     */
    if (amrbits & 0x1) {
        prot &= ~PAGE_READ;
    }

    return prot;
}

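/*
 * The token returned here abstracts over the three possible HPT backings
 * (kernel-managed via KVM, a QEMU-allocated external table, or guest memory
 * at htab_base); callers read PTEs through it and must release it with
 * ppc_hash64_stop_access().
 */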
uint64_t ppc_hash64_start_access(PowerPCCPU *cpu, target_ulong pte_index)
{
    uint64_t token = 0;
    hwaddr pte_offset;

    pte_offset = pte_index * HASH_PTE_SIZE_64;
    if (cpu->env.external_htab == MMU_HASH64_KVM_MANAGED_HPT) {
        /*
         * HTAB is controlled by KVM. Fetch the PTEG into a new buffer.
         */
        token = kvmppc_hash64_read_pteg(cpu, pte_index);
    } else if (cpu->env.external_htab) {
        /*
         * HTAB is controlled by QEMU. Just point to the internally
         * accessible PTEG.
         */
        token = (uint64_t)(uintptr_t) cpu->env.external_htab + pte_offset;
    } else if (cpu->env.htab_base) {
        token = cpu->env.htab_base + pte_offset;
    }
    return token;
}

void ppc_hash64_stop_access(PowerPCCPU *cpu, uint64_t token)
{
    if (cpu->env.external_htab == MMU_HASH64_KVM_MANAGED_HPT) {
        kvmppc_hash64_free_pteg(token);
    }
}

static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash,
                                     bool secondary, target_ulong ptem,
                                     ppc_hash_pte64_t *pte)
{
    CPUPPCState *env = &cpu->env;
    int i;
    uint64_t token;
    target_ulong pte0, pte1;
    target_ulong pte_index;

    pte_index = (hash & env->htab_mask) * HPTES_PER_GROUP;
    token = ppc_hash64_start_access(cpu, pte_index);
    if (!token) {
        return -1;
    }
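    /* Scan the eight HPTEs of this group: a match needs the valid bit, the
     * right hash selector (primary vs. secondary) and a matching AVPN. */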
    for (i = 0; i < HPTES_PER_GROUP; i++) {
        pte0 = ppc_hash64_load_hpte0(cpu, token, i);
        pte1 = ppc_hash64_load_hpte1(cpu, token, i);

        if ((pte0 & HPTE64_V_VALID)
            && (secondary == !!(pte0 & HPTE64_V_SECONDARY))
            && HPTE64_V_COMPARE(pte0, ptem)) {
            pte->pte0 = pte0;
            pte->pte1 = pte1;
            ppc_hash64_stop_access(cpu, token);
            return (pte_index + i) * HASH_PTE_SIZE_64;
        }
    }
    ppc_hash64_stop_access(cpu, token);
    /*
     * We didn't find a valid entry.
     */
    return -1;
}

static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
                                     ppc_slb_t *slb, target_ulong eaddr,
                                     ppc_hash_pte64_t *pte)
{
    CPUPPCState *env = &cpu->env;
    hwaddr pte_offset;
    hwaddr hash;
    uint64_t vsid, epnmask, epn, ptem;

    /* The SLB store path should prevent any bad page size encodings
     * getting in there, so: */
    assert(slb->sps);

    epnmask = ~((1ULL << slb->sps->page_shift) - 1);

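    /* The hash mixes the VSID with the page index within the segment; the
     * 1TB-segment form additionally folds in vsid << 25. The secondary hash
     * used further down is simply the ones' complement of this value. */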
    if (slb->vsid & SLB_VSID_B) {
        /* 1TB segment */
        vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT_1T;
        epn = (eaddr & ~SEGMENT_MASK_1T) & epnmask;
        hash = vsid ^ (vsid << 25) ^ (epn >> slb->sps->page_shift);
    } else {
        /* 256M segment */
        vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT;
        epn = (eaddr & ~SEGMENT_MASK_256M) & epnmask;
        hash = vsid ^ (epn >> slb->sps->page_shift);
    }
    ptem = (slb->vsid & SLB_VSID_PTEM) | ((epn >> 16) & HPTE64_V_AVPN);

    /* Page address translation */
    qemu_log_mask(CPU_LOG_MMU,
            "htab_base " TARGET_FMT_plx " htab_mask " TARGET_FMT_plx
            " hash " TARGET_FMT_plx "\n",
            env->htab_base, env->htab_mask, hash);

    /* Primary PTEG lookup */
    qemu_log_mask(CPU_LOG_MMU,
            "0 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
            " vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx
            " hash=" TARGET_FMT_plx "\n",
            env->htab_base, env->htab_mask, vsid, ptem, hash);
    pte_offset = ppc_hash64_pteg_search(cpu, hash, 0, ptem, pte);

    if (pte_offset == -1) {
        /* Secondary PTEG lookup */
        qemu_log_mask(CPU_LOG_MMU,
                "1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
                " vsid=" TARGET_FMT_lx " api=" TARGET_FMT_lx
                " hash=" TARGET_FMT_plx "\n", env->htab_base,
                env->htab_mask, vsid, ptem, ~hash);

        pte_offset = ppc_hash64_pteg_search(cpu, ~hash, 1, ptem, pte);
    }

    return pte_offset;
}

static unsigned hpte_page_shift(const struct ppc_one_seg_page_size *sps,
                                uint64_t pte0, uint64_t pte1)
{
    int i;

    if (!(pte0 & HPTE64_V_LARGE)) {
        if (sps->page_shift != 12) {
            /* 4kiB page in a non 4kiB segment */
            return 0;
        }
        /* Normal 4kiB page */
        return 12;
    }

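    /* Large page: the actual size is encoded in low bits of the RPN field
     * (the LP encoding); match it against the sizes this segment allows. */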
    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const struct ppc_one_page_size *ps = &sps->enc[i];
        uint64_t mask;

        if (!ps->page_shift) {
            break;
        }

        if (ps->page_shift == 12) {
            /* L bit is set so this can't be a 4kiB page */
            continue;
        }

        mask = ((1ULL << ps->page_shift) - 1) & HPTE64_R_RPN;

        if ((pte1 & mask) == (ps->pte_enc << HPTE64_R_RPN_SHIFT)) {
            return ps->page_shift;
        }
    }

    return 0; /* Bad page size encoding */
}

unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu,
                                          uint64_t pte0, uint64_t pte1,
                                          unsigned *seg_page_shift)
{
    CPUPPCState *env = &cpu->env;
    int i;

    if (!(pte0 & HPTE64_V_LARGE)) {
        *seg_page_shift = 12;
        return 12;
    }

    /*
     * The encodings in env->sps need to be carefully chosen so that
     * this gives an unambiguous result.
     */
    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const struct ppc_one_seg_page_size *sps = &env->sps.sps[i];
        unsigned shift;

        if (!sps->page_shift) {
            break;
        }

        shift = hpte_page_shift(sps, pte0, pte1);
        if (shift) {
            *seg_page_shift = sps->page_shift;
            return shift;
        }
    }

    *seg_page_shift = 0;
    return 0;
}

int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
                                int rwx, int mmu_idx)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    ppc_slb_t *slb;
    unsigned apshift;
    hwaddr pte_offset;
    ppc_hash_pte64_t pte;
    int pp_prot, amr_prot, prot;
    uint64_t new_pte1;
    const int need_prot[] = {PAGE_READ, PAGE_WRITE, PAGE_EXEC};
    hwaddr raddr;

    assert((rwx == 0) || (rwx == 1) || (rwx == 2));

    /* 1. Handle real mode accesses */
    if (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0))) {
        /* Translation is off */
        /* In real mode the top 4 effective address bits are ignored */
        raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;
        tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                     PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx,
                     TARGET_PAGE_SIZE);
        return 0;
    }

    /* 2. Translation is on, so look up the SLB */
    slb = slb_lookup(cpu, eaddr);

    if (!slb) {
        if (rwx == 2) {
            cs->exception_index = POWERPC_EXCP_ISEG;
            env->error_code = 0;
        } else {
            cs->exception_index = POWERPC_EXCP_DSEG;
            env->error_code = 0;
            env->spr[SPR_DAR] = eaddr;
        }
        return 1;
    }

    /* 3. Check for segment level no-execute violation */
    if ((rwx == 2) && (slb->vsid & SLB_VSID_N)) {
        cs->exception_index = POWERPC_EXCP_ISI;
        env->error_code = 0x10000000;
        return 1;
    }

    /* 4. Locate the PTE in the hash table */
    pte_offset = ppc_hash64_htab_lookup(cpu, slb, eaddr, &pte);
    if (pte_offset == -1) {
        if (rwx == 2) {
            cs->exception_index = POWERPC_EXCP_ISI;
            env->error_code = 0x40000000;
        } else {
            cs->exception_index = POWERPC_EXCP_DSI;
            env->error_code = 0;
            env->spr[SPR_DAR] = eaddr;
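            /* DSISR 0x40000000 flags a missing HPTE; stores additionally
             * set 0x02000000. */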
            if (rwx == 1) {
                env->spr[SPR_DSISR] = 0x42000000;
            } else {
                env->spr[SPR_DSISR] = 0x40000000;
            }
        }
        return 1;
    }
    qemu_log_mask(CPU_LOG_MMU,
                  "found PTE at offset %08" HWADDR_PRIx "\n", pte_offset);

    /* Validate page size encoding */
    apshift = hpte_page_shift(slb->sps, pte.pte0, pte.pte1);
    if (!apshift) {
        error_report("Bad page size encoding in HPTE 0x%"PRIx64" - 0x%"PRIx64
                     " @ 0x%"HWADDR_PRIx, pte.pte0, pte.pte1, pte_offset);
        /* Not entirely sure what the right action is here, but a machine
         * check seems reasonable */
        cs->exception_index = POWERPC_EXCP_MCHECK;
        env->error_code = 0;
        return 1;
    }

    /* 5. Check access permissions */

    pp_prot = ppc_hash64_pte_prot(cpu, slb, pte);
    amr_prot = ppc_hash64_amr_prot(cpu, pte);
    prot = pp_prot & amr_prot;

    if ((need_prot[rwx] & ~prot) != 0) {
        /* Access right violation */
        qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n");
        if (rwx == 2) {
            cs->exception_index = POWERPC_EXCP_ISI;
            env->error_code = 0x08000000;
        } else {
            target_ulong dsisr = 0;

            cs->exception_index = POWERPC_EXCP_DSI;
            env->error_code = 0;
            env->spr[SPR_DAR] = eaddr;
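            /* Build up DSISR: 0x08000000 for a PP protection violation,
             * 0x02000000 for a store, 0x00200000 for an AMR (class key)
             * violation. */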
            if (need_prot[rwx] & ~pp_prot) {
                dsisr |= 0x08000000;
            }
            if (rwx == 1) {
                dsisr |= 0x02000000;
            }
            if (need_prot[rwx] & ~amr_prot) {
                dsisr |= 0x00200000;
            }
            env->spr[SPR_DSISR] = dsisr;
        }
        return 1;
    }

    qemu_log_mask(CPU_LOG_MMU, "PTE access granted !\n");

    /* 6. Update PTE referenced and changed bits if necessary */

    new_pte1 = pte.pte1 | HPTE64_R_R; /* set referenced bit */
    if (rwx == 1) {
        new_pte1 |= HPTE64_R_C; /* set changed (dirty) bit */
    } else {
        /* Treat the page as read-only for now, so that a later write
         * will pass through this function again to set the C bit */
        prot &= ~PAGE_WRITE;
    }

    if (new_pte1 != pte.pte1) {
        ppc_hash64_store_hpte(cpu, pte_offset / HASH_PTE_SIZE_64,
                              pte.pte0, new_pte1);
    }

    /* 7. Determine the real address from the PTE */

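    /* Combine the real page number from the PTE with the byte offset (the
     * low apshift bits) taken from the effective address. */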
    raddr = deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, eaddr);

    tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                 prot, mmu_idx, 1ULL << apshift);

    return 0;
}

hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
{
    CPUPPCState *env = &cpu->env;
    ppc_slb_t *slb;
    hwaddr pte_offset;
    ppc_hash_pte64_t pte;
    unsigned apshift;

    if (msr_dr == 0) {
        /* In real mode the top 4 effective address bits are ignored */
        return addr & 0x0FFFFFFFFFFFFFFFULL;
    }

    slb = slb_lookup(cpu, addr);
    if (!slb) {
        return -1;
    }

    pte_offset = ppc_hash64_htab_lookup(cpu, slb, addr, &pte);
    if (pte_offset == -1) {
        return -1;
    }

    apshift = hpte_page_shift(slb->sps, pte.pte0, pte.pte1);
    if (!apshift) {
        return -1;
    }

    return deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, addr)
        & TARGET_PAGE_MASK;
}

void ppc_hash64_store_hpte(PowerPCCPU *cpu,
                           target_ulong pte_index,
                           target_ulong pte0, target_ulong pte1)
{
    CPUPPCState *env = &cpu->env;

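    /* Mirror the three HPT backings handled in ppc_hash64_start_access():
     * kernel-managed via KVM, a QEMU-allocated external table, or guest
     * memory at htab_base. */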
    if (env->external_htab == MMU_HASH64_KVM_MANAGED_HPT) {
        kvmppc_hash64_write_pte(env, pte_index, pte0, pte1);
        return;
    }

    pte_index *= HASH_PTE_SIZE_64;
    if (env->external_htab) {
        stq_p(env->external_htab + pte_index, pte0);
        stq_p(env->external_htab + pte_index + HASH_PTE_SIZE_64 / 2, pte1);
    } else {
        stq_phys(CPU(cpu)->as, env->htab_base + pte_index, pte0);
        stq_phys(CPU(cpu)->as,
                 env->htab_base + pte_index + HASH_PTE_SIZE_64 / 2, pte1);
    }
}

void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu,
                               target_ulong pte_index,
                               target_ulong pte0, target_ulong pte1)
{
    /*
     * XXX: given the fact that there are too many segments to
     * invalidate, and we still don't have a tlb_flush_mask(env, n,
     * mask) in QEMU, we just invalidate all TLBs
     */
    tlb_flush(CPU(cpu), 1);
}