/*
 * target/s390x/mem_helper.c (from qemu.git)
 */
1 /*
2 * S/390 memory access helper routines
3 *
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2009 Alexander Graf
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "exec/helper-proto.h"
24 #include "exec/exec-all.h"
25 #include "exec/cpu_ldst.h"
26
27 #if !defined(CONFIG_USER_ONLY)
28 #include "hw/s390x/storage-keys.h"
29 #endif
30
31 /*****************************************************************************/
32 /* Softmmu support */
33 #if !defined(CONFIG_USER_ONLY)
34
/* Fill the softmmu TLB after a miss; raise a guest exception on failure.
   If RETADDR is 0 the call came from C code rather than generated code,
   so there is no TCG state to restore before exiting the CPU loop.  */
/* XXX: fix it to restore all registers */
void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
              int mmu_idx, uintptr_t retaddr)
{
    int ret;

    ret = s390_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx);
    if (unlikely(ret != 0)) {
        if (likely(retaddr)) {
            /* now we have a real cpu fault */
            cpu_restore_state(cs, retaddr);
        }
        cpu_loop_exit(cs);
    }
}
53
54 #endif
55
56 /* #define DEBUG_HELPER */
57 #ifdef DEBUG_HELPER
58 #define HELPER_LOG(x...) qemu_log(x)
59 #else
60 #define HELPER_LOG(x...)
61 #endif
62
63 /* Reduce the length so that addr + len doesn't cross a page boundary. */
64 static inline uint64_t adj_len_to_page(uint64_t len, uint64_t addr)
65 {
66 #ifndef CONFIG_USER_ONLY
67 if ((addr & ~TARGET_PAGE_MASK) + len - 1 >= TARGET_PAGE_SIZE) {
68 return -addr & ~TARGET_PAGE_MASK;
69 }
70 #endif
71 return len;
72 }
73
/* Set L bytes at guest virtual address DEST to BYTE.  Uses a direct host
   pointer for a whole page at a time when the TLB already grants write
   access; otherwise falls back to a single byte store, which fills the
   TLB for the next loop iteration.  */
static void fast_memset(CPUS390XState *env, uint64_t dest, uint8_t byte,
                        uint32_t l)
{
    int mmu_idx = cpu_mmu_index(env, false);

    while (l > 0) {
        void *p = tlb_vaddr_to_host(env, dest, MMU_DATA_STORE, mmu_idx);
        if (p) {
            /* Access to the whole page in write mode granted.  */
            int l_adj = adj_len_to_page(l, dest);
            memset(p, byte, l_adj);
            dest += l_adj;
            l -= l_adj;
        } else {
            /* We failed to get access to the whole page. The next write
               access will likely fill the QEMU TLB for the next iteration.  */
            cpu_stb_data(env, dest, byte);
            dest++;
            l--;
        }
    }
}
96
/* Copy L bytes from guest virtual SRC to DEST.  Works through host
   pointers one page (or page remainder) at a time when the TLB already
   maps both addresses; otherwise copies a single byte, which triggers
   the TLB fill for the next iteration.  */
static void fast_memmove(CPUS390XState *env, uint64_t dest, uint64_t src,
                         uint32_t l)
{
    int mmu_idx = cpu_mmu_index(env, false);

    while (l > 0) {
        void *src_p = tlb_vaddr_to_host(env, src, MMU_DATA_LOAD, mmu_idx);
        void *dest_p = tlb_vaddr_to_host(env, dest, MMU_DATA_STORE, mmu_idx);
        if (src_p && dest_p) {
            /* Access to both whole pages granted.  */
            int l_adj = adj_len_to_page(l, src);
            l_adj = adj_len_to_page(l_adj, dest);
            memmove(dest_p, src_p, l_adj);
            src += l_adj;
            dest += l_adj;
            l -= l_adj;
        } else {
            /* We failed to get access to one or both whole pages. The next
               read or write access will likely fill the QEMU TLB for the
               next iteration.  */
            cpu_stb_data(env, dest, cpu_ldub_data(env, src));
            src++;
            dest++;
            l--;
        }
    }
}
124
125 /* and on array */
126 uint32_t HELPER(nc)(CPUS390XState *env, uint32_t l, uint64_t dest,
127 uint64_t src)
128 {
129 int i;
130 unsigned char x;
131 uint32_t cc = 0;
132
133 HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
134 __func__, l, dest, src);
135 for (i = 0; i <= l; i++) {
136 x = cpu_ldub_data(env, dest + i) & cpu_ldub_data(env, src + i);
137 if (x) {
138 cc = 1;
139 }
140 cpu_stb_data(env, dest + i, x);
141 }
142 return cc;
143 }
144
145 /* xor on array */
146 uint32_t HELPER(xc)(CPUS390XState *env, uint32_t l, uint64_t dest,
147 uint64_t src)
148 {
149 int i;
150 unsigned char x;
151 uint32_t cc = 0;
152
153 HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
154 __func__, l, dest, src);
155
156 /* xor with itself is the same as memset(0) */
157 if (src == dest) {
158 fast_memset(env, dest, 0, l + 1);
159 return 0;
160 }
161
162 for (i = 0; i <= l; i++) {
163 x = cpu_ldub_data(env, dest + i) ^ cpu_ldub_data(env, src + i);
164 if (x) {
165 cc = 1;
166 }
167 cpu_stb_data(env, dest + i, x);
168 }
169 return cc;
170 }
171
172 /* or on array */
173 uint32_t HELPER(oc)(CPUS390XState *env, uint32_t l, uint64_t dest,
174 uint64_t src)
175 {
176 int i;
177 unsigned char x;
178 uint32_t cc = 0;
179
180 HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
181 __func__, l, dest, src);
182 for (i = 0; i <= l; i++) {
183 x = cpu_ldub_data(env, dest + i) | cpu_ldub_data(env, src + i);
184 if (x) {
185 cc = 1;
186 }
187 cpu_stb_data(env, dest + i, x);
188 }
189 return cc;
190 }
191
/* memmove (MVC: L is the length minus one) */
void HELPER(mvc)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
{
    int i = 0;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);

    /* mvc with source pointing to the byte after the destination is the
       same as memset with the first source byte */
    if (dest == (src + 1)) {
        fast_memset(env, dest, cpu_ldub_data(env, src), l + 1);
        return;
    }

    /* mvc and memmove do not behave the same when areas overlap!
       MVC copies strictly byte-by-byte left to right, so a destructive
       overlap must let earlier stores feed later loads; only the
       non-overlapping cases below may use the block copy.  */
    if ((dest < src) || (src + l < dest)) {
        fast_memmove(env, dest, src, l + 1);
        return;
    }

    /* slow version with byte accesses which always work */
    for (i = 0; i <= l; i++) {
        cpu_stb_data(env, dest + i, cpu_ldub_data(env, src + i));
    }
}
218
219 /* compare unsigned byte arrays */
220 uint32_t HELPER(clc)(CPUS390XState *env, uint32_t l, uint64_t s1, uint64_t s2)
221 {
222 int i;
223 unsigned char x, y;
224 uint32_t cc;
225
226 HELPER_LOG("%s l %d s1 %" PRIx64 " s2 %" PRIx64 "\n",
227 __func__, l, s1, s2);
228 for (i = 0; i <= l; i++) {
229 x = cpu_ldub_data(env, s1 + i);
230 y = cpu_ldub_data(env, s2 + i);
231 HELPER_LOG("%02x (%c)/%02x (%c) ", x, x, y, y);
232 if (x < y) {
233 cc = 1;
234 goto done;
235 } else if (x > y) {
236 cc = 2;
237 goto done;
238 }
239 }
240 cc = 0;
241 done:
242 HELPER_LOG("\n");
243 return cc;
244 }
245
/* compare logical under mask: compare the bytes of R1 selected by MASK
   (from the most significant bit/byte down) against successive memory
   bytes at ADDR.  CC 0: all equal, 1: register byte low, 2: high.  */
uint32_t HELPER(clm)(CPUS390XState *env, uint32_t r1, uint32_t mask,
                     uint64_t addr)
{
    uint8_t r, d;
    uint32_t cc;

    HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%" PRIx64 "\n", __func__, r1,
               mask, addr);
    cc = 0;
    while (mask) {
        if (mask & 8) {
            d = cpu_ldub_data(env, addr);
            r = (r1 & 0xff000000UL) >> 24;
            HELPER_LOG("mask 0x%x %02x/%02x (0x%" PRIx64 ") ", mask, r, d,
                       addr);
            if (r < d) {
                cc = 1;
                break;
            } else if (r > d) {
                cc = 2;
                break;
            }
            /* Memory advances only for selected bytes.  */
            addr++;
        }
        /* Shift to the next mask bit and the next register byte.  */
        mask = (mask << 1) & 0xf;
        r1 <<= 8;
    }
    HELPER_LOG("\n");
    return cc;
}
277
278 static inline uint64_t fix_address(CPUS390XState *env, uint64_t a)
279 {
280 /* 31-Bit mode */
281 if (!(env->psw.mask & PSW_MASK_64)) {
282 a &= 0x7fffffff;
283 }
284 return a;
285 }
286
287 static inline uint64_t get_address(CPUS390XState *env, int x2, int b2, int d2)
288 {
289 uint64_t r = d2;
290 if (x2) {
291 r += env->regs[x2];
292 }
293 if (b2) {
294 r += env->regs[b2];
295 }
296 return fix_address(env, r);
297 }
298
/* Read REG as an address, truncated to 31 bits when not in 64-bit mode.  */
static inline uint64_t get_address_31fix(CPUS390XState *env, int reg)
{
    return fix_address(env, env->regs[reg]);
}
303
/* search string (c is byte to search, r2 is string, r1 end of string)
   Returns the new R1 value; the new R2 value is passed back in retxl.
   CC 1: found, 2: not found before END, 3: CPU-determined cutoff.  */
uint64_t HELPER(srst)(CPUS390XState *env, uint64_t r0, uint64_t end,
                      uint64_t str)
{
    uint32_t len;
    uint8_t v, c = r0;  /* only the low byte of R0 is the search character */

    str = fix_address(env, str);
    end = fix_address(env, end);

    /* Assume for now that R2 is unmodified.  */
    env->retxl = str;

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k.  */
    for (len = 0; len < 0x2000; ++len) {
        if (str + len == end) {
            /* Character not found.  R1 & R2 are unmodified.  */
            env->cc_op = 2;
            return end;
        }
        v = cpu_ldub_data(env, str + len);
        if (v == c) {
            /* Character found.  Set R1 to the location; R2 is unmodified.  */
            env->cc_op = 1;
            return str + len;
        }
    }

    /* CPU-determined bytes processed.  Advance R2 to next byte to process.  */
    env->retxl = str + len;
    env->cc_op = 3;
    return end;
}
338
/* unsigned string compare (c is string terminator)
   Returns the new R1 value; the new R2 value is passed back in retxl.
   CC 0: equal, 1: first low, 2: first high, 3: CPU-determined cutoff.  */
uint64_t HELPER(clst)(CPUS390XState *env, uint64_t c, uint64_t s1, uint64_t s2)
{
    uint32_t len;

    c = c & 0xff;
    s1 = fix_address(env, s1);
    s2 = fix_address(env, s2);

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k.  */
    for (len = 0; len < 0x2000; ++len) {
        uint8_t v1 = cpu_ldub_data(env, s1 + len);
        uint8_t v2 = cpu_ldub_data(env, s2 + len);
        if (v1 == v2) {
            if (v1 == c) {
                /* Equal.  CC=0, and don't advance the registers.  */
                env->cc_op = 0;
                env->retxl = s2;
                return s1;
            }
        } else {
            /* Unequal.  CC={1,2}, and advance the registers.  Note that
               the terminator need not be zero, but the string that contains
               the terminator is by definition "low".  */
            env->cc_op = (v1 == c ? 1 : v2 == c ? 2 : v1 < v2 ? 1 : 2);
            env->retxl = s2 + len;
            return s1 + len;
        }
    }

    /* CPU-determined bytes equal; advance the registers.  */
    env->cc_op = 3;
    env->retxl = s2 + len;
    return s1 + len;
}
375
/* move page: copy one TARGET_PAGE_SIZE page from the R2 address to the
   R1 address and set CC 0 unconditionally.  */
void HELPER(mvpg)(CPUS390XState *env, uint64_t r0, uint64_t r1, uint64_t r2)
{
    /* XXX missing r0 handling */
    env->cc_op = 0;
    fast_memmove(env, r1, r2, TARGET_PAGE_SIZE);
}
383
384 /* string copy (c is string terminator) */
385 uint64_t HELPER(mvst)(CPUS390XState *env, uint64_t c, uint64_t d, uint64_t s)
386 {
387 uint32_t len;
388
389 c = c & 0xff;
390 d = fix_address(env, d);
391 s = fix_address(env, s);
392
393 /* Lest we fail to service interrupts in a timely manner, limit the
394 amount of work we're willing to do. For now, let's cap at 8k. */
395 for (len = 0; len < 0x2000; ++len) {
396 uint8_t v = cpu_ldub_data(env, s + len);
397 cpu_stb_data(env, d + len, v);
398 if (v == c) {
399 /* Complete. Set CC=1 and advance R1. */
400 env->cc_op = 1;
401 env->retxl = s;
402 return d + len;
403 }
404 }
405
406 /* Incomplete. Set CC=3 and signal to advance R1 and R2. */
407 env->cc_op = 3;
408 env->retxl = s + len;
409 return d + len;
410 }
411
/* Insert Characters under Mask: load the bytes selected by MASK into the
   low word of R1, most significant selected position first.  CC 1 if the
   first inserted bit is one, 2 if any inserted byte is non-zero, else 0.  */
static uint32_t helper_icm(CPUS390XState *env, uint32_t r1, uint64_t address,
                           uint32_t mask)
{
    int pos = 24;   /* top of the lower half of r1 */
    uint64_t rmask = 0xff000000ULL;
    uint8_t val = 0;
    int ccd = 0;    /* set once the first selected byte has been seen */
    uint32_t cc = 0;

    while (mask) {
        if (mask & 8) {
            env->regs[r1] &= ~rmask;
            val = cpu_ldub_data(env, address);
            if ((val & 0x80) && !ccd) {
                cc = 1;
            }
            ccd = 1;
            if (val && cc == 0) {
                cc = 2;
            }
            env->regs[r1] |= (uint64_t)val << pos;
            address++;
        }
        /* Next mask bit, next byte position in the register.  */
        mask = (mask << 1) & 0xf;
        pos -= 8;
        rmask >>= 8;
    }

    return cc;
}
442
/* execute instruction
   this instruction executes an insn modified with the contents of r1
   it does not change the executed instruction in memory
   it does not change the program counter
   in other words: tricky...
   currently implemented by interpreting the cases it is most commonly used in
*/
uint32_t HELPER(ex)(CPUS390XState *env, uint32_t cc, uint64_t v1,
                    uint64_t addr, uint64_t ret)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint16_t insn = cpu_lduw_code(env, addr);

    HELPER_LOG("%s: v1 0x%lx addr 0x%lx insn 0x%x\n", __func__, v1, addr,
               insn);
    if ((insn & 0xf0ff) == 0xd000) {
        /* SS-format storage-to-storage insns; the executed length comes
           from the low byte of V1.  */
        uint32_t l, insn2, b1, b2, d1, d2;

        l = v1 & 0xff;
        insn2 = cpu_ldl_code(env, addr + 2);
        b1 = (insn2 >> 28) & 0xf;
        b2 = (insn2 >> 12) & 0xf;
        d1 = (insn2 >> 16) & 0xfff;
        d2 = insn2 & 0xfff;
        switch (insn & 0xf00) {
        case 0x200:
            helper_mvc(env, l, get_address(env, 0, b1, d1),
                       get_address(env, 0, b2, d2));
            break;
        case 0x400:
            cc = helper_nc(env, l, get_address(env, 0, b1, d1),
                           get_address(env, 0, b2, d2));
            break;
        case 0x500:
            cc = helper_clc(env, l, get_address(env, 0, b1, d1),
                            get_address(env, 0, b2, d2));
            break;
        case 0x600:
            cc = helper_oc(env, l, get_address(env, 0, b1, d1),
                           get_address(env, 0, b2, d2));
            break;
        case 0x700:
            cc = helper_xc(env, l, get_address(env, 0, b1, d1),
                           get_address(env, 0, b2, d2));
            break;
        case 0xc00:
            helper_tr(env, l, get_address(env, 0, b1, d1),
                      get_address(env, 0, b2, d2));
            break;
        case 0xd00:
            cc = helper_trt(env, l, get_address(env, 0, b1, d1),
                            get_address(env, 0, b2, d2));
            break;
        default:
            goto abort;
        }
    } else if ((insn & 0xff00) == 0x0a00) {
        /* supervisor call: the SVC number is the insn's low byte ORed
           with the low byte of V1.  */
        HELPER_LOG("%s: svc %ld via execute\n", __func__, (insn | v1) & 0xff);
        env->psw.addr = ret - 4;
        env->int_svc_code = (insn | v1) & 0xff;
        env->int_svc_ilen = 4;
        helper_exception(env, EXCP_SVC);
    } else if ((insn & 0xff00) == 0xbf00) {
        /* ICM */
        uint32_t insn2, r1, r3, b2, d2;

        insn2 = cpu_ldl_code(env, addr + 2);
        r1 = (insn2 >> 20) & 0xf;
        r3 = (insn2 >> 16) & 0xf;
        b2 = (insn2 >> 12) & 0xf;
        d2 = insn2 & 0xfff;
        cc = helper_icm(env, r1, get_address(env, 0, b2, d2), r3);
    } else {
    abort:
        cpu_abort(CPU(cpu), "EXECUTE on instruction prefix 0x%x not implemented\n",
                  insn);
    }
    return cc;
}
522
523 /* load access registers r1 to r3 from memory at a2 */
524 void HELPER(lam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
525 {
526 int i;
527
528 for (i = r1;; i = (i + 1) % 16) {
529 env->aregs[i] = cpu_ldl_data(env, a2);
530 a2 += 4;
531
532 if (i == r3) {
533 break;
534 }
535 }
536 }
537
538 /* store access registers r1 to r3 in memory at a2 */
539 void HELPER(stam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
540 {
541 int i;
542
543 for (i = r1;; i = (i + 1) % 16) {
544 cpu_stl_data(env, a2, env->aregs[i]);
545 a2 += 4;
546
547 if (i == r3) {
548 break;
549 }
550 }
551 }
552
/* move long: copy up to 24-bit lengths with padding; the final addresses
   and residual lengths are written back to the register pairs.  The CC is
   decided from the original lengths before any copying happens.  */
uint32_t HELPER(mvcl)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    uint64_t destlen = env->regs[r1 + 1] & 0xffffff;
    uint64_t dest = get_address_31fix(env, r1);
    uint64_t srclen = env->regs[r2 + 1] & 0xffffff;
    uint64_t src = get_address_31fix(env, r2);
    uint8_t pad = env->regs[r2 + 1] >> 24;  /* pad byte lives in bits 0-7 */
    uint8_t v;
    uint32_t cc;

    if (destlen == srclen) {
        cc = 0;
    } else if (destlen < srclen) {
        cc = 1;
    } else {
        cc = 2;
    }

    if (srclen > destlen) {
        srclen = destlen;
    }

    for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
        v = cpu_ldub_data(env, src);
        cpu_stb_data(env, dest, v);
    }

    /* Fill the remainder of the destination with the pad byte.  */
    for (; destlen; dest++, destlen--) {
        cpu_stb_data(env, dest, pad);
    }

    env->regs[r1 + 1] = destlen;
    /* can't use srclen here, we trunc'ed it */
    env->regs[r2 + 1] -= src - env->regs[r2];
    env->regs[r1] = dest;
    env->regs[r2] = src;

    return cc;
}
593
/* move long extended another memcopy insn with more bells and whistles
   (full 64-bit lengths; pad byte comes from the low byte of A2).  The CC
   is decided from the lengths before any copying happens.  */
uint32_t HELPER(mvcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
                       uint32_t r3)
{
    uint64_t destlen = env->regs[r1 + 1];
    uint64_t dest = env->regs[r1];
    uint64_t srclen = env->regs[r3 + 1];
    uint64_t src = env->regs[r3];
    uint8_t pad = a2 & 0xff;
    uint8_t v;
    uint32_t cc;

    /* In 31-bit mode, lengths are 32-bit and addresses 31-bit.  */
    if (!(env->psw.mask & PSW_MASK_64)) {
        destlen = (uint32_t)destlen;
        srclen = (uint32_t)srclen;
        dest &= 0x7fffffff;
        src &= 0x7fffffff;
    }

    if (destlen == srclen) {
        cc = 0;
    } else if (destlen < srclen) {
        cc = 1;
    } else {
        cc = 2;
    }

    if (srclen > destlen) {
        srclen = destlen;
    }

    for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
        v = cpu_ldub_data(env, src);
        cpu_stb_data(env, dest, v);
    }

    /* Fill the remainder of the destination with the pad byte.  */
    for (; destlen; dest++, destlen--) {
        cpu_stb_data(env, dest, pad);
    }

    env->regs[r1 + 1] = destlen;
    /* can't use srclen here, we trunc'ed it */
    /* FIXME: 31-bit mode! */
    env->regs[r3 + 1] -= src - env->regs[r3];
    env->regs[r1] = dest;
    env->regs[r3] = src;

    return cc;
}
643
/* compare logical long extended memcompare insn with padding
   The shorter operand is logically extended with the pad byte (low byte
   of A2).  CC 0: equal, 1: first low, 2: first high.  Final addresses
   and residual lengths are written back to the register pairs.  */
uint32_t HELPER(clcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
                       uint32_t r3)
{
    uint64_t destlen = env->regs[r1 + 1];
    uint64_t dest = get_address_31fix(env, r1);
    uint64_t srclen = env->regs[r3 + 1];
    uint64_t src = get_address_31fix(env, r3);
    uint8_t pad = a2 & 0xff;
    uint8_t v1 = 0, v2 = 0;
    uint32_t cc = 0;

    if (!(destlen || srclen)) {
        return cc;
    }

    if (srclen > destlen) {
        srclen = destlen;
    }

    for (; destlen || srclen; src++, dest++, destlen--, srclen--) {
        /* An exhausted operand reads as the pad byte.  */
        v1 = srclen ? cpu_ldub_data(env, src) : pad;
        v2 = destlen ? cpu_ldub_data(env, dest) : pad;
        if (v1 != v2) {
            cc = (v1 < v2) ? 1 : 2;
            break;
        }
    }

    env->regs[r1 + 1] = destlen;
    /* can't use srclen here, we trunc'ed it */
    env->regs[r3 + 1] -= src - env->regs[r3];
    env->regs[r1] = dest;
    env->regs[r3] = src;

    return cc;
}
681
/* checksum: 32-bit ones'-complement-style sum over the operand, word at
   a time with end-around carry.  Returns the number of bytes processed;
   the checksum itself is passed back in retxl.  */
uint64_t HELPER(cksm)(CPUS390XState *env, uint64_t r1,
                      uint64_t src, uint64_t src_len)
{
    uint64_t max_len, len;
    uint64_t cksm = (uint32_t)r1;

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k.  */
    max_len = (src_len > 0x2000 ? 0x2000 : src_len);

    /* Process full words as available.  */
    for (len = 0; len + 4 <= max_len; len += 4, src += 4) {
        cksm += (uint32_t)cpu_ldl_data(env, src);
    }

    /* A 1-3 byte tail is treated as a word padded with zeros.  */
    switch (max_len - len) {
    case 1:
        cksm += cpu_ldub_data(env, src) << 24;
        len += 1;
        break;
    case 2:
        cksm += cpu_lduw_data(env, src) << 16;
        len += 2;
        break;
    case 3:
        cksm += cpu_lduw_data(env, src) << 16;
        cksm += cpu_ldub_data(env, src + 2) << 8;
        len += 3;
        break;
    }

    /* Fold the carry from the checksum.  Note that we can see carry-out
       during folding more than once (but probably not more than twice).  */
    while (cksm > 0xffffffffull) {
        cksm = (uint32_t)cksm + (cksm >> 32);
    }

    /* Indicate whether or not we've processed everything.  */
    env->cc_op = (len == src_len ? 0 : 3);

    /* Return both cksm and processed length.  */
    env->retxl = cksm;
    return len;
}
727
/* UNPACK: expand the packed operand at SRC to zoned format at DEST,
   working right to left.  LEN carries both 4-bit length codes: the
   destination length in its high nibble, the source length in its low
   nibble (each being length minus one).  */
void HELPER(unpk)(CPUS390XState *env, uint32_t len, uint64_t dest,
                  uint64_t src)
{
    int len_dest = len >> 4;
    int len_src = len & 0xf;
    uint8_t b;
    int second_nibble = 0;

    /* Start at the rightmost byte of each operand.  */
    dest += len_dest;
    src += len_src;

    /* last byte is special, it only flips the nibbles */
    b = cpu_ldub_data(env, src);
    cpu_stb_data(env, dest, (b << 4) | (b >> 4));
    src--;
    len_src--;

    /* now pad every nibble with 0xf0 */

    while (len_dest > 0) {
        uint8_t cur_byte = 0;

        /* An exhausted source supplies zero digits.  */
        if (len_src > 0) {
            cur_byte = cpu_ldub_data(env, src);
        }

        len_dest--;
        dest--;

        /* only advance one nibble at a time */
        if (second_nibble) {
            cur_byte >>= 4;
            len_src--;
            src--;
        }
        second_nibble = !second_nibble;

        /* digit */
        cur_byte = (cur_byte & 0xf);
        /* zone bits */
        cur_byte |= 0xf0;

        cpu_stb_data(env, dest, cur_byte);
    }
}
773
774 void HELPER(tr)(CPUS390XState *env, uint32_t len, uint64_t array,
775 uint64_t trans)
776 {
777 int i;
778
779 for (i = 0; i <= len; i++) {
780 uint8_t byte = cpu_ldub_data(env, array + i);
781 uint8_t new_byte = cpu_ldub_data(env, trans + byte);
782
783 cpu_stb_data(env, array + i, new_byte);
784 }
785 }
786
/* TRANSLATE EXTENDED: translate bytes of ARRAY through the table at
   TRANS, stopping at the test byte taken from the low byte of R0.
   Returns the final array address; the residual length is passed back
   in retxl.  CC 0: done, 1: test byte hit, 3: CPU-determined cutoff.  */
uint64_t HELPER(tre)(CPUS390XState *env, uint64_t array,
                     uint64_t len, uint64_t trans)
{
    uint8_t end = env->regs[0] & 0xff;
    uint64_t l = len;
    uint64_t i;

    if (!(env->psw.mask & PSW_MASK_64)) {
        array &= 0x7fffffff;
        l = (uint32_t)l;
    }

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k.  */
    if (l > 0x2000) {
        l = 0x2000;
        env->cc_op = 3;
    } else {
        env->cc_op = 0;
    }

    for (i = 0; i < l; i++) {
        uint8_t byte, new_byte;

        byte = cpu_ldub_data(env, array + i);

        /* Stop (before translating) when the test byte is found.  */
        if (byte == end) {
            env->cc_op = 1;
            break;
        }

        new_byte = cpu_ldub_data(env, trans + byte);
        cpu_stb_data(env, array + i, new_byte);
    }

    env->retxl = len - i;
    return array + i;
}
825
/* TRANSLATE AND TEST: scan ARRAY (LEN = length minus one) until the
   TRANS table entry for a byte is non-zero.  On a hit, R1 receives the
   byte's address and the low byte of R2 the table entry.  CC 0: no hit,
   1: hit before the last byte, 2: hit on the last byte.  */
uint32_t HELPER(trt)(CPUS390XState *env, uint32_t len, uint64_t array,
                     uint64_t trans)
{
    uint32_t cc = 0;
    int i;

    for (i = 0; i <= len; i++) {
        uint8_t byte = cpu_ldub_data(env, array + i);
        uint8_t sbyte = cpu_ldub_data(env, trans + byte);

        if (sbyte != 0) {
            env->regs[1] = array + i;
            env->regs[2] = (env->regs[2] & ~0xff) | sbyte;
            cc = (i == len) ? 2 : 1;
            break;
        }
    }

    return cc;
}
846
847 #if !defined(CONFIG_USER_ONLY)
/* Load 64-bit control registers r1..r3 (wrapping mod 16) from memory.  */
void HELPER(lctlg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    bool PERchanged = false;
    int i;
    uint64_t src = a2;
    uint64_t val;

    for (i = r1;; i = (i + 1) % 16) {
        val = cpu_ldq_data(env, src);
        /* CR9-CR11 carry the PER controls; note any change so the
           watchpoints can be recomputed below.  */
        if (env->cregs[i] != val && i >= 9 && i <= 11) {
            PERchanged = true;
        }
        env->cregs[i] = val;
        HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%" PRIx64 "\n",
                   i, src, env->cregs[i]);
        src += sizeof(uint64_t);

        if (i == r3) {
            break;
        }
    }

    if (PERchanged && env->psw.mask & PSW_MASK_PER) {
        s390_cpu_recompute_watchpoints(CPU(cpu));
    }

    /* Control register updates can affect translation: flush the TLB.  */
    tlb_flush(CPU(cpu), 1);
}
877
/* Load the low 32 bits of control registers r1..r3 (wrapping mod 16)
   from memory; the high halves are preserved.  */
void HELPER(lctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    bool PERchanged = false;
    int i;
    uint64_t src = a2;
    uint32_t val;

    for (i = r1;; i = (i + 1) % 16) {
        val = cpu_ldl_data(env, src);
        /* CR9-CR11 carry the PER controls; note any change so the
           watchpoints can be recomputed below.  */
        if ((uint32_t)env->cregs[i] != val && i >= 9 && i <= 11) {
            PERchanged = true;
        }
        env->cregs[i] = (env->cregs[i] & 0xFFFFFFFF00000000ULL) | val;
        src += sizeof(uint32_t);

        if (i == r3) {
            break;
        }
    }

    if (PERchanged && env->psw.mask & PSW_MASK_PER) {
        s390_cpu_recompute_watchpoints(CPU(cpu));
    }

    /* Control register updates can affect translation: flush the TLB.  */
    tlb_flush(CPU(cpu), 1);
}
905
906 void HELPER(stctg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
907 {
908 int i;
909 uint64_t dest = a2;
910
911 for (i = r1;; i = (i + 1) % 16) {
912 cpu_stq_data(env, dest, env->cregs[i]);
913 dest += sizeof(uint64_t);
914
915 if (i == r3) {
916 break;
917 }
918 }
919 }
920
921 void HELPER(stctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
922 {
923 int i;
924 uint64_t dest = a2;
925
926 for (i = r1;; i = (i + 1) % 16) {
927 cpu_stl_data(env, dest, env->cregs[i]);
928 dest += sizeof(uint32_t);
929
930 if (i == r3) {
931 break;
932 }
933 }
934 }
935
/* test protection: stub that always reports CC 0 (access permitted)
   until the storage-key / protection checks are implemented.  */
uint32_t HELPER(tprot)(uint64_t a1, uint64_t a2)
{
    /* XXX implement */

    return 0;
}
942
/* insert storage key extended: return the storage key of the page
   addressed by R2, or 0 if the address is outside RAM or the skeys
   device reports an error.  */
uint64_t HELPER(iske)(CPUS390XState *env, uint64_t r2)
{
    static S390SKeysState *ss;
    static S390SKeysClass *skeyclass;
    uint64_t addr = get_address(env, 0, 0, r2);
    uint8_t key;

    if (addr > ram_size) {
        return 0;
    }

    /* Cache the skeys device lookup across calls.  */
    if (unlikely(!ss)) {
        ss = s390_get_skeys_device();
        skeyclass = S390_SKEYS_GET_CLASS(ss);
    }

    if (skeyclass->get_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key)) {
        return 0;
    }
    return key;
}
965
/* set storage key extended: set the storage key of the page addressed
   by R2 to the low byte of R1; silently ignored outside RAM.  */
void HELPER(sske)(CPUS390XState *env, uint64_t r1, uint64_t r2)
{
    static S390SKeysState *ss;
    static S390SKeysClass *skeyclass;
    uint64_t addr = get_address(env, 0, 0, r2);
    uint8_t key;

    if (addr > ram_size) {
        return;
    }

    /* Cache the skeys device lookup across calls.  */
    if (unlikely(!ss)) {
        ss = s390_get_skeys_device();
        skeyclass = S390_SKEYS_GET_CLASS(ss);
    }

    key = (uint8_t) r1;
    skeyclass->set_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key);
}
986
/* reset reference bit extended: clear the R bit in the storage key of
   the page addressed by R2 and return a CC encoding the previous R/C
   bits (see table below).  */
uint32_t HELPER(rrbe)(CPUS390XState *env, uint64_t r2)
{
    static S390SKeysState *ss;
    static S390SKeysClass *skeyclass;
    uint8_t re, key;

    if (r2 > ram_size) {
        return 0;
    }

    /* Cache the skeys device lookup across calls.  */
    if (unlikely(!ss)) {
        ss = s390_get_skeys_device();
        skeyclass = S390_SKEYS_GET_CLASS(ss);
    }

    if (skeyclass->get_skeys(ss, r2 / TARGET_PAGE_SIZE, 1, &key)) {
        return 0;
    }

    /* Remember the old reference and change bits, then clear R.  */
    re = key & (SK_R | SK_C);
    key &= ~SK_R;

    if (skeyclass->set_skeys(ss, r2 / TARGET_PAGE_SIZE, 1, &key)) {
        return 0;
    }

    /*
     * cc
     *
     * 0  Reference bit zero; change bit zero
     * 1  Reference bit zero; change bit one
     * 2  Reference bit one; change bit zero
     * 3  Reference bit one; change bit one
     */

    return re >> 1;
}
1025
/* compare and swap and purge: 32-bit compare-and-swap at the word
   addressed by R2 (low bits of R2 are purge flags, not address bits).
   CC 0 on success, 1 on mismatch with the current value loaded into
   the low half of R1.  */
uint32_t HELPER(csp)(CPUS390XState *env, uint32_t r1, uint64_t r2)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint32_t cc;
    uint32_t o1 = env->regs[r1];
    uint64_t a2 = r2 & ~3ULL;   /* strip the flag bits to get the address */
    uint32_t o2 = cpu_ldl_data(env, a2);

    if (o1 == o2) {
        cpu_stl_data(env, a2, env->regs[(r1 + 1) & 15]);
        if (r2 & 0x3) {
            /* flush TLB / ALB */
            tlb_flush(CPU(cpu), 1);
        }
        cc = 0;
    } else {
        env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | o2;
        cc = 1;
    }

    return cc;
}
1049
1050 uint32_t HELPER(mvcs)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
1051 {
1052 int cc = 0, i;
1053
1054 HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
1055 __func__, l, a1, a2);
1056
1057 if (l > 256) {
1058 /* max 256 */
1059 l = 256;
1060 cc = 3;
1061 }
1062
1063 /* XXX replace w/ memcpy */
1064 for (i = 0; i < l; i++) {
1065 cpu_stb_secondary(env, a1 + i, cpu_ldub_primary(env, a2 + i));
1066 }
1067
1068 return cc;
1069 }
1070
1071 uint32_t HELPER(mvcp)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
1072 {
1073 int cc = 0, i;
1074
1075 HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
1076 __func__, l, a1, a2);
1077
1078 if (l > 256) {
1079 /* max 256 */
1080 l = 256;
1081 cc = 3;
1082 }
1083
1084 /* XXX replace w/ memcpy */
1085 for (i = 0; i < l; i++) {
1086 cpu_stb_primary(env, a1 + i, cpu_ldub_secondary(env, a2 + i));
1087 }
1088
1089 return cc;
1090 }
1091
/* invalidate pte: mark the PTE at PTE_ADDR invalid and drop VADDR's
   page from the local TLB.  */
void HELPER(ipte)(CPUS390XState *env, uint64_t pte_addr, uint64_t vaddr)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));
    uint64_t page = vaddr & TARGET_PAGE_MASK;
    uint64_t pte = 0;

    /* XXX broadcast to other CPUs */

    /* XXX Linux is nice enough to give us the exact pte address.
       According to spec we'd have to find it out ourselves */
    /* XXX Linux is fine with overwriting the pte, the spec requires
       us to only set the invalid bit */
    stq_phys(cs->as, pte_addr, pte | _PAGE_INVALID);

    /* XXX we exploit the fact that Linux passes the exact virtual
       address here - it's not obliged to! */
    tlb_flush_page(cs, page);

    /* XXX 31-bit hack: also drop the aliased entry with the opposite
       top bit, since 31-bit addresses map to both forms.  */
    if (page & 0x80000000) {
        tlb_flush_page(cs, page & ~0x80000000);
    } else {
        tlb_flush_page(cs, page | 0x80000000);
    }
}
1118
/* flush local tlb: purge all cached translations for this CPU.  */
void HELPER(ptlb)(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);

    tlb_flush(CPU(cpu), 1);
}
1126
/* load using real address: 32-bit physical load, zero-extended.  */
uint64_t HELPER(lura)(CPUS390XState *env, uint64_t addr)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));

    return (uint32_t)ldl_phys(cs->as, get_address(env, 0, 0, addr));
}
1134
/* load using real address, 64-bit variant.  */
uint64_t HELPER(lurag)(CPUS390XState *env, uint64_t addr)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));

    return ldq_phys(cs->as, get_address(env, 0, 0, addr));
}
1141
/* store using real address: 32-bit physical store, with PER
   storage-alteration-real event recording.  */
void HELPER(stura)(CPUS390XState *env, uint64_t addr, uint64_t v1)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));

    stl_phys(cs->as, get_address(env, 0, 0, addr), (uint32_t)v1);

    if ((env->psw.mask & PSW_MASK_PER) &&
        (env->cregs[9] & PER_CR9_EVENT_STORE) &&
        (env->cregs[9] & PER_CR9_EVENT_STORE_REAL)) {
        /* PSW is saved just before calling the helper.  */
        env->per_address = env->psw.addr;
        env->per_perc_atmid = PER_CODE_EVENT_STORE_REAL | get_per_atmid(env);
    }
}
1157
/* store using real address, 64-bit variant, with PER
   storage-alteration-real event recording.  */
void HELPER(sturg)(CPUS390XState *env, uint64_t addr, uint64_t v1)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));

    stq_phys(cs->as, get_address(env, 0, 0, addr), v1);

    if ((env->psw.mask & PSW_MASK_PER) &&
        (env->cregs[9] & PER_CR9_EVENT_STORE) &&
        (env->cregs[9] & PER_CR9_EVENT_STORE_REAL)) {
        /* PSW is saved just before calling the helper.  */
        env->per_address = env->psw.addr;
        env->per_perc_atmid = PER_CODE_EVENT_STORE_REAL | get_per_atmid(env);
    }
}
1172
/* load real address: translate ADDR through the current ASC mode and
   return the real address, or the program-interruption code (high bit
   set) if translation fails.  CC 3 signals the failure.  */
uint64_t HELPER(lra)(CPUS390XState *env, uint64_t addr)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));
    uint32_t cc = 0;
    int old_exc = cs->exception_index;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;
    uint64_t ret;
    int flags;

    /* XXX incomplete - has more corner cases */
    if (!(env->psw.mask & PSW_MASK_64) && (addr >> 32)) {
        program_interrupt(env, PGM_SPECIAL_OP, 2);
    }

    /* NOTE(review): this writes back the value read a few lines above,
       so the EXCP_PGM test below could also see a previously-pending
       exception; presumably the intent is to catch one newly raised by
       mmu_translate() - confirm.  */
    cs->exception_index = old_exc;
    if (mmu_translate(env, addr, 0, asc, &ret, &flags, true)) {
        cc = 3;
    }
    if (cs->exception_index == EXCP_PGM) {
        /* Report the interruption code in the result instead of taking
           the exception.  */
        ret = env->int_pgm_code | 0x80000000;
    } else {
        ret |= addr & ~TARGET_PAGE_MASK;
    }
    cs->exception_index = old_exc;

    env->cc_op = cc;
    return ret;
}
1202 #endif