target/riscv: vector integer add-with-carry / subtract-with-borrow instructions
[qemu.git] / target/riscv/insn_trans/trans_rvv.inc.c
/*
 * RISC-V translation routines for the RVV Standard Extension.
 *
 * Copyright (c) 2020 T-Head Semiconductor Co., Ltd. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include "tcg/tcg-op-gvec.h"
#include "tcg/tcg-gvec-desc.h"
#include "internals.h"

static bool trans_vsetvl(DisasContext *ctx, arg_vsetvl *a)
{
    TCGv s1, s2, dst;

    if (!has_ext(ctx, RVV)) {
        return false;
    }

    s2 = tcg_temp_new();
    dst = tcg_temp_new();

    /* Using x0 as the rs1 register specifier encodes an infinite AVL */
    if (a->rs1 == 0) {
        /* As the mask is at least one bit, RV_VLEN_MAX is >= VLMAX */
        s1 = tcg_const_tl(RV_VLEN_MAX);
    } else {
        s1 = tcg_temp_new();
        gen_get_gpr(s1, a->rs1);
    }
    gen_get_gpr(s2, a->rs2);
    gen_helper_vsetvl(dst, cpu_env, s1, s2);
    gen_set_gpr(a->rd, dst);
    tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn);
    lookup_and_goto_ptr(ctx);
    ctx->base.is_jmp = DISAS_NORETURN;

    tcg_temp_free(s1);
    tcg_temp_free(s2);
    tcg_temp_free(dst);
    return true;
}
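
/*
 * Note: vsetvl changes vl and vtype at run time, invalidating values
 * such as sew, lmul and vl_eq_vlmax that translation bakes into a TB,
 * so the TB ends here and execution continues through
 * lookup_and_goto_ptr() at the next instruction.
 */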

static bool trans_vsetvli(DisasContext *ctx, arg_vsetvli *a)
{
    TCGv s1, s2, dst;

    if (!has_ext(ctx, RVV)) {
        return false;
    }

    s2 = tcg_const_tl(a->zimm);
    dst = tcg_temp_new();

    /* Using x0 as the rs1 register specifier encodes an infinite AVL */
    if (a->rs1 == 0) {
        /* As the mask is at least one bit, RV_VLEN_MAX is >= VLMAX */
        s1 = tcg_const_tl(RV_VLEN_MAX);
    } else {
        s1 = tcg_temp_new();
        gen_get_gpr(s1, a->rs1);
    }
    gen_helper_vsetvl(dst, cpu_env, s1, s2);
    gen_set_gpr(a->rd, dst);
    gen_goto_tb(ctx, 0, ctx->pc_succ_insn);
    ctx->base.is_jmp = DISAS_NORETURN;

    tcg_temp_free(s1);
    tcg_temp_free(s2);
    tcg_temp_free(dst);
    return true;
}

/* vector register offset from env */
static uint32_t vreg_ofs(DisasContext *s, int reg)
{
    return offsetof(CPURISCVState, vreg) + reg * s->vlen / 8;
}
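
/*
 * For example, assuming VLEN = 128 (s->vlen == 128), each vector
 * register occupies 16 bytes, so v2 lives at
 * offsetof(CPURISCVState, vreg) + 32.
 */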

/* check functions */

/*
 * cpu_get_tb_cpu_state() sets VILL when RVV is not present, so checking
 * vill here covers the RVV extension check as well.
 */
static bool vext_check_isa_ill(DisasContext *s)
{
    return !s->vill;
}

/*
 * There are two rules checked here.
 *
 * 1. Vector register numbers are multiples of LMUL. (Section 3.2)
 *
 * 2. For all widening instructions, the destination LMUL value must also be
 *    a supported LMUL value. (Section 11.2)
 */
static bool vext_check_reg(DisasContext *s, uint32_t reg, bool widen)
{
    /*
     * The destination vector register group results are arranged as if both
     * SEW and LMUL were at twice their current settings. (Section 11.2).
     */
    int legal = widen ? 2 << s->lmul : 1 << s->lmul;

    return !((s->lmul == 0x3 && widen) || (reg % legal));
}

/*
 * There are two rules checked here.
 *
 * 1. The destination vector register group for a masked vector instruction can
 *    only overlap the source mask register (v0) when LMUL=1. (Section 5.3)
 *
 * 2. For widening instructions and some other instructions, such as
 *    vslideup.vx, there is no need to check whether LMUL=1.
 */
static bool vext_check_overlap_mask(DisasContext *s, uint32_t vd, bool vm,
                                    bool force)
{
    return (vm != 0 || vd != 0) || (!force && (s->lmul == 0));
}

/* The LMUL setting must be such that LMUL * NFIELDS <= 8. (Section 7.8) */
static bool vext_check_nf(DisasContext *s, uint32_t nf)
{
    return (1 << s->lmul) * nf <= 8;
}

/*
 * The destination vector register group cannot overlap a source vector register
 * group of a different element width. (Section 11.2)
 */
static inline bool vext_check_overlap_group(int rd, int dlen, int rs, int slen)
{
    return ((rd >= rs + slen) || (rs >= rd + dlen));
}

/* common translation macro */
#define GEN_VEXT_TRANS(NAME, SEQ, ARGTYPE, OP, CHECK)      \
static bool trans_##NAME(DisasContext *s, arg_##ARGTYPE *a)\
{                                                          \
    if (CHECK(s, a)) {                                     \
        return OP(s, a, SEQ);                              \
    }                                                      \
    return false;                                          \
}
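
/*
 * For example, GEN_VEXT_TRANS(vlb_v, 0, r2nfvm, ld_us_op, ld_us_check)
 * below expands to:
 *
 *     static bool trans_vlb_v(DisasContext *s, arg_r2nfvm *a)
 *     {
 *         if (ld_us_check(s, a)) {
 *             return ld_us_op(s, a, 0);
 *         }
 *         return false;
 *     }
 */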

/*
 *** unit stride load and store
 */
typedef void gen_helper_ldst_us(TCGv_ptr, TCGv_ptr, TCGv,
                                TCGv_env, TCGv_i32);

static bool ldst_us_trans(uint32_t vd, uint32_t rs1, uint32_t data,
                          gen_helper_ldst_us *fn, DisasContext *s)
{
    TCGv_ptr dest, mask;
    TCGv base;
    TCGv_i32 desc;

    TCGLabel *over = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);

    dest = tcg_temp_new_ptr();
    mask = tcg_temp_new_ptr();
    base = tcg_temp_new();

    /*
     * As simd_desc supports at most 256 bytes, and in this implementation,
     * the max vector group length is 2048 bytes. So split it into two parts.
     *
     * The first part is vlen in bytes, encoded in maxsz of simd_desc.
     * The second part is lmul, encoded in data of simd_desc.
     */
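    /*
     * For example, at the maximum VLEN of 2048 bits one register holds
     * 256 bytes, which just fits in maxsz, while an LMUL = 8 group
     * (8 * 256 = 2048 bytes) would not; the helper reconstructs the
     * full group length from the lmul field at run time.
     */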
    desc = tcg_const_i32(simd_desc(0, s->vlen / 8, data));

    gen_get_gpr(base, rs1);
    tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
    tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));

    fn(dest, mask, base, cpu_env, desc);

    tcg_temp_free_ptr(dest);
    tcg_temp_free_ptr(mask);
    tcg_temp_free(base);
    tcg_temp_free_i32(desc);
    gen_set_label(over);
    return true;
}

static bool ld_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t seq)
{
    uint32_t data = 0;
    gen_helper_ldst_us *fn;
    static gen_helper_ldst_us * const fns[2][7][4] = {
        /* masked unit stride load */
        { { gen_helper_vlb_v_b_mask,  gen_helper_vlb_v_h_mask,
            gen_helper_vlb_v_w_mask,  gen_helper_vlb_v_d_mask },
          { NULL,                     gen_helper_vlh_v_h_mask,
            gen_helper_vlh_v_w_mask,  gen_helper_vlh_v_d_mask },
          { NULL,                     NULL,
            gen_helper_vlw_v_w_mask,  gen_helper_vlw_v_d_mask },
          { gen_helper_vle_v_b_mask,  gen_helper_vle_v_h_mask,
            gen_helper_vle_v_w_mask,  gen_helper_vle_v_d_mask },
          { gen_helper_vlbu_v_b_mask, gen_helper_vlbu_v_h_mask,
            gen_helper_vlbu_v_w_mask, gen_helper_vlbu_v_d_mask },
          { NULL,                     gen_helper_vlhu_v_h_mask,
            gen_helper_vlhu_v_w_mask, gen_helper_vlhu_v_d_mask },
          { NULL,                     NULL,
            gen_helper_vlwu_v_w_mask, gen_helper_vlwu_v_d_mask } },
        /* unmasked unit stride load */
        { { gen_helper_vlb_v_b,  gen_helper_vlb_v_h,
            gen_helper_vlb_v_w,  gen_helper_vlb_v_d },
          { NULL,                gen_helper_vlh_v_h,
            gen_helper_vlh_v_w,  gen_helper_vlh_v_d },
          { NULL,                NULL,
            gen_helper_vlw_v_w,  gen_helper_vlw_v_d },
          { gen_helper_vle_v_b,  gen_helper_vle_v_h,
            gen_helper_vle_v_w,  gen_helper_vle_v_d },
          { gen_helper_vlbu_v_b, gen_helper_vlbu_v_h,
            gen_helper_vlbu_v_w, gen_helper_vlbu_v_d },
          { NULL,                gen_helper_vlhu_v_h,
            gen_helper_vlhu_v_w, gen_helper_vlhu_v_d },
          { NULL,                NULL,
            gen_helper_vlwu_v_w, gen_helper_vlwu_v_d } }
    };

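    /*
     * fns is indexed by the mask bit (a->vm), the instruction variant
     * (seq) and the element width (s->sew).  NULL entries mark
     * combinations the specification leaves undefined, e.g. vlh
     * (a 16-bit load) with SEW = 8.
     */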
    fn = fns[a->vm][seq][s->sew];
    if (fn == NULL) {
        return false;
    }

    data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    return ldst_us_trans(a->rd, a->rs1, data, fn, s);
}

static bool ld_us_check(DisasContext *s, arg_r2nfvm* a)
{
    return (vext_check_isa_ill(s) &&
            vext_check_overlap_mask(s, a->rd, a->vm, false) &&
            vext_check_reg(s, a->rd, false) &&
            vext_check_nf(s, a->nf));
}

GEN_VEXT_TRANS(vlb_v, 0, r2nfvm, ld_us_op, ld_us_check)
GEN_VEXT_TRANS(vlh_v, 1, r2nfvm, ld_us_op, ld_us_check)
GEN_VEXT_TRANS(vlw_v, 2, r2nfvm, ld_us_op, ld_us_check)
GEN_VEXT_TRANS(vle_v, 3, r2nfvm, ld_us_op, ld_us_check)
GEN_VEXT_TRANS(vlbu_v, 4, r2nfvm, ld_us_op, ld_us_check)
GEN_VEXT_TRANS(vlhu_v, 5, r2nfvm, ld_us_op, ld_us_check)
GEN_VEXT_TRANS(vlwu_v, 6, r2nfvm, ld_us_op, ld_us_check)

static bool st_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t seq)
{
    uint32_t data = 0;
    gen_helper_ldst_us *fn;
    static gen_helper_ldst_us * const fns[2][4][4] = {
        /* masked unit stride store */
        { { gen_helper_vsb_v_b_mask, gen_helper_vsb_v_h_mask,
            gen_helper_vsb_v_w_mask, gen_helper_vsb_v_d_mask },
          { NULL,                    gen_helper_vsh_v_h_mask,
            gen_helper_vsh_v_w_mask, gen_helper_vsh_v_d_mask },
          { NULL,                    NULL,
            gen_helper_vsw_v_w_mask, gen_helper_vsw_v_d_mask },
          { gen_helper_vse_v_b_mask, gen_helper_vse_v_h_mask,
            gen_helper_vse_v_w_mask, gen_helper_vse_v_d_mask } },
        /* unmasked unit stride store */
        { { gen_helper_vsb_v_b, gen_helper_vsb_v_h,
            gen_helper_vsb_v_w, gen_helper_vsb_v_d },
          { NULL,               gen_helper_vsh_v_h,
            gen_helper_vsh_v_w, gen_helper_vsh_v_d },
          { NULL,               NULL,
            gen_helper_vsw_v_w, gen_helper_vsw_v_d },
          { gen_helper_vse_v_b, gen_helper_vse_v_h,
            gen_helper_vse_v_w, gen_helper_vse_v_d } }
    };

    fn = fns[a->vm][seq][s->sew];
    if (fn == NULL) {
        return false;
    }

    data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    return ldst_us_trans(a->rd, a->rs1, data, fn, s);
}

static bool st_us_check(DisasContext *s, arg_r2nfvm* a)
{
    return (vext_check_isa_ill(s) &&
            vext_check_reg(s, a->rd, false) &&
            vext_check_nf(s, a->nf));
}

GEN_VEXT_TRANS(vsb_v, 0, r2nfvm, st_us_op, st_us_check)
GEN_VEXT_TRANS(vsh_v, 1, r2nfvm, st_us_op, st_us_check)
GEN_VEXT_TRANS(vsw_v, 2, r2nfvm, st_us_op, st_us_check)
GEN_VEXT_TRANS(vse_v, 3, r2nfvm, st_us_op, st_us_check)

/*
 *** stride load and store
 */
typedef void gen_helper_ldst_stride(TCGv_ptr, TCGv_ptr, TCGv,
                                    TCGv, TCGv_env, TCGv_i32);

static bool ldst_stride_trans(uint32_t vd, uint32_t rs1, uint32_t rs2,
                              uint32_t data, gen_helper_ldst_stride *fn,
                              DisasContext *s)
{
    TCGv_ptr dest, mask;
    TCGv base, stride;
    TCGv_i32 desc;

    TCGLabel *over = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);

    dest = tcg_temp_new_ptr();
    mask = tcg_temp_new_ptr();
    base = tcg_temp_new();
    stride = tcg_temp_new();
    desc = tcg_const_i32(simd_desc(0, s->vlen / 8, data));

    gen_get_gpr(base, rs1);
    gen_get_gpr(stride, rs2);
    tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
    tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));

    fn(dest, mask, base, stride, cpu_env, desc);

    tcg_temp_free_ptr(dest);
    tcg_temp_free_ptr(mask);
    tcg_temp_free(base);
    tcg_temp_free(stride);
    tcg_temp_free_i32(desc);
    gen_set_label(over);
    return true;
}

static bool ld_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t seq)
{
    uint32_t data = 0;
    gen_helper_ldst_stride *fn;
    static gen_helper_ldst_stride * const fns[7][4] = {
        { gen_helper_vlsb_v_b,  gen_helper_vlsb_v_h,
          gen_helper_vlsb_v_w,  gen_helper_vlsb_v_d },
        { NULL,                 gen_helper_vlsh_v_h,
          gen_helper_vlsh_v_w,  gen_helper_vlsh_v_d },
        { NULL,                 NULL,
          gen_helper_vlsw_v_w,  gen_helper_vlsw_v_d },
        { gen_helper_vlse_v_b,  gen_helper_vlse_v_h,
          gen_helper_vlse_v_w,  gen_helper_vlse_v_d },
        { gen_helper_vlsbu_v_b, gen_helper_vlsbu_v_h,
          gen_helper_vlsbu_v_w, gen_helper_vlsbu_v_d },
        { NULL,                 gen_helper_vlshu_v_h,
          gen_helper_vlshu_v_w, gen_helper_vlshu_v_d },
        { NULL,                 NULL,
          gen_helper_vlswu_v_w, gen_helper_vlswu_v_d },
    };

    fn = fns[seq][s->sew];
    if (fn == NULL) {
        return false;
    }

    data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s);
}

static bool ld_stride_check(DisasContext *s, arg_rnfvm* a)
{
    return (vext_check_isa_ill(s) &&
            vext_check_overlap_mask(s, a->rd, a->vm, false) &&
            vext_check_reg(s, a->rd, false) &&
            vext_check_nf(s, a->nf));
}

GEN_VEXT_TRANS(vlsb_v, 0, rnfvm, ld_stride_op, ld_stride_check)
GEN_VEXT_TRANS(vlsh_v, 1, rnfvm, ld_stride_op, ld_stride_check)
GEN_VEXT_TRANS(vlsw_v, 2, rnfvm, ld_stride_op, ld_stride_check)
GEN_VEXT_TRANS(vlse_v, 3, rnfvm, ld_stride_op, ld_stride_check)
GEN_VEXT_TRANS(vlsbu_v, 4, rnfvm, ld_stride_op, ld_stride_check)
GEN_VEXT_TRANS(vlshu_v, 5, rnfvm, ld_stride_op, ld_stride_check)
GEN_VEXT_TRANS(vlswu_v, 6, rnfvm, ld_stride_op, ld_stride_check)

static bool st_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t seq)
{
    uint32_t data = 0;
    gen_helper_ldst_stride *fn;
    static gen_helper_ldst_stride * const fns[4][4] = {
        /* masked stride store */
        { gen_helper_vssb_v_b, gen_helper_vssb_v_h,
          gen_helper_vssb_v_w, gen_helper_vssb_v_d },
        { NULL,                gen_helper_vssh_v_h,
          gen_helper_vssh_v_w, gen_helper_vssh_v_d },
        { NULL,                NULL,
          gen_helper_vssw_v_w, gen_helper_vssw_v_d },
        { gen_helper_vsse_v_b, gen_helper_vsse_v_h,
          gen_helper_vsse_v_w, gen_helper_vsse_v_d }
    };

    data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    fn = fns[seq][s->sew];
    if (fn == NULL) {
        return false;
    }

    return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s);
}

static bool st_stride_check(DisasContext *s, arg_rnfvm* a)
{
    return (vext_check_isa_ill(s) &&
            vext_check_reg(s, a->rd, false) &&
            vext_check_nf(s, a->nf));
}

GEN_VEXT_TRANS(vssb_v, 0, rnfvm, st_stride_op, st_stride_check)
GEN_VEXT_TRANS(vssh_v, 1, rnfvm, st_stride_op, st_stride_check)
GEN_VEXT_TRANS(vssw_v, 2, rnfvm, st_stride_op, st_stride_check)
GEN_VEXT_TRANS(vsse_v, 3, rnfvm, st_stride_op, st_stride_check)

/*
 *** index load and store
 */
typedef void gen_helper_ldst_index(TCGv_ptr, TCGv_ptr, TCGv,
                                   TCGv_ptr, TCGv_env, TCGv_i32);

static bool ldst_index_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
                             uint32_t data, gen_helper_ldst_index *fn,
                             DisasContext *s)
{
    TCGv_ptr dest, mask, index;
    TCGv base;
    TCGv_i32 desc;

    TCGLabel *over = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);

    dest = tcg_temp_new_ptr();
    mask = tcg_temp_new_ptr();
    index = tcg_temp_new_ptr();
    base = tcg_temp_new();
    desc = tcg_const_i32(simd_desc(0, s->vlen / 8, data));

    gen_get_gpr(base, rs1);
    tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
    tcg_gen_addi_ptr(index, cpu_env, vreg_ofs(s, vs2));
    tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));

    fn(dest, mask, base, index, cpu_env, desc);

    tcg_temp_free_ptr(dest);
    tcg_temp_free_ptr(mask);
    tcg_temp_free_ptr(index);
    tcg_temp_free(base);
    tcg_temp_free_i32(desc);
    gen_set_label(over);
    return true;
}

static bool ld_index_op(DisasContext *s, arg_rnfvm *a, uint8_t seq)
{
    uint32_t data = 0;
    gen_helper_ldst_index *fn;
    static gen_helper_ldst_index * const fns[7][4] = {
        { gen_helper_vlxb_v_b,  gen_helper_vlxb_v_h,
          gen_helper_vlxb_v_w,  gen_helper_vlxb_v_d },
        { NULL,                 gen_helper_vlxh_v_h,
          gen_helper_vlxh_v_w,  gen_helper_vlxh_v_d },
        { NULL,                 NULL,
          gen_helper_vlxw_v_w,  gen_helper_vlxw_v_d },
        { gen_helper_vlxe_v_b,  gen_helper_vlxe_v_h,
          gen_helper_vlxe_v_w,  gen_helper_vlxe_v_d },
        { gen_helper_vlxbu_v_b, gen_helper_vlxbu_v_h,
          gen_helper_vlxbu_v_w, gen_helper_vlxbu_v_d },
        { NULL,                 gen_helper_vlxhu_v_h,
          gen_helper_vlxhu_v_w, gen_helper_vlxhu_v_d },
        { NULL,                 NULL,
          gen_helper_vlxwu_v_w, gen_helper_vlxwu_v_d },
    };

    fn = fns[seq][s->sew];
    if (fn == NULL) {
        return false;
    }

    data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s);
}

static bool ld_index_check(DisasContext *s, arg_rnfvm* a)
{
    return (vext_check_isa_ill(s) &&
            vext_check_overlap_mask(s, a->rd, a->vm, false) &&
            vext_check_reg(s, a->rd, false) &&
            vext_check_reg(s, a->rs2, false) &&
            vext_check_nf(s, a->nf));
}

GEN_VEXT_TRANS(vlxb_v, 0, rnfvm, ld_index_op, ld_index_check)
GEN_VEXT_TRANS(vlxh_v, 1, rnfvm, ld_index_op, ld_index_check)
GEN_VEXT_TRANS(vlxw_v, 2, rnfvm, ld_index_op, ld_index_check)
GEN_VEXT_TRANS(vlxe_v, 3, rnfvm, ld_index_op, ld_index_check)
GEN_VEXT_TRANS(vlxbu_v, 4, rnfvm, ld_index_op, ld_index_check)
GEN_VEXT_TRANS(vlxhu_v, 5, rnfvm, ld_index_op, ld_index_check)
GEN_VEXT_TRANS(vlxwu_v, 6, rnfvm, ld_index_op, ld_index_check)

static bool st_index_op(DisasContext *s, arg_rnfvm *a, uint8_t seq)
{
    uint32_t data = 0;
    gen_helper_ldst_index *fn;
    static gen_helper_ldst_index * const fns[4][4] = {
        { gen_helper_vsxb_v_b, gen_helper_vsxb_v_h,
          gen_helper_vsxb_v_w, gen_helper_vsxb_v_d },
        { NULL,                gen_helper_vsxh_v_h,
          gen_helper_vsxh_v_w, gen_helper_vsxh_v_d },
        { NULL,                NULL,
          gen_helper_vsxw_v_w, gen_helper_vsxw_v_d },
        { gen_helper_vsxe_v_b, gen_helper_vsxe_v_h,
          gen_helper_vsxe_v_w, gen_helper_vsxe_v_d }
    };

    fn = fns[seq][s->sew];
    if (fn == NULL) {
        return false;
    }

    data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s);
}

static bool st_index_check(DisasContext *s, arg_rnfvm* a)
{
    return (vext_check_isa_ill(s) &&
            vext_check_reg(s, a->rd, false) &&
            vext_check_reg(s, a->rs2, false) &&
            vext_check_nf(s, a->nf));
}

GEN_VEXT_TRANS(vsxb_v, 0, rnfvm, st_index_op, st_index_check)
GEN_VEXT_TRANS(vsxh_v, 1, rnfvm, st_index_op, st_index_check)
GEN_VEXT_TRANS(vsxw_v, 2, rnfvm, st_index_op, st_index_check)
GEN_VEXT_TRANS(vsxe_v, 3, rnfvm, st_index_op, st_index_check)

/*
 *** unit stride fault-only-first load
 */
static bool ldff_trans(uint32_t vd, uint32_t rs1, uint32_t data,
                       gen_helper_ldst_us *fn, DisasContext *s)
{
    TCGv_ptr dest, mask;
    TCGv base;
    TCGv_i32 desc;

    TCGLabel *over = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);

    dest = tcg_temp_new_ptr();
    mask = tcg_temp_new_ptr();
    base = tcg_temp_new();
    desc = tcg_const_i32(simd_desc(0, s->vlen / 8, data));

    gen_get_gpr(base, rs1);
    tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
    tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));

    fn(dest, mask, base, cpu_env, desc);

    tcg_temp_free_ptr(dest);
    tcg_temp_free_ptr(mask);
    tcg_temp_free(base);
    tcg_temp_free_i32(desc);
    gen_set_label(over);
    return true;
}

static bool ldff_op(DisasContext *s, arg_r2nfvm *a, uint8_t seq)
{
    uint32_t data = 0;
    gen_helper_ldst_us *fn;
    static gen_helper_ldst_us * const fns[7][4] = {
        { gen_helper_vlbff_v_b,  gen_helper_vlbff_v_h,
          gen_helper_vlbff_v_w,  gen_helper_vlbff_v_d },
        { NULL,                  gen_helper_vlhff_v_h,
          gen_helper_vlhff_v_w,  gen_helper_vlhff_v_d },
        { NULL,                  NULL,
          gen_helper_vlwff_v_w,  gen_helper_vlwff_v_d },
        { gen_helper_vleff_v_b,  gen_helper_vleff_v_h,
          gen_helper_vleff_v_w,  gen_helper_vleff_v_d },
        { gen_helper_vlbuff_v_b, gen_helper_vlbuff_v_h,
          gen_helper_vlbuff_v_w, gen_helper_vlbuff_v_d },
        { NULL,                  gen_helper_vlhuff_v_h,
          gen_helper_vlhuff_v_w, gen_helper_vlhuff_v_d },
        { NULL,                  NULL,
          gen_helper_vlwuff_v_w, gen_helper_vlwuff_v_d }
    };

    fn = fns[seq][s->sew];
    if (fn == NULL) {
        return false;
    }

    data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    return ldff_trans(a->rd, a->rs1, data, fn, s);
}

GEN_VEXT_TRANS(vlbff_v, 0, r2nfvm, ldff_op, ld_us_check)
GEN_VEXT_TRANS(vlhff_v, 1, r2nfvm, ldff_op, ld_us_check)
GEN_VEXT_TRANS(vlwff_v, 2, r2nfvm, ldff_op, ld_us_check)
GEN_VEXT_TRANS(vleff_v, 3, r2nfvm, ldff_op, ld_us_check)
GEN_VEXT_TRANS(vlbuff_v, 4, r2nfvm, ldff_op, ld_us_check)
GEN_VEXT_TRANS(vlhuff_v, 5, r2nfvm, ldff_op, ld_us_check)
GEN_VEXT_TRANS(vlwuff_v, 6, r2nfvm, ldff_op, ld_us_check)

/*
 *** vector atomic operation
 */
typedef void gen_helper_amo(TCGv_ptr, TCGv_ptr, TCGv, TCGv_ptr,
                            TCGv_env, TCGv_i32);

static bool amo_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
                      uint32_t data, gen_helper_amo *fn, DisasContext *s)
{
    TCGv_ptr dest, mask, index;
    TCGv base;
    TCGv_i32 desc;

    TCGLabel *over = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);

    dest = tcg_temp_new_ptr();
    mask = tcg_temp_new_ptr();
    index = tcg_temp_new_ptr();
    base = tcg_temp_new();
    desc = tcg_const_i32(simd_desc(0, s->vlen / 8, data));

    gen_get_gpr(base, rs1);
    tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
    tcg_gen_addi_ptr(index, cpu_env, vreg_ofs(s, vs2));
    tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));

    fn(dest, mask, base, index, cpu_env, desc);

    tcg_temp_free_ptr(dest);
    tcg_temp_free_ptr(mask);
    tcg_temp_free_ptr(index);
    tcg_temp_free(base);
    tcg_temp_free_i32(desc);
    gen_set_label(over);
    return true;
}

static bool amo_op(DisasContext *s, arg_rwdvm *a, uint8_t seq)
{
    uint32_t data = 0;
    gen_helper_amo *fn;
    static gen_helper_amo *const fnsw[9] = {
        /* 32-bit AMO operations, used when SEW == 32 */
        gen_helper_vamoswapw_v_w,
        gen_helper_vamoaddw_v_w,
        gen_helper_vamoxorw_v_w,
        gen_helper_vamoandw_v_w,
        gen_helper_vamoorw_v_w,
        gen_helper_vamominw_v_w,
        gen_helper_vamomaxw_v_w,
        gen_helper_vamominuw_v_w,
        gen_helper_vamomaxuw_v_w
    };
#ifdef TARGET_RISCV64
    static gen_helper_amo *const fnsd[18] = {
        gen_helper_vamoswapw_v_d,
        gen_helper_vamoaddw_v_d,
        gen_helper_vamoxorw_v_d,
        gen_helper_vamoandw_v_d,
        gen_helper_vamoorw_v_d,
        gen_helper_vamominw_v_d,
        gen_helper_vamomaxw_v_d,
        gen_helper_vamominuw_v_d,
        gen_helper_vamomaxuw_v_d,
        gen_helper_vamoswapd_v_d,
        gen_helper_vamoaddd_v_d,
        gen_helper_vamoxord_v_d,
        gen_helper_vamoandd_v_d,
        gen_helper_vamoord_v_d,
        gen_helper_vamomind_v_d,
        gen_helper_vamomaxd_v_d,
        gen_helper_vamominud_v_d,
        gen_helper_vamomaxud_v_d
    };
#endif

    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        gen_helper_exit_atomic(cpu_env);
        s->base.is_jmp = DISAS_NORETURN;
        return true;
    } else {
        if (s->sew == 3) {
#ifdef TARGET_RISCV64
            fn = fnsd[seq];
#else
            /* Check done in amo_check(). */
            g_assert_not_reached();
#endif
        } else {
            fn = fnsw[seq];
        }
    }

    data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
    data = FIELD_DP32(data, VDATA, WD, a->wd);
    return amo_trans(a->rd, a->rs1, a->rs2, data, fn, s);
}

/*
 * There are two rules checked here.
 *
 * 1. SEW must be at least as wide as the AMO memory element size.
 *
 * 2. If SEW is greater than XLEN, an illegal instruction exception is raised.
 */
static bool amo_check(DisasContext *s, arg_rwdvm* a)
{
    return (!s->vill && has_ext(s, RVA) &&
            (!a->wd || vext_check_overlap_mask(s, a->rd, a->vm, false)) &&
            vext_check_reg(s, a->rd, false) &&
            vext_check_reg(s, a->rs2, false) &&
            ((1 << s->sew) <= sizeof(target_ulong)) &&
            ((1 << s->sew) >= 4));
}

GEN_VEXT_TRANS(vamoswapw_v, 0, rwdvm, amo_op, amo_check)
GEN_VEXT_TRANS(vamoaddw_v, 1, rwdvm, amo_op, amo_check)
GEN_VEXT_TRANS(vamoxorw_v, 2, rwdvm, amo_op, amo_check)
GEN_VEXT_TRANS(vamoandw_v, 3, rwdvm, amo_op, amo_check)
GEN_VEXT_TRANS(vamoorw_v, 4, rwdvm, amo_op, amo_check)
GEN_VEXT_TRANS(vamominw_v, 5, rwdvm, amo_op, amo_check)
GEN_VEXT_TRANS(vamomaxw_v, 6, rwdvm, amo_op, amo_check)
GEN_VEXT_TRANS(vamominuw_v, 7, rwdvm, amo_op, amo_check)
GEN_VEXT_TRANS(vamomaxuw_v, 8, rwdvm, amo_op, amo_check)
#ifdef TARGET_RISCV64
GEN_VEXT_TRANS(vamoswapd_v, 9, rwdvm, amo_op, amo_check)
GEN_VEXT_TRANS(vamoaddd_v, 10, rwdvm, amo_op, amo_check)
GEN_VEXT_TRANS(vamoxord_v, 11, rwdvm, amo_op, amo_check)
GEN_VEXT_TRANS(vamoandd_v, 12, rwdvm, amo_op, amo_check)
GEN_VEXT_TRANS(vamoord_v, 13, rwdvm, amo_op, amo_check)
GEN_VEXT_TRANS(vamomind_v, 14, rwdvm, amo_op, amo_check)
GEN_VEXT_TRANS(vamomaxd_v, 15, rwdvm, amo_op, amo_check)
GEN_VEXT_TRANS(vamominud_v, 16, rwdvm, amo_op, amo_check)
GEN_VEXT_TRANS(vamomaxud_v, 17, rwdvm, amo_op, amo_check)
#endif

/*
 *** Vector Integer Arithmetic Instructions
 */
#define MAXSZ(s) (s->vlen >> (3 - s->lmul))
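
/*
 * MAXSZ(s) is the size of a vector register group in bytes.  For
 * example, with s->vlen == 128 and s->lmul == 1 (LMUL = 2) it is
 * 128 >> 2 = 32 bytes, i.e. a group of two 16-byte registers.
 */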

static bool opivv_check(DisasContext *s, arg_rmrr *a)
{
    return (vext_check_isa_ill(s) &&
            vext_check_overlap_mask(s, a->rd, a->vm, false) &&
            vext_check_reg(s, a->rd, false) &&
            vext_check_reg(s, a->rs2, false) &&
            vext_check_reg(s, a->rs1, false));
}

typedef void GVecGen3Fn(unsigned, uint32_t, uint32_t,
                        uint32_t, uint32_t, uint32_t);

static inline bool
do_opivv_gvec(DisasContext *s, arg_rmrr *a, GVecGen3Fn *gvec_fn,
              gen_helper_gvec_4_ptr *fn)
{
    TCGLabel *over = gen_new_label();
    if (!opivv_check(s, a)) {
        return false;
    }

    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);

    if (a->vm && s->vl_eq_vlmax) {
        gvec_fn(s->sew, vreg_ofs(s, a->rd),
                vreg_ofs(s, a->rs2), vreg_ofs(s, a->rs1),
                MAXSZ(s), MAXSZ(s));
    } else {
        uint32_t data = 0;

        data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
        data = FIELD_DP32(data, VDATA, VM, a->vm);
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
        tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
                           vreg_ofs(s, a->rs1), vreg_ofs(s, a->rs2),
                           cpu_env, 0, s->vlen / 8, data, fn);
    }
    gen_set_label(over);
    return true;
}
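
/*
 * Design note: when the operation is unmasked and vl equals VLMAX,
 * every element of the register group is active, so the operation can
 * be expanded inline as host SIMD via tcg_gen_gvec_*.  Otherwise the
 * out-of-line helper applies the mask and handles the tail element by
 * element.
 */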

/* OPIVV with GVEC IR */
#define GEN_OPIVV_GVEC_TRANS(NAME, SUF)                            \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)             \
{                                                                  \
    static gen_helper_gvec_4_ptr * const fns[4] = {                \
        gen_helper_##NAME##_b, gen_helper_##NAME##_h,              \
        gen_helper_##NAME##_w, gen_helper_##NAME##_d,              \
    };                                                             \
    return do_opivv_gvec(s, a, tcg_gen_gvec_##SUF, fns[s->sew]);   \
}

GEN_OPIVV_GVEC_TRANS(vadd_vv, add)
GEN_OPIVV_GVEC_TRANS(vsub_vv, sub)

typedef void gen_helper_opivx(TCGv_ptr, TCGv_ptr, TCGv, TCGv_ptr,
                              TCGv_env, TCGv_i32);

static bool opivx_trans(uint32_t vd, uint32_t rs1, uint32_t vs2, uint32_t vm,
                        gen_helper_opivx *fn, DisasContext *s)
{
    TCGv_ptr dest, src2, mask;
    TCGv src1;
    TCGv_i32 desc;
    uint32_t data = 0;

    TCGLabel *over = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);

    dest = tcg_temp_new_ptr();
    mask = tcg_temp_new_ptr();
    src2 = tcg_temp_new_ptr();
    src1 = tcg_temp_new();
    gen_get_gpr(src1, rs1);

    data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
    data = FIELD_DP32(data, VDATA, VM, vm);
    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
    desc = tcg_const_i32(simd_desc(0, s->vlen / 8, data));

    tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
    tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, vs2));
    tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));

    fn(dest, mask, src1, src2, cpu_env, desc);

    tcg_temp_free_ptr(dest);
    tcg_temp_free_ptr(mask);
    tcg_temp_free_ptr(src2);
    tcg_temp_free(src1);
    tcg_temp_free_i32(desc);
    gen_set_label(over);
    return true;
}

static bool opivx_check(DisasContext *s, arg_rmrr *a)
{
    return (vext_check_isa_ill(s) &&
            vext_check_overlap_mask(s, a->rd, a->vm, false) &&
            vext_check_reg(s, a->rd, false) &&
            vext_check_reg(s, a->rs2, false));
}

typedef void GVecGen2sFn(unsigned, uint32_t, uint32_t, TCGv_i64,
                         uint32_t, uint32_t);

static inline bool
do_opivx_gvec(DisasContext *s, arg_rmrr *a, GVecGen2sFn *gvec_fn,
              gen_helper_opivx *fn)
{
    if (!opivx_check(s, a)) {
        return false;
    }

    if (a->vm && s->vl_eq_vlmax) {
        TCGv_i64 src1 = tcg_temp_new_i64();
        TCGv tmp = tcg_temp_new();

        gen_get_gpr(tmp, a->rs1);
        tcg_gen_ext_tl_i64(src1, tmp);
        gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2),
                src1, MAXSZ(s), MAXSZ(s));

        tcg_temp_free_i64(src1);
        tcg_temp_free(tmp);
        return true;
    }
    return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s);
}

/* OPIVX with GVEC IR */
#define GEN_OPIVX_GVEC_TRANS(NAME, SUF)                            \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)             \
{                                                                  \
    static gen_helper_opivx * const fns[4] = {                     \
        gen_helper_##NAME##_b, gen_helper_##NAME##_h,              \
        gen_helper_##NAME##_w, gen_helper_##NAME##_d,              \
    };                                                             \
    return do_opivx_gvec(s, a, tcg_gen_gvec_##SUF, fns[s->sew]);   \
}

GEN_OPIVX_GVEC_TRANS(vadd_vx, adds)
GEN_OPIVX_GVEC_TRANS(vsub_vx, subs)

static void gen_vec_rsub8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_vec_sub8_i64(d, b, a);
}

static void gen_vec_rsub16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_vec_sub16_i64(d, b, a);
}

static void gen_rsub_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    tcg_gen_sub_i32(ret, arg2, arg1);
}

static void gen_rsub_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    tcg_gen_sub_i64(ret, arg2, arg1);
}

static void gen_rsub_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_sub_vec(vece, r, b, a);
}

static void tcg_gen_gvec_rsubs(unsigned vece, uint32_t dofs, uint32_t aofs,
                               TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen2s rsub_op[4] = {
        { .fni8 = gen_vec_rsub8_i64,
          .fniv = gen_rsub_vec,
          .fno = gen_helper_vec_rsubs8,
          .vece = MO_8 },
        { .fni8 = gen_vec_rsub16_i64,
          .fniv = gen_rsub_vec,
          .fno = gen_helper_vec_rsubs16,
          .vece = MO_16 },
        { .fni4 = gen_rsub_i32,
          .fniv = gen_rsub_vec,
          .fno = gen_helper_vec_rsubs32,
          .vece = MO_32 },
        { .fni8 = gen_rsub_i64,
          .fniv = gen_rsub_vec,
          .fno = gen_helper_vec_rsubs64,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, c, &rsub_op[vece]);
}
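
/*
 * TCG's gvec layer provides adds/subs but no reverse-subtract
 * primitive, so one is composed here from sub with the operands
 * swapped.  The GVecGen2s table supplies an implementation per element
 * size; the .fno out-of-line fallbacks (gen_helper_vec_rsubs8 etc.)
 * are assumed to be provided alongside the other vector helpers.
 */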

GEN_OPIVX_GVEC_TRANS(vrsub_vx, rsubs)

static bool opivi_trans(uint32_t vd, uint32_t imm, uint32_t vs2, uint32_t vm,
                        gen_helper_opivx *fn, DisasContext *s, int zx)
{
    TCGv_ptr dest, src2, mask;
    TCGv src1;
    TCGv_i32 desc;
    uint32_t data = 0;

    TCGLabel *over = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);

    dest = tcg_temp_new_ptr();
    mask = tcg_temp_new_ptr();
    src2 = tcg_temp_new_ptr();
    if (zx) {
        src1 = tcg_const_tl(imm);
    } else {
        src1 = tcg_const_tl(sextract64(imm, 0, 5));
    }
    data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
    data = FIELD_DP32(data, VDATA, VM, vm);
    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
    desc = tcg_const_i32(simd_desc(0, s->vlen / 8, data));

    tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
    tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, vs2));
    tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));

    fn(dest, mask, src1, src2, cpu_env, desc);

    tcg_temp_free_ptr(dest);
    tcg_temp_free_ptr(mask);
    tcg_temp_free_ptr(src2);
    tcg_temp_free(src1);
    tcg_temp_free_i32(desc);
    gen_set_label(over);
    return true;
}

typedef void GVecGen2iFn(unsigned, uint32_t, uint32_t, int64_t,
                         uint32_t, uint32_t);

static inline bool
do_opivi_gvec(DisasContext *s, arg_rmrr *a, GVecGen2iFn *gvec_fn,
              gen_helper_opivx *fn, int zx)
{
    if (!opivx_check(s, a)) {
        return false;
    }

    if (a->vm && s->vl_eq_vlmax) {
        if (zx) {
            gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2),
                    extract64(a->rs1, 0, 5), MAXSZ(s), MAXSZ(s));
        } else {
            gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2),
                    sextract64(a->rs1, 0, 5), MAXSZ(s), MAXSZ(s));
        }
    } else {
        return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s, zx);
    }
    return true;
}

/* OPIVI with GVEC IR */
#define GEN_OPIVI_GVEC_TRANS(NAME, ZX, OPIVX, SUF)                 \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)             \
{                                                                  \
    static gen_helper_opivx * const fns[4] = {                     \
        gen_helper_##OPIVX##_b, gen_helper_##OPIVX##_h,            \
        gen_helper_##OPIVX##_w, gen_helper_##OPIVX##_d,            \
    };                                                             \
    return do_opivi_gvec(s, a, tcg_gen_gvec_##SUF,                 \
                         fns[s->sew], ZX);                         \
}

GEN_OPIVI_GVEC_TRANS(vadd_vi, 0, vadd_vx, addi)

static void tcg_gen_gvec_rsubi(unsigned vece, uint32_t dofs, uint32_t aofs,
                               int64_t c, uint32_t oprsz, uint32_t maxsz)
{
    TCGv_i64 tmp = tcg_const_i64(c);
    tcg_gen_gvec_rsubs(vece, dofs, aofs, tmp, oprsz, maxsz);
    tcg_temp_free_i64(tmp);
}
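
/*
 * vrsub.vi below thus computes vd[i] = simm5 - vs2[i]: the immediate
 * is materialized in a TCGv_i64 and reuses the rsubs expansion.
 */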

GEN_OPIVI_GVEC_TRANS(vrsub_vi, 0, vrsub_vx, rsubi)

/* Vector Widening Integer Add/Subtract */

/* OPIVV with WIDEN */
static bool opivv_widen_check(DisasContext *s, arg_rmrr *a)
{
    return (vext_check_isa_ill(s) &&
            vext_check_overlap_mask(s, a->rd, a->vm, true) &&
            vext_check_reg(s, a->rd, true) &&
            vext_check_reg(s, a->rs2, false) &&
            vext_check_reg(s, a->rs1, false) &&
            vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs2,
                                     1 << s->lmul) &&
            vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs1,
                                     1 << s->lmul) &&
            (s->lmul < 0x3) && (s->sew < 0x3));
}
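
/*
 * For example, with LMUL = 1 (s->lmul == 0) the widened destination
 * occupies 2 registers while each source occupies 1, so rd = 2 with
 * rs2 = 3 is rejected: v3 lies inside the destination group v2-v3.
 */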

static bool do_opivv_widen(DisasContext *s, arg_rmrr *a,
                           gen_helper_gvec_4_ptr *fn,
                           bool (*checkfn)(DisasContext *, arg_rmrr *))
{
    if (checkfn(s, a)) {
        uint32_t data = 0;
        TCGLabel *over = gen_new_label();
        tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);

        data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
        data = FIELD_DP32(data, VDATA, VM, a->vm);
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
        tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
                           vreg_ofs(s, a->rs1),
                           vreg_ofs(s, a->rs2),
                           cpu_env, 0, s->vlen / 8,
                           data, fn);
        gen_set_label(over);
        return true;
    }
    return false;
}

#define GEN_OPIVV_WIDEN_TRANS(NAME, CHECK)                   \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)       \
{                                                            \
    static gen_helper_gvec_4_ptr * const fns[3] = {          \
        gen_helper_##NAME##_b,                               \
        gen_helper_##NAME##_h,                               \
        gen_helper_##NAME##_w                                \
    };                                                       \
    return do_opivv_widen(s, a, fns[s->sew], CHECK);         \
}

GEN_OPIVV_WIDEN_TRANS(vwaddu_vv, opivv_widen_check)
GEN_OPIVV_WIDEN_TRANS(vwadd_vv, opivv_widen_check)
GEN_OPIVV_WIDEN_TRANS(vwsubu_vv, opivv_widen_check)
GEN_OPIVV_WIDEN_TRANS(vwsub_vv, opivv_widen_check)

/* OPIVX with WIDEN */
static bool opivx_widen_check(DisasContext *s, arg_rmrr *a)
{
    return (vext_check_isa_ill(s) &&
            vext_check_overlap_mask(s, a->rd, a->vm, true) &&
            vext_check_reg(s, a->rd, true) &&
            vext_check_reg(s, a->rs2, false) &&
            vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs2,
                                     1 << s->lmul) &&
            (s->lmul < 0x3) && (s->sew < 0x3));
}

static bool do_opivx_widen(DisasContext *s, arg_rmrr *a,
                           gen_helper_opivx *fn)
{
    if (opivx_widen_check(s, a)) {
        return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s);
    }
    return false;
}

#define GEN_OPIVX_WIDEN_TRANS(NAME)                          \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)       \
{                                                            \
    static gen_helper_opivx * const fns[3] = {               \
        gen_helper_##NAME##_b,                               \
        gen_helper_##NAME##_h,                               \
        gen_helper_##NAME##_w                                \
    };                                                       \
    return do_opivx_widen(s, a, fns[s->sew]);                \
}

GEN_OPIVX_WIDEN_TRANS(vwaddu_vx)
GEN_OPIVX_WIDEN_TRANS(vwadd_vx)
GEN_OPIVX_WIDEN_TRANS(vwsubu_vx)
GEN_OPIVX_WIDEN_TRANS(vwsub_vx)

/* WIDEN OPIVV with WIDEN */
static bool opiwv_widen_check(DisasContext *s, arg_rmrr *a)
{
    return (vext_check_isa_ill(s) &&
            vext_check_overlap_mask(s, a->rd, a->vm, true) &&
            vext_check_reg(s, a->rd, true) &&
            vext_check_reg(s, a->rs2, true) &&
            vext_check_reg(s, a->rs1, false) &&
            vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs1,
                                     1 << s->lmul) &&
            (s->lmul < 0x3) && (s->sew < 0x3));
}

static bool do_opiwv_widen(DisasContext *s, arg_rmrr *a,
                           gen_helper_gvec_4_ptr *fn)
{
    if (opiwv_widen_check(s, a)) {
        uint32_t data = 0;
        TCGLabel *over = gen_new_label();
        tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);

        data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
        data = FIELD_DP32(data, VDATA, VM, a->vm);
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
        tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
                           vreg_ofs(s, a->rs1),
                           vreg_ofs(s, a->rs2),
                           cpu_env, 0, s->vlen / 8, data, fn);
        gen_set_label(over);
        return true;
    }
    return false;
}

#define GEN_OPIWV_WIDEN_TRANS(NAME)                          \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)       \
{                                                            \
    static gen_helper_gvec_4_ptr * const fns[3] = {          \
        gen_helper_##NAME##_b,                               \
        gen_helper_##NAME##_h,                               \
        gen_helper_##NAME##_w                                \
    };                                                       \
    return do_opiwv_widen(s, a, fns[s->sew]);                \
}

GEN_OPIWV_WIDEN_TRANS(vwaddu_wv)
GEN_OPIWV_WIDEN_TRANS(vwadd_wv)
GEN_OPIWV_WIDEN_TRANS(vwsubu_wv)
GEN_OPIWV_WIDEN_TRANS(vwsub_wv)

/* WIDEN OPIVX with WIDEN */
static bool opiwx_widen_check(DisasContext *s, arg_rmrr *a)
{
    return (vext_check_isa_ill(s) &&
            vext_check_overlap_mask(s, a->rd, a->vm, true) &&
            vext_check_reg(s, a->rd, true) &&
            vext_check_reg(s, a->rs2, true) &&
            (s->lmul < 0x3) && (s->sew < 0x3));
}

static bool do_opiwx_widen(DisasContext *s, arg_rmrr *a,
                           gen_helper_opivx *fn)
{
    if (opiwx_widen_check(s, a)) {
        return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s);
    }
    return false;
}

#define GEN_OPIWX_WIDEN_TRANS(NAME)                          \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)       \
{                                                            \
    static gen_helper_opivx * const fns[3] = {               \
        gen_helper_##NAME##_b,                               \
        gen_helper_##NAME##_h,                               \
        gen_helper_##NAME##_w                                \
    };                                                       \
    return do_opiwx_widen(s, a, fns[s->sew]);                \
}

GEN_OPIWX_WIDEN_TRANS(vwaddu_wx)
GEN_OPIWX_WIDEN_TRANS(vwadd_wx)
GEN_OPIWX_WIDEN_TRANS(vwsubu_wx)
GEN_OPIWX_WIDEN_TRANS(vwsub_wx)

/* Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions */
/* OPIVV without GVEC IR */
#define GEN_OPIVV_TRANS(NAME, CHECK)                               \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)             \
{                                                                  \
    if (CHECK(s, a)) {                                             \
        uint32_t data = 0;                                         \
        static gen_helper_gvec_4_ptr * const fns[4] = {            \
            gen_helper_##NAME##_b, gen_helper_##NAME##_h,          \
            gen_helper_##NAME##_w, gen_helper_##NAME##_d,          \
        };                                                         \
        TCGLabel *over = gen_new_label();                          \
        tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);          \
                                                                   \
        data = FIELD_DP32(data, VDATA, MLEN, s->mlen);             \
        data = FIELD_DP32(data, VDATA, VM, a->vm);                 \
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);             \
        tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),     \
                           vreg_ofs(s, a->rs1),                    \
                           vreg_ofs(s, a->rs2), cpu_env, 0,        \
                           s->vlen / 8, data, fns[s->sew]);        \
        gen_set_label(over);                                       \
        return true;                                               \
    }                                                              \
    return false;                                                  \
}
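
/*
 * Note: unlike the GVEC variants above, these always go through an
 * out-of-line helper.  vadc/vmadc/vsbc/vmsbc read their carry/borrow
 * input from v0 in addition to the two sources, so they do not map
 * onto a plain gvec binary expansion.
 */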

/*
 * For vadc and vsbc, an illegal instruction exception is raised if the
 * destination vector register is v0 and LMUL > 1. (Section 12.3)
 */
static bool opivv_vadc_check(DisasContext *s, arg_rmrr *a)
{
    return (vext_check_isa_ill(s) &&
            vext_check_reg(s, a->rd, false) &&
            vext_check_reg(s, a->rs2, false) &&
            vext_check_reg(s, a->rs1, false) &&
            ((a->rd != 0) || (s->lmul == 0)));
}
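
/*
 * With LMUL > 1 a destination group starting at v0 would overlap v0
 * itself, which supplies the carry/borrow input, presumably why the
 * spec forbids rd == 0 in that case; with LMUL = 1 the overlap is
 * permitted, matching the rule for ordinary masked instructions.
 */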

GEN_OPIVV_TRANS(vadc_vvm, opivv_vadc_check)
GEN_OPIVV_TRANS(vsbc_vvm, opivv_vadc_check)

/*
 * For vmadc and vmsbc, an illegal instruction exception is raised if the
 * destination vector register overlaps a source vector register group.
 */
static bool opivv_vmadc_check(DisasContext *s, arg_rmrr *a)
{
    return (vext_check_isa_ill(s) &&
            vext_check_reg(s, a->rs2, false) &&
            vext_check_reg(s, a->rs1, false) &&
            vext_check_overlap_group(a->rd, 1, a->rs1, 1 << s->lmul) &&
            vext_check_overlap_group(a->rd, 1, a->rs2, 1 << s->lmul));
}

GEN_OPIVV_TRANS(vmadc_vvm, opivv_vmadc_check)
GEN_OPIVV_TRANS(vmsbc_vvm, opivv_vmadc_check)

static bool opivx_vadc_check(DisasContext *s, arg_rmrr *a)
{
    return (vext_check_isa_ill(s) &&
            vext_check_reg(s, a->rd, false) &&
            vext_check_reg(s, a->rs2, false) &&
            ((a->rd != 0) || (s->lmul == 0)));
}

/* OPIVX without GVEC IR */
#define GEN_OPIVX_TRANS(NAME, CHECK)                                     \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)                   \
{                                                                        \
    if (CHECK(s, a)) {                                                   \
        static gen_helper_opivx * const fns[4] = {                       \
            gen_helper_##NAME##_b, gen_helper_##NAME##_h,                \
            gen_helper_##NAME##_w, gen_helper_##NAME##_d,                \
        };                                                               \
                                                                         \
        return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s);\
    }                                                                    \
    return false;                                                        \
}

GEN_OPIVX_TRANS(vadc_vxm, opivx_vadc_check)
GEN_OPIVX_TRANS(vsbc_vxm, opivx_vadc_check)

static bool opivx_vmadc_check(DisasContext *s, arg_rmrr *a)
{
    return (vext_check_isa_ill(s) &&
            vext_check_reg(s, a->rs2, false) &&
            vext_check_overlap_group(a->rd, 1, a->rs2, 1 << s->lmul));
}

GEN_OPIVX_TRANS(vmadc_vxm, opivx_vmadc_check)
GEN_OPIVX_TRANS(vmsbc_vxm, opivx_vmadc_check)

/* OPIVI without GVEC IR */
#define GEN_OPIVI_TRANS(NAME, ZX, OPIVX, CHECK)                          \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)                   \
{                                                                        \
    if (CHECK(s, a)) {                                                   \
        static gen_helper_opivx * const fns[4] = {                       \
            gen_helper_##OPIVX##_b, gen_helper_##OPIVX##_h,              \
            gen_helper_##OPIVX##_w, gen_helper_##OPIVX##_d,              \
        };                                                               \
        return opivi_trans(a->rd, a->rs1, a->rs2, a->vm,                 \
                           fns[s->sew], s, ZX);                          \
    }                                                                    \
    return false;                                                        \
}

GEN_OPIVI_TRANS(vadc_vim, 0, vadc_vxm, opivx_vadc_check)
GEN_OPIVI_TRANS(vmadc_vim, 0, vmadc_vxm, opivx_vmadc_check)