target/arm/vec_helper: Handle oprsz less than 16 bytes in indexed operations
[qemu.git] target/arm/vec_helper.c
1 /*
2 * ARM AdvSIMD / SVE Vector Operations
3 *
4 * Copyright (c) 2018 Linaro
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "qemu/osdep.h"
21 #include "cpu.h"
22 #include "exec/helper-proto.h"
23 #include "tcg/tcg-gvec-desc.h"
24 #include "fpu/softfloat.h"
25 #include "vec_internal.h"
26
27 /* Note that vector data is stored in host-endian 64-bit chunks,
28 so addressing any unit smaller than that needs a host-endian fixup. */
29 #ifdef HOST_WORDS_BIGENDIAN
30 #define H1(x) ((x) ^ 7)
31 #define H2(x) ((x) ^ 3)
32 #define H4(x) ((x) ^ 1)
33 #else
34 #define H1(x) (x)
35 #define H2(x) (x)
36 #define H4(x) (x)
37 #endif
38
39 /* Signed saturating rounding doubling multiply-accumulate high half, 16-bit */
40 static int16_t do_sqrdmlah_h(int16_t src1, int16_t src2, int16_t src3,
41 bool neg, bool round, uint32_t *sat)
42 {
43 /*
44 * Simplify:
45 * = ((a3 << 16) + ((e1 * e2) << 1) + (1 << 15)) >> 16
46 * = ((a3 << 15) + (e1 * e2) + (1 << 14)) >> 15
47 */
48 int32_t ret = (int32_t)src1 * src2;
49 if (neg) {
50 ret = -ret;
51 }
52 ret += ((int32_t)src3 << 15) + (round << 14);
53 ret >>= 15;
54
55 if (ret != (int16_t)ret) {
56 *sat = 1;
57 ret = (ret < 0 ? INT16_MIN : INT16_MAX);
58 }
59 return ret;
60 }
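/*
 * For example, SQRDMULH with src1 = src2 = INT16_MIN (0x8000) and src3 = 0:
 * ret = 0x4000_0000 + 0x4000 = 0x4000_4000, and 0x4000_4000 >> 15 = 32768,
 * which does not fit in int16_t, so the result saturates to INT16_MAX and
 * *sat (the QC flag) is set.
 */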
61
62 uint32_t HELPER(neon_qrdmlah_s16)(CPUARMState *env, uint32_t src1,
63 uint32_t src2, uint32_t src3)
64 {
65 uint32_t *sat = &env->vfp.qc[0];
66 uint16_t e1 = do_sqrdmlah_h(src1, src2, src3, false, true, sat);
67 uint16_t e2 = do_sqrdmlah_h(src1 >> 16, src2 >> 16, src3 >> 16,
68 false, true, sat);
69 return deposit32(e1, 16, 16, e2);
70 }
71
72 void HELPER(gvec_qrdmlah_s16)(void *vd, void *vn, void *vm,
73 void *vq, uint32_t desc)
74 {
75 uintptr_t opr_sz = simd_oprsz(desc);
76 int16_t *d = vd;
77 int16_t *n = vn;
78 int16_t *m = vm;
79 uintptr_t i;
80
81 for (i = 0; i < opr_sz / 2; ++i) {
82 d[i] = do_sqrdmlah_h(n[i], m[i], d[i], false, true, vq);
83 }
84 clear_tail(d, opr_sz, simd_maxsz(desc));
85 }
86
87 uint32_t HELPER(neon_qrdmlsh_s16)(CPUARMState *env, uint32_t src1,
88 uint32_t src2, uint32_t src3)
89 {
90 uint32_t *sat = &env->vfp.qc[0];
91 uint16_t e1 = do_sqrdmlah_h(src1, src2, src3, true, true, sat);
92 uint16_t e2 = do_sqrdmlah_h(src1 >> 16, src2 >> 16, src3 >> 16,
93 true, true, sat);
94 return deposit32(e1, 16, 16, e2);
95 }
96
97 void HELPER(gvec_qrdmlsh_s16)(void *vd, void *vn, void *vm,
98 void *vq, uint32_t desc)
99 {
100 uintptr_t opr_sz = simd_oprsz(desc);
101 int16_t *d = vd;
102 int16_t *n = vn;
103 int16_t *m = vm;
104 uintptr_t i;
105
106 for (i = 0; i < opr_sz / 2; ++i) {
107 d[i] = do_sqrdmlah_h(n[i], m[i], d[i], true, true, vq);
108 }
109 clear_tail(d, opr_sz, simd_maxsz(desc));
110 }
111
112 void HELPER(neon_sqdmulh_h)(void *vd, void *vn, void *vm,
113 void *vq, uint32_t desc)
114 {
115 intptr_t i, opr_sz = simd_oprsz(desc);
116 int16_t *d = vd, *n = vn, *m = vm;
117
118 for (i = 0; i < opr_sz / 2; ++i) {
119 d[i] = do_sqrdmlah_h(n[i], m[i], 0, false, false, vq);
120 }
121 clear_tail(d, opr_sz, simd_maxsz(desc));
122 }
123
124 void HELPER(neon_sqrdmulh_h)(void *vd, void *vn, void *vm,
125 void *vq, uint32_t desc)
126 {
127 intptr_t i, opr_sz = simd_oprsz(desc);
128 int16_t *d = vd, *n = vn, *m = vm;
129
130 for (i = 0; i < opr_sz / 2; ++i) {
131 d[i] = do_sqrdmlah_h(n[i], m[i], 0, false, true, vq);
132 }
133 clear_tail(d, opr_sz, simd_maxsz(desc));
134 }
135
136 /* Signed saturating rounding doubling multiply-accumulate high half, 32-bit */
137 static int32_t do_sqrdmlah_s(int32_t src1, int32_t src2, int32_t src3,
138 bool neg, bool round, uint32_t *sat)
139 {
140 /* Simplify similarly to int_qrdmlah_s16 above. */
141 int64_t ret = (int64_t)src1 * src2;
142 if (neg) {
143 ret = -ret;
144 }
145 ret += ((int64_t)src3 << 31) + (round << 30);
146 ret >>= 31;
147
148 if (ret != (int32_t)ret) {
149 *sat = 1;
150 ret = (ret < 0 ? INT32_MIN : INT32_MAX);
151 }
152 return ret;
153 }
154
155 uint32_t HELPER(neon_qrdmlah_s32)(CPUARMState *env, int32_t src1,
156 int32_t src2, int32_t src3)
157 {
158 uint32_t *sat = &env->vfp.qc[0];
159 return do_sqrdmlah_s(src1, src2, src3, false, true, sat);
160 }
161
162 void HELPER(gvec_qrdmlah_s32)(void *vd, void *vn, void *vm,
163 void *vq, uint32_t desc)
164 {
165 uintptr_t opr_sz = simd_oprsz(desc);
166 int32_t *d = vd;
167 int32_t *n = vn;
168 int32_t *m = vm;
169 uintptr_t i;
170
171 for (i = 0; i < opr_sz / 4; ++i) {
172 d[i] = do_sqrdmlah_s(n[i], m[i], d[i], false, true, vq);
173 }
174 clear_tail(d, opr_sz, simd_maxsz(desc));
175 }
176
177 uint32_t HELPER(neon_qrdmlsh_s32)(CPUARMState *env, int32_t src1,
178 int32_t src2, int32_t src3)
179 {
180 uint32_t *sat = &env->vfp.qc[0];
181 return do_sqrdmlah_s(src1, src2, src3, true, true, sat);
182 }
183
184 void HELPER(gvec_qrdmlsh_s32)(void *vd, void *vn, void *vm,
185 void *vq, uint32_t desc)
186 {
187 uintptr_t opr_sz = simd_oprsz(desc);
188 int32_t *d = vd;
189 int32_t *n = vn;
190 int32_t *m = vm;
191 uintptr_t i;
192
193 for (i = 0; i < opr_sz / 4; ++i) {
194 d[i] = do_sqrdmlah_s(n[i], m[i], d[i], true, true, vq);
195 }
196 clear_tail(d, opr_sz, simd_maxsz(desc));
197 }
198
199 void HELPER(neon_sqdmulh_s)(void *vd, void *vn, void *vm,
200 void *vq, uint32_t desc)
201 {
202 intptr_t i, opr_sz = simd_oprsz(desc);
203 int32_t *d = vd, *n = vn, *m = vm;
204
205 for (i = 0; i < opr_sz / 4; ++i) {
206 d[i] = do_sqrdmlah_s(n[i], m[i], 0, false, false, vq);
207 }
208 clear_tail(d, opr_sz, simd_maxsz(desc));
209 }
210
211 void HELPER(neon_sqrdmulh_s)(void *vd, void *vn, void *vm,
212 void *vq, uint32_t desc)
213 {
214 intptr_t i, opr_sz = simd_oprsz(desc);
215 int32_t *d = vd, *n = vn, *m = vm;
216
217 for (i = 0; i < opr_sz / 4; ++i) {
218 d[i] = do_sqrdmlah_s(n[i], m[i], 0, false, true, vq);
219 }
220 clear_tail(d, opr_sz, simd_maxsz(desc));
221 }
222
223 /* Integer 8 and 16-bit dot-product.
224 *
225 * Note that for the loops herein, host endianness does not matter
226 * with respect to the ordering of data within the 64-bit lanes.
227 * All elements are treated equally, no matter where they are.
228 */
229
230 void HELPER(gvec_sdot_b)(void *vd, void *vn, void *vm, uint32_t desc)
231 {
232 intptr_t i, opr_sz = simd_oprsz(desc);
233 uint32_t *d = vd;
234 int8_t *n = vn, *m = vm;
235
236 for (i = 0; i < opr_sz / 4; ++i) {
237 d[i] += n[i * 4 + 0] * m[i * 4 + 0]
238 + n[i * 4 + 1] * m[i * 4 + 1]
239 + n[i * 4 + 2] * m[i * 4 + 2]
240 + n[i * 4 + 3] * m[i * 4 + 3];
241 }
242 clear_tail(d, opr_sz, simd_maxsz(desc));
243 }
244
245 void HELPER(gvec_udot_b)(void *vd, void *vn, void *vm, uint32_t desc)
246 {
247 intptr_t i, opr_sz = simd_oprsz(desc);
248 uint32_t *d = vd;
249 uint8_t *n = vn, *m = vm;
250
251 for (i = 0; i < opr_sz / 4; ++i) {
252 d[i] += n[i * 4 + 0] * m[i * 4 + 0]
253 + n[i * 4 + 1] * m[i * 4 + 1]
254 + n[i * 4 + 2] * m[i * 4 + 2]
255 + n[i * 4 + 3] * m[i * 4 + 3];
256 }
257 clear_tail(d, opr_sz, simd_maxsz(desc));
258 }
259
260 void HELPER(gvec_sdot_h)(void *vd, void *vn, void *vm, uint32_t desc)
261 {
262 intptr_t i, opr_sz = simd_oprsz(desc);
263 uint64_t *d = vd;
264 int16_t *n = vn, *m = vm;
265
266 for (i = 0; i < opr_sz / 8; ++i) {
267 d[i] += (int64_t)n[i * 4 + 0] * m[i * 4 + 0]
268 + (int64_t)n[i * 4 + 1] * m[i * 4 + 1]
269 + (int64_t)n[i * 4 + 2] * m[i * 4 + 2]
270 + (int64_t)n[i * 4 + 3] * m[i * 4 + 3];
271 }
272 clear_tail(d, opr_sz, simd_maxsz(desc));
273 }
274
275 void HELPER(gvec_udot_h)(void *vd, void *vn, void *vm, uint32_t desc)
276 {
277 intptr_t i, opr_sz = simd_oprsz(desc);
278 uint64_t *d = vd;
279 uint16_t *n = vn, *m = vm;
280
281 for (i = 0; i < opr_sz / 8; ++i) {
282 d[i] += (uint64_t)n[i * 4 + 0] * m[i * 4 + 0]
283 + (uint64_t)n[i * 4 + 1] * m[i * 4 + 1]
284 + (uint64_t)n[i * 4 + 2] * m[i * 4 + 2]
285 + (uint64_t)n[i * 4 + 3] * m[i * 4 + 3];
286 }
287 clear_tail(d, opr_sz, simd_maxsz(desc));
288 }
289
290 void HELPER(gvec_sdot_idx_b)(void *vd, void *vn, void *vm, uint32_t desc)
291 {
292 intptr_t i, segend, opr_sz = simd_oprsz(desc), opr_sz_4 = opr_sz / 4;
293 intptr_t index = simd_data(desc);
294 uint32_t *d = vd;
295 int8_t *n = vn;
296 int8_t *m_indexed = (int8_t *)vm + index * 4;
297
298 /* Notice the special case of opr_sz == 8, from aa64/aa32 advsimd.
299 * Otherwise opr_sz is a multiple of 16.
300 */
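/*
 * With opr_sz == 8, opr_sz_4 == 2 and the MIN below makes the first (and
 * only) segment cover just two 32-bit lanes; when opr_sz is a multiple of
 * 16, each pass of the outer loop handles one 4-lane segment sharing the
 * same m0..m3 values.
 */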
301 segend = MIN(4, opr_sz_4);
302 i = 0;
303 do {
304 int8_t m0 = m_indexed[i * 4 + 0];
305 int8_t m1 = m_indexed[i * 4 + 1];
306 int8_t m2 = m_indexed[i * 4 + 2];
307 int8_t m3 = m_indexed[i * 4 + 3];
308
309 do {
310 d[i] += n[i * 4 + 0] * m0
311 + n[i * 4 + 1] * m1
312 + n[i * 4 + 2] * m2
313 + n[i * 4 + 3] * m3;
314 } while (++i < segend);
315 segend = i + 4;
316 } while (i < opr_sz_4);
317
318 clear_tail(d, opr_sz, simd_maxsz(desc));
319 }
320
321 void HELPER(gvec_udot_idx_b)(void *vd, void *vn, void *vm, uint32_t desc)
322 {
323 intptr_t i, segend, opr_sz = simd_oprsz(desc), opr_sz_4 = opr_sz / 4;
324 intptr_t index = simd_data(desc);
325 uint32_t *d = vd;
326 uint8_t *n = vn;
327 uint8_t *m_indexed = (uint8_t *)vm + index * 4;
328
329 /* Notice the special case of opr_sz == 8, from aa64/aa32 advsimd.
330 * Otherwise opr_sz is a multiple of 16.
331 */
332 segend = MIN(4, opr_sz_4);
333 i = 0;
334 do {
335 uint8_t m0 = m_indexed[i * 4 + 0];
336 uint8_t m1 = m_indexed[i * 4 + 1];
337 uint8_t m2 = m_indexed[i * 4 + 2];
338 uint8_t m3 = m_indexed[i * 4 + 3];
339
340 do {
341 d[i] += n[i * 4 + 0] * m0
342 + n[i * 4 + 1] * m1
343 + n[i * 4 + 2] * m2
344 + n[i * 4 + 3] * m3;
345 } while (++i < segend);
346 segend = i + 4;
347 } while (i < opr_sz_4);
348
349 clear_tail(d, opr_sz, simd_maxsz(desc));
350 }
351
352 void HELPER(gvec_sdot_idx_h)(void *vd, void *vn, void *vm, uint32_t desc)
353 {
354 intptr_t i, opr_sz = simd_oprsz(desc), opr_sz_8 = opr_sz / 8;
355 intptr_t index = simd_data(desc);
356 uint64_t *d = vd;
357 int16_t *n = vn;
358 int16_t *m_indexed = (int16_t *)vm + index * 4;
359
360 /* This is supported by SVE only, so opr_sz is always a multiple of 16.
361 * Process the entire segment all at once, writing back the results
362 * only after we've consumed all of the inputs.
363 */
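/*
 * d may alias n or m, hence the d0/d1 temporaries.  Note that i advances
 * by 2 (two 64-bit lanes), so i * 4 steps m_indexed forward by one
 * 16-byte segment per iteration.
 */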
364 for (i = 0; i < opr_sz_8 ; i += 2) {
365 uint64_t d0, d1;
366
367 d0 = n[i * 4 + 0] * (int64_t)m_indexed[i * 4 + 0];
368 d0 += n[i * 4 + 1] * (int64_t)m_indexed[i * 4 + 1];
369 d0 += n[i * 4 + 2] * (int64_t)m_indexed[i * 4 + 2];
370 d0 += n[i * 4 + 3] * (int64_t)m_indexed[i * 4 + 3];
371 d1 = n[i * 4 + 4] * (int64_t)m_indexed[i * 4 + 0];
372 d1 += n[i * 4 + 5] * (int64_t)m_indexed[i * 4 + 1];
373 d1 += n[i * 4 + 6] * (int64_t)m_indexed[i * 4 + 2];
374 d1 += n[i * 4 + 7] * (int64_t)m_indexed[i * 4 + 3];
375
376 d[i + 0] += d0;
377 d[i + 1] += d1;
378 }
379
380 clear_tail(d, opr_sz, simd_maxsz(desc));
381 }
382
383 void HELPER(gvec_udot_idx_h)(void *vd, void *vn, void *vm, uint32_t desc)
384 {
385 intptr_t i, opr_sz = simd_oprsz(desc), opr_sz_8 = opr_sz / 8;
386 intptr_t index = simd_data(desc);
387 uint64_t *d = vd;
388 uint16_t *n = vn;
389 uint16_t *m_indexed = (uint16_t *)vm + index * 4;
390
391 /* This is supported by SVE only, so opr_sz is always a multiple of 16.
392 * Process the entire segment all at once, writing back the results
393 * only after we've consumed all of the inputs.
394 */
395 for (i = 0; i < opr_sz_8 ; i += 2) {
396 uint64_t d0, d1;
397
398 d0 = n[i * 4 + 0] * (uint64_t)m_indexed[i * 4 + 0];
399 d0 += n[i * 4 + 1] * (uint64_t)m_indexed[i * 4 + 1];
400 d0 += n[i * 4 + 2] * (uint64_t)m_indexed[i * 4 + 2];
401 d0 += n[i * 4 + 3] * (uint64_t)m_indexed[i * 4 + 3];
402 d1 = n[i * 4 + 4] * (uint64_t)m_indexed[i * 4 + 0];
403 d1 += n[i * 4 + 5] * (uint64_t)m_indexed[i * 4 + 1];
404 d1 += n[i * 4 + 6] * (uint64_t)m_indexed[i * 4 + 2];
405 d1 += n[i * 4 + 7] * (uint64_t)m_indexed[i * 4 + 3];
406
407 d[i + 0] += d0;
408 d[i + 1] += d1;
409 }
410
411 clear_tail(d, opr_sz, simd_maxsz(desc));
412 }
413
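/*
 * Floating-point complex add (FCADD).  Elements are processed in
 * real/imaginary pairs.  The single desc data bit selects the rotation:
 * when it is 0, neg_imag is set and each pair computes
 *   { n.re - m.im, n.im + m.re }    (rotate #90)
 * and when it is 1, neg_real is set and each pair computes
 *   { n.re + m.im, n.im - m.re }    (rotate #270).
 * The negation is performed by xoring the sign bit of the m element
 * rather than by a softfloat negate.
 */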
414 void HELPER(gvec_fcaddh)(void *vd, void *vn, void *vm,
415 void *vfpst, uint32_t desc)
416 {
417 uintptr_t opr_sz = simd_oprsz(desc);
418 float16 *d = vd;
419 float16 *n = vn;
420 float16 *m = vm;
421 float_status *fpst = vfpst;
422 uint32_t neg_real = extract32(desc, SIMD_DATA_SHIFT, 1);
423 uint32_t neg_imag = neg_real ^ 1;
424 uintptr_t i;
425
426 /* Shift boolean to the sign bit so we can xor to negate. */
427 neg_real <<= 15;
428 neg_imag <<= 15;
429
430 for (i = 0; i < opr_sz / 2; i += 2) {
431 float16 e0 = n[H2(i)];
432 float16 e1 = m[H2(i + 1)] ^ neg_imag;
433 float16 e2 = n[H2(i + 1)];
434 float16 e3 = m[H2(i)] ^ neg_real;
435
436 d[H2(i)] = float16_add(e0, e1, fpst);
437 d[H2(i + 1)] = float16_add(e2, e3, fpst);
438 }
439 clear_tail(d, opr_sz, simd_maxsz(desc));
440 }
441
442 void HELPER(gvec_fcadds)(void *vd, void *vn, void *vm,
443 void *vfpst, uint32_t desc)
444 {
445 uintptr_t opr_sz = simd_oprsz(desc);
446 float32 *d = vd;
447 float32 *n = vn;
448 float32 *m = vm;
449 float_status *fpst = vfpst;
450 uint32_t neg_real = extract32(desc, SIMD_DATA_SHIFT, 1);
451 uint32_t neg_imag = neg_real ^ 1;
452 uintptr_t i;
453
454 /* Shift boolean to the sign bit so we can xor to negate. */
455 neg_real <<= 31;
456 neg_imag <<= 31;
457
458 for (i = 0; i < opr_sz / 4; i += 2) {
459 float32 e0 = n[H4(i)];
460 float32 e1 = m[H4(i + 1)] ^ neg_imag;
461 float32 e2 = n[H4(i + 1)];
462 float32 e3 = m[H4(i)] ^ neg_real;
463
464 d[H4(i)] = float32_add(e0, e1, fpst);
465 d[H4(i + 1)] = float32_add(e2, e3, fpst);
466 }
467 clear_tail(d, opr_sz, simd_maxsz(desc));
468 }
469
470 void HELPER(gvec_fcaddd)(void *vd, void *vn, void *vm,
471 void *vfpst, uint32_t desc)
472 {
473 uintptr_t opr_sz = simd_oprsz(desc);
474 float64 *d = vd;
475 float64 *n = vn;
476 float64 *m = vm;
477 float_status *fpst = vfpst;
478 uint64_t neg_real = extract64(desc, SIMD_DATA_SHIFT, 1);
479 uint64_t neg_imag = neg_real ^ 1;
480 uintptr_t i;
481
482 /* Shift boolean to the sign bit so we can xor to negate. */
483 neg_real <<= 63;
484 neg_imag <<= 63;
485
486 for (i = 0; i < opr_sz / 8; i += 2) {
487 float64 e0 = n[i];
488 float64 e1 = m[i + 1] ^ neg_imag;
489 float64 e2 = n[i + 1];
490 float64 e3 = m[i] ^ neg_real;
491
492 d[i] = float64_add(e0, e1, fpst);
493 d[i + 1] = float64_add(e2, e3, fpst);
494 }
495 clear_tail(d, opr_sz, simd_maxsz(desc));
496 }
497
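/*
 * Floating-point complex multiply-accumulate (FCMLA).  Two desc data bits
 * encode the rotation in multiples of 90 degrees: 'flip' (bit 0) selects
 * whether the real or imaginary element of n multiplies both halves of
 * the m pair, and 'neg_imag' (bit 1), together with
 * neg_real = flip ^ neg_imag, selects which m element has its sign
 * flipped.  The four combinations give rotations #0, #90, #180 and #270.
 */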
498 void HELPER(gvec_fcmlah)(void *vd, void *vn, void *vm,
499 void *vfpst, uint32_t desc)
500 {
501 uintptr_t opr_sz = simd_oprsz(desc);
502 float16 *d = vd;
503 float16 *n = vn;
504 float16 *m = vm;
505 float_status *fpst = vfpst;
506 intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
507 uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
508 uint32_t neg_real = flip ^ neg_imag;
509 uintptr_t i;
510
511 /* Shift boolean to the sign bit so we can xor to negate. */
512 neg_real <<= 15;
513 neg_imag <<= 15;
514
515 for (i = 0; i < opr_sz / 2; i += 2) {
516 float16 e2 = n[H2(i + flip)];
517 float16 e1 = m[H2(i + flip)] ^ neg_real;
518 float16 e4 = e2;
519 float16 e3 = m[H2(i + 1 - flip)] ^ neg_imag;
520
521 d[H2(i)] = float16_muladd(e2, e1, d[H2(i)], 0, fpst);
522 d[H2(i + 1)] = float16_muladd(e4, e3, d[H2(i + 1)], 0, fpst);
523 }
524 clear_tail(d, opr_sz, simd_maxsz(desc));
525 }
526
527 void HELPER(gvec_fcmlah_idx)(void *vd, void *vn, void *vm,
528 void *vfpst, uint32_t desc)
529 {
530 uintptr_t opr_sz = simd_oprsz(desc);
531 float16 *d = vd;
532 float16 *n = vn;
533 float16 *m = vm;
534 float_status *fpst = vfpst;
535 intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
536 uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
537 intptr_t index = extract32(desc, SIMD_DATA_SHIFT + 2, 2);
538 uint32_t neg_real = flip ^ neg_imag;
539 intptr_t elements = opr_sz / sizeof(float16);
540 intptr_t eltspersegment = 16 / sizeof(float16);
541 intptr_t i, j;
542
543 /* Shift boolean to the sign bit so we can xor to negate. */
544 neg_real <<= 15;
545 neg_imag <<= 15;
546
547 for (i = 0; i < elements; i += eltspersegment) {
548 float16 mr = m[H2(i + 2 * index + 0)];
549 float16 mi = m[H2(i + 2 * index + 1)];
550 float16 e1 = neg_real ^ (flip ? mi : mr);
551 float16 e3 = neg_imag ^ (flip ? mr : mi);
552
553 for (j = i; j < i + eltspersegment; j += 2) {
554 float16 e2 = n[H2(j + flip)];
555 float16 e4 = e2;
556
557 d[H2(j)] = float16_muladd(e2, e1, d[H2(j)], 0, fpst);
558 d[H2(j + 1)] = float16_muladd(e4, e3, d[H2(j + 1)], 0, fpst);
559 }
560 }
561 clear_tail(d, opr_sz, simd_maxsz(desc));
562 }
563
564 void HELPER(gvec_fcmlas)(void *vd, void *vn, void *vm,
565 void *vfpst, uint32_t desc)
566 {
567 uintptr_t opr_sz = simd_oprsz(desc);
568 float32 *d = vd;
569 float32 *n = vn;
570 float32 *m = vm;
571 float_status *fpst = vfpst;
572 intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
573 uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
574 uint32_t neg_real = flip ^ neg_imag;
575 uintptr_t i;
576
577 /* Shift boolean to the sign bit so we can xor to negate. */
578 neg_real <<= 31;
579 neg_imag <<= 31;
580
581 for (i = 0; i < opr_sz / 4; i += 2) {
582 float32 e2 = n[H4(i + flip)];
583 float32 e1 = m[H4(i + flip)] ^ neg_real;
584 float32 e4 = e2;
585 float32 e3 = m[H4(i + 1 - flip)] ^ neg_imag;
586
587 d[H4(i)] = float32_muladd(e2, e1, d[H4(i)], 0, fpst);
588 d[H4(i + 1)] = float32_muladd(e4, e3, d[H4(i + 1)], 0, fpst);
589 }
590 clear_tail(d, opr_sz, simd_maxsz(desc));
591 }
592
593 void HELPER(gvec_fcmlas_idx)(void *vd, void *vn, void *vm,
594 void *vfpst, uint32_t desc)
595 {
596 uintptr_t opr_sz = simd_oprsz(desc);
597 float32 *d = vd;
598 float32 *n = vn;
599 float32 *m = vm;
600 float_status *fpst = vfpst;
601 intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
602 uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
603 intptr_t index = extract32(desc, SIMD_DATA_SHIFT + 2, 2);
604 uint32_t neg_real = flip ^ neg_imag;
605 intptr_t elements = opr_sz / sizeof(float32);
606 intptr_t eltspersegment = 16 / sizeof(float32);
607 intptr_t i, j;
608
609 /* Shift boolean to the sign bit so we can xor to negate. */
610 neg_real <<= 31;
611 neg_imag <<= 31;
612
613 for (i = 0; i < elements; i += eltspersegment) {
614 float32 mr = m[H4(i + 2 * index + 0)];
615 float32 mi = m[H4(i + 2 * index + 1)];
616 float32 e1 = neg_real ^ (flip ? mi : mr);
617 float32 e3 = neg_imag ^ (flip ? mr : mi);
618
619 for (j = i; j < i + eltspersegment; j += 2) {
620 float32 e2 = n[H4(j + flip)];
621 float32 e4 = e2;
622
623 d[H4(j)] = float32_muladd(e2, e1, d[H4(j)], 0, fpst);
624 d[H4(j + 1)] = float32_muladd(e4, e3, d[H4(j + 1)], 0, fpst);
625 }
626 }
627 clear_tail(d, opr_sz, simd_maxsz(desc));
628 }
629
630 void HELPER(gvec_fcmlad)(void *vd, void *vn, void *vm,
631 void *vfpst, uint32_t desc)
632 {
633 uintptr_t opr_sz = simd_oprsz(desc);
634 float64 *d = vd;
635 float64 *n = vn;
636 float64 *m = vm;
637 float_status *fpst = vfpst;
638 intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
639 uint64_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
640 uint64_t neg_real = flip ^ neg_imag;
641 uintptr_t i;
642
643 /* Shift boolean to the sign bit so we can xor to negate. */
644 neg_real <<= 63;
645 neg_imag <<= 63;
646
647 for (i = 0; i < opr_sz / 8; i += 2) {
648 float64 e2 = n[i + flip];
649 float64 e1 = m[i + flip] ^ neg_real;
650 float64 e4 = e2;
651 float64 e3 = m[i + 1 - flip] ^ neg_imag;
652
653 d[i] = float64_muladd(e2, e1, d[i], 0, fpst);
654 d[i + 1] = float64_muladd(e4, e3, d[i + 1], 0, fpst);
655 }
656 clear_tail(d, opr_sz, simd_maxsz(desc));
657 }
658
659 /*
660 * Floating point comparisons producing an integer result (all 1s or all 0s).
661 * Note that EQ doesn't signal InvalidOp for QNaNs but GE and GT do.
662 * Softfloat routines return 0/1, which we convert to the 0/-1 Neon requires.
663 */
664 static uint16_t float16_ceq(float16 op1, float16 op2, float_status *stat)
665 {
666 return -float16_eq_quiet(op1, op2, stat);
667 }
668
669 static uint32_t float32_ceq(float32 op1, float32 op2, float_status *stat)
670 {
671 return -float32_eq_quiet(op1, op2, stat);
672 }
673
674 static uint16_t float16_cge(float16 op1, float16 op2, float_status *stat)
675 {
676 return -float16_le(op2, op1, stat);
677 }
678
679 static uint32_t float32_cge(float32 op1, float32 op2, float_status *stat)
680 {
681 return -float32_le(op2, op1, stat);
682 }
683
684 static uint16_t float16_cgt(float16 op1, float16 op2, float_status *stat)
685 {
686 return -float16_lt(op2, op1, stat);
687 }
688
689 static uint32_t float32_cgt(float32 op1, float32 op2, float_status *stat)
690 {
691 return -float32_lt(op2, op1, stat);
692 }
693
694 static uint16_t float16_acge(float16 op1, float16 op2, float_status *stat)
695 {
696 return -float16_le(float16_abs(op2), float16_abs(op1), stat);
697 }
698
699 static uint32_t float32_acge(float32 op1, float32 op2, float_status *stat)
700 {
701 return -float32_le(float32_abs(op2), float32_abs(op1), stat);
702 }
703
704 static uint16_t float16_acgt(float16 op1, float16 op2, float_status *stat)
705 {
706 return -float16_lt(float16_abs(op2), float16_abs(op1), stat);
707 }
708
709 static uint32_t float32_acgt(float32 op1, float32 op2, float_status *stat)
710 {
711 return -float32_lt(float32_abs(op2), float32_abs(op1), stat);
712 }
713
714 static int16_t vfp_tosszh(float16 x, void *fpstp)
715 {
716 float_status *fpst = fpstp;
717 if (float16_is_any_nan(x)) {
718 float_raise(float_flag_invalid, fpst);
719 return 0;
720 }
721 return float16_to_int16_round_to_zero(x, fpst);
722 }
723
724 static uint16_t vfp_touszh(float16 x, void *fpstp)
725 {
726 float_status *fpst = fpstp;
727 if (float16_is_any_nan(x)) {
728 float_raise(float_flag_invalid, fpst);
729 return 0;
730 }
731 return float16_to_uint16_round_to_zero(x, fpst);
732 }
733
734 #define DO_2OP(NAME, FUNC, TYPE) \
735 void HELPER(NAME)(void *vd, void *vn, void *stat, uint32_t desc) \
736 { \
737 intptr_t i, oprsz = simd_oprsz(desc); \
738 TYPE *d = vd, *n = vn; \
739 for (i = 0; i < oprsz / sizeof(TYPE); i++) { \
740 d[i] = FUNC(n[i], stat); \
741 } \
742 clear_tail(d, oprsz, simd_maxsz(desc)); \
743 }
744
745 DO_2OP(gvec_frecpe_h, helper_recpe_f16, float16)
746 DO_2OP(gvec_frecpe_s, helper_recpe_f32, float32)
747 DO_2OP(gvec_frecpe_d, helper_recpe_f64, float64)
748
749 DO_2OP(gvec_frsqrte_h, helper_rsqrte_f16, float16)
750 DO_2OP(gvec_frsqrte_s, helper_rsqrte_f32, float32)
751 DO_2OP(gvec_frsqrte_d, helper_rsqrte_f64, float64)
752
753 DO_2OP(gvec_vrintx_h, float16_round_to_int, float16)
754 DO_2OP(gvec_vrintx_s, float32_round_to_int, float32)
755
756 DO_2OP(gvec_sitos, helper_vfp_sitos, int32_t)
757 DO_2OP(gvec_uitos, helper_vfp_uitos, uint32_t)
758 DO_2OP(gvec_tosizs, helper_vfp_tosizs, float32)
759 DO_2OP(gvec_touizs, helper_vfp_touizs, float32)
760 DO_2OP(gvec_sstoh, int16_to_float16, int16_t)
761 DO_2OP(gvec_ustoh, uint16_to_float16, uint16_t)
762 DO_2OP(gvec_tosszh, vfp_tosszh, float16)
763 DO_2OP(gvec_touszh, vfp_touszh, float16)
764
765 #define WRAP_CMP0_FWD(FN, CMPOP, TYPE) \
766 static TYPE TYPE##_##FN##0(TYPE op, float_status *stat) \
767 { \
768 return TYPE##_##CMPOP(op, TYPE##_zero, stat); \
769 }
770
771 #define WRAP_CMP0_REV(FN, CMPOP, TYPE) \
772 static TYPE TYPE##_##FN##0(TYPE op, float_status *stat) \
773 { \
774 return TYPE##_##CMPOP(TYPE##_zero, op, stat); \
775 }
776
777 #define DO_2OP_CMP0(FN, CMPOP, DIRN) \
778 WRAP_CMP0_##DIRN(FN, CMPOP, float16) \
779 WRAP_CMP0_##DIRN(FN, CMPOP, float32) \
780 DO_2OP(gvec_f##FN##0_h, float16_##FN##0, float16) \
781 DO_2OP(gvec_f##FN##0_s, float32_##FN##0, float32)
782
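/*
 * For example, DO_2OP_CMP0(clt, cgt, REV) defines float16_clt0() and
 * float32_clt0() as float*_cgt(zero, op) -- true when op < 0 -- and then
 * instantiates the gvec_fclt0_h and gvec_fclt0_s helpers via DO_2OP.
 */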
783 DO_2OP_CMP0(cgt, cgt, FWD)
784 DO_2OP_CMP0(cge, cge, FWD)
785 DO_2OP_CMP0(ceq, ceq, FWD)
786 DO_2OP_CMP0(clt, cgt, REV)
787 DO_2OP_CMP0(cle, cge, REV)
788
789 #undef DO_2OP
790 #undef DO_2OP_CMP0
791
792 /* Floating-point trigonometric starting value.
793 * See the ARM ARM pseudocode function FPTrigSMul.
794 */
795 static float16 float16_ftsmul(float16 op1, uint16_t op2, float_status *stat)
796 {
797 float16 result = float16_mul(op1, op1, stat);
798 if (!float16_is_any_nan(result)) {
799 result = float16_set_sign(result, op2 & 1);
800 }
801 return result;
802 }
803
804 static float32 float32_ftsmul(float32 op1, uint32_t op2, float_status *stat)
805 {
806 float32 result = float32_mul(op1, op1, stat);
807 if (!float32_is_any_nan(result)) {
808 result = float32_set_sign(result, op2 & 1);
809 }
810 return result;
811 }
812
813 static float64 float64_ftsmul(float64 op1, uint64_t op2, float_status *stat)
814 {
815 float64 result = float64_mul(op1, op1, stat);
816 if (!float64_is_any_nan(result)) {
817 result = float64_set_sign(result, op2 & 1);
818 }
819 return result;
820 }
821
822 static float16 float16_abd(float16 op1, float16 op2, float_status *stat)
823 {
824 return float16_abs(float16_sub(op1, op2, stat));
825 }
826
827 static float32 float32_abd(float32 op1, float32 op2, float_status *stat)
828 {
829 return float32_abs(float32_sub(op1, op2, stat));
830 }
831
832 /*
833 * Reciprocal step. These are the AArch32 versions, which use a
834 * non-fused multiply-and-subtract.
835 */
836 static float16 float16_recps_nf(float16 op1, float16 op2, float_status *stat)
837 {
838 op1 = float16_squash_input_denormal(op1, stat);
839 op2 = float16_squash_input_denormal(op2, stat);
840
841 if ((float16_is_infinity(op1) && float16_is_zero(op2)) ||
842 (float16_is_infinity(op2) && float16_is_zero(op1))) {
843 return float16_two;
844 }
845 return float16_sub(float16_two, float16_mul(op1, op2, stat), stat);
846 }
847
848 static float32 float32_recps_nf(float32 op1, float32 op2, float_status *stat)
849 {
850 op1 = float32_squash_input_denormal(op1, stat);
851 op2 = float32_squash_input_denormal(op2, stat);
852
853 if ((float32_is_infinity(op1) && float32_is_zero(op2)) ||
854 (float32_is_infinity(op2) && float32_is_zero(op1))) {
855 return float32_two;
856 }
857 return float32_sub(float32_two, float32_mul(op1, op2, stat), stat);
858 }
859
860 /* Reciprocal square-root step. AArch32 non-fused semantics. */
861 static float16 float16_rsqrts_nf(float16 op1, float16 op2, float_status *stat)
862 {
863 op1 = float16_squash_input_denormal(op1, stat);
864 op2 = float16_squash_input_denormal(op2, stat);
865
866 if ((float16_is_infinity(op1) && float16_is_zero(op2)) ||
867 (float16_is_infinity(op2) && float16_is_zero(op1))) {
868 return float16_one_point_five;
869 }
870 op1 = float16_sub(float16_three, float16_mul(op1, op2, stat), stat);
871 return float16_div(op1, float16_two, stat);
872 }
873
874 static float32 float32_rsqrts_nf(float32 op1, float32 op2, float_status *stat)
875 {
876 op1 = float32_squash_input_denormal(op1, stat);
877 op2 = float32_squash_input_denormal(op2, stat);
878
879 if ((float32_is_infinity(op1) && float32_is_zero(op2)) ||
880 (float32_is_infinity(op2) && float32_is_zero(op1))) {
881 return float32_one_point_five;
882 }
883 op1 = float32_sub(float32_three, float32_mul(op1, op2, stat), stat);
884 return float32_div(op1, float32_two, stat);
885 }
886
887 #define DO_3OP(NAME, FUNC, TYPE) \
888 void HELPER(NAME)(void *vd, void *vn, void *vm, void *stat, uint32_t desc) \
889 { \
890 intptr_t i, oprsz = simd_oprsz(desc); \
891 TYPE *d = vd, *n = vn, *m = vm; \
892 for (i = 0; i < oprsz / sizeof(TYPE); i++) { \
893 d[i] = FUNC(n[i], m[i], stat); \
894 } \
895 clear_tail(d, oprsz, simd_maxsz(desc)); \
896 }
897
898 DO_3OP(gvec_fadd_h, float16_add, float16)
899 DO_3OP(gvec_fadd_s, float32_add, float32)
900 DO_3OP(gvec_fadd_d, float64_add, float64)
901
902 DO_3OP(gvec_fsub_h, float16_sub, float16)
903 DO_3OP(gvec_fsub_s, float32_sub, float32)
904 DO_3OP(gvec_fsub_d, float64_sub, float64)
905
906 DO_3OP(gvec_fmul_h, float16_mul, float16)
907 DO_3OP(gvec_fmul_s, float32_mul, float32)
908 DO_3OP(gvec_fmul_d, float64_mul, float64)
909
910 DO_3OP(gvec_ftsmul_h, float16_ftsmul, float16)
911 DO_3OP(gvec_ftsmul_s, float32_ftsmul, float32)
912 DO_3OP(gvec_ftsmul_d, float64_ftsmul, float64)
913
914 DO_3OP(gvec_fabd_h, float16_abd, float16)
915 DO_3OP(gvec_fabd_s, float32_abd, float32)
916
917 DO_3OP(gvec_fceq_h, float16_ceq, float16)
918 DO_3OP(gvec_fceq_s, float32_ceq, float32)
919
920 DO_3OP(gvec_fcge_h, float16_cge, float16)
921 DO_3OP(gvec_fcge_s, float32_cge, float32)
922
923 DO_3OP(gvec_fcgt_h, float16_cgt, float16)
924 DO_3OP(gvec_fcgt_s, float32_cgt, float32)
925
926 DO_3OP(gvec_facge_h, float16_acge, float16)
927 DO_3OP(gvec_facge_s, float32_acge, float32)
928
929 DO_3OP(gvec_facgt_h, float16_acgt, float16)
930 DO_3OP(gvec_facgt_s, float32_acgt, float32)
931
932 DO_3OP(gvec_fmax_h, float16_max, float16)
933 DO_3OP(gvec_fmax_s, float32_max, float32)
934
935 DO_3OP(gvec_fmin_h, float16_min, float16)
936 DO_3OP(gvec_fmin_s, float32_min, float32)
937
938 DO_3OP(gvec_fmaxnum_h, float16_maxnum, float16)
939 DO_3OP(gvec_fmaxnum_s, float32_maxnum, float32)
940
941 DO_3OP(gvec_fminnum_h, float16_minnum, float16)
942 DO_3OP(gvec_fminnum_s, float32_minnum, float32)
943
944 DO_3OP(gvec_recps_nf_h, float16_recps_nf, float16)
945 DO_3OP(gvec_recps_nf_s, float32_recps_nf, float32)
946
947 DO_3OP(gvec_rsqrts_nf_h, float16_rsqrts_nf, float16)
948 DO_3OP(gvec_rsqrts_nf_s, float32_rsqrts_nf, float32)
949
950 #ifdef TARGET_AARCH64
951
952 DO_3OP(gvec_recps_h, helper_recpsf_f16, float16)
953 DO_3OP(gvec_recps_s, helper_recpsf_f32, float32)
954 DO_3OP(gvec_recps_d, helper_recpsf_f64, float64)
955
956 DO_3OP(gvec_rsqrts_h, helper_rsqrtsf_f16, float16)
957 DO_3OP(gvec_rsqrts_s, helper_rsqrtsf_f32, float32)
958 DO_3OP(gvec_rsqrts_d, helper_rsqrtsf_f64, float64)
959
960 #endif
961 #undef DO_3OP
962
963 /* Non-fused multiply-add (unlike float16_muladd etc, which are fused) */
964 static float16 float16_muladd_nf(float16 dest, float16 op1, float16 op2,
965 float_status *stat)
966 {
967 return float16_add(dest, float16_mul(op1, op2, stat), stat);
968 }
969
970 static float32 float32_muladd_nf(float32 dest, float32 op1, float32 op2,
971 float_status *stat)
972 {
973 return float32_add(dest, float32_mul(op1, op2, stat), stat);
974 }
975
976 static float16 float16_mulsub_nf(float16 dest, float16 op1, float16 op2,
977 float_status *stat)
978 {
979 return float16_sub(dest, float16_mul(op1, op2, stat), stat);
980 }
981
982 static float32 float32_mulsub_nf(float32 dest, float32 op1, float32 op2,
983 float_status *stat)
984 {
985 return float32_sub(dest, float32_mul(op1, op2, stat), stat);
986 }
987
988 /* Fused versions; these have the semantics Neon VFMA/VFMS want */
989 static float16 float16_muladd_f(float16 dest, float16 op1, float16 op2,
990 float_status *stat)
991 {
992 return float16_muladd(op1, op2, dest, 0, stat);
993 }
994
995 static float32 float32_muladd_f(float32 dest, float32 op1, float32 op2,
996 float_status *stat)
997 {
998 return float32_muladd(op1, op2, dest, 0, stat);
999 }
1000
1001 static float16 float16_mulsub_f(float16 dest, float16 op1, float16 op2,
1002 float_status *stat)
1003 {
1004 return float16_muladd(float16_chs(op1), op2, dest, 0, stat);
1005 }
1006
1007 static float32 float32_mulsub_f(float32 dest, float32 op1, float32 op2,
1008 float_status *stat)
1009 {
1010 return float32_muladd(float32_chs(op1), op2, dest, 0, stat);
1011 }
1012
1013 #define DO_MULADD(NAME, FUNC, TYPE) \
1014 void HELPER(NAME)(void *vd, void *vn, void *vm, void *stat, uint32_t desc) \
1015 { \
1016 intptr_t i, oprsz = simd_oprsz(desc); \
1017 TYPE *d = vd, *n = vn, *m = vm; \
1018 for (i = 0; i < oprsz / sizeof(TYPE); i++) { \
1019 d[i] = FUNC(d[i], n[i], m[i], stat); \
1020 } \
1021 clear_tail(d, oprsz, simd_maxsz(desc)); \
1022 }
1023
1024 DO_MULADD(gvec_fmla_h, float16_muladd_nf, float16)
1025 DO_MULADD(gvec_fmla_s, float32_muladd_nf, float32)
1026
1027 DO_MULADD(gvec_fmls_h, float16_mulsub_nf, float16)
1028 DO_MULADD(gvec_fmls_s, float32_mulsub_nf, float32)
1029
1030 DO_MULADD(gvec_vfma_h, float16_muladd_f, float16)
1031 DO_MULADD(gvec_vfma_s, float32_muladd_f, float32)
1032
1033 DO_MULADD(gvec_vfms_h, float16_mulsub_f, float16)
1034 DO_MULADD(gvec_vfms_s, float32_mulsub_f, float32)
1035
1036 /* For the indexed ops, SVE applies the index per 128-bit vector segment.
1037 * For AdvSIMD, there is of course only one such vector segment.
1038 */
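/*
 * The segment size is therefore MIN(16, oprsz) / sizeof(TYPE): for the
 * 8-byte AdvSIMD case there is a single segment of oprsz bytes, so the
 * inner loops below never read or write elements beyond the operation
 * size.
 */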
1039
1040 #define DO_MUL_IDX(NAME, TYPE, H) \
1041 void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
1042 { \
1043 intptr_t i, j, oprsz = simd_oprsz(desc); \
1044 intptr_t segment = MIN(16, oprsz) / sizeof(TYPE); \
1045 intptr_t idx = simd_data(desc); \
1046 TYPE *d = vd, *n = vn, *m = vm; \
1047 for (i = 0; i < oprsz / sizeof(TYPE); i += segment) { \
1048 TYPE mm = m[H(i + idx)]; \
1049 for (j = 0; j < segment; j++) { \
1050 d[i + j] = n[i + j] * mm; \
1051 } \
1052 } \
1053 clear_tail(d, oprsz, simd_maxsz(desc)); \
1054 }
1055
1056 DO_MUL_IDX(gvec_mul_idx_h, uint16_t, H2)
1057 DO_MUL_IDX(gvec_mul_idx_s, uint32_t, H4)
1058 DO_MUL_IDX(gvec_mul_idx_d, uint64_t, )
1059
1060 #undef DO_MUL_IDX
1061
1062 #define DO_MLA_IDX(NAME, TYPE, OP, H) \
1063 void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \
1064 { \
1065 intptr_t i, j, oprsz = simd_oprsz(desc); \
1066 intptr_t segment = MIN(16, oprsz) / sizeof(TYPE); \
1067 intptr_t idx = simd_data(desc); \
1068 TYPE *d = vd, *n = vn, *m = vm, *a = va; \
1069 for (i = 0; i < oprsz / sizeof(TYPE); i += segment) { \
1070 TYPE mm = m[H(i + idx)]; \
1071 for (j = 0; j < segment; j++) { \
1072 d[i + j] = a[i + j] OP n[i + j] * mm; \
1073 } \
1074 } \
1075 clear_tail(d, oprsz, simd_maxsz(desc)); \
1076 }
1077
1078 DO_MLA_IDX(gvec_mla_idx_h, uint16_t, +, H2)
1079 DO_MLA_IDX(gvec_mla_idx_s, uint32_t, +, H4)
1080 DO_MLA_IDX(gvec_mla_idx_d, uint64_t, +, )
1081
1082 DO_MLA_IDX(gvec_mls_idx_h, uint16_t, -, H2)
1083 DO_MLA_IDX(gvec_mls_idx_s, uint32_t, -, H4)
1084 DO_MLA_IDX(gvec_mls_idx_d, uint64_t, -, )
1085
1086 #undef DO_MLA_IDX
1087
1088 #define DO_FMUL_IDX(NAME, TYPE, H) \
1089 void HELPER(NAME)(void *vd, void *vn, void *vm, void *stat, uint32_t desc) \
1090 { \
1091 intptr_t i, j, oprsz = simd_oprsz(desc); \
1092 intptr_t segment = MIN(16, oprsz) / sizeof(TYPE); \
1093 intptr_t idx = simd_data(desc); \
1094 TYPE *d = vd, *n = vn, *m = vm; \
1095 for (i = 0; i < oprsz / sizeof(TYPE); i += segment) { \
1096 TYPE mm = m[H(i + idx)]; \
1097 for (j = 0; j < segment; j++) { \
1098 d[i + j] = TYPE##_mul(n[i + j], mm, stat); \
1099 } \
1100 } \
1101 clear_tail(d, oprsz, simd_maxsz(desc)); \
1102 }
1103
1104 DO_FMUL_IDX(gvec_fmul_idx_h, float16, H2)
1105 DO_FMUL_IDX(gvec_fmul_idx_s, float32, H4)
1106 DO_FMUL_IDX(gvec_fmul_idx_d, float64, )
1107
1108 #undef DO_FMUL_IDX
1109
1110 #define DO_FMLA_IDX(NAME, TYPE, H) \
1111 void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, \
1112 void *stat, uint32_t desc) \
1113 { \
1114 intptr_t i, j, oprsz = simd_oprsz(desc); \
1115 intptr_t segment = MIN(16, oprsz) / sizeof(TYPE); \
1116 TYPE op1_neg = extract32(desc, SIMD_DATA_SHIFT, 1); \
1117 intptr_t idx = desc >> (SIMD_DATA_SHIFT + 1); \
1118 TYPE *d = vd, *n = vn, *m = vm, *a = va; \
1119 op1_neg <<= (8 * sizeof(TYPE) - 1); \
1120 for (i = 0; i < oprsz / sizeof(TYPE); i += segment) { \
1121 TYPE mm = m[H(i + idx)]; \
1122 for (j = 0; j < segment; j++) { \
1123 d[i + j] = TYPE##_muladd(n[i + j] ^ op1_neg, \
1124 mm, a[i + j], 0, stat); \
1125 } \
1126 } \
1127 clear_tail(d, oprsz, simd_maxsz(desc)); \
1128 }
1129
1130 DO_FMLA_IDX(gvec_fmla_idx_h, float16, H2)
1131 DO_FMLA_IDX(gvec_fmla_idx_s, float32, H4)
1132 DO_FMLA_IDX(gvec_fmla_idx_d, float64, )
1133
1134 #undef DO_FMLA_IDX
1135
1136 #define DO_SAT(NAME, WTYPE, TYPEN, TYPEM, OP, MIN, MAX) \
1137 void HELPER(NAME)(void *vd, void *vq, void *vn, void *vm, uint32_t desc) \
1138 { \
1139 intptr_t i, oprsz = simd_oprsz(desc); \
1140 TYPEN *d = vd, *n = vn; TYPEM *m = vm; \
1141 bool q = false; \
1142 for (i = 0; i < oprsz / sizeof(TYPEN); i++) { \
1143 WTYPE dd = (WTYPE)n[i] OP m[i]; \
1144 if (dd < MIN) { \
1145 dd = MIN; \
1146 q = true; \
1147 } else if (dd > MAX) { \
1148 dd = MAX; \
1149 q = true; \
1150 } \
1151 d[i] = dd; \
1152 } \
1153 if (q) { \
1154 uint32_t *qc = vq; \
1155 qc[0] = 1; \
1156 } \
1157 clear_tail(d, oprsz, simd_maxsz(desc)); \
1158 }
1159
1160 DO_SAT(gvec_uqadd_b, int, uint8_t, uint8_t, +, 0, UINT8_MAX)
1161 DO_SAT(gvec_uqadd_h, int, uint16_t, uint16_t, +, 0, UINT16_MAX)
1162 DO_SAT(gvec_uqadd_s, int64_t, uint32_t, uint32_t, +, 0, UINT32_MAX)
1163
1164 DO_SAT(gvec_sqadd_b, int, int8_t, int8_t, +, INT8_MIN, INT8_MAX)
1165 DO_SAT(gvec_sqadd_h, int, int16_t, int16_t, +, INT16_MIN, INT16_MAX)
1166 DO_SAT(gvec_sqadd_s, int64_t, int32_t, int32_t, +, INT32_MIN, INT32_MAX)
1167
1168 DO_SAT(gvec_uqsub_b, int, uint8_t, uint8_t, -, 0, UINT8_MAX)
1169 DO_SAT(gvec_uqsub_h, int, uint16_t, uint16_t, -, 0, UINT16_MAX)
1170 DO_SAT(gvec_uqsub_s, int64_t, uint32_t, uint32_t, -, 0, UINT32_MAX)
1171
1172 DO_SAT(gvec_sqsub_b, int, int8_t, int8_t, -, INT8_MIN, INT8_MAX)
1173 DO_SAT(gvec_sqsub_h, int, int16_t, int16_t, -, INT16_MIN, INT16_MAX)
1174 DO_SAT(gvec_sqsub_s, int64_t, int32_t, int32_t, -, INT32_MIN, INT32_MAX)
1175
1176 #undef DO_SAT
1177
1178 void HELPER(gvec_uqadd_d)(void *vd, void *vq, void *vn,
1179 void *vm, uint32_t desc)
1180 {
1181 intptr_t i, oprsz = simd_oprsz(desc);
1182 uint64_t *d = vd, *n = vn, *m = vm;
1183 bool q = false;
1184
1185 for (i = 0; i < oprsz / 8; i++) {
1186 uint64_t nn = n[i], mm = m[i], dd = nn + mm;
1187 if (dd < nn) {
1188 dd = UINT64_MAX;
1189 q = true;
1190 }
1191 d[i] = dd;
1192 }
1193 if (q) {
1194 uint32_t *qc = vq;
1195 qc[0] = 1;
1196 }
1197 clear_tail(d, oprsz, simd_maxsz(desc));
1198 }
1199
1200 void HELPER(gvec_uqsub_d)(void *vd, void *vq, void *vn,
1201 void *vm, uint32_t desc)
1202 {
1203 intptr_t i, oprsz = simd_oprsz(desc);
1204 uint64_t *d = vd, *n = vn, *m = vm;
1205 bool q = false;
1206
1207 for (i = 0; i < oprsz / 8; i++) {
1208 uint64_t nn = n[i], mm = m[i], dd = nn - mm;
1209 if (nn < mm) {
1210 dd = 0;
1211 q = true;
1212 }
1213 d[i] = dd;
1214 }
1215 if (q) {
1216 uint32_t *qc = vq;
1217 qc[0] = 1;
1218 }
1219 clear_tail(d, oprsz, simd_maxsz(desc));
1220 }
1221
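/*
 * Signed saturation for the 64-bit lanes uses sign-bit arithmetic rather
 * than a wider type: addition overflows iff the operands have the same
 * sign and the result's sign differs; subtraction overflows iff the
 * operands have different signs and the result's sign differs from nn.
 * On overflow, (nn >> 63) ^ ~INT64_MIN yields INT64_MAX when nn is
 * non-negative and INT64_MIN when nn is negative.
 */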
1222 void HELPER(gvec_sqadd_d)(void *vd, void *vq, void *vn,
1223 void *vm, uint32_t desc)
1224 {
1225 intptr_t i, oprsz = simd_oprsz(desc);
1226 int64_t *d = vd, *n = vn, *m = vm;
1227 bool q = false;
1228
1229 for (i = 0; i < oprsz / 8; i++) {
1230 int64_t nn = n[i], mm = m[i], dd = nn + mm;
1231 if (((dd ^ nn) & ~(nn ^ mm)) & INT64_MIN) {
1232 dd = (nn >> 63) ^ ~INT64_MIN;
1233 q = true;
1234 }
1235 d[i] = dd;
1236 }
1237 if (q) {
1238 uint32_t *qc = vq;
1239 qc[0] = 1;
1240 }
1241 clear_tail(d, oprsz, simd_maxsz(desc));
1242 }
1243
1244 void HELPER(gvec_sqsub_d)(void *vd, void *vq, void *vn,
1245 void *vm, uint32_t desc)
1246 {
1247 intptr_t i, oprsz = simd_oprsz(desc);
1248 int64_t *d = vd, *n = vn, *m = vm;
1249 bool q = false;
1250
1251 for (i = 0; i < oprsz / 8; i++) {
1252 int64_t nn = n[i], mm = m[i], dd = nn - mm;
1253 if (((dd ^ nn) & (nn ^ mm)) & INT64_MIN) {
1254 dd = (nn >> 63) ^ ~INT64_MIN;
1255 q = true;
1256 }
1257 d[i] = dd;
1258 }
1259 if (q) {
1260 uint32_t *qc = vq;
1261 qc[0] = 1;
1262 }
1263 clear_tail(d, oprsz, simd_maxsz(desc));
1264 }
1265
1266
1267 #define DO_SRA(NAME, TYPE) \
1268 void HELPER(NAME)(void *vd, void *vn, uint32_t desc) \
1269 { \
1270 intptr_t i, oprsz = simd_oprsz(desc); \
1271 int shift = simd_data(desc); \
1272 TYPE *d = vd, *n = vn; \
1273 for (i = 0; i < oprsz / sizeof(TYPE); i++) { \
1274 d[i] += n[i] >> shift; \
1275 } \
1276 clear_tail(d, oprsz, simd_maxsz(desc)); \
1277 }
1278
1279 DO_SRA(gvec_ssra_b, int8_t)
1280 DO_SRA(gvec_ssra_h, int16_t)
1281 DO_SRA(gvec_ssra_s, int32_t)
1282 DO_SRA(gvec_ssra_d, int64_t)
1283
1284 DO_SRA(gvec_usra_b, uint8_t)
1285 DO_SRA(gvec_usra_h, uint16_t)
1286 DO_SRA(gvec_usra_s, uint32_t)
1287 DO_SRA(gvec_usra_d, uint64_t)
1288
1289 #undef DO_SRA
1290
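/*
 * Rounding right shift: computing n >> (shift - 1) first and then adding
 * back its low bit is equivalent to adding 1 << (shift - 1) before the
 * final shift, but cannot overflow.  For example, with int8_t n = 7 and
 * shift = 2: tmp = 3, result = 1 + 1 = 2, matching round(7 / 4).
 * shift is in the range 1..esize, so shift - 1 is always valid.
 */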
1291 #define DO_RSHR(NAME, TYPE) \
1292 void HELPER(NAME)(void *vd, void *vn, uint32_t desc) \
1293 { \
1294 intptr_t i, oprsz = simd_oprsz(desc); \
1295 int shift = simd_data(desc); \
1296 TYPE *d = vd, *n = vn; \
1297 for (i = 0; i < oprsz / sizeof(TYPE); i++) { \
1298 TYPE tmp = n[i] >> (shift - 1); \
1299 d[i] = (tmp >> 1) + (tmp & 1); \
1300 } \
1301 clear_tail(d, oprsz, simd_maxsz(desc)); \
1302 }
1303
1304 DO_RSHR(gvec_srshr_b, int8_t)
1305 DO_RSHR(gvec_srshr_h, int16_t)
1306 DO_RSHR(gvec_srshr_s, int32_t)
1307 DO_RSHR(gvec_srshr_d, int64_t)
1308
1309 DO_RSHR(gvec_urshr_b, uint8_t)
1310 DO_RSHR(gvec_urshr_h, uint16_t)
1311 DO_RSHR(gvec_urshr_s, uint32_t)
1312 DO_RSHR(gvec_urshr_d, uint64_t)
1313
1314 #undef DO_RSHR
1315
1316 #define DO_RSRA(NAME, TYPE) \
1317 void HELPER(NAME)(void *vd, void *vn, uint32_t desc) \
1318 { \
1319 intptr_t i, oprsz = simd_oprsz(desc); \
1320 int shift = simd_data(desc); \
1321 TYPE *d = vd, *n = vn; \
1322 for (i = 0; i < oprsz / sizeof(TYPE); i++) { \
1323 TYPE tmp = n[i] >> (shift - 1); \
1324 d[i] += (tmp >> 1) + (tmp & 1); \
1325 } \
1326 clear_tail(d, oprsz, simd_maxsz(desc)); \
1327 }
1328
1329 DO_RSRA(gvec_srsra_b, int8_t)
1330 DO_RSRA(gvec_srsra_h, int16_t)
1331 DO_RSRA(gvec_srsra_s, int32_t)
1332 DO_RSRA(gvec_srsra_d, int64_t)
1333
1334 DO_RSRA(gvec_ursra_b, uint8_t)
1335 DO_RSRA(gvec_ursra_h, uint16_t)
1336 DO_RSRA(gvec_ursra_s, uint32_t)
1337 DO_RSRA(gvec_ursra_d, uint64_t)
1338
1339 #undef DO_RSRA
1340
1341 #define DO_SRI(NAME, TYPE) \
1342 void HELPER(NAME)(void *vd, void *vn, uint32_t desc) \
1343 { \
1344 intptr_t i, oprsz = simd_oprsz(desc); \
1345 int shift = simd_data(desc); \
1346 TYPE *d = vd, *n = vn; \
1347 for (i = 0; i < oprsz / sizeof(TYPE); i++) { \
1348 d[i] = deposit64(d[i], 0, sizeof(TYPE) * 8 - shift, n[i] >> shift); \
1349 } \
1350 clear_tail(d, oprsz, simd_maxsz(desc)); \
1351 }
1352
1353 DO_SRI(gvec_sri_b, uint8_t)
1354 DO_SRI(gvec_sri_h, uint16_t)
1355 DO_SRI(gvec_sri_s, uint32_t)
1356 DO_SRI(gvec_sri_d, uint64_t)
1357
1358 #undef DO_SRI
1359
1360 #define DO_SLI(NAME, TYPE) \
1361 void HELPER(NAME)(void *vd, void *vn, uint32_t desc) \
1362 { \
1363 intptr_t i, oprsz = simd_oprsz(desc); \
1364 int shift = simd_data(desc); \
1365 TYPE *d = vd, *n = vn; \
1366 for (i = 0; i < oprsz / sizeof(TYPE); i++) { \
1367 d[i] = deposit64(d[i], shift, sizeof(TYPE) * 8 - shift, n[i]); \
1368 } \
1369 clear_tail(d, oprsz, simd_maxsz(desc)); \
1370 }
1371
1372 DO_SLI(gvec_sli_b, uint8_t)
1373 DO_SLI(gvec_sli_h, uint16_t)
1374 DO_SLI(gvec_sli_s, uint32_t)
1375 DO_SLI(gvec_sli_d, uint64_t)
1376
1377 #undef DO_SLI
1378
1379 /*
1380 * Convert float16 to float32, raising no exceptions and
1381 * preserving exceptional values, including SNaN.
1382 * This is effectively an unpack+repack operation.
1383 */
1384 static float32 float16_to_float32_by_bits(uint32_t f16, bool fz16)
1385 {
1386 const int f16_bias = 15;
1387 const int f32_bias = 127;
1388 uint32_t sign = extract32(f16, 15, 1);
1389 uint32_t exp = extract32(f16, 10, 5);
1390 uint32_t frac = extract32(f16, 0, 10);
1391
1392 if (exp == 0x1f) {
1393 /* Inf or NaN */
1394 exp = 0xff;
1395 } else if (exp == 0) {
1396 /* Zero or denormal. */
1397 if (frac != 0) {
1398 if (fz16) {
1399 frac = 0;
1400 } else {
1401 /*
1402 * Denormal; these are all normal float32.
1403 * Shift the fraction so that the msb is at bit 11,
1404 * then remove bit 11 as the implicit bit of the
1405 * normalized float32. Note that we still go through
1406 * the shift for normal numbers below, to put the
1407 * float32 fraction at the right place.
1408 */
1409 int shift = clz32(frac) - 21;
1410 frac = (frac << shift) & 0x3ff;
1411 exp = f32_bias - f16_bias - shift + 1;
1412 }
1413 }
1414 } else {
1415 /* Normal number; adjust the bias. */
1416 exp += f32_bias - f16_bias;
1417 }
1418 sign <<= 31;
1419 exp <<= 23;
1420 frac <<= 23 - 10;
1421
1422 return sign | exp | frac;
1423 }
1424
1425 static uint64_t load4_f16(uint64_t *ptr, int is_q, int is_2)
1426 {
1427 /*
1428 * Branchless load of u32[0], u64[0], u32[1], or u64[1].
1429 * Load the 2nd qword iff is_q & is_2.
1430 * Shift to the 2nd dword iff !is_q & is_2.
1431 * For !is_q & !is_2, the upper bits of the result are garbage.
1432 */
1433 return ptr[is_q & is_2] >> ((is_2 & ~is_q) << 5);
1434 }
1435
1436 /*
1437 * Note that FMLAL requires oprsz == 8 or oprsz == 16,
1438 * as there are not yet SVE versions that might use blocking.
1439 */
1440
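/*
 * The desc data holds two bits: bit 0 (is_s) selects FMLSL, implemented
 * by flipping the sign bit of every f16 element of n at once; bit 1
 * (is_2) selects the FMLAL2/FMLSL2 forms, which take their f16 inputs
 * from the top half of each source instead of the bottom.
 */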
1441 static void do_fmlal(float32 *d, void *vn, void *vm, float_status *fpst,
1442 uint32_t desc, bool fz16)
1443 {
1444 intptr_t i, oprsz = simd_oprsz(desc);
1445 int is_s = extract32(desc, SIMD_DATA_SHIFT, 1);
1446 int is_2 = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
1447 int is_q = oprsz == 16;
1448 uint64_t n_4, m_4;
1449
1450 /* Pre-load all of the f16 data, avoiding overlap issues. */
1451 n_4 = load4_f16(vn, is_q, is_2);
1452 m_4 = load4_f16(vm, is_q, is_2);
1453
1454 /* Negate all inputs for FMLSL at once. */
1455 if (is_s) {
1456 n_4 ^= 0x8000800080008000ull;
1457 }
1458
1459 for (i = 0; i < oprsz / 4; i++) {
1460 float32 n_1 = float16_to_float32_by_bits(n_4 >> (i * 16), fz16);
1461 float32 m_1 = float16_to_float32_by_bits(m_4 >> (i * 16), fz16);
1462 d[H4(i)] = float32_muladd(n_1, m_1, d[H4(i)], 0, fpst);
1463 }
1464 clear_tail(d, oprsz, simd_maxsz(desc));
1465 }
1466
1467 void HELPER(gvec_fmlal_a32)(void *vd, void *vn, void *vm,
1468 void *venv, uint32_t desc)
1469 {
1470 CPUARMState *env = venv;
1471 do_fmlal(vd, vn, vm, &env->vfp.standard_fp_status, desc,
1472 get_flush_inputs_to_zero(&env->vfp.fp_status_f16));
1473 }
1474
1475 void HELPER(gvec_fmlal_a64)(void *vd, void *vn, void *vm,
1476 void *venv, uint32_t desc)
1477 {
1478 CPUARMState *env = venv;
1479 do_fmlal(vd, vn, vm, &env->vfp.fp_status, desc,
1480 get_flush_inputs_to_zero(&env->vfp.fp_status_f16));
1481 }
1482
1483 static void do_fmlal_idx(float32 *d, void *vn, void *vm, float_status *fpst,
1484 uint32_t desc, bool fz16)
1485 {
1486 intptr_t i, oprsz = simd_oprsz(desc);
1487 int is_s = extract32(desc, SIMD_DATA_SHIFT, 1);
1488 int is_2 = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
1489 int index = extract32(desc, SIMD_DATA_SHIFT + 2, 3);
1490 int is_q = oprsz == 16;
1491 uint64_t n_4;
1492 float32 m_1;
1493
1494 /* Pre-load all of the f16 data, avoiding overlap issues. */
1495 n_4 = load4_f16(vn, is_q, is_2);
1496
1497 /* Negate all inputs for FMLSL at once. */
1498 if (is_s) {
1499 n_4 ^= 0x8000800080008000ull;
1500 }
1501
1502 m_1 = float16_to_float32_by_bits(((float16 *)vm)[H2(index)], fz16);
1503
1504 for (i = 0; i < oprsz / 4; i++) {
1505 float32 n_1 = float16_to_float32_by_bits(n_4 >> (i * 16), fz16);
1506 d[H4(i)] = float32_muladd(n_1, m_1, d[H4(i)], 0, fpst);
1507 }
1508 clear_tail(d, oprsz, simd_maxsz(desc));
1509 }
1510
1511 void HELPER(gvec_fmlal_idx_a32)(void *vd, void *vn, void *vm,
1512 void *venv, uint32_t desc)
1513 {
1514 CPUARMState *env = venv;
1515 do_fmlal_idx(vd, vn, vm, &env->vfp.standard_fp_status, desc,
1516 get_flush_inputs_to_zero(&env->vfp.fp_status_f16));
1517 }
1518
1519 void HELPER(gvec_fmlal_idx_a64)(void *vd, void *vn, void *vm,
1520 void *venv, uint32_t desc)
1521 {
1522 CPUARMState *env = venv;
1523 do_fmlal_idx(vd, vn, vm, &env->vfp.fp_status, desc,
1524 get_flush_inputs_to_zero(&env->vfp.fp_status_f16));
1525 }
1526
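/*
 * Vector shifts by a signed, per-element shift count taken from the low
 * byte of m: non-negative counts shift left, negative counts shift right
 * by -mm.  Counts >= the element size give 0 for left shifts and for
 * unsigned right shifts; signed right shifts clamp the count so the
 * result is the input's sign bit replicated.
 */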
1527 void HELPER(gvec_sshl_b)(void *vd, void *vn, void *vm, uint32_t desc)
1528 {
1529 intptr_t i, opr_sz = simd_oprsz(desc);
1530 int8_t *d = vd, *n = vn, *m = vm;
1531
1532 for (i = 0; i < opr_sz; ++i) {
1533 int8_t mm = m[i];
1534 int8_t nn = n[i];
1535 int8_t res = 0;
1536 if (mm >= 0) {
1537 if (mm < 8) {
1538 res = nn << mm;
1539 }
1540 } else {
1541 res = nn >> (mm > -8 ? -mm : 7);
1542 }
1543 d[i] = res;
1544 }
1545 clear_tail(d, opr_sz, simd_maxsz(desc));
1546 }
1547
1548 void HELPER(gvec_sshl_h)(void *vd, void *vn, void *vm, uint32_t desc)
1549 {
1550 intptr_t i, opr_sz = simd_oprsz(desc);
1551 int16_t *d = vd, *n = vn, *m = vm;
1552
1553 for (i = 0; i < opr_sz / 2; ++i) {
1554 int8_t mm = m[i]; /* only 8 bits of shift are significant */
1555 int16_t nn = n[i];
1556 int16_t res = 0;
1557 if (mm >= 0) {
1558 if (mm < 16) {
1559 res = nn << mm;
1560 }
1561 } else {
1562 res = nn >> (mm > -16 ? -mm : 15);
1563 }
1564 d[i] = res;
1565 }
1566 clear_tail(d, opr_sz, simd_maxsz(desc));
1567 }
1568
1569 void HELPER(gvec_ushl_b)(void *vd, void *vn, void *vm, uint32_t desc)
1570 {
1571 intptr_t i, opr_sz = simd_oprsz(desc);
1572 uint8_t *d = vd, *n = vn, *m = vm;
1573
1574 for (i = 0; i < opr_sz; ++i) {
1575 int8_t mm = m[i];
1576 uint8_t nn = n[i];
1577 uint8_t res = 0;
1578 if (mm >= 0) {
1579 if (mm < 8) {
1580 res = nn << mm;
1581 }
1582 } else {
1583 if (mm > -8) {
1584 res = nn >> -mm;
1585 }
1586 }
1587 d[i] = res;
1588 }
1589 clear_tail(d, opr_sz, simd_maxsz(desc));
1590 }
1591
1592 void HELPER(gvec_ushl_h)(void *vd, void *vn, void *vm, uint32_t desc)
1593 {
1594 intptr_t i, opr_sz = simd_oprsz(desc);
1595 uint16_t *d = vd, *n = vn, *m = vm;
1596
1597 for (i = 0; i < opr_sz / 2; ++i) {
1598 int8_t mm = m[i]; /* only 8 bits of shift are significant */
1599 uint16_t nn = n[i];
1600 uint16_t res = 0;
1601 if (mm >= 0) {
1602 if (mm < 16) {
1603 res = nn << mm;
1604 }
1605 } else {
1606 if (mm > -16) {
1607 res = nn >> -mm;
1608 }
1609 }
1610 d[i] = res;
1611 }
1612 clear_tail(d, opr_sz, simd_maxsz(desc));
1613 }
1614
1615 /*
1616 * 8x8->8 polynomial multiply.
1617 *
1618 * Polynomial multiplication is like integer multiplication except the
1619 * partial products are XORed, not added.
1620 *
1621 * TODO: expose this as a generic vector operation, as it is a common
1622 * crypto building block.
1623 */
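/*
 * The inner loop below processes bit j of every byte lane at once:
 * (nn & 0x0101...01) * 0xff broadcasts the low bit of each byte into a
 * full-byte mask, which selects whether the current (shifted) mm byte is
 * xored into the result.  The & 0xfefe...fe keeps each byte's partial
 * product from spilling into its neighbour when mm is shifted.
 */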
1624 void HELPER(gvec_pmul_b)(void *vd, void *vn, void *vm, uint32_t desc)
1625 {
1626 intptr_t i, j, opr_sz = simd_oprsz(desc);
1627 uint64_t *d = vd, *n = vn, *m = vm;
1628
1629 for (i = 0; i < opr_sz / 8; ++i) {
1630 uint64_t nn = n[i];
1631 uint64_t mm = m[i];
1632 uint64_t rr = 0;
1633
1634 for (j = 0; j < 8; ++j) {
1635 uint64_t mask = (nn & 0x0101010101010101ull) * 0xff;
1636 rr ^= mm & mask;
1637 mm = (mm << 1) & 0xfefefefefefefefeull;
1638 nn >>= 1;
1639 }
1640 d[i] = rr;
1641 }
1642 clear_tail(d, opr_sz, simd_maxsz(desc));
1643 }
1644
1645 /*
1646 * 64x64->128 polynomial multiply.
1647 * Because the lanes are not accessed in strict columns,
1648 * this probably cannot be turned into a generic helper.
1649 */
1650 void HELPER(gvec_pmull_q)(void *vd, void *vn, void *vm, uint32_t desc)
1651 {
1652 intptr_t i, j, opr_sz = simd_oprsz(desc);
1653 intptr_t hi = simd_data(desc);
1654 uint64_t *d = vd, *n = vn, *m = vm;
1655
1656 for (i = 0; i < opr_sz / 8; i += 2) {
1657 uint64_t nn = n[i + hi];
1658 uint64_t mm = m[i + hi];
1659 uint64_t rhi = 0;
1660 uint64_t rlo = 0;
1661
1662 /* Bit 0 can only influence the low 64-bit result. */
1663 if (nn & 1) {
1664 rlo = mm;
1665 }
1666
1667 for (j = 1; j < 64; ++j) {
1668 uint64_t mask = -((nn >> j) & 1);
1669 rlo ^= (mm << j) & mask;
1670 rhi ^= (mm >> (64 - j)) & mask;
1671 }
1672 d[i] = rlo;
1673 d[i + 1] = rhi;
1674 }
1675 clear_tail(d, opr_sz, simd_maxsz(desc));
1676 }
1677
1678 /*
1679 * 8x8->16 polynomial multiply.
1680 *
1681 * The byte inputs are expanded to (or extracted from) half-words.
1682 * Note that neon and sve2 get the inputs from different positions.
1683 * This allows 4 bytes to be processed in parallel with uint64_t.
1684 */
1685
1686 static uint64_t expand_byte_to_half(uint64_t x)
1687 {
1688 return (x & 0x000000ff)
1689 | ((x & 0x0000ff00) << 8)
1690 | ((x & 0x00ff0000) << 16)
1691 | ((x & 0xff000000) << 24);
1692 }
1693
1694 static uint64_t pmull_h(uint64_t op1, uint64_t op2)
1695 {
1696 uint64_t result = 0;
1697 int i;
1698
1699 for (i = 0; i < 8; ++i) {
1700 uint64_t mask = (op1 & 0x0001000100010001ull) * 0xffff;
1701 result ^= op2 & mask;
1702 op1 >>= 1;
1703 op2 <<= 1;
1704 }
1705 return result;
1706 }
1707
1708 void HELPER(neon_pmull_h)(void *vd, void *vn, void *vm, uint32_t desc)
1709 {
1710 int hi = simd_data(desc);
1711 uint64_t *d = vd, *n = vn, *m = vm;
1712 uint64_t nn = n[hi], mm = m[hi];
1713
1714 d[0] = pmull_h(expand_byte_to_half(nn), expand_byte_to_half(mm));
1715 nn >>= 32;
1716 mm >>= 32;
1717 d[1] = pmull_h(expand_byte_to_half(nn), expand_byte_to_half(mm));
1718
1719 clear_tail(d, 16, simd_maxsz(desc));
1720 }
1721
1722 #ifdef TARGET_AARCH64
1723 void HELPER(sve2_pmull_h)(void *vd, void *vn, void *vm, uint32_t desc)
1724 {
1725 int shift = simd_data(desc) * 8;
1726 intptr_t i, opr_sz = simd_oprsz(desc);
1727 uint64_t *d = vd, *n = vn, *m = vm;
1728
1729 for (i = 0; i < opr_sz / 8; ++i) {
1730 uint64_t nn = (n[i] >> shift) & 0x00ff00ff00ff00ffull;
1731 uint64_t mm = (m[i] >> shift) & 0x00ff00ff00ff00ffull;
1732
1733 d[i] = pmull_h(nn, mm);
1734 }
1735 }
1736 #endif
1737
1738 #define DO_CMP0(NAME, TYPE, OP) \
1739 void HELPER(NAME)(void *vd, void *vn, uint32_t desc) \
1740 { \
1741 intptr_t i, opr_sz = simd_oprsz(desc); \
1742 for (i = 0; i < opr_sz; i += sizeof(TYPE)) { \
1743 TYPE nn = *(TYPE *)(vn + i); \
1744 *(TYPE *)(vd + i) = -(nn OP 0); \
1745 } \
1746 clear_tail(vd, opr_sz, simd_maxsz(desc)); \
1747 }
1748
1749 DO_CMP0(gvec_ceq0_b, int8_t, ==)
1750 DO_CMP0(gvec_clt0_b, int8_t, <)
1751 DO_CMP0(gvec_cle0_b, int8_t, <=)
1752 DO_CMP0(gvec_cgt0_b, int8_t, >)
1753 DO_CMP0(gvec_cge0_b, int8_t, >=)
1754
1755 DO_CMP0(gvec_ceq0_h, int16_t, ==)
1756 DO_CMP0(gvec_clt0_h, int16_t, <)
1757 DO_CMP0(gvec_cle0_h, int16_t, <=)
1758 DO_CMP0(gvec_cgt0_h, int16_t, >)
1759 DO_CMP0(gvec_cge0_h, int16_t, >=)
1760
1761 #undef DO_CMP0
1762
1763 #define DO_ABD(NAME, TYPE) \
1764 void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
1765 { \
1766 intptr_t i, opr_sz = simd_oprsz(desc); \
1767 TYPE *d = vd, *n = vn, *m = vm; \
1768 \
1769 for (i = 0; i < opr_sz / sizeof(TYPE); ++i) { \
1770 d[i] = n[i] < m[i] ? m[i] - n[i] : n[i] - m[i]; \
1771 } \
1772 clear_tail(d, opr_sz, simd_maxsz(desc)); \
1773 }
1774
1775 DO_ABD(gvec_sabd_b, int8_t)
1776 DO_ABD(gvec_sabd_h, int16_t)
1777 DO_ABD(gvec_sabd_s, int32_t)
1778 DO_ABD(gvec_sabd_d, int64_t)
1779
1780 DO_ABD(gvec_uabd_b, uint8_t)
1781 DO_ABD(gvec_uabd_h, uint16_t)
1782 DO_ABD(gvec_uabd_s, uint32_t)
1783 DO_ABD(gvec_uabd_d, uint64_t)
1784
1785 #undef DO_ABD
1786
1787 #define DO_ABA(NAME, TYPE) \
1788 void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
1789 { \
1790 intptr_t i, opr_sz = simd_oprsz(desc); \
1791 TYPE *d = vd, *n = vn, *m = vm; \
1792 \
1793 for (i = 0; i < opr_sz / sizeof(TYPE); ++i) { \
1794 d[i] += n[i] < m[i] ? m[i] - n[i] : n[i] - m[i]; \
1795 } \
1796 clear_tail(d, opr_sz, simd_maxsz(desc)); \
1797 }
1798
1799 DO_ABA(gvec_saba_b, int8_t)
1800 DO_ABA(gvec_saba_h, int16_t)
1801 DO_ABA(gvec_saba_s, int32_t)
1802 DO_ABA(gvec_saba_d, int64_t)
1803
1804 DO_ABA(gvec_uaba_b, uint8_t)
1805 DO_ABA(gvec_uaba_h, uint16_t)
1806 DO_ABA(gvec_uaba_s, uint32_t)
1807 DO_ABA(gvec_uaba_d, uint64_t)
1808
1809 #undef DO_ABA
1810
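/*
 * The Neon floating-point pairwise ops operate on a single 64-bit
 * D register, so these helpers process exactly 64 bits: two float32 or
 * four float16 results, with no loop over oprsz and no tail clear.
 */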
1811 #define DO_NEON_PAIRWISE(NAME, OP) \
1812 void HELPER(NAME##s)(void *vd, void *vn, void *vm, \
1813 void *stat, uint32_t oprsz) \
1814 { \
1815 float_status *fpst = stat; \
1816 float32 *d = vd; \
1817 float32 *n = vn; \
1818 float32 *m = vm; \
1819 float32 r0, r1; \
1820 \
1821 /* Read all inputs before writing outputs in case vm == vd */ \
1822 r0 = float32_##OP(n[H4(0)], n[H4(1)], fpst); \
1823 r1 = float32_##OP(m[H4(0)], m[H4(1)], fpst); \
1824 \
1825 d[H4(0)] = r0; \
1826 d[H4(1)] = r1; \
1827 } \
1828 \
1829 void HELPER(NAME##h)(void *vd, void *vn, void *vm, \
1830 void *stat, uint32_t oprsz) \
1831 { \
1832 float_status *fpst = stat; \
1833 float16 *d = vd; \
1834 float16 *n = vn; \
1835 float16 *m = vm; \
1836 float16 r0, r1, r2, r3; \
1837 \
1838 /* Read all inputs before writing outputs in case vm == vd */ \
1839 r0 = float16_##OP(n[H2(0)], n[H2(1)], fpst); \
1840 r1 = float16_##OP(n[H2(2)], n[H2(3)], fpst); \
1841 r2 = float16_##OP(m[H2(0)], m[H2(1)], fpst); \
1842 r3 = float16_##OP(m[H2(2)], m[H2(3)], fpst); \
1843 \
1844 d[H2(0)] = r0; \
1845 d[H2(1)] = r1; \
1846 d[H2(2)] = r2; \
1847 d[H2(3)] = r3; \
1848 }
1849
1850 DO_NEON_PAIRWISE(neon_padd, add)
1851 DO_NEON_PAIRWISE(neon_pmax, max)
1852 DO_NEON_PAIRWISE(neon_pmin, min)
1853
1854 #undef DO_NEON_PAIRWISE
1855
1856 #define DO_VCVT_FIXED(NAME, FUNC, TYPE) \
1857 void HELPER(NAME)(void *vd, void *vn, void *stat, uint32_t desc) \
1858 { \
1859 intptr_t i, oprsz = simd_oprsz(desc); \
1860 int shift = simd_data(desc); \
1861 TYPE *d = vd, *n = vn; \
1862 float_status *fpst = stat; \
1863 for (i = 0; i < oprsz / sizeof(TYPE); i++) { \
1864 d[i] = FUNC(n[i], shift, fpst); \
1865 } \
1866 clear_tail(d, oprsz, simd_maxsz(desc)); \
1867 }
1868
1869 DO_VCVT_FIXED(gvec_vcvt_sf, helper_vfp_sltos, uint32_t)
1870 DO_VCVT_FIXED(gvec_vcvt_uf, helper_vfp_ultos, uint32_t)
1871 DO_VCVT_FIXED(gvec_vcvt_fs, helper_vfp_tosls_round_to_zero, uint32_t)
1872 DO_VCVT_FIXED(gvec_vcvt_fu, helper_vfp_touls_round_to_zero, uint32_t)
1873 DO_VCVT_FIXED(gvec_vcvt_sh, helper_vfp_shtoh, uint16_t)
1874 DO_VCVT_FIXED(gvec_vcvt_uh, helper_vfp_uhtoh, uint16_t)
1875 DO_VCVT_FIXED(gvec_vcvt_hs, helper_vfp_toshh_round_to_zero, uint16_t)
1876 DO_VCVT_FIXED(gvec_vcvt_hu, helper_vfp_touhh_round_to_zero, uint16_t)
1877
1878 #undef DO_VCVT_FIXED
1879
1880 #define DO_VCVT_RMODE(NAME, FUNC, TYPE) \
1881 void HELPER(NAME)(void *vd, void *vn, void *stat, uint32_t desc) \
1882 { \
1883 float_status *fpst = stat; \
1884 intptr_t i, oprsz = simd_oprsz(desc); \
1885 uint32_t rmode = simd_data(desc); \
1886 uint32_t prev_rmode = get_float_rounding_mode(fpst); \
1887 TYPE *d = vd, *n = vn; \
1888 set_float_rounding_mode(rmode, fpst); \
1889 for (i = 0; i < oprsz / sizeof(TYPE); i++) { \
1890 d[i] = FUNC(n[i], 0, fpst); \
1891 } \
1892 set_float_rounding_mode(prev_rmode, fpst); \
1893 clear_tail(d, oprsz, simd_maxsz(desc)); \
1894 }
1895
1896 DO_VCVT_RMODE(gvec_vcvt_rm_ss, helper_vfp_tosls, uint32_t)
1897 DO_VCVT_RMODE(gvec_vcvt_rm_us, helper_vfp_touls, uint32_t)
1898 DO_VCVT_RMODE(gvec_vcvt_rm_sh, helper_vfp_toshh, uint16_t)
1899 DO_VCVT_RMODE(gvec_vcvt_rm_uh, helper_vfp_touhh, uint16_t)
1900
1901 #undef DO_VCVT_RMODE
1902
1903 #define DO_VRINT_RMODE(NAME, FUNC, TYPE) \
1904 void HELPER(NAME)(void *vd, void *vn, void *stat, uint32_t desc) \
1905 { \
1906 float_status *fpst = stat; \
1907 intptr_t i, oprsz = simd_oprsz(desc); \
1908 uint32_t rmode = simd_data(desc); \
1909 uint32_t prev_rmode = get_float_rounding_mode(fpst); \
1910 TYPE *d = vd, *n = vn; \
1911 set_float_rounding_mode(rmode, fpst); \
1912 for (i = 0; i < oprsz / sizeof(TYPE); i++) { \
1913 d[i] = FUNC(n[i], fpst); \
1914 } \
1915 set_float_rounding_mode(prev_rmode, fpst); \
1916 clear_tail(d, oprsz, simd_maxsz(desc)); \
1917 }
1918
1919 DO_VRINT_RMODE(gvec_vrint_rm_h, helper_rinth, uint16_t)
1920 DO_VRINT_RMODE(gvec_vrint_rm_s, helper_rints, uint32_t)
1921
1922 #undef DO_VRINT_RMODE