target/arm: Implement VFP fp16 for VABS, VNEG, VSQRT
[qemu.git] / accel / tcg / tcg-runtime-gvec.c
1 /*
2 * Generic vectorized operation runtime
3 *
4 * Copyright (c) 2018 Linaro
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "qemu/osdep.h"
21 #include "qemu/host-utils.h"
22 #include "cpu.h"
23 #include "exec/helper-proto.h"
24 #include "tcg/tcg-gvec-desc.h"
25
26
/*
 * Zero the tail of the destination vector: bytes in [oprsz, maxsz)
 * were not written by the operation and must read as zero.
 */
static inline void clear_high(void *d, intptr_t oprsz, uint32_t desc)
{
    const intptr_t maxsz = simd_maxsz(desc);

    if (unlikely(maxsz > oprsz)) {
        for (intptr_t off = oprsz; off < maxsz; off += sizeof(uint64_t)) {
            *(uint64_t *)(d + off) = 0;
        }
    }
}
38
39 void HELPER(gvec_add8)(void *d, void *a, void *b, uint32_t desc)
40 {
41 intptr_t oprsz = simd_oprsz(desc);
42 intptr_t i;
43
44 for (i = 0; i < oprsz; i += sizeof(uint8_t)) {
45 *(uint8_t *)(d + i) = *(uint8_t *)(a + i) + *(uint8_t *)(b + i);
46 }
47 clear_high(d, oprsz, desc);
48 }
49
50 void HELPER(gvec_add16)(void *d, void *a, void *b, uint32_t desc)
51 {
52 intptr_t oprsz = simd_oprsz(desc);
53 intptr_t i;
54
55 for (i = 0; i < oprsz; i += sizeof(uint16_t)) {
56 *(uint16_t *)(d + i) = *(uint16_t *)(a + i) + *(uint16_t *)(b + i);
57 }
58 clear_high(d, oprsz, desc);
59 }
60
61 void HELPER(gvec_add32)(void *d, void *a, void *b, uint32_t desc)
62 {
63 intptr_t oprsz = simd_oprsz(desc);
64 intptr_t i;
65
66 for (i = 0; i < oprsz; i += sizeof(uint32_t)) {
67 *(uint32_t *)(d + i) = *(uint32_t *)(a + i) + *(uint32_t *)(b + i);
68 }
69 clear_high(d, oprsz, desc);
70 }
71
72 void HELPER(gvec_add64)(void *d, void *a, void *b, uint32_t desc)
73 {
74 intptr_t oprsz = simd_oprsz(desc);
75 intptr_t i;
76
77 for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
78 *(uint64_t *)(d + i) = *(uint64_t *)(a + i) + *(uint64_t *)(b + i);
79 }
80 clear_high(d, oprsz, desc);
81 }
82
83 void HELPER(gvec_adds8)(void *d, void *a, uint64_t b, uint32_t desc)
84 {
85 intptr_t oprsz = simd_oprsz(desc);
86 intptr_t i;
87
88 for (i = 0; i < oprsz; i += sizeof(uint8_t)) {
89 *(uint8_t *)(d + i) = *(uint8_t *)(a + i) + (uint8_t)b;
90 }
91 clear_high(d, oprsz, desc);
92 }
93
94 void HELPER(gvec_adds16)(void *d, void *a, uint64_t b, uint32_t desc)
95 {
96 intptr_t oprsz = simd_oprsz(desc);
97 intptr_t i;
98
99 for (i = 0; i < oprsz; i += sizeof(uint16_t)) {
100 *(uint16_t *)(d + i) = *(uint16_t *)(a + i) + (uint16_t)b;
101 }
102 clear_high(d, oprsz, desc);
103 }
104
105 void HELPER(gvec_adds32)(void *d, void *a, uint64_t b, uint32_t desc)
106 {
107 intptr_t oprsz = simd_oprsz(desc);
108 intptr_t i;
109
110 for (i = 0; i < oprsz; i += sizeof(uint32_t)) {
111 *(uint32_t *)(d + i) = *(uint32_t *)(a + i) + (uint32_t)b;
112 }
113 clear_high(d, oprsz, desc);
114 }
115
116 void HELPER(gvec_adds64)(void *d, void *a, uint64_t b, uint32_t desc)
117 {
118 intptr_t oprsz = simd_oprsz(desc);
119 intptr_t i;
120
121 for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
122 *(uint64_t *)(d + i) = *(uint64_t *)(a + i) + b;
123 }
124 clear_high(d, oprsz, desc);
125 }
126
127 void HELPER(gvec_sub8)(void *d, void *a, void *b, uint32_t desc)
128 {
129 intptr_t oprsz = simd_oprsz(desc);
130 intptr_t i;
131
132 for (i = 0; i < oprsz; i += sizeof(uint8_t)) {
133 *(uint8_t *)(d + i) = *(uint8_t *)(a + i) - *(uint8_t *)(b + i);
134 }
135 clear_high(d, oprsz, desc);
136 }
137
138 void HELPER(gvec_sub16)(void *d, void *a, void *b, uint32_t desc)
139 {
140 intptr_t oprsz = simd_oprsz(desc);
141 intptr_t i;
142
143 for (i = 0; i < oprsz; i += sizeof(uint16_t)) {
144 *(uint16_t *)(d + i) = *(uint16_t *)(a + i) - *(uint16_t *)(b + i);
145 }
146 clear_high(d, oprsz, desc);
147 }
148
149 void HELPER(gvec_sub32)(void *d, void *a, void *b, uint32_t desc)
150 {
151 intptr_t oprsz = simd_oprsz(desc);
152 intptr_t i;
153
154 for (i = 0; i < oprsz; i += sizeof(uint32_t)) {
155 *(uint32_t *)(d + i) = *(uint32_t *)(a + i) - *(uint32_t *)(b + i);
156 }
157 clear_high(d, oprsz, desc);
158 }
159
160 void HELPER(gvec_sub64)(void *d, void *a, void *b, uint32_t desc)
161 {
162 intptr_t oprsz = simd_oprsz(desc);
163 intptr_t i;
164
165 for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
166 *(uint64_t *)(d + i) = *(uint64_t *)(a + i) - *(uint64_t *)(b + i);
167 }
168 clear_high(d, oprsz, desc);
169 }
170
171 void HELPER(gvec_subs8)(void *d, void *a, uint64_t b, uint32_t desc)
172 {
173 intptr_t oprsz = simd_oprsz(desc);
174 intptr_t i;
175
176 for (i = 0; i < oprsz; i += sizeof(uint8_t)) {
177 *(uint8_t *)(d + i) = *(uint8_t *)(a + i) - (uint8_t)b;
178 }
179 clear_high(d, oprsz, desc);
180 }
181
182 void HELPER(gvec_subs16)(void *d, void *a, uint64_t b, uint32_t desc)
183 {
184 intptr_t oprsz = simd_oprsz(desc);
185 intptr_t i;
186
187 for (i = 0; i < oprsz; i += sizeof(uint16_t)) {
188 *(uint16_t *)(d + i) = *(uint16_t *)(a + i) - (uint16_t)b;
189 }
190 clear_high(d, oprsz, desc);
191 }
192
193 void HELPER(gvec_subs32)(void *d, void *a, uint64_t b, uint32_t desc)
194 {
195 intptr_t oprsz = simd_oprsz(desc);
196 intptr_t i;
197
198 for (i = 0; i < oprsz; i += sizeof(uint32_t)) {
199 *(uint32_t *)(d + i) = *(uint32_t *)(a + i) - (uint32_t)b;
200 }
201 clear_high(d, oprsz, desc);
202 }
203
204 void HELPER(gvec_subs64)(void *d, void *a, uint64_t b, uint32_t desc)
205 {
206 intptr_t oprsz = simd_oprsz(desc);
207 intptr_t i;
208
209 for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
210 *(uint64_t *)(d + i) = *(uint64_t *)(a + i) - b;
211 }
212 clear_high(d, oprsz, desc);
213 }
214
215 void HELPER(gvec_mul8)(void *d, void *a, void *b, uint32_t desc)
216 {
217 intptr_t oprsz = simd_oprsz(desc);
218 intptr_t i;
219
220 for (i = 0; i < oprsz; i += sizeof(uint8_t)) {
221 *(uint8_t *)(d + i) = *(uint8_t *)(a + i) * *(uint8_t *)(b + i);
222 }
223 clear_high(d, oprsz, desc);
224 }
225
226 void HELPER(gvec_mul16)(void *d, void *a, void *b, uint32_t desc)
227 {
228 intptr_t oprsz = simd_oprsz(desc);
229 intptr_t i;
230
231 for (i = 0; i < oprsz; i += sizeof(uint16_t)) {
232 *(uint16_t *)(d + i) = *(uint16_t *)(a + i) * *(uint16_t *)(b + i);
233 }
234 clear_high(d, oprsz, desc);
235 }
236
237 void HELPER(gvec_mul32)(void *d, void *a, void *b, uint32_t desc)
238 {
239 intptr_t oprsz = simd_oprsz(desc);
240 intptr_t i;
241
242 for (i = 0; i < oprsz; i += sizeof(uint32_t)) {
243 *(uint32_t *)(d + i) = *(uint32_t *)(a + i) * *(uint32_t *)(b + i);
244 }
245 clear_high(d, oprsz, desc);
246 }
247
248 void HELPER(gvec_mul64)(void *d, void *a, void *b, uint32_t desc)
249 {
250 intptr_t oprsz = simd_oprsz(desc);
251 intptr_t i;
252
253 for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
254 *(uint64_t *)(d + i) = *(uint64_t *)(a + i) * *(uint64_t *)(b + i);
255 }
256 clear_high(d, oprsz, desc);
257 }
258
259 void HELPER(gvec_muls8)(void *d, void *a, uint64_t b, uint32_t desc)
260 {
261 intptr_t oprsz = simd_oprsz(desc);
262 intptr_t i;
263
264 for (i = 0; i < oprsz; i += sizeof(uint8_t)) {
265 *(uint8_t *)(d + i) = *(uint8_t *)(a + i) * (uint8_t)b;
266 }
267 clear_high(d, oprsz, desc);
268 }
269
270 void HELPER(gvec_muls16)(void *d, void *a, uint64_t b, uint32_t desc)
271 {
272 intptr_t oprsz = simd_oprsz(desc);
273 intptr_t i;
274
275 for (i = 0; i < oprsz; i += sizeof(uint16_t)) {
276 *(uint16_t *)(d + i) = *(uint16_t *)(a + i) * (uint16_t)b;
277 }
278 clear_high(d, oprsz, desc);
279 }
280
281 void HELPER(gvec_muls32)(void *d, void *a, uint64_t b, uint32_t desc)
282 {
283 intptr_t oprsz = simd_oprsz(desc);
284 intptr_t i;
285
286 for (i = 0; i < oprsz; i += sizeof(uint32_t)) {
287 *(uint32_t *)(d + i) = *(uint32_t *)(a + i) * (uint32_t)b;
288 }
289 clear_high(d, oprsz, desc);
290 }
291
292 void HELPER(gvec_muls64)(void *d, void *a, uint64_t b, uint32_t desc)
293 {
294 intptr_t oprsz = simd_oprsz(desc);
295 intptr_t i;
296
297 for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
298 *(uint64_t *)(d + i) = *(uint64_t *)(a + i) * b;
299 }
300 clear_high(d, oprsz, desc);
301 }
302
303 void HELPER(gvec_neg8)(void *d, void *a, uint32_t desc)
304 {
305 intptr_t oprsz = simd_oprsz(desc);
306 intptr_t i;
307
308 for (i = 0; i < oprsz; i += sizeof(uint8_t)) {
309 *(uint8_t *)(d + i) = -*(uint8_t *)(a + i);
310 }
311 clear_high(d, oprsz, desc);
312 }
313
314 void HELPER(gvec_neg16)(void *d, void *a, uint32_t desc)
315 {
316 intptr_t oprsz = simd_oprsz(desc);
317 intptr_t i;
318
319 for (i = 0; i < oprsz; i += sizeof(uint16_t)) {
320 *(uint16_t *)(d + i) = -*(uint16_t *)(a + i);
321 }
322 clear_high(d, oprsz, desc);
323 }
324
325 void HELPER(gvec_neg32)(void *d, void *a, uint32_t desc)
326 {
327 intptr_t oprsz = simd_oprsz(desc);
328 intptr_t i;
329
330 for (i = 0; i < oprsz; i += sizeof(uint32_t)) {
331 *(uint32_t *)(d + i) = -*(uint32_t *)(a + i);
332 }
333 clear_high(d, oprsz, desc);
334 }
335
336 void HELPER(gvec_neg64)(void *d, void *a, uint32_t desc)
337 {
338 intptr_t oprsz = simd_oprsz(desc);
339 intptr_t i;
340
341 for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
342 *(uint64_t *)(d + i) = -*(uint64_t *)(a + i);
343 }
344 clear_high(d, oprsz, desc);
345 }
346
347 void HELPER(gvec_abs8)(void *d, void *a, uint32_t desc)
348 {
349 intptr_t oprsz = simd_oprsz(desc);
350 intptr_t i;
351
352 for (i = 0; i < oprsz; i += sizeof(int8_t)) {
353 int8_t aa = *(int8_t *)(a + i);
354 *(int8_t *)(d + i) = aa < 0 ? -aa : aa;
355 }
356 clear_high(d, oprsz, desc);
357 }
358
359 void HELPER(gvec_abs16)(void *d, void *a, uint32_t desc)
360 {
361 intptr_t oprsz = simd_oprsz(desc);
362 intptr_t i;
363
364 for (i = 0; i < oprsz; i += sizeof(int16_t)) {
365 int16_t aa = *(int16_t *)(a + i);
366 *(int16_t *)(d + i) = aa < 0 ? -aa : aa;
367 }
368 clear_high(d, oprsz, desc);
369 }
370
371 void HELPER(gvec_abs32)(void *d, void *a, uint32_t desc)
372 {
373 intptr_t oprsz = simd_oprsz(desc);
374 intptr_t i;
375
376 for (i = 0; i < oprsz; i += sizeof(int32_t)) {
377 int32_t aa = *(int32_t *)(a + i);
378 *(int32_t *)(d + i) = aa < 0 ? -aa : aa;
379 }
380 clear_high(d, oprsz, desc);
381 }
382
383 void HELPER(gvec_abs64)(void *d, void *a, uint32_t desc)
384 {
385 intptr_t oprsz = simd_oprsz(desc);
386 intptr_t i;
387
388 for (i = 0; i < oprsz; i += sizeof(int64_t)) {
389 int64_t aa = *(int64_t *)(a + i);
390 *(int64_t *)(d + i) = aa < 0 ? -aa : aa;
391 }
392 clear_high(d, oprsz, desc);
393 }
394
395 void HELPER(gvec_mov)(void *d, void *a, uint32_t desc)
396 {
397 intptr_t oprsz = simd_oprsz(desc);
398
399 memcpy(d, a, oprsz);
400 clear_high(d, oprsz, desc);
401 }
402
403 void HELPER(gvec_dup64)(void *d, uint32_t desc, uint64_t c)
404 {
405 intptr_t oprsz = simd_oprsz(desc);
406 intptr_t i;
407
408 if (c == 0) {
409 oprsz = 0;
410 } else {
411 for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
412 *(uint64_t *)(d + i) = c;
413 }
414 }
415 clear_high(d, oprsz, desc);
416 }
417
418 void HELPER(gvec_dup32)(void *d, uint32_t desc, uint32_t c)
419 {
420 intptr_t oprsz = simd_oprsz(desc);
421 intptr_t i;
422
423 if (c == 0) {
424 oprsz = 0;
425 } else {
426 for (i = 0; i < oprsz; i += sizeof(uint32_t)) {
427 *(uint32_t *)(d + i) = c;
428 }
429 }
430 clear_high(d, oprsz, desc);
431 }
432
433 void HELPER(gvec_dup16)(void *d, uint32_t desc, uint32_t c)
434 {
435 HELPER(gvec_dup32)(d, desc, 0x00010001 * (c & 0xffff));
436 }
437
438 void HELPER(gvec_dup8)(void *d, uint32_t desc, uint32_t c)
439 {
440 HELPER(gvec_dup32)(d, desc, 0x01010101 * (c & 0xff));
441 }
442
443 void HELPER(gvec_not)(void *d, void *a, uint32_t desc)
444 {
445 intptr_t oprsz = simd_oprsz(desc);
446 intptr_t i;
447
448 for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
449 *(uint64_t *)(d + i) = ~*(uint64_t *)(a + i);
450 }
451 clear_high(d, oprsz, desc);
452 }
453
454 void HELPER(gvec_and)(void *d, void *a, void *b, uint32_t desc)
455 {
456 intptr_t oprsz = simd_oprsz(desc);
457 intptr_t i;
458
459 for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
460 *(uint64_t *)(d + i) = *(uint64_t *)(a + i) & *(uint64_t *)(b + i);
461 }
462 clear_high(d, oprsz, desc);
463 }
464
465 void HELPER(gvec_or)(void *d, void *a, void *b, uint32_t desc)
466 {
467 intptr_t oprsz = simd_oprsz(desc);
468 intptr_t i;
469
470 for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
471 *(uint64_t *)(d + i) = *(uint64_t *)(a + i) | *(uint64_t *)(b + i);
472 }
473 clear_high(d, oprsz, desc);
474 }
475
476 void HELPER(gvec_xor)(void *d, void *a, void *b, uint32_t desc)
477 {
478 intptr_t oprsz = simd_oprsz(desc);
479 intptr_t i;
480
481 for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
482 *(uint64_t *)(d + i) = *(uint64_t *)(a + i) ^ *(uint64_t *)(b + i);
483 }
484 clear_high(d, oprsz, desc);
485 }
486
487 void HELPER(gvec_andc)(void *d, void *a, void *b, uint32_t desc)
488 {
489 intptr_t oprsz = simd_oprsz(desc);
490 intptr_t i;
491
492 for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
493 *(uint64_t *)(d + i) = *(uint64_t *)(a + i) &~ *(uint64_t *)(b + i);
494 }
495 clear_high(d, oprsz, desc);
496 }
497
498 void HELPER(gvec_orc)(void *d, void *a, void *b, uint32_t desc)
499 {
500 intptr_t oprsz = simd_oprsz(desc);
501 intptr_t i;
502
503 for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
504 *(uint64_t *)(d + i) = *(uint64_t *)(a + i) |~ *(uint64_t *)(b + i);
505 }
506 clear_high(d, oprsz, desc);
507 }
508
509 void HELPER(gvec_nand)(void *d, void *a, void *b, uint32_t desc)
510 {
511 intptr_t oprsz = simd_oprsz(desc);
512 intptr_t i;
513
514 for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
515 *(uint64_t *)(d + i) = ~(*(uint64_t *)(a + i) & *(uint64_t *)(b + i));
516 }
517 clear_high(d, oprsz, desc);
518 }
519
520 void HELPER(gvec_nor)(void *d, void *a, void *b, uint32_t desc)
521 {
522 intptr_t oprsz = simd_oprsz(desc);
523 intptr_t i;
524
525 for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
526 *(uint64_t *)(d + i) = ~(*(uint64_t *)(a + i) | *(uint64_t *)(b + i));
527 }
528 clear_high(d, oprsz, desc);
529 }
530
531 void HELPER(gvec_eqv)(void *d, void *a, void *b, uint32_t desc)
532 {
533 intptr_t oprsz = simd_oprsz(desc);
534 intptr_t i;
535
536 for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
537 *(uint64_t *)(d + i) = ~(*(uint64_t *)(a + i) ^ *(uint64_t *)(b + i));
538 }
539 clear_high(d, oprsz, desc);
540 }
541
542 void HELPER(gvec_ands)(void *d, void *a, uint64_t b, uint32_t desc)
543 {
544 intptr_t oprsz = simd_oprsz(desc);
545 intptr_t i;
546
547 for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
548 *(uint64_t *)(d + i) = *(uint64_t *)(a + i) & b;
549 }
550 clear_high(d, oprsz, desc);
551 }
552
553 void HELPER(gvec_xors)(void *d, void *a, uint64_t b, uint32_t desc)
554 {
555 intptr_t oprsz = simd_oprsz(desc);
556 intptr_t i;
557
558 for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
559 *(uint64_t *)(d + i) = *(uint64_t *)(a + i) ^ b;
560 }
561 clear_high(d, oprsz, desc);
562 }
563
564 void HELPER(gvec_ors)(void *d, void *a, uint64_t b, uint32_t desc)
565 {
566 intptr_t oprsz = simd_oprsz(desc);
567 intptr_t i;
568
569 for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
570 *(uint64_t *)(d + i) = *(uint64_t *)(a + i) | b;
571 }
572 clear_high(d, oprsz, desc);
573 }
574
575 void HELPER(gvec_shl8i)(void *d, void *a, uint32_t desc)
576 {
577 intptr_t oprsz = simd_oprsz(desc);
578 int shift = simd_data(desc);
579 intptr_t i;
580
581 for (i = 0; i < oprsz; i += sizeof(uint8_t)) {
582 *(uint8_t *)(d + i) = *(uint8_t *)(a + i) << shift;
583 }
584 clear_high(d, oprsz, desc);
585 }
586
587 void HELPER(gvec_shl16i)(void *d, void *a, uint32_t desc)
588 {
589 intptr_t oprsz = simd_oprsz(desc);
590 int shift = simd_data(desc);
591 intptr_t i;
592
593 for (i = 0; i < oprsz; i += sizeof(uint16_t)) {
594 *(uint16_t *)(d + i) = *(uint16_t *)(a + i) << shift;
595 }
596 clear_high(d, oprsz, desc);
597 }
598
599 void HELPER(gvec_shl32i)(void *d, void *a, uint32_t desc)
600 {
601 intptr_t oprsz = simd_oprsz(desc);
602 int shift = simd_data(desc);
603 intptr_t i;
604
605 for (i = 0; i < oprsz; i += sizeof(uint32_t)) {
606 *(uint32_t *)(d + i) = *(uint32_t *)(a + i) << shift;
607 }
608 clear_high(d, oprsz, desc);
609 }
610
611 void HELPER(gvec_shl64i)(void *d, void *a, uint32_t desc)
612 {
613 intptr_t oprsz = simd_oprsz(desc);
614 int shift = simd_data(desc);
615 intptr_t i;
616
617 for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
618 *(uint64_t *)(d + i) = *(uint64_t *)(a + i) << shift;
619 }
620 clear_high(d, oprsz, desc);
621 }
622
623 void HELPER(gvec_shr8i)(void *d, void *a, uint32_t desc)
624 {
625 intptr_t oprsz = simd_oprsz(desc);
626 int shift = simd_data(desc);
627 intptr_t i;
628
629 for (i = 0; i < oprsz; i += sizeof(uint8_t)) {
630 *(uint8_t *)(d + i) = *(uint8_t *)(a + i) >> shift;
631 }
632 clear_high(d, oprsz, desc);
633 }
634
635 void HELPER(gvec_shr16i)(void *d, void *a, uint32_t desc)
636 {
637 intptr_t oprsz = simd_oprsz(desc);
638 int shift = simd_data(desc);
639 intptr_t i;
640
641 for (i = 0; i < oprsz; i += sizeof(uint16_t)) {
642 *(uint16_t *)(d + i) = *(uint16_t *)(a + i) >> shift;
643 }
644 clear_high(d, oprsz, desc);
645 }
646
647 void HELPER(gvec_shr32i)(void *d, void *a, uint32_t desc)
648 {
649 intptr_t oprsz = simd_oprsz(desc);
650 int shift = simd_data(desc);
651 intptr_t i;
652
653 for (i = 0; i < oprsz; i += sizeof(uint32_t)) {
654 *(uint32_t *)(d + i) = *(uint32_t *)(a + i) >> shift;
655 }
656 clear_high(d, oprsz, desc);
657 }
658
659 void HELPER(gvec_shr64i)(void *d, void *a, uint32_t desc)
660 {
661 intptr_t oprsz = simd_oprsz(desc);
662 int shift = simd_data(desc);
663 intptr_t i;
664
665 for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
666 *(uint64_t *)(d + i) = *(uint64_t *)(a + i) >> shift;
667 }
668 clear_high(d, oprsz, desc);
669 }
670
671 void HELPER(gvec_sar8i)(void *d, void *a, uint32_t desc)
672 {
673 intptr_t oprsz = simd_oprsz(desc);
674 int shift = simd_data(desc);
675 intptr_t i;
676
677 for (i = 0; i < oprsz; i += sizeof(uint8_t)) {
678 *(int8_t *)(d + i) = *(int8_t *)(a + i) >> shift;
679 }
680 clear_high(d, oprsz, desc);
681 }
682
683 void HELPER(gvec_sar16i)(void *d, void *a, uint32_t desc)
684 {
685 intptr_t oprsz = simd_oprsz(desc);
686 int shift = simd_data(desc);
687 intptr_t i;
688
689 for (i = 0; i < oprsz; i += sizeof(uint16_t)) {
690 *(int16_t *)(d + i) = *(int16_t *)(a + i) >> shift;
691 }
692 clear_high(d, oprsz, desc);
693 }
694
695 void HELPER(gvec_sar32i)(void *d, void *a, uint32_t desc)
696 {
697 intptr_t oprsz = simd_oprsz(desc);
698 int shift = simd_data(desc);
699 intptr_t i;
700
701 for (i = 0; i < oprsz; i += sizeof(uint32_t)) {
702 *(int32_t *)(d + i) = *(int32_t *)(a + i) >> shift;
703 }
704 clear_high(d, oprsz, desc);
705 }
706
707 void HELPER(gvec_sar64i)(void *d, void *a, uint32_t desc)
708 {
709 intptr_t oprsz = simd_oprsz(desc);
710 int shift = simd_data(desc);
711 intptr_t i;
712
713 for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
714 *(int64_t *)(d + i) = *(int64_t *)(a + i) >> shift;
715 }
716 clear_high(d, oprsz, desc);
717 }
718
719 void HELPER(gvec_rotl8i)(void *d, void *a, uint32_t desc)
720 {
721 intptr_t oprsz = simd_oprsz(desc);
722 int shift = simd_data(desc);
723 intptr_t i;
724
725 for (i = 0; i < oprsz; i += sizeof(uint8_t)) {
726 *(uint8_t *)(d + i) = rol8(*(uint8_t *)(a + i), shift);
727 }
728 clear_high(d, oprsz, desc);
729 }
730
731 void HELPER(gvec_rotl16i)(void *d, void *a, uint32_t desc)
732 {
733 intptr_t oprsz = simd_oprsz(desc);
734 int shift = simd_data(desc);
735 intptr_t i;
736
737 for (i = 0; i < oprsz; i += sizeof(uint16_t)) {
738 *(uint16_t *)(d + i) = rol16(*(uint16_t *)(a + i), shift);
739 }
740 clear_high(d, oprsz, desc);
741 }
742
743 void HELPER(gvec_rotl32i)(void *d, void *a, uint32_t desc)
744 {
745 intptr_t oprsz = simd_oprsz(desc);
746 int shift = simd_data(desc);
747 intptr_t i;
748
749 for (i = 0; i < oprsz; i += sizeof(uint32_t)) {
750 *(uint32_t *)(d + i) = rol32(*(uint32_t *)(a + i), shift);
751 }
752 clear_high(d, oprsz, desc);
753 }
754
755 void HELPER(gvec_rotl64i)(void *d, void *a, uint32_t desc)
756 {
757 intptr_t oprsz = simd_oprsz(desc);
758 int shift = simd_data(desc);
759 intptr_t i;
760
761 for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
762 *(uint64_t *)(d + i) = rol64(*(uint64_t *)(a + i), shift);
763 }
764 clear_high(d, oprsz, desc);
765 }
766
767 void HELPER(gvec_shl8v)(void *d, void *a, void *b, uint32_t desc)
768 {
769 intptr_t oprsz = simd_oprsz(desc);
770 intptr_t i;
771
772 for (i = 0; i < oprsz; i += sizeof(uint8_t)) {
773 uint8_t sh = *(uint8_t *)(b + i) & 7;
774 *(uint8_t *)(d + i) = *(uint8_t *)(a + i) << sh;
775 }
776 clear_high(d, oprsz, desc);
777 }
778
779 void HELPER(gvec_shl16v)(void *d, void *a, void *b, uint32_t desc)
780 {
781 intptr_t oprsz = simd_oprsz(desc);
782 intptr_t i;
783
784 for (i = 0; i < oprsz; i += sizeof(uint16_t)) {
785 uint8_t sh = *(uint16_t *)(b + i) & 15;
786 *(uint16_t *)(d + i) = *(uint16_t *)(a + i) << sh;
787 }
788 clear_high(d, oprsz, desc);
789 }
790
791 void HELPER(gvec_shl32v)(void *d, void *a, void *b, uint32_t desc)
792 {
793 intptr_t oprsz = simd_oprsz(desc);
794 intptr_t i;
795
796 for (i = 0; i < oprsz; i += sizeof(uint32_t)) {
797 uint8_t sh = *(uint32_t *)(b + i) & 31;
798 *(uint32_t *)(d + i) = *(uint32_t *)(a + i) << sh;
799 }
800 clear_high(d, oprsz, desc);
801 }
802
803 void HELPER(gvec_shl64v)(void *d, void *a, void *b, uint32_t desc)
804 {
805 intptr_t oprsz = simd_oprsz(desc);
806 intptr_t i;
807
808 for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
809 uint8_t sh = *(uint64_t *)(b + i) & 63;
810 *(uint64_t *)(d + i) = *(uint64_t *)(a + i) << sh;
811 }
812 clear_high(d, oprsz, desc);
813 }
814
815 void HELPER(gvec_shr8v)(void *d, void *a, void *b, uint32_t desc)
816 {
817 intptr_t oprsz = simd_oprsz(desc);
818 intptr_t i;
819
820 for (i = 0; i < oprsz; i += sizeof(uint8_t)) {
821 uint8_t sh = *(uint8_t *)(b + i) & 7;
822 *(uint8_t *)(d + i) = *(uint8_t *)(a + i) >> sh;
823 }
824 clear_high(d, oprsz, desc);
825 }
826
827 void HELPER(gvec_shr16v)(void *d, void *a, void *b, uint32_t desc)
828 {
829 intptr_t oprsz = simd_oprsz(desc);
830 intptr_t i;
831
832 for (i = 0; i < oprsz; i += sizeof(uint16_t)) {
833 uint8_t sh = *(uint16_t *)(b + i) & 15;
834 *(uint16_t *)(d + i) = *(uint16_t *)(a + i) >> sh;
835 }
836 clear_high(d, oprsz, desc);
837 }
838
839 void HELPER(gvec_shr32v)(void *d, void *a, void *b, uint32_t desc)
840 {
841 intptr_t oprsz = simd_oprsz(desc);
842 intptr_t i;
843
844 for (i = 0; i < oprsz; i += sizeof(uint32_t)) {
845 uint8_t sh = *(uint32_t *)(b + i) & 31;
846 *(uint32_t *)(d + i) = *(uint32_t *)(a + i) >> sh;
847 }
848 clear_high(d, oprsz, desc);
849 }
850
851 void HELPER(gvec_shr64v)(void *d, void *a, void *b, uint32_t desc)
852 {
853 intptr_t oprsz = simd_oprsz(desc);
854 intptr_t i;
855
856 for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
857 uint8_t sh = *(uint64_t *)(b + i) & 63;
858 *(uint64_t *)(d + i) = *(uint64_t *)(a + i) >> sh;
859 }
860 clear_high(d, oprsz, desc);
861 }
862
863 void HELPER(gvec_sar8v)(void *d, void *a, void *b, uint32_t desc)
864 {
865 intptr_t oprsz = simd_oprsz(desc);
866 intptr_t i;
867
868 for (i = 0; i < oprsz; i += sizeof(int8_t)) {
869 uint8_t sh = *(uint8_t *)(b + i) & 7;
870 *(int8_t *)(d + i) = *(int8_t *)(a + i) >> sh;
871 }
872 clear_high(d, oprsz, desc);
873 }
874
875 void HELPER(gvec_sar16v)(void *d, void *a, void *b, uint32_t desc)
876 {
877 intptr_t oprsz = simd_oprsz(desc);
878 intptr_t i;
879
880 for (i = 0; i < oprsz; i += sizeof(int16_t)) {
881 uint8_t sh = *(uint16_t *)(b + i) & 15;
882 *(int16_t *)(d + i) = *(int16_t *)(a + i) >> sh;
883 }
884 clear_high(d, oprsz, desc);
885 }
886
887 void HELPER(gvec_sar32v)(void *d, void *a, void *b, uint32_t desc)
888 {
889 intptr_t oprsz = simd_oprsz(desc);
890 intptr_t i;
891
892 for (i = 0; i < oprsz; i += sizeof(int32_t)) {
893 uint8_t sh = *(uint32_t *)(b + i) & 31;
894 *(int32_t *)(d + i) = *(int32_t *)(a + i) >> sh;
895 }
896 clear_high(d, oprsz, desc);
897 }
898
899 void HELPER(gvec_sar64v)(void *d, void *a, void *b, uint32_t desc)
900 {
901 intptr_t oprsz = simd_oprsz(desc);
902 intptr_t i;
903
904 for (i = 0; i < oprsz; i += sizeof(int64_t)) {
905 uint8_t sh = *(uint64_t *)(b + i) & 63;
906 *(int64_t *)(d + i) = *(int64_t *)(a + i) >> sh;
907 }
908 clear_high(d, oprsz, desc);
909 }
910
911 void HELPER(gvec_rotl8v)(void *d, void *a, void *b, uint32_t desc)
912 {
913 intptr_t oprsz = simd_oprsz(desc);
914 intptr_t i;
915
916 for (i = 0; i < oprsz; i += sizeof(uint8_t)) {
917 uint8_t sh = *(uint8_t *)(b + i) & 7;
918 *(uint8_t *)(d + i) = rol8(*(uint8_t *)(a + i), sh);
919 }
920 clear_high(d, oprsz, desc);
921 }
922
923 void HELPER(gvec_rotl16v)(void *d, void *a, void *b, uint32_t desc)
924 {
925 intptr_t oprsz = simd_oprsz(desc);
926 intptr_t i;
927
928 for (i = 0; i < oprsz; i += sizeof(uint16_t)) {
929 uint8_t sh = *(uint16_t *)(b + i) & 15;
930 *(uint16_t *)(d + i) = rol16(*(uint16_t *)(a + i), sh);
931 }
932 clear_high(d, oprsz, desc);
933 }
934
935 void HELPER(gvec_rotl32v)(void *d, void *a, void *b, uint32_t desc)
936 {
937 intptr_t oprsz = simd_oprsz(desc);
938 intptr_t i;
939
940 for (i = 0; i < oprsz; i += sizeof(uint32_t)) {
941 uint8_t sh = *(uint32_t *)(b + i) & 31;
942 *(uint32_t *)(d + i) = rol32(*(uint32_t *)(a + i), sh);
943 }
944 clear_high(d, oprsz, desc);
945 }
946
947 void HELPER(gvec_rotl64v)(void *d, void *a, void *b, uint32_t desc)
948 {
949 intptr_t oprsz = simd_oprsz(desc);
950 intptr_t i;
951
952 for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
953 uint8_t sh = *(uint64_t *)(b + i) & 63;
954 *(uint64_t *)(d + i) = rol64(*(uint64_t *)(a + i), sh);
955 }
956 clear_high(d, oprsz, desc);
957 }
958
959 void HELPER(gvec_rotr8v)(void *d, void *a, void *b, uint32_t desc)
960 {
961 intptr_t oprsz = simd_oprsz(desc);
962 intptr_t i;
963
964 for (i = 0; i < oprsz; i += sizeof(uint8_t)) {
965 uint8_t sh = *(uint8_t *)(b + i) & 7;
966 *(uint8_t *)(d + i) = ror8(*(uint8_t *)(a + i), sh);
967 }
968 clear_high(d, oprsz, desc);
969 }
970
971 void HELPER(gvec_rotr16v)(void *d, void *a, void *b, uint32_t desc)
972 {
973 intptr_t oprsz = simd_oprsz(desc);
974 intptr_t i;
975
976 for (i = 0; i < oprsz; i += sizeof(uint16_t)) {
977 uint8_t sh = *(uint16_t *)(b + i) & 15;
978 *(uint16_t *)(d + i) = ror16(*(uint16_t *)(a + i), sh);
979 }
980 clear_high(d, oprsz, desc);
981 }
982
983 void HELPER(gvec_rotr32v)(void *d, void *a, void *b, uint32_t desc)
984 {
985 intptr_t oprsz = simd_oprsz(desc);
986 intptr_t i;
987
988 for (i = 0; i < oprsz; i += sizeof(uint32_t)) {
989 uint8_t sh = *(uint32_t *)(b + i) & 31;
990 *(uint32_t *)(d + i) = ror32(*(uint32_t *)(a + i), sh);
991 }
992 clear_high(d, oprsz, desc);
993 }
994
995 void HELPER(gvec_rotr64v)(void *d, void *a, void *b, uint32_t desc)
996 {
997 intptr_t oprsz = simd_oprsz(desc);
998 intptr_t i;
999
1000 for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
1001 uint8_t sh = *(uint64_t *)(b + i) & 63;
1002 *(uint64_t *)(d + i) = ror64(*(uint64_t *)(a + i), sh);
1003 }
1004 clear_high(d, oprsz, desc);
1005 }
1006
1007 #define DO_CMP1(NAME, TYPE, OP) \
1008 void HELPER(NAME)(void *d, void *a, void *b, uint32_t desc) \
1009 { \
1010 intptr_t oprsz = simd_oprsz(desc); \
1011 intptr_t i; \
1012 for (i = 0; i < oprsz; i += sizeof(TYPE)) { \
1013 *(TYPE *)(d + i) = -(*(TYPE *)(a + i) OP *(TYPE *)(b + i)); \
1014 } \
1015 clear_high(d, oprsz, desc); \
1016 }
1017
1018 #define DO_CMP2(SZ) \
1019 DO_CMP1(gvec_eq##SZ, uint##SZ##_t, ==) \
1020 DO_CMP1(gvec_ne##SZ, uint##SZ##_t, !=) \
1021 DO_CMP1(gvec_lt##SZ, int##SZ##_t, <) \
1022 DO_CMP1(gvec_le##SZ, int##SZ##_t, <=) \
1023 DO_CMP1(gvec_ltu##SZ, uint##SZ##_t, <) \
1024 DO_CMP1(gvec_leu##SZ, uint##SZ##_t, <=)
1025
1026 DO_CMP2(8)
1027 DO_CMP2(16)
1028 DO_CMP2(32)
1029 DO_CMP2(64)
1030
1031 #undef DO_CMP1
1032 #undef DO_CMP2
1033
1034 void HELPER(gvec_ssadd8)(void *d, void *a, void *b, uint32_t desc)
1035 {
1036 intptr_t oprsz = simd_oprsz(desc);
1037 intptr_t i;
1038
1039 for (i = 0; i < oprsz; i += sizeof(int8_t)) {
1040 int r = *(int8_t *)(a + i) + *(int8_t *)(b + i);
1041 if (r > INT8_MAX) {
1042 r = INT8_MAX;
1043 } else if (r < INT8_MIN) {
1044 r = INT8_MIN;
1045 }
1046 *(int8_t *)(d + i) = r;
1047 }
1048 clear_high(d, oprsz, desc);
1049 }
1050
1051 void HELPER(gvec_ssadd16)(void *d, void *a, void *b, uint32_t desc)
1052 {
1053 intptr_t oprsz = simd_oprsz(desc);
1054 intptr_t i;
1055
1056 for (i = 0; i < oprsz; i += sizeof(int16_t)) {
1057 int r = *(int16_t *)(a + i) + *(int16_t *)(b + i);
1058 if (r > INT16_MAX) {
1059 r = INT16_MAX;
1060 } else if (r < INT16_MIN) {
1061 r = INT16_MIN;
1062 }
1063 *(int16_t *)(d + i) = r;
1064 }
1065 clear_high(d, oprsz, desc);
1066 }
1067
1068 void HELPER(gvec_ssadd32)(void *d, void *a, void *b, uint32_t desc)
1069 {
1070 intptr_t oprsz = simd_oprsz(desc);
1071 intptr_t i;
1072
1073 for (i = 0; i < oprsz; i += sizeof(int32_t)) {
1074 int32_t ai = *(int32_t *)(a + i);
1075 int32_t bi = *(int32_t *)(b + i);
1076 int32_t di = ai + bi;
1077 if (((di ^ ai) &~ (ai ^ bi)) < 0) {
1078 /* Signed overflow. */
1079 di = (di < 0 ? INT32_MAX : INT32_MIN);
1080 }
1081 *(int32_t *)(d + i) = di;
1082 }
1083 clear_high(d, oprsz, desc);
1084 }
1085
1086 void HELPER(gvec_ssadd64)(void *d, void *a, void *b, uint32_t desc)
1087 {
1088 intptr_t oprsz = simd_oprsz(desc);
1089 intptr_t i;
1090
1091 for (i = 0; i < oprsz; i += sizeof(int64_t)) {
1092 int64_t ai = *(int64_t *)(a + i);
1093 int64_t bi = *(int64_t *)(b + i);
1094 int64_t di = ai + bi;
1095 if (((di ^ ai) &~ (ai ^ bi)) < 0) {
1096 /* Signed overflow. */
1097 di = (di < 0 ? INT64_MAX : INT64_MIN);
1098 }
1099 *(int64_t *)(d + i) = di;
1100 }
1101 clear_high(d, oprsz, desc);
1102 }
1103
1104 void HELPER(gvec_sssub8)(void *d, void *a, void *b, uint32_t desc)
1105 {
1106 intptr_t oprsz = simd_oprsz(desc);
1107 intptr_t i;
1108
1109 for (i = 0; i < oprsz; i += sizeof(uint8_t)) {
1110 int r = *(int8_t *)(a + i) - *(int8_t *)(b + i);
1111 if (r > INT8_MAX) {
1112 r = INT8_MAX;
1113 } else if (r < INT8_MIN) {
1114 r = INT8_MIN;
1115 }
1116 *(uint8_t *)(d + i) = r;
1117 }
1118 clear_high(d, oprsz, desc);
1119 }
1120
1121 void HELPER(gvec_sssub16)(void *d, void *a, void *b, uint32_t desc)
1122 {
1123 intptr_t oprsz = simd_oprsz(desc);
1124 intptr_t i;
1125
1126 for (i = 0; i < oprsz; i += sizeof(int16_t)) {
1127 int r = *(int16_t *)(a + i) - *(int16_t *)(b + i);
1128 if (r > INT16_MAX) {
1129 r = INT16_MAX;
1130 } else if (r < INT16_MIN) {
1131 r = INT16_MIN;
1132 }
1133 *(int16_t *)(d + i) = r;
1134 }
1135 clear_high(d, oprsz, desc);
1136 }
1137
1138 void HELPER(gvec_sssub32)(void *d, void *a, void *b, uint32_t desc)
1139 {
1140 intptr_t oprsz = simd_oprsz(desc);
1141 intptr_t i;
1142
1143 for (i = 0; i < oprsz; i += sizeof(int32_t)) {
1144 int32_t ai = *(int32_t *)(a + i);
1145 int32_t bi = *(int32_t *)(b + i);
1146 int32_t di = ai - bi;
1147 if (((di ^ ai) & (ai ^ bi)) < 0) {
1148 /* Signed overflow. */
1149 di = (di < 0 ? INT32_MAX : INT32_MIN);
1150 }
1151 *(int32_t *)(d + i) = di;
1152 }
1153 clear_high(d, oprsz, desc);
1154 }
1155
1156 void HELPER(gvec_sssub64)(void *d, void *a, void *b, uint32_t desc)
1157 {
1158 intptr_t oprsz = simd_oprsz(desc);
1159 intptr_t i;
1160
1161 for (i = 0; i < oprsz; i += sizeof(int64_t)) {
1162 int64_t ai = *(int64_t *)(a + i);
1163 int64_t bi = *(int64_t *)(b + i);
1164 int64_t di = ai - bi;
1165 if (((di ^ ai) & (ai ^ bi)) < 0) {
1166 /* Signed overflow. */
1167 di = (di < 0 ? INT64_MAX : INT64_MIN);
1168 }
1169 *(int64_t *)(d + i) = di;
1170 }
1171 clear_high(d, oprsz, desc);
1172 }
1173
1174 void HELPER(gvec_usadd8)(void *d, void *a, void *b, uint32_t desc)
1175 {
1176 intptr_t oprsz = simd_oprsz(desc);
1177 intptr_t i;
1178
1179 for (i = 0; i < oprsz; i += sizeof(uint8_t)) {
1180 unsigned r = *(uint8_t *)(a + i) + *(uint8_t *)(b + i);
1181 if (r > UINT8_MAX) {
1182 r = UINT8_MAX;
1183 }
1184 *(uint8_t *)(d + i) = r;
1185 }
1186 clear_high(d, oprsz, desc);
1187 }
1188
1189 void HELPER(gvec_usadd16)(void *d, void *a, void *b, uint32_t desc)
1190 {
1191 intptr_t oprsz = simd_oprsz(desc);
1192 intptr_t i;
1193
1194 for (i = 0; i < oprsz; i += sizeof(uint16_t)) {
1195 unsigned r = *(uint16_t *)(a + i) + *(uint16_t *)(b + i);
1196 if (r > UINT16_MAX) {
1197 r = UINT16_MAX;
1198 }
1199 *(uint16_t *)(d + i) = r;
1200 }
1201 clear_high(d, oprsz, desc);
1202 }
1203
1204 void HELPER(gvec_usadd32)(void *d, void *a, void *b, uint32_t desc)
1205 {
1206 intptr_t oprsz = simd_oprsz(desc);
1207 intptr_t i;
1208
1209 for (i = 0; i < oprsz; i += sizeof(uint32_t)) {
1210 uint32_t ai = *(uint32_t *)(a + i);
1211 uint32_t bi = *(uint32_t *)(b + i);
1212 uint32_t di = ai + bi;
1213 if (di < ai) {
1214 di = UINT32_MAX;
1215 }
1216 *(uint32_t *)(d + i) = di;
1217 }
1218 clear_high(d, oprsz, desc);
1219 }
1220
1221 void HELPER(gvec_usadd64)(void *d, void *a, void *b, uint32_t desc)
1222 {
1223 intptr_t oprsz = simd_oprsz(desc);
1224 intptr_t i;
1225
1226 for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
1227 uint64_t ai = *(uint64_t *)(a + i);
1228 uint64_t bi = *(uint64_t *)(b + i);
1229 uint64_t di = ai + bi;
1230 if (di < ai) {
1231 di = UINT64_MAX;
1232 }
1233 *(uint64_t *)(d + i) = di;
1234 }
1235 clear_high(d, oprsz, desc);
1236 }
1237
1238 void HELPER(gvec_ussub8)(void *d, void *a, void *b, uint32_t desc)
1239 {
1240 intptr_t oprsz = simd_oprsz(desc);
1241 intptr_t i;
1242
1243 for (i = 0; i < oprsz; i += sizeof(uint8_t)) {
1244 int r = *(uint8_t *)(a + i) - *(uint8_t *)(b + i);
1245 if (r < 0) {
1246 r = 0;
1247 }
1248 *(uint8_t *)(d + i) = r;
1249 }
1250 clear_high(d, oprsz, desc);
1251 }
1252
1253 void HELPER(gvec_ussub16)(void *d, void *a, void *b, uint32_t desc)
1254 {
1255 intptr_t oprsz = simd_oprsz(desc);
1256 intptr_t i;
1257
1258 for (i = 0; i < oprsz; i += sizeof(uint16_t)) {
1259 int r = *(uint16_t *)(a + i) - *(uint16_t *)(b + i);
1260 if (r < 0) {
1261 r = 0;
1262 }
1263 *(uint16_t *)(d + i) = r;
1264 }
1265 clear_high(d, oprsz, desc);
1266 }
1267
1268 void HELPER(gvec_ussub32)(void *d, void *a, void *b, uint32_t desc)
1269 {
1270 intptr_t oprsz = simd_oprsz(desc);
1271 intptr_t i;
1272
1273 for (i = 0; i < oprsz; i += sizeof(uint32_t)) {
1274 uint32_t ai = *(uint32_t *)(a + i);
1275 uint32_t bi = *(uint32_t *)(b + i);
1276 uint32_t di = ai - bi;
1277 if (ai < bi) {
1278 di = 0;
1279 }
1280 *(uint32_t *)(d + i) = di;
1281 }
1282 clear_high(d, oprsz, desc);
1283 }
1284
1285 void HELPER(gvec_ussub64)(void *d, void *a, void *b, uint32_t desc)
1286 {
1287 intptr_t oprsz = simd_oprsz(desc);
1288 intptr_t i;
1289
1290 for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
1291 uint64_t ai = *(uint64_t *)(a + i);
1292 uint64_t bi = *(uint64_t *)(b + i);
1293 uint64_t di = ai - bi;
1294 if (ai < bi) {
1295 di = 0;
1296 }
1297 *(uint64_t *)(d + i) = di;
1298 }
1299 clear_high(d, oprsz, desc);
1300 }
1301
1302 void HELPER(gvec_smin8)(void *d, void *a, void *b, uint32_t desc)
1303 {
1304 intptr_t oprsz = simd_oprsz(desc);
1305 intptr_t i;
1306
1307 for (i = 0; i < oprsz; i += sizeof(int8_t)) {
1308 int8_t aa = *(int8_t *)(a + i);
1309 int8_t bb = *(int8_t *)(b + i);
1310 int8_t dd = aa < bb ? aa : bb;
1311 *(int8_t *)(d + i) = dd;
1312 }
1313 clear_high(d, oprsz, desc);
1314 }
1315
1316 void HELPER(gvec_smin16)(void *d, void *a, void *b, uint32_t desc)
1317 {
1318 intptr_t oprsz = simd_oprsz(desc);
1319 intptr_t i;
1320
1321 for (i = 0; i < oprsz; i += sizeof(int16_t)) {
1322 int16_t aa = *(int16_t *)(a + i);
1323 int16_t bb = *(int16_t *)(b + i);
1324 int16_t dd = aa < bb ? aa : bb;
1325 *(int16_t *)(d + i) = dd;
1326 }
1327 clear_high(d, oprsz, desc);
1328 }
1329
1330 void HELPER(gvec_smin32)(void *d, void *a, void *b, uint32_t desc)
1331 {
1332 intptr_t oprsz = simd_oprsz(desc);
1333 intptr_t i;
1334
1335 for (i = 0; i < oprsz; i += sizeof(int32_t)) {
1336 int32_t aa = *(int32_t *)(a + i);
1337 int32_t bb = *(int32_t *)(b + i);
1338 int32_t dd = aa < bb ? aa : bb;
1339 *(int32_t *)(d + i) = dd;
1340 }
1341 clear_high(d, oprsz, desc);
1342 }
1343
1344 void HELPER(gvec_smin64)(void *d, void *a, void *b, uint32_t desc)
1345 {
1346 intptr_t oprsz = simd_oprsz(desc);
1347 intptr_t i;
1348
1349 for (i = 0; i < oprsz; i += sizeof(int64_t)) {
1350 int64_t aa = *(int64_t *)(a + i);
1351 int64_t bb = *(int64_t *)(b + i);
1352 int64_t dd = aa < bb ? aa : bb;
1353 *(int64_t *)(d + i) = dd;
1354 }
1355 clear_high(d, oprsz, desc);
1356 }
1357
1358 void HELPER(gvec_smax8)(void *d, void *a, void *b, uint32_t desc)
1359 {
1360 intptr_t oprsz = simd_oprsz(desc);
1361 intptr_t i;
1362
1363 for (i = 0; i < oprsz; i += sizeof(int8_t)) {
1364 int8_t aa = *(int8_t *)(a + i);
1365 int8_t bb = *(int8_t *)(b + i);
1366 int8_t dd = aa > bb ? aa : bb;
1367 *(int8_t *)(d + i) = dd;
1368 }
1369 clear_high(d, oprsz, desc);
1370 }
1371
1372 void HELPER(gvec_smax16)(void *d, void *a, void *b, uint32_t desc)
1373 {
1374 intptr_t oprsz = simd_oprsz(desc);
1375 intptr_t i;
1376
1377 for (i = 0; i < oprsz; i += sizeof(int16_t)) {
1378 int16_t aa = *(int16_t *)(a + i);
1379 int16_t bb = *(int16_t *)(b + i);
1380 int16_t dd = aa > bb ? aa : bb;
1381 *(int16_t *)(d + i) = dd;
1382 }
1383 clear_high(d, oprsz, desc);
1384 }
1385
1386 void HELPER(gvec_smax32)(void *d, void *a, void *b, uint32_t desc)
1387 {
1388 intptr_t oprsz = simd_oprsz(desc);
1389 intptr_t i;
1390
1391 for (i = 0; i < oprsz; i += sizeof(int32_t)) {
1392 int32_t aa = *(int32_t *)(a + i);
1393 int32_t bb = *(int32_t *)(b + i);
1394 int32_t dd = aa > bb ? aa : bb;
1395 *(int32_t *)(d + i) = dd;
1396 }
1397 clear_high(d, oprsz, desc);
1398 }
1399
1400 void HELPER(gvec_smax64)(void *d, void *a, void *b, uint32_t desc)
1401 {
1402 intptr_t oprsz = simd_oprsz(desc);
1403 intptr_t i;
1404
1405 for (i = 0; i < oprsz; i += sizeof(int64_t)) {
1406 int64_t aa = *(int64_t *)(a + i);
1407 int64_t bb = *(int64_t *)(b + i);
1408 int64_t dd = aa > bb ? aa : bb;
1409 *(int64_t *)(d + i) = dd;
1410 }
1411 clear_high(d, oprsz, desc);
1412 }
1413
1414 void HELPER(gvec_umin8)(void *d, void *a, void *b, uint32_t desc)
1415 {
1416 intptr_t oprsz = simd_oprsz(desc);
1417 intptr_t i;
1418
1419 for (i = 0; i < oprsz; i += sizeof(uint8_t)) {
1420 uint8_t aa = *(uint8_t *)(a + i);
1421 uint8_t bb = *(uint8_t *)(b + i);
1422 uint8_t dd = aa < bb ? aa : bb;
1423 *(uint8_t *)(d + i) = dd;
1424 }
1425 clear_high(d, oprsz, desc);
1426 }
1427
1428 void HELPER(gvec_umin16)(void *d, void *a, void *b, uint32_t desc)
1429 {
1430 intptr_t oprsz = simd_oprsz(desc);
1431 intptr_t i;
1432
1433 for (i = 0; i < oprsz; i += sizeof(uint16_t)) {
1434 uint16_t aa = *(uint16_t *)(a + i);
1435 uint16_t bb = *(uint16_t *)(b + i);
1436 uint16_t dd = aa < bb ? aa : bb;
1437 *(uint16_t *)(d + i) = dd;
1438 }
1439 clear_high(d, oprsz, desc);
1440 }
1441
1442 void HELPER(gvec_umin32)(void *d, void *a, void *b, uint32_t desc)
1443 {
1444 intptr_t oprsz = simd_oprsz(desc);
1445 intptr_t i;
1446
1447 for (i = 0; i < oprsz; i += sizeof(uint32_t)) {
1448 uint32_t aa = *(uint32_t *)(a + i);
1449 uint32_t bb = *(uint32_t *)(b + i);
1450 uint32_t dd = aa < bb ? aa : bb;
1451 *(uint32_t *)(d + i) = dd;
1452 }
1453 clear_high(d, oprsz, desc);
1454 }
1455
1456 void HELPER(gvec_umin64)(void *d, void *a, void *b, uint32_t desc)
1457 {
1458 intptr_t oprsz = simd_oprsz(desc);
1459 intptr_t i;
1460
1461 for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
1462 uint64_t aa = *(uint64_t *)(a + i);
1463 uint64_t bb = *(uint64_t *)(b + i);
1464 uint64_t dd = aa < bb ? aa : bb;
1465 *(uint64_t *)(d + i) = dd;
1466 }
1467 clear_high(d, oprsz, desc);
1468 }
1469
1470 void HELPER(gvec_umax8)(void *d, void *a, void *b, uint32_t desc)
1471 {
1472 intptr_t oprsz = simd_oprsz(desc);
1473 intptr_t i;
1474
1475 for (i = 0; i < oprsz; i += sizeof(uint8_t)) {
1476 uint8_t aa = *(uint8_t *)(a + i);
1477 uint8_t bb = *(uint8_t *)(b + i);
1478 uint8_t dd = aa > bb ? aa : bb;
1479 *(uint8_t *)(d + i) = dd;
1480 }
1481 clear_high(d, oprsz, desc);
1482 }
1483
1484 void HELPER(gvec_umax16)(void *d, void *a, void *b, uint32_t desc)
1485 {
1486 intptr_t oprsz = simd_oprsz(desc);
1487 intptr_t i;
1488
1489 for (i = 0; i < oprsz; i += sizeof(uint16_t)) {
1490 uint16_t aa = *(uint16_t *)(a + i);
1491 uint16_t bb = *(uint16_t *)(b + i);
1492 uint16_t dd = aa > bb ? aa : bb;
1493 *(uint16_t *)(d + i) = dd;
1494 }
1495 clear_high(d, oprsz, desc);
1496 }
1497
1498 void HELPER(gvec_umax32)(void *d, void *a, void *b, uint32_t desc)
1499 {
1500 intptr_t oprsz = simd_oprsz(desc);
1501 intptr_t i;
1502
1503 for (i = 0; i < oprsz; i += sizeof(uint32_t)) {
1504 uint32_t aa = *(uint32_t *)(a + i);
1505 uint32_t bb = *(uint32_t *)(b + i);
1506 uint32_t dd = aa > bb ? aa : bb;
1507 *(uint32_t *)(d + i) = dd;
1508 }
1509 clear_high(d, oprsz, desc);
1510 }
1511
1512 void HELPER(gvec_umax64)(void *d, void *a, void *b, uint32_t desc)
1513 {
1514 intptr_t oprsz = simd_oprsz(desc);
1515 intptr_t i;
1516
1517 for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
1518 uint64_t aa = *(uint64_t *)(a + i);
1519 uint64_t bb = *(uint64_t *)(b + i);
1520 uint64_t dd = aa > bb ? aa : bb;
1521 *(uint64_t *)(d + i) = dd;
1522 }
1523 clear_high(d, oprsz, desc);
1524 }
1525
1526 void HELPER(gvec_bitsel)(void *d, void *a, void *b, void *c, uint32_t desc)
1527 {
1528 intptr_t oprsz = simd_oprsz(desc);
1529 intptr_t i;
1530
1531 for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
1532 uint64_t aa = *(uint64_t *)(a + i);
1533 uint64_t bb = *(uint64_t *)(b + i);
1534 uint64_t cc = *(uint64_t *)(c + i);
1535 *(uint64_t *)(d + i) = (bb & aa) | (cc & ~aa);
1536 }
1537 clear_high(d, oprsz, desc);
1538 }