Vector Optimized Library of Kernels 3.3.0
Architecture-tuned implementations of math kernels

volk_32f_index_min_16u.h
Go to the documentation of this file.
/* -*- c++ -*- */
/*
 * Copyright 2021 Free Software Foundation, Inc.
 *
 * This file is part of VOLK
 *
 * SPDX-License-Identifier: LGPL-3.0-or-later
 */

57
58#ifndef INCLUDED_volk_32f_index_min_16u_a_H
59#define INCLUDED_volk_32f_index_min_16u_a_H
60
61#include <inttypes.h>
62#include <limits.h>
63#include <stdio.h>
64#include <volk/volk_common.h>
65
#ifdef LV_HAVE_AVX
#include <immintrin.h>

/*!
 * Find the index of the first occurrence of the minimum value in \p source.
 *
 * \param target     Output: written with the winning index (left untouched if
 *                   \p num_points is 0, matching the NEON kernels).
 * \param source     Input buffer of floats; 32-byte aligned for this kernel.
 * \param num_points Number of points to search; clamped to USHRT_MAX so the
 *                   index fits in a uint16_t.
 */
static inline void
volk_32f_index_min_16u_a_avx(uint16_t* target, const float* source, uint32_t num_points)
{
    /* The result index must fit in a uint16_t, so clamp the search range. */
    num_points = (num_points > USHRT_MAX) ? USHRT_MAX : num_points;

    /* Consistent with the NEON kernels: no points means nothing to do. */
    if (num_points == 0)
        return;

    const uint32_t eighthPoints = num_points / 8;

    float* inputPtr = (float*)source;

    /* Indices start one vector "behind" so the first in-loop increment yields
     * lane indices 0..7 for the first load. Indices are tracked as floats,
     * which is exact for every value up to USHRT_MAX (< 2^24). */
    __m256 indexIncrementValues = _mm256_set1_ps(8);
    __m256 currentIndexes = _mm256_set_ps(-1, -2, -3, -4, -5, -6, -7, -8);

    float min = source[0];
    float index = 0;
    __m256 minValues = _mm256_set1_ps(min);
    __m256 minValuesIndex = _mm256_setzero_ps();
    __m256 compareResults;
    __m256 currentValues;

    __VOLK_ATTR_ALIGNED(32) float minValuesBuffer[8];
    __VOLK_ATTR_ALIGNED(32) float minIndexesBuffer[8];

    for (uint32_t number = 0; number < eighthPoints; number++) {

        currentValues = _mm256_load_ps(inputPtr);
        inputPtr += 8;
        currentIndexes = _mm256_add_ps(currentIndexes, indexIncrementValues);

        /* Strict less-than keeps the first occurrence within each lane. */
        compareResults = _mm256_cmp_ps(currentValues, minValues, _CMP_LT_OS);

        minValuesIndex = _mm256_blendv_ps(minValuesIndex, currentIndexes, compareResults);
        minValues = _mm256_blendv_ps(minValues, currentValues, compareResults);
    }

    /* Reduce the 8 per-lane minima to a scalar; ties prefer the lower index. */
    _mm256_store_ps(minValuesBuffer, minValues);
    _mm256_store_ps(minIndexesBuffer, minValuesIndex);

    for (uint32_t number = 0; number < 8; number++) {
        if (minValuesBuffer[number] < min) {
            index = minIndexesBuffer[number];
            min = minValuesBuffer[number];
        } else if (minValuesBuffer[number] == min) {
            if (index > minIndexesBuffer[number])
                index = minIndexesBuffer[number];
        }
    }

    /* Scalar tail for the 0..7 leftover points. */
    for (uint32_t number = eighthPoints * 8; number < num_points; number++) {
        if (source[number] < min) {
            index = number;
            min = source[number];
        }
    }
    target[0] = (uint16_t)index;
}

#endif /*LV_HAVE_AVX*/
126
#ifdef LV_HAVE_SSE4_1
#include <smmintrin.h>

/*!
 * Find the index of the first occurrence of the minimum value in \p source.
 *
 * \param target     Output: written with the winning index (left untouched if
 *                   \p num_points is 0, matching the NEON kernels).
 * \param source     Input buffer of floats; 16-byte aligned for this kernel.
 * \param num_points Number of points to search; clamped to USHRT_MAX so the
 *                   index fits in a uint16_t.
 */
static inline void volk_32f_index_min_16u_a_sse4_1(uint16_t* target,
                                                   const float* source,
                                                   uint32_t num_points)
{
    /* The result index must fit in a uint16_t, so clamp the search range. */
    num_points = (num_points > USHRT_MAX) ? USHRT_MAX : num_points;

    /* Consistent with the NEON kernels: no points means nothing to do. */
    if (num_points == 0)
        return;

    const uint32_t quarterPoints = num_points / 4;

    float* inputPtr = (float*)source;

    /* Indices start one vector "behind" so the first in-loop increment yields
     * lane indices 0..3 for the first load. Float indices are exact up to
     * USHRT_MAX (< 2^24). */
    __m128 indexIncrementValues = _mm_set1_ps(4);
    __m128 currentIndexes = _mm_set_ps(-1, -2, -3, -4);

    float min = source[0];
    float index = 0;
    __m128 minValues = _mm_set1_ps(min);
    __m128 minValuesIndex = _mm_setzero_ps();
    __m128 compareResults;
    __m128 currentValues;

    __VOLK_ATTR_ALIGNED(16) float minValuesBuffer[4];
    __VOLK_ATTR_ALIGNED(16) float minIndexesBuffer[4];

    for (uint32_t number = 0; number < quarterPoints; number++) {

        currentValues = _mm_load_ps(inputPtr);
        inputPtr += 4;
        currentIndexes = _mm_add_ps(currentIndexes, indexIncrementValues);

        /* Strict less-than keeps the first occurrence within each lane. */
        compareResults = _mm_cmplt_ps(currentValues, minValues);

        minValuesIndex = _mm_blendv_ps(minValuesIndex, currentIndexes, compareResults);
        minValues = _mm_blendv_ps(minValues, currentValues, compareResults);
    }

    /* Reduce the 4 per-lane minima to a scalar; ties prefer the lower index. */
    _mm_store_ps(minValuesBuffer, minValues);
    _mm_store_ps(minIndexesBuffer, minValuesIndex);

    for (uint32_t number = 0; number < 4; number++) {
        if (minValuesBuffer[number] < min) {
            index = minIndexesBuffer[number];
            min = minValuesBuffer[number];
        } else if (minValuesBuffer[number] == min) {
            if (index > minIndexesBuffer[number])
                index = minIndexesBuffer[number];
        }
    }

    /* Scalar tail for the 0..3 leftover points. */
    for (uint32_t number = quarterPoints * 4; number < num_points; number++) {
        if (source[number] < min) {
            index = number;
            min = source[number];
        }
    }
    target[0] = (uint16_t)index;
}

#endif /*LV_HAVE_SSE4_1*/
188
189
#ifdef LV_HAVE_SSE

#include <xmmintrin.h>

/*!
 * Find the index of the first occurrence of the minimum value in \p source.
 *
 * SSE1 variant: _mm_blendv_ps is unavailable before SSE4.1, so the
 * conditional select is emulated with and/andnot/or on the compare mask.
 *
 * \param target     Output: written with the winning index (left untouched if
 *                   \p num_points is 0, matching the NEON kernels).
 * \param source     Input buffer of floats; 16-byte aligned for this kernel.
 * \param num_points Number of points to search; clamped to USHRT_MAX so the
 *                   index fits in a uint16_t.
 */
static inline void
volk_32f_index_min_16u_a_sse(uint16_t* target, const float* source, uint32_t num_points)
{
    /* The result index must fit in a uint16_t, so clamp the search range. */
    num_points = (num_points > USHRT_MAX) ? USHRT_MAX : num_points;

    /* Consistent with the NEON kernels: no points means nothing to do. */
    if (num_points == 0)
        return;

    const uint32_t quarterPoints = num_points / 4;

    float* inputPtr = (float*)source;

    /* Indices start one vector "behind" so the first in-loop increment yields
     * lane indices 0..3 for the first load. */
    __m128 indexIncrementValues = _mm_set1_ps(4);
    __m128 currentIndexes = _mm_set_ps(-1, -2, -3, -4);

    float min = source[0];
    float index = 0;
    __m128 minValues = _mm_set1_ps(min);
    __m128 minValuesIndex = _mm_setzero_ps();
    __m128 compareResults;
    __m128 currentValues;

    __VOLK_ATTR_ALIGNED(16) float minValuesBuffer[4];
    __VOLK_ATTR_ALIGNED(16) float minIndexesBuffer[4];

    for (uint32_t number = 0; number < quarterPoints; number++) {

        currentValues = _mm_load_ps(inputPtr);
        inputPtr += 4;
        currentIndexes = _mm_add_ps(currentIndexes, indexIncrementValues);

        /* Strict less-than keeps the first occurrence within each lane. */
        compareResults = _mm_cmplt_ps(currentValues, minValues);

        /* blendv emulation: take the new value/index where the mask is set. */
        minValuesIndex = _mm_or_ps(_mm_and_ps(compareResults, currentIndexes),
                                   _mm_andnot_ps(compareResults, minValuesIndex));
        minValues = _mm_or_ps(_mm_and_ps(compareResults, currentValues),
                              _mm_andnot_ps(compareResults, minValues));
    }

    /* Reduce the 4 per-lane minima to a scalar; ties prefer the lower index. */
    _mm_store_ps(minValuesBuffer, minValues);
    _mm_store_ps(minIndexesBuffer, minValuesIndex);

    for (uint32_t number = 0; number < 4; number++) {
        if (minValuesBuffer[number] < min) {
            index = minIndexesBuffer[number];
            min = minValuesBuffer[number];
        } else if (minValuesBuffer[number] == min) {
            if (index > minIndexesBuffer[number])
                index = minIndexesBuffer[number];
        }
    }

    /* Scalar tail for the 0..3 leftover points. */
    for (uint32_t number = quarterPoints * 4; number < num_points; number++) {
        if (source[number] < min) {
            index = number;
            min = source[number];
        }
    }
    target[0] = (uint16_t)index;
}

#endif /*LV_HAVE_SSE*/
253
254
#ifdef LV_HAVE_NEON
#include <arm_neon.h>
#include <float.h>
#include <limits.h>

/*!
 * Find the index of the first occurrence of the minimum value in \p source.
 * Writes the winning index to \p target; leaves it untouched when
 * \p num_points is 0. The search range is clamped to USHRT_MAX so the result
 * fits in a uint16_t.
 */
static inline void
volk_32f_index_min_16u_neon(uint16_t* target, const float* source, uint32_t num_points)
{
    /* Clamp so the returned index is always representable in a uint16_t. */
    num_points = (num_points > USHRT_MAX) ? USHRT_MAX : num_points;

    if (num_points == 0)
        return;

    const uint32_t quarter_points = num_points / 4;
    const float* in = source;

    /* Per-lane element indices, kept as plain integers. */
    uint32x4_t lane_idx = { 0, 1, 2, 3 };
    const uint32x4_t idx_step = vdupq_n_u32(4);

    float32x4_t running_min = vdupq_n_f32(FLT_MAX);
    uint32x4_t running_min_idx = vdupq_n_u32(0);

    for (uint32_t n = 0; n < quarter_points; n++) {
        const float32x4_t vals = vld1q_f32(in);
        in += 4;

        /* Compare against the pre-update minimum so we know exactly which
         * lanes the new values improve, then update indices and values. */
        const uint32x4_t improved = vcltq_f32(vals, running_min);
        running_min_idx = vbslq_u32(improved, lane_idx, running_min_idx);
        running_min = vminq_f32(vals, running_min);

        lane_idx = vaddq_u32(lane_idx, idx_step);
    }

    /* Spill the lanes and reduce on the scalar side; ties between lanes go
     * to the lower index. */
    __VOLK_ATTR_ALIGNED(16) float val_lanes[4];
    __VOLK_ATTR_ALIGNED(16) uint32_t idx_lanes[4];
    vst1q_f32(val_lanes, running_min);
    vst1q_u32(idx_lanes, running_min_idx);

    float best_val = val_lanes[0];
    uint32_t best_idx = idx_lanes[0];
    for (int lane = 1; lane < 4; lane++) {
        if (val_lanes[lane] < best_val) {
            best_val = val_lanes[lane];
            best_idx = idx_lanes[lane];
        } else if (val_lanes[lane] == best_val && idx_lanes[lane] < best_idx) {
            best_idx = idx_lanes[lane];
        }
    }

    /* Scalar tail for the final 0..3 points. */
    for (uint32_t i = quarter_points * 4; i < num_points; i++) {
        if (source[i] < best_val) {
            best_val = source[i];
            best_idx = i;
        }
    }

    *target = (uint16_t)best_idx;
}

#endif /*LV_HAVE_NEON*/
321
322
#ifdef LV_HAVE_NEONV8
#include <arm_neon.h>
#include <float.h>
#include <limits.h>

/*!
 * Find the index of the first occurrence of the minimum value in \p source.
 * Writes the winning index to \p target; leaves it untouched when
 * \p num_points is 0. The search range is clamped to USHRT_MAX so the result
 * fits in a uint16_t.
 */
static inline void
volk_32f_index_min_16u_neonv8(uint16_t* target, const float* source, uint32_t num_points)
{
    /* Clamp so the returned index is always representable in a uint16_t. */
    num_points = (num_points > USHRT_MAX) ? USHRT_MAX : num_points;

    if (num_points == 0)
        return;

    const uint32_t quarter_points = num_points / 4;
    const float* in = source;

    /* Per-lane element indices, kept as plain integers. */
    uint32x4_t lane_idx = { 0, 1, 2, 3 };
    const uint32x4_t idx_step = vdupq_n_u32(4);

    float32x4_t running_min = vdupq_n_f32(FLT_MAX);
    uint32x4_t running_min_idx = vdupq_n_u32(0);

    for (uint32_t n = 0; n < quarter_points; n++) {
        const float32x4_t vals = vld1q_f32(in);
        in += 4;

        /* Compare against the pre-update minimum so we know exactly which
         * lanes the new values improve, then update indices and values. */
        const uint32x4_t improved = vcltq_f32(vals, running_min);
        running_min_idx = vbslq_u32(improved, lane_idx, running_min_idx);
        running_min = vminq_f32(vals, running_min);

        lane_idx = vaddq_u32(lane_idx, idx_step);
    }

    /* ARMv8 horizontal reduction: take the scalar minimum across lanes, then
     * pick the smallest index among the lanes holding that minimum (losing
     * lanes are forced to UINT32_MAX before the index reduction). */
    float best_val = vminvq_f32(running_min);
    const uint32x4_t hit = vceqq_f32(running_min, vdupq_n_f32(best_val));
    const uint32x4_t candidate_idx =
        vbslq_u32(hit, running_min_idx, vdupq_n_u32(UINT32_MAX));
    uint32_t best_idx = vminvq_u32(candidate_idx);

    /* Scalar tail for the final 0..3 points. */
    for (uint32_t i = quarter_points * 4; i < num_points; i++) {
        if (source[i] < best_val) {
            best_val = source[i];
            best_idx = i;
        }
    }

    *target = (uint16_t)best_idx;
}

#endif /*LV_HAVE_NEONV8*/
378
379
380#ifdef LV_HAVE_GENERIC
381
/*!
 * Find the index of the first occurrence of the minimum value in \p source.
 *
 * \param target     Output: written with the winning index (left untouched if
 *                   \p num_points is 0, matching the NEON kernels).
 * \param source     Input buffer of floats; no alignment requirement.
 * \param num_points Number of points to search; clamped to USHRT_MAX so the
 *                   index fits in a uint16_t.
 */
static inline void
volk_32f_index_min_16u_generic(uint16_t* target, const float* source, uint32_t num_points)
{
    /* Clamp so the winning index is always representable in a uint16_t. */
    num_points = (num_points > USHRT_MAX) ? USHRT_MAX : num_points;

    /* Nothing to search; leave *target untouched (matches the NEON kernels). */
    if (num_points == 0)
        return;

    float min = source[0];
    uint16_t index = 0;

    /* Strict less-than keeps the first occurrence on ties. After clamping,
     * i < USHRT_MAX, so the cast below cannot truncate. */
    for (uint32_t i = 1; i < num_points; ++i) {
        if (source[i] < min) {
            index = (uint16_t)i;
            min = source[i];
        }
    }
    target[0] = index;
}
398
399#endif /*LV_HAVE_GENERIC*/
400
#ifdef LV_HAVE_AVX512F
#include <immintrin.h>
#include <limits.h>

/*!
 * Find the index of the first occurrence of the minimum value in \p source.
 *
 * \param target     Output: written with the winning index (left untouched if
 *                   \p num_points is 0, matching the NEON kernels).
 * \param source     Input buffer of floats; 64-byte aligned for this kernel.
 * \param num_points Number of points to search; clamped to USHRT_MAX so the
 *                   index fits in a uint16_t.
 */
static inline void volk_32f_index_min_16u_a_avx512f(uint16_t* target,
                                                    const float* source,
                                                    uint32_t num_points)
{
    /* The result index must fit in a uint16_t, so clamp the search range. */
    num_points = (num_points > USHRT_MAX) ? USHRT_MAX : num_points;

    /* Consistent with the NEON kernels: no points means nothing to do. */
    if (num_points == 0)
        return;

    uint32_t number = 0;
    const uint32_t sixteenthPoints = num_points / 16;

    const float* inputPtr = source;

    /* Indices start one vector "behind" so the first in-loop increment yields
     * lane indices 0..15 for the first load. Float indices are exact up to
     * USHRT_MAX (< 2^24). */
    __m512 indexIncrementValues = _mm512_set1_ps(16);
    __m512 currentIndexes = _mm512_set_ps(
        -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16);

    float min = source[0];
    float index = 0;
    __m512 minValues = _mm512_set1_ps(min);
    __m512 minValuesIndex = _mm512_setzero_ps();
    __mmask16 compareResults;
    __m512 currentValues;

    __VOLK_ATTR_ALIGNED(64) float minValuesBuffer[16];
    __VOLK_ATTR_ALIGNED(64) float minIndexesBuffer[16];

    for (; number < sixteenthPoints; number++) {
        currentValues = _mm512_load_ps(inputPtr);
        inputPtr += 16;
        currentIndexes = _mm512_add_ps(currentIndexes, indexIncrementValues);
        /* Strict less-than keeps the first occurrence within each lane. */
        compareResults = _mm512_cmp_ps_mask(currentValues, minValues, _CMP_LT_OS);
        minValuesIndex =
            _mm512_mask_blend_ps(compareResults, minValuesIndex, currentIndexes);
        minValues = _mm512_mask_blend_ps(compareResults, minValues, currentValues);
    }

    /* Reduce the 16 per-lane minima to a scalar; ties prefer the lower index. */
    _mm512_store_ps(minValuesBuffer, minValues);
    _mm512_store_ps(minIndexesBuffer, minValuesIndex);

    for (number = 0; number < 16; number++) {
        if (minValuesBuffer[number] < min) {
            index = minIndexesBuffer[number];
            min = minValuesBuffer[number];
        } else if (minValuesBuffer[number] == min) {
            if (index > minIndexesBuffer[number])
                index = minIndexesBuffer[number];
        }
    }

    /* Scalar tail for the 0..15 leftover points. */
    number = sixteenthPoints * 16;
    for (; number < num_points; number++) {
        if (source[number] < min) {
            index = number;
            min = source[number];
        }
    }
    target[0] = (uint16_t)index;
}

#endif /*LV_HAVE_AVX512F*/
465
466#endif /*INCLUDED_volk_32f_index_min_16u_a_H*/
467
468
469#ifndef INCLUDED_volk_32f_index_min_16u_u_H
470#define INCLUDED_volk_32f_index_min_16u_u_H
471
472#include <inttypes.h>
473#include <limits.h>
474#include <stdio.h>
475#include <volk/volk_common.h>
476
#ifdef LV_HAVE_AVX
#include <immintrin.h>

/*!
 * Find the index of the first occurrence of the minimum value in \p source.
 *
 * \param target     Output: written with the winning index (left untouched if
 *                   \p num_points is 0, matching the NEON kernels).
 * \param source     Input buffer of floats; no alignment requirement
 *                   (unaligned loads are used).
 * \param num_points Number of points to search; clamped to USHRT_MAX so the
 *                   index fits in a uint16_t.
 */
static inline void
volk_32f_index_min_16u_u_avx(uint16_t* target, const float* source, uint32_t num_points)
{
    /* The result index must fit in a uint16_t, so clamp the search range. */
    num_points = (num_points > USHRT_MAX) ? USHRT_MAX : num_points;

    /* Consistent with the NEON kernels: no points means nothing to do. */
    if (num_points == 0)
        return;

    const uint32_t eighthPoints = num_points / 8;

    float* inputPtr = (float*)source;

    /* Indices start one vector "behind" so the first in-loop increment yields
     * lane indices 0..7 for the first load. Float indices are exact up to
     * USHRT_MAX (< 2^24). */
    __m256 indexIncrementValues = _mm256_set1_ps(8);
    __m256 currentIndexes = _mm256_set_ps(-1, -2, -3, -4, -5, -6, -7, -8);

    float min = source[0];
    float index = 0;
    __m256 minValues = _mm256_set1_ps(min);
    __m256 minValuesIndex = _mm256_setzero_ps();
    __m256 compareResults;
    __m256 currentValues;

    __VOLK_ATTR_ALIGNED(32) float minValuesBuffer[8];
    __VOLK_ATTR_ALIGNED(32) float minIndexesBuffer[8];

    for (uint32_t number = 0; number < eighthPoints; number++) {

        currentValues = _mm256_loadu_ps(inputPtr);
        inputPtr += 8;
        currentIndexes = _mm256_add_ps(currentIndexes, indexIncrementValues);

        /* Strict less-than keeps the first occurrence within each lane. */
        compareResults = _mm256_cmp_ps(currentValues, minValues, _CMP_LT_OS);

        minValuesIndex = _mm256_blendv_ps(minValuesIndex, currentIndexes, compareResults);
        minValues = _mm256_blendv_ps(minValues, currentValues, compareResults);
    }

    /* Reduce the 8 per-lane minima to a scalar; ties prefer the lower index. */
    _mm256_storeu_ps(minValuesBuffer, minValues);
    _mm256_storeu_ps(minIndexesBuffer, minValuesIndex);

    for (uint32_t number = 0; number < 8; number++) {
        if (minValuesBuffer[number] < min) {
            index = minIndexesBuffer[number];
            min = minValuesBuffer[number];
        } else if (minValuesBuffer[number] == min) {
            if (index > minIndexesBuffer[number])
                index = minIndexesBuffer[number];
        }
    }

    /* Scalar tail for the 0..7 leftover points. */
    for (uint32_t number = eighthPoints * 8; number < num_points; number++) {
        if (source[number] < min) {
            index = number;
            min = source[number];
        }
    }
    target[0] = (uint16_t)index;
}

#endif /*LV_HAVE_AVX*/
537
#ifdef LV_HAVE_AVX512F
#include <immintrin.h>
#include <limits.h>

/*!
 * Find the index of the first occurrence of the minimum value in \p source.
 *
 * \param target     Output: written with the winning index (left untouched if
 *                   \p num_points is 0, matching the NEON kernels).
 * \param source     Input buffer of floats; no alignment requirement
 *                   (unaligned loads are used).
 * \param num_points Number of points to search; clamped to USHRT_MAX so the
 *                   index fits in a uint16_t.
 */
static inline void volk_32f_index_min_16u_u_avx512f(uint16_t* target,
                                                    const float* source,
                                                    uint32_t num_points)
{
    /* The result index must fit in a uint16_t, so clamp the search range. */
    num_points = (num_points > USHRT_MAX) ? USHRT_MAX : num_points;

    /* Consistent with the NEON kernels: no points means nothing to do. */
    if (num_points == 0)
        return;

    uint32_t number = 0;
    const uint32_t sixteenthPoints = num_points / 16;

    const float* inputPtr = source;

    /* Indices start one vector "behind" so the first in-loop increment yields
     * lane indices 0..15 for the first load. Float indices are exact up to
     * USHRT_MAX (< 2^24). */
    __m512 indexIncrementValues = _mm512_set1_ps(16);
    __m512 currentIndexes = _mm512_set_ps(
        -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16);

    float min = source[0];
    float index = 0;
    __m512 minValues = _mm512_set1_ps(min);
    __m512 minValuesIndex = _mm512_setzero_ps();
    __mmask16 compareResults;
    __m512 currentValues;

    __VOLK_ATTR_ALIGNED(64) float minValuesBuffer[16];
    __VOLK_ATTR_ALIGNED(64) float minIndexesBuffer[16];

    for (; number < sixteenthPoints; number++) {
        currentValues = _mm512_loadu_ps(inputPtr);
        inputPtr += 16;
        currentIndexes = _mm512_add_ps(currentIndexes, indexIncrementValues);
        /* Strict less-than keeps the first occurrence within each lane. */
        compareResults = _mm512_cmp_ps_mask(currentValues, minValues, _CMP_LT_OS);
        minValuesIndex =
            _mm512_mask_blend_ps(compareResults, minValuesIndex, currentIndexes);
        minValues = _mm512_mask_blend_ps(compareResults, minValues, currentValues);
    }

    /* Reduce the 16 per-lane minima to a scalar; ties prefer the lower index.
     * Aligned stores are fine here: the local buffers are 64-byte aligned
     * even though the input may not be. */
    _mm512_store_ps(minValuesBuffer, minValues);
    _mm512_store_ps(minIndexesBuffer, minValuesIndex);

    for (number = 0; number < 16; number++) {
        if (minValuesBuffer[number] < min) {
            index = minIndexesBuffer[number];
            min = minValuesBuffer[number];
        } else if (minValuesBuffer[number] == min) {
            if (index > minIndexesBuffer[number])
                index = minIndexesBuffer[number];
        }
    }

    /* Scalar tail for the 0..15 leftover points. */
    number = sixteenthPoints * 16;
    for (; number < num_points; number++) {
        if (source[number] < min) {
            index = number;
            min = source[number];
        }
    }
    target[0] = (uint16_t)index;
}

#endif /*LV_HAVE_AVX512F*/
602
#ifdef LV_HAVE_RVV
#include <float.h>
#include <riscv_vector.h>

/* Find the index of the first occurrence of the minimum value in src0 and
 * write it to *target. The search range is clamped to USHRT_MAX so the
 * result fits in a uint16_t. */
static inline void
volk_32f_index_min_16u_rvv(uint16_t* target, const float* src0, uint32_t num_points)
{
    // Per-lane running minima, seeded with FLT_MAX so the first strip wins.
    vfloat32m8_t vmin = __riscv_vfmv_v_f_f32m8(FLT_MAX, __riscv_vsetvlmax_e32m8());
    // Per-lane index of the running minimum; e16m4 has the same lane count
    // as e32m8 (equal SEW/LMUL ratio), so indices pair one-to-one with values.
    vuint16m4_t vmini = __riscv_vmv_v_x_u16m4(0, __riscv_vsetvlmax_e16m4());
    // Element indices 0,1,2,... advanced by vl after each strip.
    vuint16m4_t vidx = __riscv_vid_v_u16m4(__riscv_vsetvlmax_e16m4());
    // Clamp so the winning index is representable in a uint16_t; indices
    // therefore never exceed 65534 and cannot wrap the 16-bit counters.
    size_t n = (num_points > USHRT_MAX) ? USHRT_MAX : num_points;
    for (size_t vl; n > 0; n -= vl, src0 += vl) {
        vl = __riscv_vsetvl_e32m8(n);
        vfloat32m8_t v = __riscv_vle32_v_f32m8(src0, vl);
        // Lanes where the new values are strictly smaller than the running min
        // (strict less-than keeps the first occurrence within each lane).
        vbool4_t m = __riscv_vmflt(v, vmin, vl);
        // Tail-undisturbed (_tu) ops leave lanes beyond vl intact on the
        // final, partial strip.
        vmin = __riscv_vfmin_tu(vmin, vmin, v, vl);
        vmini = __riscv_vmerge_tu(vmini, vmini, vidx, m, vl);
        vidx = __riscv_vadd(vidx, vl, __riscv_vsetvlmax_e16m4());
    }
    size_t vl = __riscv_vsetvlmax_e32m8();
    // Horizontal reduction of the per-lane minima to one scalar minimum.
    float min = __riscv_vfmv_f(__riscv_vfredmin(RISCV_SHRINK8(vfmin, f, 32, vmin),
                                                __riscv_vfmv_v_f_f32m1(FLT_MAX, 1),
                                                __riscv_vsetvlmax_e32m1()));
    // Find lanes with min value, set others to UINT16_MAX
    vbool4_t m = __riscv_vmfeq(vmin, min, vl);
    vuint16m4_t idx_masked = __riscv_vmerge(
        __riscv_vmv_v_x_u16m4(UINT16_MAX, __riscv_vsetvlmax_e16m4()), vmini, m, vl);
    // Find minimum index among lanes with min value
    *target = __riscv_vmv_x(__riscv_vredminu(RISCV_SHRINK4(vminu, u, 16, idx_masked),
                                             __riscv_vmv_v_x_u16m1(UINT16_MAX, 1),
                                             __riscv_vsetvlmax_e16m1()));
}
#endif /*LV_HAVE_RVV*/
636
637#endif /*INCLUDED_volk_32f_index_min_16u_u_H*/