52#ifndef INCLUDED_volk_32f_index_min_32u_a_H
53#define INCLUDED_volk_32f_index_min_32u_a_H
#include <smmintrin.h>

/*!
 * Find the index of the minimum value in \p source (SSE4.1, aligned).
 *
 * \param target     output: receives the index of the smallest float.
 * \param source     input buffer; must be 16-byte aligned for _mm_load_ps.
 * \param num_points number of floats to scan.
 *
 * Ties resolve to the lowest index.
 * NOTE(review): lane indices are carried as floats, so indices are only
 * exact while num_points < 2^24 — confirm against callers.
 */
static inline void volk_32f_index_min_32u_a_sse4_1(uint32_t* target,
                                                   const float* source,
                                                   uint32_t num_points)
{
    if (num_points > 0) {
        const uint32_t quarterPoints = num_points / 4;
        const float* inputPtr = source;

        __m128 indexIncrementValues = _mm_set1_ps(4);
        /* Starts at -4..-1 so the first in-loop add yields lane indices 0..3. */
        __m128 currentIndexes = _mm_set_ps(-1, -2, -3, -4);

        float min = source[0];
        float index = 0;
        __m128 minValues = _mm_set1_ps(min);
        __m128 minValuesIndex = _mm_setzero_ps();
        __m128 compareResults;
        __m128 currentValues;

        _Alignas(16) float minValuesBuffer[4];
        _Alignas(16) float minIndexesBuffer[4];

        for (uint32_t number = 0; number < quarterPoints; number++) {
            currentValues = _mm_load_ps(inputPtr);
            inputPtr += 4;
            currentIndexes = _mm_add_ps(currentIndexes, indexIncrementValues);
            /* Per lane: keep the first strictly-smaller value and its index. */
            compareResults = _mm_cmplt_ps(currentValues, minValues);
            minValuesIndex = _mm_blendv_ps(minValuesIndex, currentIndexes, compareResults);
            minValues = _mm_blendv_ps(minValues, currentValues, compareResults);
        }

        /* Reduce the four per-lane minima, preferring the lowest index on ties. */
        _mm_store_ps(minValuesBuffer, minValues);
        _mm_store_ps(minIndexesBuffer, minValuesIndex);

        for (uint32_t number = 0; number < 4; number++) {
            if (minValuesBuffer[number] < min) {
                index = minIndexesBuffer[number];
                min = minValuesBuffer[number];
            } else if (minValuesBuffer[number] == min) {
                if (index > minIndexesBuffer[number])
                    index = minIndexesBuffer[number];
            }
        }

        /* Scalar tail for the 0-3 leftover points; first occurrence wins. */
        for (uint32_t number = quarterPoints * 4; number < num_points; number++) {
            if (source[number] < min) {
                index = number;
                min = source[number];
            }
        }
        target[0] = (uint32_t)index;
    }
}
#include <xmmintrin.h>

/*!
 * Find the index of the minimum value in \p source (SSE, aligned).
 *
 * \param target     output: receives the index of the smallest float.
 * \param source     input buffer; must be 16-byte aligned for _mm_load_ps.
 * \param num_points number of floats to scan.
 *
 * Same algorithm as the SSE4.1 variant, with the blend emulated via
 * and/andnot/or (no _mm_blendv_ps before SSE4.1).  Ties resolve to the
 * lowest index.
 * NOTE(review): lane indices are carried as floats, so indices are only
 * exact while num_points < 2^24 — confirm against callers.
 */
static inline void volk_32f_index_min_32u_a_sse(uint32_t* target,
                                                const float* source,
                                                uint32_t num_points)
{
    if (num_points > 0) {
        const uint32_t quarterPoints = num_points / 4;
        const float* inputPtr = source;

        __m128 indexIncrementValues = _mm_set1_ps(4);
        /* Starts at -4..-1 so the first in-loop add yields lane indices 0..3. */
        __m128 currentIndexes = _mm_set_ps(-1, -2, -3, -4);

        float min = source[0];
        float index = 0;
        __m128 minValues = _mm_set1_ps(min);
        __m128 minValuesIndex = _mm_setzero_ps();
        __m128 compareResults;
        __m128 currentValues;

        _Alignas(16) float minValuesBuffer[4];
        _Alignas(16) float minIndexesBuffer[4];

        for (uint32_t number = 0; number < quarterPoints; number++) {
            currentValues = _mm_load_ps(inputPtr);
            inputPtr += 4;
            currentIndexes = _mm_add_ps(currentIndexes, indexIncrementValues);
            compareResults = _mm_cmplt_ps(currentValues, minValues);
            /* Emulated blend: take new lane where compare is true, else old. */
            minValuesIndex = _mm_or_ps(_mm_and_ps(compareResults, currentIndexes),
                                       _mm_andnot_ps(compareResults, minValuesIndex));
            minValues = _mm_or_ps(_mm_and_ps(compareResults, currentValues),
                                  _mm_andnot_ps(compareResults, minValues));
        }

        /* Reduce the four per-lane minima, preferring the lowest index on ties. */
        _mm_store_ps(minValuesBuffer, minValues);
        _mm_store_ps(minIndexesBuffer, minValuesIndex);

        for (uint32_t number = 0; number < 4; number++) {
            if (minValuesBuffer[number] < min) {
                index = minIndexesBuffer[number];
                min = minValuesBuffer[number];
            } else if (minValuesBuffer[number] == min) {
                if (index > minIndexesBuffer[number])
                    index = minIndexesBuffer[number];
            }
        }

        /* Scalar tail for the 0-3 leftover points; first occurrence wins. */
        for (uint32_t number = quarterPoints * 4; number < num_points; number++) {
            if (source[number] < min) {
                index = number;
                min = source[number];
            }
        }
        target[0] = (uint32_t)index;
    }
}
#include <immintrin.h>

/*!
 * Find the index of the minimum value in \p source (AVX, aligned).
 *
 * \param target     output: receives the index of the smallest float.
 * \param source     input buffer; must be 32-byte aligned for _mm256_load_ps.
 * \param num_points number of floats to scan.
 *
 * Eight lanes per iteration.  Ties resolve to the lowest index.
 * NOTE(review): lane indices are carried as floats, so indices are only
 * exact while num_points < 2^24 — confirm against callers.
 */
static inline void volk_32f_index_min_32u_a_avx(uint32_t* target,
                                                const float* source,
                                                uint32_t num_points)
{
    if (num_points > 0) {
        const uint32_t eighthPoints = num_points / 8;
        const float* inputPtr = source;

        __m256 indexIncrementValues = _mm256_set1_ps(8);
        /* Starts at -8..-1 so the first in-loop add yields lane indices 0..7. */
        __m256 currentIndexes = _mm256_set_ps(-1, -2, -3, -4, -5, -6, -7, -8);

        float min = source[0];
        float index = 0;
        __m256 minValues = _mm256_set1_ps(min);
        __m256 minValuesIndex = _mm256_setzero_ps();
        __m256 compareResults;
        __m256 currentValues;

        _Alignas(32) float minValuesBuffer[8];
        _Alignas(32) float minIndexesBuffer[8];

        for (uint32_t number = 0; number < eighthPoints; number++) {
            currentValues = _mm256_load_ps(inputPtr);
            inputPtr += 8;
            currentIndexes = _mm256_add_ps(currentIndexes, indexIncrementValues);
            /* Per lane: keep the first strictly-smaller value and its index. */
            compareResults = _mm256_cmp_ps(currentValues, minValues, _CMP_LT_OS);
            minValuesIndex = _mm256_blendv_ps(minValuesIndex, currentIndexes, compareResults);
            minValues = _mm256_blendv_ps(minValues, currentValues, compareResults);
        }

        /* Reduce the eight per-lane minima, preferring the lowest index on ties. */
        _mm256_store_ps(minValuesBuffer, minValues);
        _mm256_store_ps(minIndexesBuffer, minValuesIndex);

        for (uint32_t number = 0; number < 8; number++) {
            if (minValuesBuffer[number] < min) {
                index = minIndexesBuffer[number];
                min = minValuesBuffer[number];
            } else if (minValuesBuffer[number] == min) {
                if (index > minIndexesBuffer[number])
                    index = minIndexesBuffer[number];
            }
        }

        /* Scalar tail for the 0-7 leftover points; first occurrence wins. */
        for (uint32_t number = eighthPoints * 8; number < num_points; number++) {
            if (source[number] < min) {
                index = number;
                min = source[number];
            }
        }
        target[0] = (uint32_t)index;
    }
}
250 const uint32_t quarterPoints = num_points / 4;
252 float* inputPtr = (
float*)source;
253 float32x4_t indexIncrementValues = vdupq_n_f32(4);
255 float currentIndexes_float[4] = { -4.0f, -3.0f, -2.0f, -1.0f };
256 float32x4_t currentIndexes = vld1q_f32(currentIndexes_float);
258 float min = source[0];
260 float32x4_t minValues = vdupq_n_f32(min);
261 uint32x4_t minValuesIndex = vmovq_n_u32(0);
262 uint32x4_t compareResults;
263 uint32x4_t currentIndexes_u;
264 float32x4_t currentValues;
269 for (uint32_t number = 0; number < quarterPoints; number++) {
270 currentValues = vld1q_f32(inputPtr);
272 currentIndexes = vaddq_f32(currentIndexes, indexIncrementValues);
273 currentIndexes_u = vcvtq_u32_f32(currentIndexes);
274 compareResults = vcgeq_f32(currentValues, minValues);
275 minValuesIndex = vorrq_u32(vandq_u32(compareResults, minValuesIndex),
276 vbicq_u32(currentIndexes_u, compareResults));
277 minValues = vminq_f32(currentValues, minValues);
281 vst1q_f32(minValuesBuffer, minValues);
282 vst1q_f32(minIndexesBuffer, vcvtq_f32_u32(minValuesIndex));
283 for (uint32_t number = 0; number < 4; number++) {
284 if (minValuesBuffer[number] < min) {
285 index = minIndexesBuffer[number];
286 min = minValuesBuffer[number];
287 }
else if (minValuesBuffer[number] == min) {
288 if (index > minIndexesBuffer[number])
289 index = minIndexesBuffer[number];
293 for (uint32_t number = quarterPoints * 4; number < num_points; number++) {
294 if (source[number] < min) {
296 min = source[number];
299 target[0] = (uint32_t)index;
310volk_32f_index_min_32u_neonv8(uint32_t* target,
const float* source, uint32_t num_points)
315 const uint32_t quarter_points = num_points / 4;
316 const float* inputPtr = source;
319 uint32x4_t vec_indices = { 0, 1, 2, 3 };
320 const uint32x4_t vec_incr = vdupq_n_u32(4);
322 float32x4_t vec_min = vdupq_n_f32(FLT_MAX);
323 uint32x4_t vec_min_idx = vdupq_n_u32(0);
325 for (uint32_t i = 0; i < quarter_points; i++) {
326 float32x4_t vec_val = vld1q_f32(inputPtr);
330 uint32x4_t lt_mask = vcltq_f32(vec_val, vec_min);
331 vec_min_idx = vbslq_u32(lt_mask, vec_indices, vec_min_idx);
334 vec_min = vminq_f32(vec_val, vec_min);
336 vec_indices = vaddq_u32(vec_indices, vec_incr);
340 float min_val = vminvq_f32(vec_min);
343 uint32x4_t min_mask = vceqq_f32(vec_min, vdupq_n_f32(min_val));
344 uint32x4_t idx_masked = vbslq_u32(min_mask, vec_min_idx, vdupq_n_u32(UINT32_MAX));
345 uint32_t result_idx = vminvq_u32(idx_masked);
348 for (uint32_t i = quarter_points * 4; i < num_points; i++) {
349 if (source[i] < min_val) {
355 *target = result_idx;
361#ifdef LV_HAVE_GENERIC
/*!
 * Find the index of the minimum value in \p source (portable scalar).
 *
 * \param target     output: receives the index of the smallest float.
 * \param source     input buffer of floats.
 * \param num_points number of floats to scan.
 *
 * Strict < comparison, so the first occurrence of the minimum wins.
 * Writes nothing when num_points == 0.
 */
static inline void volk_32f_index_min_32u_generic(uint32_t* target,
                                                  const float* source,
                                                  uint32_t num_points)
{
    if (num_points > 0) {
        float min = source[0];
        uint32_t index = 0;

        for (uint32_t i = 1; i < num_points; ++i) {
            if (source[i] < min) {
                index = i;
                min = source[i];
            }
        }
        target[0] = index;
    }
}
#ifdef LV_HAVE_AVX512F
#include <immintrin.h>

/*!
 * Find the index of the minimum value in \p source (AVX-512F, aligned).
 *
 * \param target     output: receives the index of the smallest float.
 * \param source     input buffer; must be 64-byte aligned for _mm512_load_ps.
 * \param num_points number of floats to scan.
 *
 * Sixteen lanes per iteration using mask-register blends.  Ties resolve
 * to the lowest index.
 * NOTE(review): lane indices are carried as floats, so indices are only
 * exact while num_points < 2^24 — confirm against callers.
 */
static inline void volk_32f_index_min_32u_a_avx512f(uint32_t* target,
                                                    const float* source,
                                                    uint32_t num_points)
{
    if (num_points > 0) {
        uint32_t number = 0;
        const uint32_t sixteenthPoints = num_points / 16;
        const float* inputPtr = source;

        __m512 indexIncrementValues = _mm512_set1_ps(16);
        /* Starts at -16..-1 so the first in-loop add yields lane indices 0..15. */
        __m512 currentIndexes = _mm512_set_ps(
            -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16);

        float min = source[0];
        float index = 0;
        __m512 minValues = _mm512_set1_ps(min);
        __m512 minValuesIndex = _mm512_setzero_ps();
        __mmask16 compareResults;
        __m512 currentValues;

        _Alignas(64) float minValuesBuffer[16];
        _Alignas(64) float minIndexesBuffer[16];

        for (; number < sixteenthPoints; number++) {
            currentValues = _mm512_load_ps(inputPtr);
            inputPtr += 16;
            currentIndexes = _mm512_add_ps(currentIndexes, indexIncrementValues);
            /* Per lane: keep the first strictly-smaller value and its index. */
            compareResults = _mm512_cmp_ps_mask(currentValues, minValues, _CMP_LT_OS);
            minValuesIndex =
                _mm512_mask_blend_ps(compareResults, minValuesIndex, currentIndexes);
            minValues = _mm512_mask_blend_ps(compareResults, minValues, currentValues);
        }

        /* Reduce the sixteen per-lane minima, preferring the lowest index on ties. */
        _mm512_store_ps(minValuesBuffer, minValues);
        _mm512_store_ps(minIndexesBuffer, minValuesIndex);

        for (number = 0; number < 16; number++) {
            if (minValuesBuffer[number] < min) {
                index = minIndexesBuffer[number];
                min = minValuesBuffer[number];
            } else if (minValuesBuffer[number] == min) {
                if (index > minIndexesBuffer[number])
                    index = minIndexesBuffer[number];
            }
        }

        /* Scalar tail for the 0-15 leftover points; first occurrence wins. */
        number = sixteenthPoints * 16;
        for (; number < num_points; number++) {
            if (source[number] < min) {
                index = number;
                min = source[number];
            }
        }
        target[0] = (uint32_t)index;
    }
}
#endif /*LV_HAVE_AVX512F*/
447#ifndef INCLUDED_volk_32f_index_min_32u_u_H
448#define INCLUDED_volk_32f_index_min_32u_u_H
#include <immintrin.h>

/*!
 * Find the index of the minimum value in \p source (AVX, unaligned).
 *
 * \param target     output: receives the index of the smallest float.
 * \param source     input buffer of floats (no alignment requirement).
 * \param num_points number of floats to scan.
 *
 * Identical to the aligned AVX variant except for _mm256_loadu_ps.
 * Ties resolve to the lowest index.
 * NOTE(review): lane indices are carried as floats, so indices are only
 * exact while num_points < 2^24 — confirm against callers.
 */
static inline void volk_32f_index_min_32u_u_avx(uint32_t* target,
                                                const float* source,
                                                uint32_t num_points)
{
    if (num_points > 0) {
        const uint32_t eighthPoints = num_points / 8;
        const float* inputPtr = source;

        __m256 indexIncrementValues = _mm256_set1_ps(8);
        /* Starts at -8..-1 so the first in-loop add yields lane indices 0..7. */
        __m256 currentIndexes = _mm256_set_ps(-1, -2, -3, -4, -5, -6, -7, -8);

        float min = source[0];
        float index = 0;
        __m256 minValues = _mm256_set1_ps(min);
        __m256 minValuesIndex = _mm256_setzero_ps();
        __m256 compareResults;
        __m256 currentValues;

        /* Local scratch is aligned, so the aligned stores below are safe. */
        _Alignas(32) float minValuesBuffer[8];
        _Alignas(32) float minIndexesBuffer[8];

        for (uint32_t number = 0; number < eighthPoints; number++) {
            currentValues = _mm256_loadu_ps(inputPtr);
            inputPtr += 8;
            currentIndexes = _mm256_add_ps(currentIndexes, indexIncrementValues);
            /* Per lane: keep the first strictly-smaller value and its index. */
            compareResults = _mm256_cmp_ps(currentValues, minValues, _CMP_LT_OS);
            minValuesIndex = _mm256_blendv_ps(minValuesIndex, currentIndexes, compareResults);
            minValues = _mm256_blendv_ps(minValues, currentValues, compareResults);
        }

        /* Reduce the eight per-lane minima, preferring the lowest index on ties. */
        _mm256_store_ps(minValuesBuffer, minValues);
        _mm256_store_ps(minIndexesBuffer, minValuesIndex);

        for (uint32_t number = 0; number < 8; number++) {
            if (minValuesBuffer[number] < min) {
                index = minIndexesBuffer[number];
                min = minValuesBuffer[number];
            } else if (minValuesBuffer[number] == min) {
                if (index > minIndexesBuffer[number])
                    index = minIndexesBuffer[number];
            }
        }

        /* Scalar tail for the 0-7 leftover points; first occurrence wins. */
        for (uint32_t number = eighthPoints * 8; number < num_points; number++) {
            if (source[number] < min) {
                index = number;
                min = source[number];
            }
        }
        target[0] = (uint32_t)index;
    }
}
#include <smmintrin.h>

/*!
 * Find the index of the minimum value in \p source (SSE4.1, unaligned).
 *
 * \param target     output: receives the index of the smallest float.
 * \param source     input buffer of floats (no alignment requirement).
 * \param num_points number of floats to scan.
 *
 * Identical to the aligned SSE4.1 variant except for _mm_loadu_ps.
 * Ties resolve to the lowest index.
 * NOTE(review): lane indices are carried as floats, so indices are only
 * exact while num_points < 2^24 — confirm against callers.
 */
static inline void volk_32f_index_min_32u_u_sse4_1(uint32_t* target,
                                                   const float* source,
                                                   uint32_t num_points)
{
    if (num_points > 0) {
        const uint32_t quarterPoints = num_points / 4;
        const float* inputPtr = source;

        __m128 indexIncrementValues = _mm_set1_ps(4);
        /* Starts at -4..-1 so the first in-loop add yields lane indices 0..3. */
        __m128 currentIndexes = _mm_set_ps(-1, -2, -3, -4);

        float min = source[0];
        float index = 0;
        __m128 minValues = _mm_set1_ps(min);
        __m128 minValuesIndex = _mm_setzero_ps();
        __m128 compareResults;
        __m128 currentValues;

        /* Local scratch is aligned, so the aligned stores below are safe. */
        _Alignas(16) float minValuesBuffer[4];
        _Alignas(16) float minIndexesBuffer[4];

        for (uint32_t number = 0; number < quarterPoints; number++) {
            currentValues = _mm_loadu_ps(inputPtr);
            inputPtr += 4;
            currentIndexes = _mm_add_ps(currentIndexes, indexIncrementValues);
            /* Per lane: keep the first strictly-smaller value and its index. */
            compareResults = _mm_cmplt_ps(currentValues, minValues);
            minValuesIndex = _mm_blendv_ps(minValuesIndex, currentIndexes, compareResults);
            minValues = _mm_blendv_ps(minValues, currentValues, compareResults);
        }

        /* Reduce the four per-lane minima, preferring the lowest index on ties. */
        _mm_store_ps(minValuesBuffer, minValues);
        _mm_store_ps(minIndexesBuffer, minValuesIndex);

        for (uint32_t number = 0; number < 4; number++) {
            if (minValuesBuffer[number] < min) {
                index = minIndexesBuffer[number];
                min = minValuesBuffer[number];
            } else if (minValuesBuffer[number] == min) {
                if (index > minIndexesBuffer[number])
                    index = minIndexesBuffer[number];
            }
        }

        /* Scalar tail for the 0-3 leftover points; first occurrence wins. */
        for (uint32_t number = quarterPoints * 4; number < num_points; number++) {
            if (source[number] < min) {
                index = number;
                min = source[number];
            }
        }
        target[0] = (uint32_t)index;
    }
}
#include <xmmintrin.h>

/*!
 * Find the index of the minimum value in \p source (SSE, unaligned).
 *
 * \param target     output: receives the index of the smallest float.
 * \param source     input buffer of floats (no alignment requirement).
 * \param num_points number of floats to scan.
 *
 * Identical to the aligned SSE variant except for _mm_loadu_ps; the
 * blend is emulated via and/andnot/or (no _mm_blendv_ps before SSE4.1).
 * Ties resolve to the lowest index.
 * NOTE(review): lane indices are carried as floats, so indices are only
 * exact while num_points < 2^24 — confirm against callers.
 */
static inline void volk_32f_index_min_32u_u_sse(uint32_t* target,
                                                const float* source,
                                                uint32_t num_points)
{
    if (num_points > 0) {
        const uint32_t quarterPoints = num_points / 4;
        const float* inputPtr = source;

        __m128 indexIncrementValues = _mm_set1_ps(4);
        /* Starts at -4..-1 so the first in-loop add yields lane indices 0..3. */
        __m128 currentIndexes = _mm_set_ps(-1, -2, -3, -4);

        float min = source[0];
        float index = 0;
        __m128 minValues = _mm_set1_ps(min);
        __m128 minValuesIndex = _mm_setzero_ps();
        __m128 compareResults;
        __m128 currentValues;

        /* Local scratch is aligned, so the aligned stores below are safe. */
        _Alignas(16) float minValuesBuffer[4];
        _Alignas(16) float minIndexesBuffer[4];

        for (uint32_t number = 0; number < quarterPoints; number++) {
            currentValues = _mm_loadu_ps(inputPtr);
            inputPtr += 4;
            currentIndexes = _mm_add_ps(currentIndexes, indexIncrementValues);
            compareResults = _mm_cmplt_ps(currentValues, minValues);
            /* Emulated blend: take new lane where compare is true, else old. */
            minValuesIndex = _mm_or_ps(_mm_and_ps(compareResults, currentIndexes),
                                       _mm_andnot_ps(compareResults, minValuesIndex));
            minValues = _mm_or_ps(_mm_and_ps(compareResults, currentValues),
                                  _mm_andnot_ps(compareResults, minValues));
        }

        /* Reduce the four per-lane minima, preferring the lowest index on ties. */
        _mm_store_ps(minValuesBuffer, minValues);
        _mm_store_ps(minIndexesBuffer, minValuesIndex);

        for (uint32_t number = 0; number < 4; number++) {
            if (minValuesBuffer[number] < min) {
                index = minIndexesBuffer[number];
                min = minValuesBuffer[number];
            } else if (minValuesBuffer[number] == min) {
                if (index > minIndexesBuffer[number])
                    index = minIndexesBuffer[number];
            }
        }

        /* Scalar tail for the 0-3 leftover points; first occurrence wins. */
        for (uint32_t number = quarterPoints * 4; number < num_points; number++) {
            if (source[number] < min) {
                index = number;
                min = source[number];
            }
        }
        target[0] = (uint32_t)index;
    }
}
#ifdef LV_HAVE_AVX512F
#include <immintrin.h>

/*!
 * Find the index of the minimum value in \p source (AVX-512F, unaligned).
 *
 * \param target     output: receives the index of the smallest float.
 * \param source     input buffer of floats (no alignment requirement).
 * \param num_points number of floats to scan.
 *
 * Identical to the aligned AVX-512F variant except for _mm512_loadu_ps.
 * Ties resolve to the lowest index.
 * NOTE(review): lane indices are carried as floats, so indices are only
 * exact while num_points < 2^24 — confirm against callers.
 */
static inline void volk_32f_index_min_32u_u_avx512f(uint32_t* target,
                                                    const float* source,
                                                    uint32_t num_points)
{
    if (num_points > 0) {
        uint32_t number = 0;
        const uint32_t sixteenthPoints = num_points / 16;
        const float* inputPtr = source;

        __m512 indexIncrementValues = _mm512_set1_ps(16);
        /* Starts at -16..-1 so the first in-loop add yields lane indices 0..15. */
        __m512 currentIndexes = _mm512_set_ps(
            -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16);

        float min = source[0];
        float index = 0;
        __m512 minValues = _mm512_set1_ps(min);
        __m512 minValuesIndex = _mm512_setzero_ps();
        __mmask16 compareResults;
        __m512 currentValues;

        /* Local scratch is aligned, so the aligned stores below are safe. */
        _Alignas(64) float minValuesBuffer[16];
        _Alignas(64) float minIndexesBuffer[16];

        for (; number < sixteenthPoints; number++) {
            currentValues = _mm512_loadu_ps(inputPtr);
            inputPtr += 16;
            currentIndexes = _mm512_add_ps(currentIndexes, indexIncrementValues);
            /* Per lane: keep the first strictly-smaller value and its index. */
            compareResults = _mm512_cmp_ps_mask(currentValues, minValues, _CMP_LT_OS);
            minValuesIndex =
                _mm512_mask_blend_ps(compareResults, minValuesIndex, currentIndexes);
            minValues = _mm512_mask_blend_ps(compareResults, minValues, currentValues);
        }

        /* Reduce the sixteen per-lane minima, preferring the lowest index on ties. */
        _mm512_store_ps(minValuesBuffer, minValues);
        _mm512_store_ps(minIndexesBuffer, minValuesIndex);

        for (number = 0; number < 16; number++) {
            if (minValuesBuffer[number] < min) {
                index = minIndexesBuffer[number];
                min = minValuesBuffer[number];
            } else if (minValuesBuffer[number] == min) {
                if (index > minIndexesBuffer[number])
                    index = minIndexesBuffer[number];
            }
        }

        /* Scalar tail for the 0-15 leftover points; first occurrence wins. */
        number = sixteenthPoints * 16;
        for (; number < num_points; number++) {
            if (source[number] < min) {
                index = number;
                min = source[number];
            }
        }
        target[0] = (uint32_t)index;
    }
}
#endif /*LV_HAVE_AVX512F*/
696#include <riscv_vector.h>
699volk_32f_index_min_32u_rvv(uint32_t* target,
const float* src0, uint32_t num_points)
701 vfloat32m4_t vmin = __riscv_vfmv_v_f_f32m4(FLT_MAX, __riscv_vsetvlmax_e32m4());
702 vuint32m4_t vmini = __riscv_vmv_v_x_u32m4(0, __riscv_vsetvlmax_e32m4());
703 vuint32m4_t vidx = __riscv_vid_v_u32m4(__riscv_vsetvlmax_e32m4());
704 size_t n = num_points;
705 for (
size_t vl; n > 0; n -= vl, src0 += vl) {
706 vl = __riscv_vsetvl_e32m4(n);
707 vfloat32m4_t v = __riscv_vle32_v_f32m4(src0, vl);
708 vbool8_t m = __riscv_vmflt(v, vmin, vl);
709 vmin = __riscv_vfmin_tu(vmin, vmin, v, vl);
710 vmini = __riscv_vmerge_tu(vmini, vmini, vidx, m, vl);
711 vidx = __riscv_vadd(vidx, vl, __riscv_vsetvlmax_e32m4());
713 size_t vl = __riscv_vsetvlmax_e32m4();
714 float min = __riscv_vfmv_f(__riscv_vfredmin(
RISCV_SHRINK4(vfmin, f, 32, vmin),
715 __riscv_vfmv_v_f_f32m1(FLT_MAX, 1),
716 __riscv_vsetvlmax_e32m1()));
718 vbool8_t m = __riscv_vmfeq(vmin, min, vl);
719 vuint32m4_t idx_masked =
720 __riscv_vmerge(__riscv_vmv_v_x_u32m4(UINT32_MAX, vl), vmini, m, vl);
722 *target = __riscv_vmv_x(__riscv_vredminu(
RISCV_SHRINK4(vminu, u, 32, idx_masked),
723 __riscv_vmv_v_x_u32m1(UINT32_MAX, 1),
724 __riscv_vsetvlmax_e32m1()));