58#ifndef INCLUDED_volk_32f_index_min_16u_a_H
59#define INCLUDED_volk_32f_index_min_16u_a_H
static inline void
volk_32f_index_min_16u_a_avx(uint16_t* target, const float* source, uint32_t num_points)
{
    /* Returns (in *target) the index of the smallest value in source.
     * The index must fit a uint16_t, so only the first USHRT_MAX points
     * are searched.  NOTE(review): assumes num_points >= 1 — source[0] is
     * read unconditionally; confirm against the VOLK kernel contract. */
    num_points = (num_points > USHRT_MAX) ? USHRT_MAX : num_points;
    const uint32_t eighthPoints = num_points / 8;

    const float* inputPtr = source;

    __m256 indexIncrementValues = _mm256_set1_ps(8);
    /* Seeded negative so the first in-loop add yields lane indices 0..7. */
    __m256 currentIndexes = _mm256_set_ps(-1, -2, -3, -4, -5, -6, -7, -8);

    float min = source[0];
    float index = 0; /* float: indices <= USHRT_MAX are exactly representable */
    __m256 minValues = _mm256_set1_ps(min);
    __m256 minValuesIndex = _mm256_setzero_ps();
    __m256 compareResults;
    __m256 currentValues;

    _Alignas(32) float minValuesBuffer[8];
    _Alignas(32) float minIndexesBuffer[8];

    /* Per-lane running minimum plus the index where it was seen. */
    for (uint32_t number = 0; number < eighthPoints; number++) {
        currentValues = _mm256_load_ps(inputPtr);
        inputPtr += 8;
        currentIndexes = _mm256_add_ps(currentIndexes, indexIncrementValues);
        compareResults = _mm256_cmp_ps(currentValues, minValues, _CMP_LT_OS);
        minValuesIndex = _mm256_blendv_ps(minValuesIndex, currentIndexes, compareResults);
        minValues = _mm256_blendv_ps(minValues, currentValues, compareResults);
    }

    /* Horizontal reduction over the 8 lanes; ties keep the lowest index. */
    _mm256_store_ps(minValuesBuffer, minValues);
    _mm256_store_ps(minIndexesBuffer, minValuesIndex);

    for (uint32_t number = 0; number < 8; number++) {
        if (minValuesBuffer[number] < min) {
            index = minIndexesBuffer[number];
            min = minValuesBuffer[number];
        } else if (minValuesBuffer[number] == min) {
            if (index > minIndexesBuffer[number])
                index = minIndexesBuffer[number];
        }
    }

    /* Scalar tail for the remaining num_points % 8 elements. */
    for (uint32_t number = eighthPoints * 8; number < num_points; number++) {
        if (source[number] < min) {
            index = number;
            min = source[number];
        }
    }

    target[0] = (uint16_t)index;
}
128#include <smmintrin.h>
static inline void volk_32f_index_min_16u_a_sse4_1(uint16_t* target,
                                                   const float* source,
                                                   uint32_t num_points)
{
    /* Index of the minimum of source[0..num_points), written to *target.
     * Clamped to USHRT_MAX points so the index fits a uint16_t.
     * NOTE(review): assumes num_points >= 1 — source[0] is read first. */
    num_points = (num_points > USHRT_MAX) ? USHRT_MAX : num_points;
    const uint32_t quarterPoints = num_points / 4;

    const float* inputPtr = source;

    __m128 indexIncrementValues = _mm_set1_ps(4);
    /* Seeded negative so the first in-loop add yields lane indices 0..3. */
    __m128 currentIndexes = _mm_set_ps(-1, -2, -3, -4);

    float min = source[0];
    float index = 0;
    __m128 minValues = _mm_set1_ps(min);
    __m128 minValuesIndex = _mm_setzero_ps();
    __m128 compareResults;
    __m128 currentValues;

    _Alignas(16) float minValuesBuffer[4];
    _Alignas(16) float minIndexesBuffer[4];

    /* Per-lane running minimum; SSE4.1 blendv selects value and index. */
    for (uint32_t number = 0; number < quarterPoints; number++) {
        currentValues = _mm_load_ps(inputPtr);
        inputPtr += 4;
        currentIndexes = _mm_add_ps(currentIndexes, indexIncrementValues);
        compareResults = _mm_cmplt_ps(currentValues, minValues);
        minValuesIndex = _mm_blendv_ps(minValuesIndex, currentIndexes, compareResults);
        minValues = _mm_blendv_ps(minValues, currentValues, compareResults);
    }

    /* Horizontal reduction over the 4 lanes; ties keep the lowest index. */
    _mm_store_ps(minValuesBuffer, minValues);
    _mm_store_ps(minIndexesBuffer, minValuesIndex);

    for (uint32_t number = 0; number < 4; number++) {
        if (minValuesBuffer[number] < min) {
            index = minIndexesBuffer[number];
            min = minValuesBuffer[number];
        } else if (minValuesBuffer[number] == min) {
            if (index > minIndexesBuffer[number])
                index = minIndexesBuffer[number];
        }
    }

    /* Scalar tail for the remaining num_points % 4 elements. */
    for (uint32_t number = quarterPoints * 4; number < num_points; number++) {
        if (source[number] < min) {
            index = number;
            min = source[number];
        }
    }

    target[0] = (uint16_t)index;
}
192#include <xmmintrin.h>
static inline void
volk_32f_index_min_16u_a_sse(uint16_t* target, const float* source, uint32_t num_points)
{
    /* Index of the minimum of source[0..num_points), written to *target.
     * Clamped to USHRT_MAX points so the index fits a uint16_t.
     * NOTE(review): assumes num_points >= 1 — source[0] is read first. */
    num_points = (num_points > USHRT_MAX) ? USHRT_MAX : num_points;
    const uint32_t quarterPoints = num_points / 4;

    const float* inputPtr = source;

    __m128 indexIncrementValues = _mm_set1_ps(4);
    /* Seeded negative so the first in-loop add yields lane indices 0..3. */
    __m128 currentIndexes = _mm_set_ps(-1, -2, -3, -4);

    float min = source[0];
    float index = 0;
    __m128 minValues = _mm_set1_ps(min);
    __m128 minValuesIndex = _mm_setzero_ps();
    __m128 compareResults;
    __m128 currentValues;

    _Alignas(16) float minValuesBuffer[4];
    _Alignas(16) float minIndexesBuffer[4];

    /* Per-lane running minimum; no blendv in plain SSE, so select with
     * the classic and/andnot/or mask idiom. */
    for (uint32_t number = 0; number < quarterPoints; number++) {
        currentValues = _mm_load_ps(inputPtr);
        inputPtr += 4;
        currentIndexes = _mm_add_ps(currentIndexes, indexIncrementValues);
        compareResults = _mm_cmplt_ps(currentValues, minValues);
        minValuesIndex = _mm_or_ps(_mm_and_ps(compareResults, currentIndexes),
                                   _mm_andnot_ps(compareResults, minValuesIndex));
        minValues = _mm_or_ps(_mm_and_ps(compareResults, currentValues),
                              _mm_andnot_ps(compareResults, minValues));
    }

    /* Horizontal reduction over the 4 lanes; ties keep the lowest index. */
    _mm_store_ps(minValuesBuffer, minValues);
    _mm_store_ps(minIndexesBuffer, minValuesIndex);

    for (uint32_t number = 0; number < 4; number++) {
        if (minValuesBuffer[number] < min) {
            index = minIndexesBuffer[number];
            min = minValuesBuffer[number];
        } else if (minValuesBuffer[number] == min) {
            if (index > minIndexesBuffer[number])
                index = minIndexesBuffer[number];
        }
    }

    /* Scalar tail for the remaining num_points % 4 elements. */
    for (uint32_t number = quarterPoints * 4; number < num_points; number++) {
        if (source[number] < min) {
            index = number;
            min = source[number];
        }
    }

    target[0] = (uint16_t)index;
}
static inline void
volk_32f_index_min_16u_neon(uint16_t* target, const float* source, uint32_t num_points)
{
    /* Index of the minimum of source[0..num_points), written to *target.
     * Clamped to USHRT_MAX points so the index fits a uint16_t. */
    num_points = (num_points > USHRT_MAX) ? USHRT_MAX : num_points;
    const uint32_t quarter_points = num_points / 4;
    const float* inputPtr = source;

    uint32x4_t vec_indices = { 0, 1, 2, 3 };
    const uint32x4_t vec_incr = vdupq_n_u32(4);

    /* Start from FLT_MAX so lanes that never see data cannot win. */
    float32x4_t vec_min = vdupq_n_f32(FLT_MAX);
    uint32x4_t vec_min_idx = vdupq_n_u32(0);

    for (uint32_t i = 0; i < quarter_points; i++) {
        float32x4_t vec_val = vld1q_f32(inputPtr);
        inputPtr += 4;
        /* Strict less-than: on ties the earlier (lower) index is kept. */
        uint32x4_t lt_mask = vcltq_f32(vec_val, vec_min);
        vec_min_idx = vbslq_u32(lt_mask, vec_indices, vec_min_idx);
        vec_min = vminq_f32(vec_val, vec_min);
        vec_indices = vaddq_u32(vec_indices, vec_incr);
    }

    /* Scalar horizontal reduction over the 4 lanes. */
    float min_buf[4];
    uint32_t idx_buf[4];
    vst1q_f32(min_buf, vec_min);
    vst1q_u32(idx_buf, vec_min_idx);

    float min_val = min_buf[0];
    uint32_t result_idx = idx_buf[0];
    for (int i = 1; i < 4; i++) {
        if (min_buf[i] < min_val) {
            min_val = min_buf[i];
            result_idx = idx_buf[i];
        } else if (min_buf[i] == min_val && idx_buf[i] < result_idx) {
            result_idx = idx_buf[i];
        }
    }

    /* Scalar tail for the remaining num_points % 4 elements. */
    for (uint32_t i = quarter_points * 4; i < num_points; i++) {
        if (source[i] < min_val) {
            min_val = source[i];
            result_idx = i;
        }
    }

    *target = (uint16_t)result_idx;
}
static inline void
volk_32f_index_min_16u_neonv8(uint16_t* target, const float* source, uint32_t num_points)
{
    /* Index of the minimum of source[0..num_points), written to *target.
     * Clamped to USHRT_MAX points so the index fits a uint16_t. */
    num_points = (num_points > USHRT_MAX) ? USHRT_MAX : num_points;
    const uint32_t quarter_points = num_points / 4;
    const float* inputPtr = source;

    uint32x4_t vec_indices = { 0, 1, 2, 3 };
    const uint32x4_t vec_incr = vdupq_n_u32(4);

    /* Start from FLT_MAX so lanes that never see data cannot win. */
    float32x4_t vec_min = vdupq_n_f32(FLT_MAX);
    uint32x4_t vec_min_idx = vdupq_n_u32(0);

    for (uint32_t i = 0; i < quarter_points; i++) {
        float32x4_t vec_val = vld1q_f32(inputPtr);
        inputPtr += 4;
        /* Strict less-than: on ties the earlier (lower) index is kept. */
        uint32x4_t lt_mask = vcltq_f32(vec_val, vec_min);
        vec_min_idx = vbslq_u32(lt_mask, vec_indices, vec_min_idx);
        vec_min = vminq_f32(vec_val, vec_min);
        vec_indices = vaddq_u32(vec_indices, vec_incr);
    }

    /* ARMv8 horizontal reduce: overall minimum, then the lowest index
     * among the lanes that hold it (non-minimum lanes masked to
     * UINT32_MAX so they lose the unsigned-min reduction). */
    float min_val = vminvq_f32(vec_min);
    uint32x4_t min_mask = vceqq_f32(vec_min, vdupq_n_f32(min_val));
    uint32x4_t idx_masked = vbslq_u32(min_mask, vec_min_idx, vdupq_n_u32(UINT32_MAX));
    uint32_t result_idx = vminvq_u32(idx_masked);

    /* Scalar tail for the remaining num_points % 4 elements. */
    for (uint32_t i = quarter_points * 4; i < num_points; i++) {
        if (source[i] < min_val) {
            min_val = source[i];
            result_idx = i;
        }
    }

    *target = (uint16_t)result_idx;
}
380#ifdef LV_HAVE_GENERIC
static inline void
volk_32f_index_min_16u_generic(uint16_t* target, const float* source, uint32_t num_points)
{
    /* Portable reference: linear scan for the index of the smallest value.
     * Clamped to USHRT_MAX points so the index fits a uint16_t; a strict
     * less-than keeps the first occurrence on ties.
     * NOTE(review): assumes num_points >= 1 — source[0] is read first. */
    num_points = (num_points > USHRT_MAX) ? USHRT_MAX : num_points;

    float min = source[0];
    uint16_t index = 0;

    for (uint32_t i = 1; i < num_points; ++i) {
        if (source[i] < min) {
            min = source[i];
            index = (uint16_t)i;
        }
    }

    target[0] = index;
}
401#ifdef LV_HAVE_AVX512F
402#include <immintrin.h>
static inline void volk_32f_index_min_16u_a_avx512f(uint16_t* target,
                                                    const float* source,
                                                    uint32_t num_points)
{
    /* Index of the minimum of source[0..num_points), written to *target.
     * Clamped to USHRT_MAX points so the index fits a uint16_t.
     * NOTE(review): assumes num_points >= 1 — source[0] is read first. */
    num_points = (num_points > USHRT_MAX) ? USHRT_MAX : num_points;
    const uint32_t sixteenthPoints = num_points / 16;

    const float* inputPtr = source;

    __m512 indexIncrementValues = _mm512_set1_ps(16);
    /* Seeded negative so the first in-loop add yields lane indices 0..15. */
    __m512 currentIndexes = _mm512_set_ps(
        -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16);

    float min = source[0];
    float index = 0;
    __m512 minValues = _mm512_set1_ps(min);
    __m512 minValuesIndex = _mm512_setzero_ps();
    __mmask16 compareResults;
    __m512 currentValues;

    _Alignas(64) float minValuesBuffer[16];
    _Alignas(64) float minIndexesBuffer[16];

    uint32_t number = 0;
    /* Per-lane running minimum using AVX-512 mask registers. */
    for (; number < sixteenthPoints; number++) {
        currentValues = _mm512_load_ps(inputPtr);
        inputPtr += 16;
        currentIndexes = _mm512_add_ps(currentIndexes, indexIncrementValues);
        compareResults = _mm512_cmp_ps_mask(currentValues, minValues, _CMP_LT_OS);
        minValuesIndex =
            _mm512_mask_blend_ps(compareResults, minValuesIndex, currentIndexes);
        minValues = _mm512_mask_blend_ps(compareResults, minValues, currentValues);
    }

    /* Horizontal reduction over the 16 lanes; ties keep the lowest index. */
    _mm512_store_ps(minValuesBuffer, minValues);
    _mm512_store_ps(minIndexesBuffer, minValuesIndex);

    for (number = 0; number < 16; number++) {
        if (minValuesBuffer[number] < min) {
            index = minIndexesBuffer[number];
            min = minValuesBuffer[number];
        } else if (minValuesBuffer[number] == min) {
            if (index > minIndexesBuffer[number])
                index = minIndexesBuffer[number];
        }
    }

    /* Scalar tail for the remaining num_points % 16 elements. */
    number = sixteenthPoints * 16;
    for (; number < num_points; number++) {
        if (source[number] < min) {
            index = number;
            min = source[number];
        }
    }

    target[0] = (uint16_t)index;
}
469#ifndef INCLUDED_volk_32f_index_min_16u_u_H
470#define INCLUDED_volk_32f_index_min_16u_u_H
478#include <immintrin.h>
static inline void
volk_32f_index_min_16u_u_avx(uint16_t* target, const float* source, uint32_t num_points)
{
    /* Unaligned-load variant of the AVX kernel: index of the minimum of
     * source[0..num_points), written to *target.  Clamped to USHRT_MAX
     * points so the index fits a uint16_t.
     * NOTE(review): assumes num_points >= 1 — source[0] is read first. */
    num_points = (num_points > USHRT_MAX) ? USHRT_MAX : num_points;
    const uint32_t eighthPoints = num_points / 8;

    const float* inputPtr = source;

    __m256 indexIncrementValues = _mm256_set1_ps(8);
    /* Seeded negative so the first in-loop add yields lane indices 0..7. */
    __m256 currentIndexes = _mm256_set_ps(-1, -2, -3, -4, -5, -6, -7, -8);

    float min = source[0];
    float index = 0;
    __m256 minValues = _mm256_set1_ps(min);
    __m256 minValuesIndex = _mm256_setzero_ps();
    __m256 compareResults;
    __m256 currentValues;

    float minValuesBuffer[8];
    float minIndexesBuffer[8];

    /* Per-lane running minimum plus the index where it was seen. */
    for (uint32_t number = 0; number < eighthPoints; number++) {
        currentValues = _mm256_loadu_ps(inputPtr);
        inputPtr += 8;
        currentIndexes = _mm256_add_ps(currentIndexes, indexIncrementValues);
        compareResults = _mm256_cmp_ps(currentValues, minValues, _CMP_LT_OS);
        minValuesIndex = _mm256_blendv_ps(minValuesIndex, currentIndexes, compareResults);
        minValues = _mm256_blendv_ps(minValues, currentValues, compareResults);
    }

    /* Horizontal reduction over the 8 lanes; ties keep the lowest index. */
    _mm256_storeu_ps(minValuesBuffer, minValues);
    _mm256_storeu_ps(minIndexesBuffer, minValuesIndex);

    for (uint32_t number = 0; number < 8; number++) {
        if (minValuesBuffer[number] < min) {
            index = minIndexesBuffer[number];
            min = minValuesBuffer[number];
        } else if (minValuesBuffer[number] == min) {
            if (index > minIndexesBuffer[number])
                index = minIndexesBuffer[number];
        }
    }

    /* Scalar tail for the remaining num_points % 8 elements. */
    for (uint32_t number = eighthPoints * 8; number < num_points; number++) {
        if (source[number] < min) {
            index = number;
            min = source[number];
        }
    }

    target[0] = (uint16_t)index;
}
538#ifdef LV_HAVE_AVX512F
539#include <immintrin.h>
static inline void volk_32f_index_min_16u_u_avx512f(uint16_t* target,
                                                    const float* source,
                                                    uint32_t num_points)
{
    /* Unaligned-load variant of the AVX-512 kernel: index of the minimum
     * of source[0..num_points), written to *target.  Clamped to USHRT_MAX
     * points so the index fits a uint16_t.
     * NOTE(review): assumes num_points >= 1 — source[0] is read first. */
    num_points = (num_points > USHRT_MAX) ? USHRT_MAX : num_points;
    const uint32_t sixteenthPoints = num_points / 16;

    const float* inputPtr = source;

    __m512 indexIncrementValues = _mm512_set1_ps(16);
    /* Seeded negative so the first in-loop add yields lane indices 0..15. */
    __m512 currentIndexes = _mm512_set_ps(
        -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16);

    float min = source[0];
    float index = 0;
    __m512 minValues = _mm512_set1_ps(min);
    __m512 minValuesIndex = _mm512_setzero_ps();
    __mmask16 compareResults;
    __m512 currentValues;

    /* The reduction buffers are local, so they can stay aligned even
     * though the input loads are unaligned. */
    _Alignas(64) float minValuesBuffer[16];
    _Alignas(64) float minIndexesBuffer[16];

    uint32_t number = 0;
    /* Per-lane running minimum using AVX-512 mask registers. */
    for (; number < sixteenthPoints; number++) {
        currentValues = _mm512_loadu_ps(inputPtr);
        inputPtr += 16;
        currentIndexes = _mm512_add_ps(currentIndexes, indexIncrementValues);
        compareResults = _mm512_cmp_ps_mask(currentValues, minValues, _CMP_LT_OS);
        minValuesIndex =
            _mm512_mask_blend_ps(compareResults, minValuesIndex, currentIndexes);
        minValues = _mm512_mask_blend_ps(compareResults, minValues, currentValues);
    }

    /* Horizontal reduction over the 16 lanes; ties keep the lowest index. */
    _mm512_store_ps(minValuesBuffer, minValues);
    _mm512_store_ps(minIndexesBuffer, minValuesIndex);

    for (number = 0; number < 16; number++) {
        if (minValuesBuffer[number] < min) {
            index = minIndexesBuffer[number];
            min = minValuesBuffer[number];
        } else if (minValuesBuffer[number] == min) {
            if (index > minIndexesBuffer[number])
                index = minIndexesBuffer[number];
        }
    }

    /* Scalar tail for the remaining num_points % 16 elements. */
    number = sixteenthPoints * 16;
    for (; number < num_points; number++) {
        if (source[number] < min) {
            index = number;
            min = source[number];
        }
    }

    target[0] = (uint16_t)index;
}
605#include <riscv_vector.h>
static inline void
volk_32f_index_min_16u_rvv(uint16_t* target, const float* src0, uint32_t num_points)
{
    /* RISC-V Vector variant: index of the minimum of src0[0..num_points),
     * written to *target.  Clamped to USHRT_MAX points so the index fits
     * a uint16_t.  NOTE(review): RISCV_SHRINK8/RISCV_SHRINK4 are VOLK
     * helper macros (volk_rvv_intrinsics.h) that fold an LMUL=8/4 register
     * group down to LMUL=1 for the final reduction — confirm they are in
     * scope via the file's RVV support header. */
    vfloat32m8_t vmin = __riscv_vfmv_v_f_f32m8(FLT_MAX, __riscv_vsetvlmax_e32m8());
    vuint16m4_t vmini = __riscv_vmv_v_x_u16m4(0, __riscv_vsetvlmax_e16m4());
    vuint16m4_t vidx = __riscv_vid_v_u16m4(__riscv_vsetvlmax_e16m4());
    size_t n = (num_points > USHRT_MAX) ? USHRT_MAX : num_points;
    /* Strip-mine over the input; tail-undisturbed (_tu) ops keep lanes
     * beyond vl untouched so short final strips cannot corrupt state. */
    for (size_t vl; n > 0; n -= vl, src0 += vl) {
        vl = __riscv_vsetvl_e32m8(n);
        vfloat32m8_t v = __riscv_vle32_v_f32m8(src0, vl);
        /* Strict less-than: on ties the earlier (lower) index is kept. */
        vbool4_t m = __riscv_vmflt(v, vmin, vl);
        vmin = __riscv_vfmin_tu(vmin, vmin, v, vl);
        vmini = __riscv_vmerge_tu(vmini, vmini, vidx, m, vl);
        vidx = __riscv_vadd(vidx, vl, __riscv_vsetvlmax_e16m4());
    }
    size_t vl = __riscv_vsetvlmax_e32m8();
    /* Reduce to the overall minimum value... */
    float min = __riscv_vfmv_f(__riscv_vfredmin(RISCV_SHRINK8(vfmin, f, 32, vmin),
                                                __riscv_vfmv_v_f_f32m1(FLT_MAX, 1),
                                                __riscv_vsetvlmax_e32m1()));
    /* ...then take the smallest index among lanes holding that minimum
     * (other lanes are masked to UINT16_MAX so they lose the reduction). */
    vbool4_t m = __riscv_vmfeq(vmin, min, vl);
    vuint16m4_t idx_masked = __riscv_vmerge(
        __riscv_vmv_v_x_u16m4(UINT16_MAX, __riscv_vsetvlmax_e16m4()), vmini, m, vl);
    *target = __riscv_vmv_x(__riscv_vredminu(RISCV_SHRINK4(vminu, u, 16, idx_masked),
                                             __riscv_vmv_v_x_u16m1(UINT16_MAX, 1),
                                             __riscv_vsetvlmax_e16m1()));
}