#ifndef INCLUDED_volk_32fc_index_max_32u_a_H
#define INCLUDED_volk_32fc_index_max_32u_a_H

#include <inttypes.h>
#include <volk/volk_common.h>
#include <volk/volk_complex.h>
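/*!
 * \b Overview
 *
 * Finds the index of the complex sample with the largest magnitude squared
 * (re*re + im*im) in the input vector and writes it to \p target.
 *
 * A minimal usage sketch follows. It is illustrative only: the buffer size,
 * the fill loop and the sample values are assumptions, not taken from this
 * file; volk_32fc_index_max_32u(), volk_malloc(), volk_get_alignment() and
 * volk_free() are the usual VOLK entry points.
 *
 * \code
 *   #include <volk/volk.h>
 *
 *   unsigned int N = 1024;
 *   uint32_t index = 0;
 *   lv_32fc_t* in = (lv_32fc_t*)volk_malloc(sizeof(lv_32fc_t) * N, volk_get_alignment());
 *   for (unsigned int i = 0; i < N; ++i) {
 *       in[i] = lv_cmake((float)i, (float)-i); // example data
 *   }
 *   volk_32fc_index_max_32u(&index, in, N); // index of the largest |z|^2
 *   volk_free(in);
 * \endcode
 */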
#ifdef LV_HAVE_AVX2
#include <immintrin.h>
#include <volk/volk_avx2_intrinsics.h>

static inline void volk_32fc_index_max_32u_a_avx2_variant_0(uint32_t* target,
                                                            const lv_32fc_t* src0,
                                                            uint32_t num_points)
{
    const __m256i indices_increment = _mm256_set1_epi32(8);
    /* Indices of the eight samples handled per iteration (order matches the helper's shuffle). */
    __m256i current_indices = _mm256_set_epi32(7, 6, 3, 2, 5, 4, 1, 0);
    __m256 max_values = _mm256_setzero_ps();
    __m256i max_indices = _mm256_setzero_si256();

    for (unsigned i = 0; i < num_points / 8u; ++i) {
        __m256 in0 = _mm256_load_ps((float*)src0);
        __m256 in1 = _mm256_load_ps((float*)(src0 + 4));
        vector_32fc_index_max_variant0(
            in0, in1, &max_values, &max_indices, &current_indices, indices_increment);
        src0 += 8;
    }

    /* Reduce the eight per-lane maxima to a single value/index pair. */
    __VOLK_ATTR_ALIGNED(32) float max_values_buffer[8];
    __VOLK_ATTR_ALIGNED(32) uint32_t max_indices_buffer[8];
    _mm256_store_ps(max_values_buffer, max_values);
    _mm256_store_si256((__m256i*)max_indices_buffer, max_indices);

    float max = 0.f;
    uint32_t index = 0;
    for (unsigned i = 0; i < 8; i++) {
        if (max_values_buffer[i] > max) {
            max = max_values_buffer[i];
            index = max_indices_buffer[i];
        }
    }

    /* Scalar tail for points not covered by the vector loop. */
    for (unsigned i = num_points & (~7u); i < num_points; ++i) {
        const float abs_squared =
            lv_creal(*src0) * lv_creal(*src0) + lv_cimag(*src0) * lv_cimag(*src0);
        if (abs_squared > max) {
            max = abs_squared;
            index = i;
        }
        ++src0;
    }
    *target = index;
}
#endif /* LV_HAVE_AVX2 */
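/* Note: the _variant_0 and _variant_1 protokernels compute the same result; they
 * differ only in the instruction sequence used by the helpers
 * vector_32fc_index_max_variant0()/vector_32fc_index_max_variant1() (from
 * volk_avx2_intrinsics.h) to update the running maxima and indices. Which variant
 * is faster depends on the target microarchitecture. */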
#ifdef LV_HAVE_AVX2
#include <immintrin.h>
#include <volk/volk_avx2_intrinsics.h>

static inline void volk_32fc_index_max_32u_a_avx2_variant_1(uint32_t* target,
                                                            const lv_32fc_t* src0,
                                                            uint32_t num_points)
{
    const __m256i indices_increment = _mm256_set1_epi32(8);
    __m256i current_indices = _mm256_set_epi32(7, 6, 3, 2, 5, 4, 1, 0);
    __m256 max_values = _mm256_setzero_ps();
    __m256i max_indices = _mm256_setzero_si256();

    for (unsigned i = 0; i < num_points / 8u; ++i) {
        __m256 in0 = _mm256_load_ps((float*)src0);
        __m256 in1 = _mm256_load_ps((float*)(src0 + 4));
        vector_32fc_index_max_variant1(
            in0, in1, &max_values, &max_indices, &current_indices, indices_increment);
        src0 += 8;
    }

    __VOLK_ATTR_ALIGNED(32) float max_values_buffer[8];
    __VOLK_ATTR_ALIGNED(32) uint32_t max_indices_buffer[8];
    _mm256_store_ps(max_values_buffer, max_values);
    _mm256_store_si256((__m256i*)max_indices_buffer, max_indices);

    float max = 0.f;
    uint32_t index = 0;
    for (unsigned i = 0; i < 8; i++) {
        if (max_values_buffer[i] > max) {
            max = max_values_buffer[i];
            index = max_indices_buffer[i];
        }
    }

    for (unsigned i = num_points & (~7u); i < num_points; ++i) {
        const float abs_squared =
            lv_creal(*src0) * lv_creal(*src0) + lv_cimag(*src0) * lv_cimag(*src0);
        if (abs_squared > max) {
            max = abs_squared;
            index = i;
        }
        ++src0;
    }
    *target = index;
}
#endif /* LV_HAVE_AVX2 */
#ifdef LV_HAVE_SSE3
#include <pmmintrin.h>
#include <xmmintrin.h>

static inline void volk_32fc_index_max_32u_a_sse3(uint32_t* target,
                                                  const lv_32fc_t* src0,
                                                  uint32_t num_points)
{
    const uint32_t num_bytes = num_points * 8;

    union bit128 holderf;
    union bit128 holderi;
    float sq_dist = 0.0;

    union bit128 xmm5, xmm4;
    __m128 xmm1, xmm2, xmm3;
    __m128i xmm8, xmm11, xmm12, xmm9, xmm10;

    xmm5.int_vec = _mm_setzero_si128();
    xmm4.int_vec = _mm_setzero_si128();
    holderf.int_vec = _mm_setzero_si128();
    holderi.int_vec = _mm_setzero_si128();

    int bound = num_bytes >> 5;
    int i = 0;

    xmm8 = _mm_setr_epi32(0, 1, 2, 3);
    xmm9 = _mm_setzero_si128();
    xmm10 = _mm_setr_epi32(4, 4, 4, 4);
    xmm3 = _mm_setzero_ps();

    for (; i < bound; ++i) {
        xmm1 = _mm_load_ps((float*)src0);
        xmm2 = _mm_load_ps((float*)&src0[2]);

        src0 += 4;

        /* Four magnitude squares: square, then horizontally add re^2 + im^2. */
        xmm1 = _mm_mul_ps(xmm1, xmm1);
        xmm2 = _mm_mul_ps(xmm2, xmm2);
        xmm1 = _mm_hadd_ps(xmm1, xmm2);

        xmm3 = _mm_max_ps(xmm1, xmm3);

        xmm4.float_vec = _mm_cmplt_ps(xmm1, xmm3);
        xmm5.float_vec = _mm_cmpeq_ps(xmm1, xmm3);

        xmm11 = _mm_and_si128(xmm8, xmm5.int_vec);
        xmm12 = _mm_and_si128(xmm9, xmm4.int_vec);

        xmm9 = _mm_add_epi32(xmm11, xmm12);

        xmm8 = _mm_add_epi32(xmm8, xmm10);
    }

    if (num_bytes >> 4 & 1) {
        xmm2 = _mm_load_ps((float*)src0);

        src0 += 2;

        xmm2 = _mm_mul_ps(xmm2, xmm2);

        xmm1 = _mm_hadd_ps(xmm2, xmm2);

        xmm3 = _mm_max_ps(xmm1, xmm3);

        xmm10 = _mm_setr_epi32(2, 2, 2, 2);

        xmm4.float_vec = _mm_cmplt_ps(xmm1, xmm3);
        xmm5.float_vec = _mm_cmpeq_ps(xmm1, xmm3);

        xmm11 = _mm_and_si128(xmm8, xmm5.int_vec);
        xmm12 = _mm_and_si128(xmm9, xmm4.int_vec);

        xmm9 = _mm_add_epi32(xmm11, xmm12);

        xmm8 = _mm_add_epi32(xmm8, xmm10);
    }

    if (num_bytes >> 3 & 1) {
        sq_dist =
            lv_creal(src0[0]) * lv_creal(src0[0]) + lv_cimag(src0[0]) * lv_cimag(src0[0]);

        xmm2 = _mm_load1_ps(&sq_dist);

        xmm1 = xmm3;

        xmm3 = _mm_max_ss(xmm3, xmm2);

        xmm4.float_vec = _mm_cmplt_ps(xmm1, xmm3);
        xmm5.float_vec = _mm_cmpeq_ps(xmm1, xmm3);

        xmm8 = _mm_shuffle_epi32(xmm8, 0x00);

        xmm11 = _mm_and_si128(xmm8, xmm4.int_vec);
        xmm12 = _mm_and_si128(xmm9, xmm5.int_vec);

        xmm9 = _mm_add_epi32(xmm11, xmm12);
    }

    _mm_store_ps((float*)&(holderf.f), xmm3);
    _mm_store_si128(&(holderi.int_vec), xmm9);

    target[0] = holderi.i[0];
    sq_dist = holderf.f[0];
    target[0] = (holderf.f[1] > sq_dist) ? holderi.i[1] : target[0];
    sq_dist = (holderf.f[1] > sq_dist) ? holderf.f[1] : sq_dist;
    target[0] = (holderf.f[2] > sq_dist) ? holderi.i[2] : target[0];
    sq_dist = (holderf.f[2] > sq_dist) ? holderf.f[2] : sq_dist;
    target[0] = (holderf.f[3] > sq_dist) ? holderi.i[3] : target[0];
    sq_dist = (holderf.f[3] > sq_dist) ? holderf.f[3] : sq_dist;
}
#endif /* LV_HAVE_SSE3 */
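/* Note on the SSE3 protokernel above: each main-loop iteration squares two
 * registers of interleaved (re, im) pairs and combines them with _mm_hadd_ps to
 * get four magnitude squares; xmm3 keeps the running per-lane maxima, xmm8 holds
 * the indices of the four samples currently processed, and xmm9 the indices of
 * the per-lane maxima, updated through the cmpeq/cmplt masks. The unrolled
 * ternary chain at the end reduces the four lanes to the single winning index in
 * target[0]. */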
#ifdef LV_HAVE_GENERIC
static inline void volk_32fc_index_max_32u_generic(uint32_t* target,
                                                   const lv_32fc_t* src0,
                                                   uint32_t num_points)
{
    const uint32_t num_bytes = num_points * 8;

    float max = 0.0f;
    uint32_t index = 0;
    uint32_t i = 0;

    for (; i < (num_bytes >> 3); ++i) {
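        /* Reconstruction sketch of the loop body (it mirrors the scalar tail
         * loops of the SIMD protokernels in this file). */
        const float sq_dist = lv_creal(src0[i]) * lv_creal(src0[i]) +
                              lv_cimag(src0[i]) * lv_cimag(src0[i]);
        if (sq_dist > max) {
            index = i;
            max = sq_dist;
        }
    }
    target[0] = index;
}
#endif /* LV_HAVE_GENERIC */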
#ifdef LV_HAVE_AVX512F
#include <immintrin.h>

static inline void volk_32fc_index_max_32u_a_avx512f(uint32_t* target,
                                                     const lv_32fc_t* src0,
                                                     uint32_t num_points)
{
    const uint32_t sixteenthPoints = num_points / 16;
    const lv_32fc_t* src0Ptr = src0;

    /* Index order matches the lane order produced by the 0x88 shuffle below. */
    __m512 currentIndexes =
        _mm512_setr_ps(0, 1, 8, 9, 2, 3, 10, 11, 4, 5, 12, 13, 6, 7, 14, 15);
    const __m512 indexIncrement = _mm512_set1_ps(16);

    __m512 maxValues = _mm512_setzero_ps();
    __m512 maxIndices = _mm512_setzero_ps();

    for (uint32_t number = 0; number < sixteenthPoints; number++) {
        __m512 in0 = _mm512_load_ps((const float*)src0Ptr);
        __m512 in1 = _mm512_load_ps((const float*)(src0Ptr + 8));
        src0Ptr += 16;

        /* Square re/im, then add each neighbouring pair to get re^2 + im^2. */
        in0 = _mm512_mul_ps(in0, in0);
        in1 = _mm512_mul_ps(in1, in1);
        __m512 sw0 = _mm512_shuffle_ps(in0, in0, 0xB1);
        __m512 sw1 = _mm512_shuffle_ps(in1, in1, 0xB1);
        __m512 sum0 = _mm512_add_ps(in0, sw0);
        __m512 sum1 = _mm512_add_ps(in1, sw1);

        /* One magnitude-squared per complex sample, gathered from both halves. */
        __m512 mag_sq = _mm512_shuffle_ps(sum0, sum1, 0x88);

        __mmask16 cmpMask = _mm512_cmp_ps_mask(mag_sq, maxValues, _CMP_GT_OS);
        maxIndices = _mm512_mask_blend_ps(cmpMask, maxIndices, currentIndexes);
        maxValues = _mm512_max_ps(mag_sq, maxValues);

        currentIndexes = _mm512_add_ps(currentIndexes, indexIncrement);
    }

    __VOLK_ATTR_ALIGNED(64) float maxValuesBuffer[16];
    __VOLK_ATTR_ALIGNED(64) float maxIndexesBuffer[16];
    _mm512_store_ps(maxValuesBuffer, maxValues);
    _mm512_store_ps(maxIndexesBuffer, maxIndices);

    float max = 0.f;
    uint32_t index = 0;
    for (uint32_t i = 0; i < 16; i++) {
        if (maxValuesBuffer[i] > max) {
            max = maxValuesBuffer[i];
            index = (uint32_t)maxIndexesBuffer[i];
        } else if (maxValuesBuffer[i] == max) {
            /* On ties keep the lowest index. */
            if ((uint32_t)maxIndexesBuffer[i] < index)
                index = (uint32_t)maxIndexesBuffer[i];
        }
    }

    for (uint32_t number = sixteenthPoints * 16; number < num_points; number++) {
        const float re = lv_creal(*src0Ptr);
        const float im = lv_cimag(*src0Ptr);
        const float sq_dist = re * re + im * im;
        if (sq_dist > max) {
            max = sq_dist;
            index = number;
        }
        src0Ptr++;
    }
    *target = index;
}
#endif /* LV_HAVE_AVX512F */
#endif /* INCLUDED_volk_32fc_index_max_32u_a_H */


#ifndef INCLUDED_volk_32fc_index_max_32u_u_H
#define INCLUDED_volk_32fc_index_max_32u_u_H

#ifdef LV_HAVE_AVX2
#include <immintrin.h>
#include <volk/volk_avx2_intrinsics.h>

static inline void volk_32fc_index_max_32u_u_avx2_variant_0(uint32_t* target,
                                                            const lv_32fc_t* src0,
                                                            uint32_t num_points)
{
    const __m256i indices_increment = _mm256_set1_epi32(8);
    __m256i current_indices = _mm256_set_epi32(7, 6, 3, 2, 5, 4, 1, 0);
    __m256 max_values = _mm256_setzero_ps();
    __m256i max_indices = _mm256_setzero_si256();

    for (unsigned i = 0; i < num_points / 8u; ++i) {
        __m256 in0 = _mm256_loadu_ps((float*)src0);
        __m256 in1 = _mm256_loadu_ps((float*)(src0 + 4));
        vector_32fc_index_max_variant0(
            in0, in1, &max_values, &max_indices, &current_indices, indices_increment);
        src0 += 8;
    }

    __VOLK_ATTR_ALIGNED(32) float max_values_buffer[8];
    __VOLK_ATTR_ALIGNED(32) uint32_t max_indices_buffer[8];
    _mm256_store_ps(max_values_buffer, max_values);
    _mm256_store_si256((__m256i*)max_indices_buffer, max_indices);

    float max = 0.f;
    uint32_t index = 0;
    for (unsigned i = 0; i < 8; i++) {
        if (max_values_buffer[i] > max) {
            max = max_values_buffer[i];
            index = max_indices_buffer[i];
        }
    }

    for (unsigned i = num_points & (~7u); i < num_points; ++i) {
        const float abs_squared =
            lv_creal(*src0) * lv_creal(*src0) + lv_cimag(*src0) * lv_cimag(*src0);
        if (abs_squared > max) {
            max = abs_squared;
            index = i;
        }
        ++src0;
    }
    *target = index;
}
#endif /* LV_HAVE_AVX2 */
#ifdef LV_HAVE_AVX2
#include <immintrin.h>
#include <volk/volk_avx2_intrinsics.h>

static inline void volk_32fc_index_max_32u_u_avx2_variant_1(uint32_t* target,
                                                            const lv_32fc_t* src0,
                                                            uint32_t num_points)
{
    const __m256i indices_increment = _mm256_set1_epi32(8);
    __m256i current_indices = _mm256_set_epi32(7, 6, 3, 2, 5, 4, 1, 0);
    __m256 max_values = _mm256_setzero_ps();
    __m256i max_indices = _mm256_setzero_si256();

    for (unsigned i = 0; i < num_points / 8u; ++i) {
        __m256 in0 = _mm256_loadu_ps((float*)src0);
        __m256 in1 = _mm256_loadu_ps((float*)(src0 + 4));
        vector_32fc_index_max_variant1(
            in0, in1, &max_values, &max_indices, &current_indices, indices_increment);
        src0 += 8;
    }

    __VOLK_ATTR_ALIGNED(32) float max_values_buffer[8];
    __VOLK_ATTR_ALIGNED(32) uint32_t max_indices_buffer[8];
    _mm256_store_ps(max_values_buffer, max_values);
    _mm256_store_si256((__m256i*)max_indices_buffer, max_indices);

    float max = 0.f;
    uint32_t index = 0;
    for (unsigned i = 0; i < 8; i++) {
        if (max_values_buffer[i] > max) {
            max = max_values_buffer[i];
            index = max_indices_buffer[i];
        }
    }

    for (unsigned i = num_points & (~7u); i < num_points; ++i) {
        const float abs_squared =
            lv_creal(*src0) * lv_creal(*src0) + lv_cimag(*src0) * lv_cimag(*src0);
        if (abs_squared > max) {
            max = abs_squared;
            index = i;
        }
        ++src0;
    }
    *target = index;
}
#endif /* LV_HAVE_AVX2 */
#ifdef LV_HAVE_NEON
#include <arm_neon.h>
#include <float.h>
#include <volk/volk_neon_intrinsics.h>

static inline void volk_32fc_index_max_32u_neon(uint32_t* target,
                                                const lv_32fc_t* src0,
                                                uint32_t num_points)
{
    unsigned int number = 0;
    const uint32_t quarter_points = num_points / 4;
    const lv_32fc_t* src0Ptr = src0;

    uint32_t indices[4] = { 0, 1, 2, 3 };
    const uint32x4_t vec_indices_incr = vdupq_n_u32(4);
    uint32x4_t vec_indices = vld1q_u32(indices);
    uint32x4_t vec_max_indices = vec_indices;

    float max = FLT_MIN;
    uint32_t index = 0;

    float32x4_t vec_max = vdupq_n_f32(FLT_MIN);

    for (; number < quarter_points; number++) {
        /* Magnitude squared of four complex samples (helper assumed to come from
           volk_neon_intrinsics.h). */
        const float32x4_t vec_mag2 =
            _vmagnitudesquaredq_f32(vld2q_f32((float*)src0Ptr));
        src0Ptr += 4;

        const uint32x4_t gt_mask = vcgtq_f32(vec_mag2, vec_max);
        vec_max = vbslq_f32(gt_mask, vec_mag2, vec_max);
        vec_max_indices = vbslq_u32(gt_mask, vec_indices, vec_max_indices);
        vec_indices = vaddq_u32(vec_indices, vec_indices_incr);
    }

    uint32_t tmp_max_indices[4];
    float tmp_max[4];
    vst1q_u32(tmp_max_indices, vec_max_indices);
    vst1q_f32(tmp_max, vec_max);

    for (int i = 0; i < 4; i++) {
        if (tmp_max[i] > max) {
            max = tmp_max[i];
            index = tmp_max_indices[i];
        } else if (tmp_max[i] == max) {
            if (tmp_max_indices[i] < index)
                index = tmp_max_indices[i];
        }
    }

    /* Scalar tail. */
    for (number = quarter_points * 4; number < num_points; number++) {
        const float re = lv_creal(*src0Ptr);
        const float im = lv_cimag(*src0Ptr);
        const float sq_dist = re * re + im * im;
        if (sq_dist > max) {
            max = sq_dist;
            index = number;
        }
        src0Ptr++;
    }
    *target = index;
}
#endif /* LV_HAVE_NEON */
#ifdef LV_HAVE_NEONV8
#include <arm_neon.h>

static inline void volk_32fc_index_max_32u_neonv8(uint32_t* target,
                                                  const lv_32fc_t* src0,
                                                  uint32_t num_points)
{
    const uint32_t quarter_points = num_points / 4;
    const lv_32fc_t* inputPtr = src0;

    uint32x4_t vec_indices = { 0, 1, 2, 3 };
    const uint32x4_t vec_incr = vdupq_n_u32(4);

    float32x4_t vec_max = vdupq_n_f32(0.0f);
    uint32x4_t vec_max_idx = vdupq_n_u32(0);

    for (uint32_t i = 0; i < quarter_points; i++) {
        float32x4x2_t cplx = vld2q_f32((const float*)inputPtr);
        inputPtr += 4;

        /* mag2 = re*re + im*im */
        float32x4_t mag2 =
            vfmaq_f32(vmulq_f32(cplx.val[0], cplx.val[0]), cplx.val[1], cplx.val[1]);

        uint32x4_t gt_mask = vcgtq_f32(mag2, vec_max);
        vec_max_idx = vbslq_u32(gt_mask, vec_indices, vec_max_idx);
        vec_max = vmaxq_f32(mag2, vec_max);
        vec_indices = vaddq_u32(vec_indices, vec_incr);
    }

    float max_val = vmaxvq_f32(vec_max);
    /* Among lanes holding the maximum, pick the smallest index. */
    uint32x4_t max_mask = vceqq_f32(vec_max, vdupq_n_f32(max_val));
    uint32x4_t idx_masked = vbslq_u32(max_mask, vec_max_idx, vdupq_n_u32(UINT32_MAX));
    uint32_t result_idx = vminvq_u32(idx_masked);

    for (uint32_t i = quarter_points * 4; i < num_points; i++) {
        const float re = lv_creal(*inputPtr);
        const float im = lv_cimag(*inputPtr);
        float mag2 = re * re + im * im;
        if (mag2 > max_val) {
            max_val = mag2;
            result_idx = i;
        }
        inputPtr++;
    }

    *target = result_idx;
}
#endif /* LV_HAVE_NEONV8 */
#ifdef LV_HAVE_AVX512F
#include <immintrin.h>

static inline void volk_32fc_index_max_32u_u_avx512f(uint32_t* target,
                                                     const lv_32fc_t* src0,
                                                     uint32_t num_points)
{
    const uint32_t sixteenthPoints = num_points / 16;
    const lv_32fc_t* src0Ptr = src0;

    __m512 currentIndexes =
        _mm512_setr_ps(0, 1, 8, 9, 2, 3, 10, 11, 4, 5, 12, 13, 6, 7, 14, 15);
    const __m512 indexIncrement = _mm512_set1_ps(16);

    __m512 maxValues = _mm512_setzero_ps();
    __m512 maxIndices = _mm512_setzero_ps();

    for (uint32_t number = 0; number < sixteenthPoints; number++) {
        __m512 in0 = _mm512_loadu_ps((const float*)src0Ptr);
        __m512 in1 = _mm512_loadu_ps((const float*)(src0Ptr + 8));
        src0Ptr += 16;

        in0 = _mm512_mul_ps(in0, in0);
        in1 = _mm512_mul_ps(in1, in1);
        __m512 sw0 = _mm512_shuffle_ps(in0, in0, 0xB1);
        __m512 sw1 = _mm512_shuffle_ps(in1, in1, 0xB1);
        __m512 sum0 = _mm512_add_ps(in0, sw0);
        __m512 sum1 = _mm512_add_ps(in1, sw1);

        __m512 mag_sq = _mm512_shuffle_ps(sum0, sum1, 0x88);

        __mmask16 cmpMask = _mm512_cmp_ps_mask(mag_sq, maxValues, _CMP_GT_OS);
        maxIndices = _mm512_mask_blend_ps(cmpMask, maxIndices, currentIndexes);
        maxValues = _mm512_max_ps(mag_sq, maxValues);

        currentIndexes = _mm512_add_ps(currentIndexes, indexIncrement);
    }

    __VOLK_ATTR_ALIGNED(64) float maxValuesBuffer[16];
    __VOLK_ATTR_ALIGNED(64) float maxIndexesBuffer[16];
    _mm512_store_ps(maxValuesBuffer, maxValues);
    _mm512_store_ps(maxIndexesBuffer, maxIndices);

    float max = 0.f;
    uint32_t index = 0;
    for (uint32_t i = 0; i < 16; i++) {
        if (maxValuesBuffer[i] > max) {
            max = maxValuesBuffer[i];
            index = (uint32_t)maxIndexesBuffer[i];
        } else if (maxValuesBuffer[i] == max) {
            if ((uint32_t)maxIndexesBuffer[i] < index)
                index = (uint32_t)maxIndexesBuffer[i];
        }
    }

    for (uint32_t number = sixteenthPoints * 16; number < num_points; number++) {
        const float re = lv_creal(*src0Ptr);
        const float im = lv_cimag(*src0Ptr);
        const float sq_dist = re * re + im * im;
        if (sq_dist > max) {
            max = sq_dist;
            index = number;
        }
        src0Ptr++;
    }
    *target = index;
}
#endif /* LV_HAVE_AVX512F */
#ifdef LV_HAVE_RVV
#include <riscv_vector.h>
#include <volk/volk_rvv_intrinsics.h>

static inline void
volk_32fc_index_max_32u_rvv(uint32_t* target, const lv_32fc_t* src0, uint32_t num_points)
{
    vfloat32m4_t vmax = __riscv_vfmv_v_f_f32m4(0, __riscv_vsetvlmax_e32m4());
    vuint32m4_t vmaxi = __riscv_vmv_v_x_u32m4(0, __riscv_vsetvlmax_e32m4());
    vuint32m4_t vidx = __riscv_vid_v_u32m4(__riscv_vsetvlmax_e32m4());
    size_t n = num_points;
    for (size_t vl; n > 0; n -= vl, src0 += vl) {
        vl = __riscv_vsetvl_e32m4(n);
        /* Load complex samples as 64-bit words and split re/im with narrowing shifts. */
        vuint64m8_t vc = __riscv_vle64_v_u64m8((const uint64_t*)src0, vl);
        vfloat32m4_t vr = __riscv_vreinterpret_f32m4(__riscv_vnsrl(vc, 0, vl));
        vfloat32m4_t vi = __riscv_vreinterpret_f32m4(__riscv_vnsrl(vc, 32, vl));
        vfloat32m4_t v = __riscv_vfmacc(__riscv_vfmul(vr, vr, vl), vi, vi, vl);
        vbool8_t m = __riscv_vmflt(vmax, v, vl);
        vmax = __riscv_vfmax_tu(vmax, vmax, v, vl);
        vmaxi = __riscv_vmerge_tu(vmaxi, vmaxi, vidx, m, vl);
        vidx = __riscv_vadd(vidx, vl, __riscv_vsetvlmax_e32m4());
    }
    size_t vl = __riscv_vsetvlmax_e32m4();
    float max = __riscv_vfmv_f(__riscv_vfredmax(RISCV_SHRINK4(vfmax, f, 32, vmax),
                                                __riscv_vfmv_v_f_f32m1(0, 1),
                                                __riscv_vsetvlmax_e32m1()));
    vbool8_t m = __riscv_vmfeq(vmax, max, vl);
    vuint32m4_t idx_masked =
        __riscv_vmerge(__riscv_vmv_v_x_u32m4(UINT32_MAX, vl), vmaxi, m, vl);
    *target = __riscv_vmv_x(__riscv_vredminu(RISCV_SHRINK4(vminu, u, 32, idx_masked),
                                             __riscv_vmv_v_x_u32m1(UINT32_MAX, 1),
                                             __riscv_vsetvlmax_e32m1()));
}
#endif /* LV_HAVE_RVV */
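/* Note: the protokernel above de-interleaves the complex input by loading it as
 * 64-bit words and splitting real/imaginary parts with narrowing shifts
 * (__riscv_vnsrl by 0 and by 32 bits); the rvvseg protokernel below obtains the
 * same split directly from a segmented load (__riscv_vlseg2e32_v_f32m4x2). In
 * both, RISCV_SHRINK4 (a VOLK RVV intrinsics helper macro) folds the LMUL=4
 * accumulators down to LMUL=1 before the final scalar reductions. */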
#ifdef LV_HAVE_RVVSEG
#include <riscv_vector.h>
#include <volk/volk_rvv_intrinsics.h>

static inline void volk_32fc_index_max_32u_rvvseg(uint32_t* target,
                                                  const lv_32fc_t* src0,
                                                  uint32_t num_points)
{
    vfloat32m4_t vmax = __riscv_vfmv_v_f_f32m4(0, __riscv_vsetvlmax_e32m4());
    vuint32m4_t vmaxi = __riscv_vmv_v_x_u32m4(0, __riscv_vsetvlmax_e32m4());
    vuint32m4_t vidx = __riscv_vid_v_u32m4(__riscv_vsetvlmax_e32m4());
    size_t n = num_points;
    for (size_t vl; n > 0; n -= vl, src0 += vl) {
        vl = __riscv_vsetvl_e32m4(n);
        /* Segmented load de-interleaves re/im directly. */
        vfloat32m4x2_t vc = __riscv_vlseg2e32_v_f32m4x2((const float*)src0, vl);
        vfloat32m4_t vr = __riscv_vget_f32m4(vc, 0), vi = __riscv_vget_f32m4(vc, 1);
        vfloat32m4_t v = __riscv_vfmacc(__riscv_vfmul(vr, vr, vl), vi, vi, vl);
        vbool8_t m = __riscv_vmflt(vmax, v, vl);
        vmax = __riscv_vfmax_tu(vmax, vmax, v, vl);
        vmaxi = __riscv_vmerge_tu(vmaxi, vmaxi, vidx, m, vl);
        vidx = __riscv_vadd(vidx, vl, __riscv_vsetvlmax_e32m4());
    }
    size_t vl = __riscv_vsetvlmax_e32m4();
    float max = __riscv_vfmv_f(__riscv_vfredmax(RISCV_SHRINK4(vfmax, f, 32, vmax),
                                                __riscv_vfmv_v_f_f32m1(0, 1),
                                                __riscv_vsetvlmax_e32m1()));
    vbool8_t m = __riscv_vmfeq(vmax, max, vl);
    vuint32m4_t idx_masked =
        __riscv_vmerge(__riscv_vmv_v_x_u32m4(UINT32_MAX, vl), vmaxi, m, vl);
    *target = __riscv_vmv_x(__riscv_vredminu(RISCV_SHRINK4(vminu, u, 32, idx_masked),
                                             __riscv_vmv_v_x_u32m1(UINT32_MAX, 1),
                                             __riscv_vsetvlmax_e32m1()));
}
#endif /* LV_HAVE_RVVSEG */

#endif /* INCLUDED_volk_32fc_index_max_32u_u_H */