#ifndef INCLUDED_volk_32fc_x2_conjugate_dot_prod_32fc_u_H
#define INCLUDED_volk_32fc_x2_conjugate_dot_prod_32fc_u_H
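
/* All implementations below compute the conjugate dot product
 *     *result = sum_{i=0}^{num_points-1} input[i] * conj(taps[i])
 * which, writing a = input[i] and b = taps[i] in component form, accumulates
 *     re += a_re * b_re + a_im * b_im
 *     im += a_im * b_re - a_re * b_im
 * The SIMD variants differ only in how they deinterleave the complex data,
 * form these two partial sums, and horizontally reduce them at the end.
 */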
                                                                unsigned int num_points)
    for (unsigned int i = 0; i < num_points; ++i) {
        res += (*input++) * lv_conj((*taps++));
                                                                unsigned int num_points)
    const unsigned int num_bytes = num_points * 8;

    float* res = (float*)result;
    float* in = (float*)input;
    float* tp = (float*)taps;
    unsigned int n_2_ccomplex_blocks = num_bytes >> 4;

    float sum0[2] = { 0, 0 };
    float sum1[2] = { 0, 0 };
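
    /* Two complex samples are processed per iteration; each accumulator holds
     * one partial { real, imag } sum of in[k] * conj(tp[k]):
     *     real += in_re * tp_re + in_im * tp_im
     *     imag += in_im * tp_re - in_re * tp_im
     */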
    for (i = 0; i < n_2_ccomplex_blocks; ++i) {
        sum0[0] += in[0] * tp[0] + in[1] * tp[1];
        sum0[1] += (-in[0] * tp[1]) + in[1] * tp[0];
        sum1[0] += in[2] * tp[2] + in[3] * tp[3];
        sum1[1] += (-in[2] * tp[3]) + in[3] * tp[2];

    res[0] = sum0[0] + sum1[0];
    res[1] = sum0[1] + sum1[1];
    if (num_bytes >> 3 & 1) {
        *result += input[(num_bytes >> 3) - 1] * lv_conj(taps[(num_bytes >> 3) - 1]);
#include <pmmintrin.h>
#include <xmmintrin.h>
                                                                unsigned int num_points)
    __m128 sum_a_mult_b_real = _mm_setzero_ps();
    __m128 sum_a_mult_b_imag = _mm_setzero_ps();
    for (long unsigned i = 0; i < (num_points & ~1u); i += 2) {
        __m128 a = _mm_loadu_ps((const float*)&input[i]);
        __m128 b = _mm_loadu_ps((const float*)&taps[i]);
        __m128 b_real = _mm_moveldup_ps(b);
        __m128 b_imag = _mm_movehdup_ps(b);
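
        /* With a = [a0_re, a0_im, a1_re, a1_im], accumulating a * b_real keeps
         * the products a_re*b_re (even lanes) and a_im*b_re (odd lanes), while
         * _mm_addsub_ps on a * b_imag subtracts in the even lanes and adds in
         * the odd lanes, yielding -a_re*b_im and +a_im*b_im. Swapping the
         * imaginary accumulator pairwise after the loop lines the two
         * accumulators up as { re, im } pairs.
         */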
        sum_a_mult_b_real = _mm_add_ps(sum_a_mult_b_real, _mm_mul_ps(a, b_real));
        sum_a_mult_b_imag = _mm_addsub_ps(sum_a_mult_b_imag, _mm_mul_ps(a, b_imag));
    sum_a_mult_b_imag =
        _mm_shuffle_ps(sum_a_mult_b_imag, sum_a_mult_b_imag, _MM_SHUFFLE(2, 3, 0, 1));
    __m128 sum = _mm_add_ps(sum_a_mult_b_real, sum_a_mult_b_imag);

    sum = _mm_add_ps(sum, _mm_shuffle_ps(sum, sum, _MM_SHUFFLE(1, 0, 3, 2)));

    _mm_storel_pi((__m64*)result, sum);
    if (num_points & 1u) {
#include <pmmintrin.h>
#include <xmmintrin.h>
                                                                unsigned int num_points)
    __m128 sum_a_mult_b_real = _mm_setzero_ps();
    __m128 sum_a_mult_b_imag = _mm_setzero_ps();
    for (long unsigned i = 0; i < (num_points & ~1u); i += 2) {
        __m128 a = _mm_load_ps((const float*)&input[i]);
        __m128 b = _mm_load_ps((const float*)&taps[i]);
        __m128 b_real = _mm_moveldup_ps(b);
        __m128 b_imag = _mm_movehdup_ps(b);
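
        /* Same real/imag split and addsub accumulation as the unaligned SSE3
         * variant above; only the loads differ (_mm_load_ps requires 16-byte
         * aligned input and taps buffers).
         */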
        sum_a_mult_b_real = _mm_add_ps(sum_a_mult_b_real, _mm_mul_ps(a, b_real));
        sum_a_mult_b_imag = _mm_addsub_ps(sum_a_mult_b_imag, _mm_mul_ps(a, b_imag));
    sum_a_mult_b_imag =
        _mm_shuffle_ps(sum_a_mult_b_imag, sum_a_mult_b_imag, _MM_SHUFFLE(2, 3, 0, 1));
    __m128 sum = _mm_add_ps(sum_a_mult_b_real, sum_a_mult_b_imag);

    sum = _mm_add_ps(sum, _mm_shuffle_ps(sum, sum, _MM_SHUFFLE(1, 0, 3, 2)));

    _mm_storel_pi((__m64*)result, sum);
    if (num_points & 1u) {
#include <immintrin.h>
                                                                unsigned int num_points)
    __m256 sum_a_mult_b_real = _mm256_setzero_ps();
    __m256 sum_a_mult_b_imag = _mm256_setzero_ps();
    for (long unsigned i = 0; i < (num_points & ~3u); i += 4) {
        __m256 a = _mm256_loadu_ps((const float*)&input[i]);
        __m256 b = _mm256_loadu_ps((const float*)&taps[i]);
        __m256 b_real = _mm256_moveldup_ps(b);
        __m256 b_imag = _mm256_movehdup_ps(b);
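
        /* 256-bit version of the SSE3 scheme: four complex samples per
         * iteration, with moveldup/movehdup duplicating the real and imaginary
         * parts of taps and addsub handling the conjugate sign flip.
         */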
        sum_a_mult_b_real = _mm256_add_ps(sum_a_mult_b_real, _mm256_mul_ps(a, b_real));
        sum_a_mult_b_imag = _mm256_addsub_ps(sum_a_mult_b_imag, _mm256_mul_ps(a, b_imag));

    sum_a_mult_b_imag = _mm256_permute_ps(sum_a_mult_b_imag, _MM_SHUFFLE(2, 3, 0, 1));

    __m256 sum = _mm256_add_ps(sum_a_mult_b_real, sum_a_mult_b_imag);
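
    /* Horizontal reduction: first fold the upper 128-bit lane onto the lower
     * one, then fold the upper complex pair onto the lower pair, so the final
     * { re, im } ends up in the lowest two floats of sum.
     */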
    sum = _mm256_add_ps(sum, _mm256_permute2f128_ps(sum, sum, 0x01));

    sum = _mm256_add_ps(sum, _mm256_permute_ps(sum, _MM_SHUFFLE(1, 0, 3, 2)));

    __m128 lower = _mm256_extractf128_ps(sum, 0);
    _mm_storel_pi((__m64*)result, lower);
    for (long unsigned i = num_points & ~3u; i < num_points; ++i) {
#include <immintrin.h>
                                                                unsigned int num_points)
    __m256 sum_a_mult_b_real = _mm256_setzero_ps();
    __m256 sum_a_mult_b_imag = _mm256_setzero_ps();
    for (long unsigned i = 0; i < (num_points & ~3u); i += 4) {
        __m256 a = _mm256_load_ps((const float*)&input[i]);
        __m256 b = _mm256_load_ps((const float*)&taps[i]);
        __m256 b_real = _mm256_moveldup_ps(b);
        __m256 b_imag = _mm256_movehdup_ps(b);
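
        /* Identical to the unaligned AVX variant above except for the aligned
         * _mm256_load_ps loads (32-byte aligned input and taps required).
         */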
        sum_a_mult_b_real = _mm256_add_ps(sum_a_mult_b_real, _mm256_mul_ps(a, b_real));
        sum_a_mult_b_imag = _mm256_addsub_ps(sum_a_mult_b_imag, _mm256_mul_ps(a, b_imag));

    sum_a_mult_b_imag = _mm256_permute_ps(sum_a_mult_b_imag, _MM_SHUFFLE(2, 3, 0, 1));

    __m256 sum = _mm256_add_ps(sum_a_mult_b_real, sum_a_mult_b_imag);
    sum = _mm256_add_ps(sum, _mm256_permute2f128_ps(sum, sum, 0x01));

    sum = _mm256_add_ps(sum, _mm256_permute_ps(sum, _MM_SHUFFLE(1, 0, 3, 2)));

    __m128 lower = _mm256_extractf128_ps(sum, 0);
    _mm_storel_pi((__m64*)result, lower);
    for (long unsigned i = num_points & ~3u; i < num_points; ++i) {
#if LV_HAVE_AVX512F && LV_HAVE_AVX512DQ

#include <immintrin.h>
static inline void
volk_32fc_x2_conjugate_dot_prod_32fc_u_avx512dq(lv_32fc_t* result,
                                                const lv_32fc_t* input,
                                                const lv_32fc_t* taps,
                                                unsigned int num_points)
    __m512 sum_a_mult_b_real = _mm512_setzero_ps();
    __m512 sum_a_mult_b_imag = _mm512_setzero_ps();
    const __m512 sign_mask = _mm512_castsi512_ps(_mm512_set_epi32(
        0, 0x80000000, 0, 0x80000000, 0, 0x80000000, 0, 0x80000000,
        0, 0x80000000, 0, 0x80000000, 0, 0x80000000, 0, 0x80000000));
    for (long unsigned i = 0; i < (num_points & ~7u); i += 8) {
        __m512 a = _mm512_loadu_ps((const float*)&input[i]);
        __m512 b = _mm512_loadu_ps((const float*)&taps[i]);
        __m512 b_real = _mm512_moveldup_ps(b);
        __m512 b_imag = _mm512_movehdup_ps(b);
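
        /* AVX-512 has no addsub instruction, so the conjugate sign flip is done
         * explicitly: a * b_imag is computed, the sign of its even
         * (real-position) lanes is flipped by XORing with sign_mask, and the
         * result is accumulated. After the loop the imaginary accumulator is
         * swapped pairwise (permute 0xB1) so real and imaginary parts line up
         * for the final add, just as in the SSE3/AVX variants.
         */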
        sum_a_mult_b_real = _mm512_fmadd_ps(a, b_real, sum_a_mult_b_real);

        __m512 mult_imag = _mm512_mul_ps(a, b_imag);
        mult_imag = _mm512_xor_ps(mult_imag, sign_mask);
        sum_a_mult_b_imag = _mm512_add_ps(sum_a_mult_b_imag, mult_imag);

    sum_a_mult_b_imag = _mm512_permute_ps(sum_a_mult_b_imag, 0xB1);
    __m512 sum = _mm512_add_ps(sum_a_mult_b_real, sum_a_mult_b_imag);
    __m256 sum_high = _mm512_extractf32x8_ps(sum, 1);
    __m256 sum_low = _mm512_castps512_ps256(sum);
    __m256 sum256 = _mm256_add_ps(sum_high, sum_low);

    __m128 sum128_high = _mm256_extractf128_ps(sum256, 1);
    __m128 sum128_low = _mm256_castps256_ps128(sum256);
    __m128 sum128 = _mm_add_ps(sum128_high, sum128_low);

    sum128 = _mm_add_ps(sum128, _mm_shuffle_ps(sum128, sum128, _MM_SHUFFLE(1, 0, 3, 2)));

    _mm_storel_pi((__m64*)result, sum128);
    for (long unsigned i = num_points & ~7u; i < num_points; ++i) {
#if LV_HAVE_AVX512F && LV_HAVE_AVX512DQ

#include <immintrin.h>
static inline void
volk_32fc_x2_conjugate_dot_prod_32fc_a_avx512dq(lv_32fc_t* result,
                                                const lv_32fc_t* input,
                                                const lv_32fc_t* taps,
                                                unsigned int num_points)
    __m512 sum_a_mult_b_real = _mm512_setzero_ps();
    __m512 sum_a_mult_b_imag = _mm512_setzero_ps();
    const __m512 sign_mask = _mm512_castsi512_ps(_mm512_set_epi32(
        0, 0x80000000, 0, 0x80000000, 0, 0x80000000, 0, 0x80000000,
        0, 0x80000000, 0, 0x80000000, 0, 0x80000000, 0, 0x80000000));
    for (long unsigned i = 0; i < (num_points & ~7u); i += 8) {
        __m512 a = _mm512_load_ps((const float*)&input[i]);
        __m512 b = _mm512_load_ps((const float*)&taps[i]);
        __m512 b_real = _mm512_moveldup_ps(b);
        __m512 b_imag = _mm512_movehdup_ps(b);
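
        /* Same sign-mask/XOR scheme as the unaligned AVX-512 variant above;
         * only the loads differ (_mm512_load_ps expects 64-byte aligned data).
         */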
        sum_a_mult_b_real = _mm512_fmadd_ps(a, b_real, sum_a_mult_b_real);

        __m512 mult_imag = _mm512_mul_ps(a, b_imag);
        mult_imag = _mm512_xor_ps(mult_imag, sign_mask);
        sum_a_mult_b_imag = _mm512_add_ps(sum_a_mult_b_imag, mult_imag);

    sum_a_mult_b_imag = _mm512_permute_ps(sum_a_mult_b_imag, 0xB1);
    __m512 sum = _mm512_add_ps(sum_a_mult_b_real, sum_a_mult_b_imag);
    __m256 sum_high = _mm512_extractf32x8_ps(sum, 1);
    __m256 sum_low = _mm512_castps512_ps256(sum);
    __m256 sum256 = _mm256_add_ps(sum_high, sum_low);

    __m128 sum128_high = _mm256_extractf128_ps(sum256, 1);
    __m128 sum128_low = _mm256_castps256_ps128(sum256);
    __m128 sum128 = _mm_add_ps(sum128_high, sum128_low);

    sum128 = _mm_add_ps(sum128, _mm_shuffle_ps(sum128, sum128, _MM_SHUFFLE(1, 0, 3, 2)));

    _mm_storel_pi((__m64*)result, sum128);
    for (long unsigned i = num_points & ~7u; i < num_points; ++i) {
                                                                unsigned int num_points)
    unsigned int quarter_points = num_points / 4;

    float32x4x2_t a_val, b_val, accumulator;
    float32x4x2_t tmp_imag;
    accumulator.val[0] = vdupq_n_f32(0);
    accumulator.val[1] = vdupq_n_f32(0);
    for (number = 0; number < quarter_points; ++number) {
        a_val = vld2q_f32((float*)a_ptr);
        b_val = vld2q_f32((float*)b_ptr);
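
        /* vld2q_f32 deinterleaves, so val[0] holds four real parts and val[1]
         * four imaginary parts. Assuming a_ptr walks input and b_ptr walks
         * taps (as in the scalar tail loop below), the conjugate multiply is
         *     re = a_re * b_re + a_im * b_im   (vmulq + vmlaq)
         *     im = a_im * b_re - a_re * b_im   (vmulq + vmlsq)
         */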
        tmp_imag.val[1] = vmulq_f32(a_val.val[1], b_val.val[0]);
        tmp_imag.val[0] = vmulq_f32(a_val.val[0], b_val.val[0]);

        tmp_imag.val[1] = vmlsq_f32(tmp_imag.val[1], a_val.val[0], b_val.val[1]);
        tmp_imag.val[0] = vmlaq_f32(tmp_imag.val[0], a_val.val[1], b_val.val[1]);

        accumulator.val[0] = vaddq_f32(accumulator.val[0], tmp_imag.val[0]);
        accumulator.val[1] = vaddq_f32(accumulator.val[1], tmp_imag.val[1]);
    vst2q_f32((float*)accum_result, accumulator);
    *result = accum_result[0] + accum_result[1] + accum_result[2] + accum_result[3];
    for (number = quarter_points * 4; number < num_points; ++number) {
        *result += (*a_ptr++) * lv_conj(*b_ptr++);
static inline void volk_32fc_x2_conjugate_dot_prod_32fc_neonv8(lv_32fc_t* result,
                                                               const lv_32fc_t* input,
                                                               const lv_32fc_t* taps,
                                                               unsigned int num_points)
    unsigned int n = num_points;

    float32x4_t acc0_r = vdupq_n_f32(0);
    float32x4_t acc0_i = vdupq_n_f32(0);
    float32x4_t acc1_r = vdupq_n_f32(0);
    float32x4_t acc1_i = vdupq_n_f32(0);
        float32x4x2_t a0 = vld2q_f32((const float*)a);
        float32x4x2_t b0 = vld2q_f32((const float*)b);
        float32x4x2_t a1 = vld2q_f32((const float*)(a + 4));
        float32x4x2_t b1 = vld2q_f32((const float*)(b + 4));
        acc0_r = vfmaq_f32(acc0_r, a0.val[0], b0.val[0]);
        acc0_r = vfmaq_f32(acc0_r, a0.val[1], b0.val[1]);
        acc0_i = vfmaq_f32(acc0_i, a0.val[1], b0.val[0]);
        acc0_i = vfmsq_f32(acc0_i, a0.val[0], b0.val[1]);

        acc1_r = vfmaq_f32(acc1_r, a1.val[0], b1.val[0]);
        acc1_r = vfmaq_f32(acc1_r, a1.val[1], b1.val[1]);
        acc1_i = vfmaq_f32(acc1_i, a1.val[1], b1.val[0]);
        acc1_i = vfmsq_f32(acc1_i, a1.val[0], b1.val[1]);
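
        /* Eight complex samples per iteration, kept in two independent
         * accumulator pairs (acc0_*, acc1_*) so the fused multiply-add chains
         * can overlap. vfmaq/vfmsq build re = a_re*b_re + a_im*b_im and
         * im = a_im*b_re - a_re*b_im directly, i.e. the second operand is the
         * conjugated one.
         */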
        float32x4x2_t a0 = vld2q_f32((const float*)a);
        float32x4x2_t b0 = vld2q_f32((const float*)b);

        acc0_r = vfmaq_f32(acc0_r, a0.val[0], b0.val[0]);
        acc0_r = vfmaq_f32(acc0_r, a0.val[1], b0.val[1]);
        acc0_i = vfmaq_f32(acc0_i, a0.val[1], b0.val[0]);
        acc0_i = vfmsq_f32(acc0_i, a0.val[0], b0.val[1]);
    acc0_r = vaddq_f32(acc0_r, acc1_r);
    acc0_i = vaddq_f32(acc0_i, acc1_i);

    float32x2_t sum_r = vadd_f32(vget_low_f32(acc0_r), vget_high_f32(acc0_r));
    float32x2_t sum_i = vadd_f32(vget_low_f32(acc0_i), vget_high_f32(acc0_i));
    sum_r = vpadd_f32(sum_r, sum_r);
    sum_i = vpadd_f32(sum_i, sum_i);

    float res_r = vget_lane_f32(sum_r, 0);
    float res_i = vget_lane_f32(sum_i, 0);
#include <riscv_vector.h>
static inline void volk_32fc_x2_conjugate_dot_prod_32fc_rvv(lv_32fc_t* result,
                                                            const lv_32fc_t* input,
                                                            const lv_32fc_t* taps,
                                                            unsigned int num_points)
    vfloat32m2_t vsumr = __riscv_vfmv_v_f_f32m2(0, __riscv_vsetvlmax_e32m2());
    vfloat32m2_t vsumi = vsumr;
    size_t n = num_points;
    for (size_t vl; n > 0; n -= vl, input += vl, taps += vl) {
        vl = __riscv_vsetvl_e32m2(n);
        vuint64m4_t va = __riscv_vle64_v_u64m4((const uint64_t*)input, vl);
        vuint64m4_t vb = __riscv_vle64_v_u64m4((const uint64_t*)taps, vl);
        vfloat32m2_t var = __riscv_vreinterpret_f32m2(__riscv_vnsrl(va, 0, vl));
        vfloat32m2_t vbr = __riscv_vreinterpret_f32m2(__riscv_vnsrl(vb, 0, vl));
        vfloat32m2_t vai = __riscv_vreinterpret_f32m2(__riscv_vnsrl(va, 32, vl));
        vfloat32m2_t vbi = __riscv_vreinterpret_f32m2(__riscv_vnsrl(vb, 32, vl));
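
        /* Each complex sample is loaded as one 64-bit word; a narrowing shift
         * by 0 extracts the low 32 bits (real part) and a shift by 32 the high
         * 32 bits (imaginary part) of every element. Negating the taps'
         * imaginary part then lets the ordinary complex-multiply FMA sequence
         * below compute input * conj(taps).
         */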
        vbi = __riscv_vfneg(vbi, vl);
        vfloat32m2_t vr = __riscv_vfnmsac(__riscv_vfmul(var, vbr, vl), vai, vbi, vl);
        vfloat32m2_t vi = __riscv_vfmacc(__riscv_vfmul(var, vbi, vl), vai, vbr, vl);
        vsumr = __riscv_vfadd_tu(vsumr, vsumr, vr, vl);
        vsumi = __riscv_vfadd_tu(vsumi, vsumi, vi, vl);
    size_t vl = __riscv_vsetvlmax_e32m1();
    vfloat32m1_t z = __riscv_vfmv_s_f_f32m1(0, vl);
    *result = lv_cmake(__riscv_vfmv_f(__riscv_vfredusum(vr, z, vl)),
                       __riscv_vfmv_f(__riscv_vfredusum(vi, z, vl)));
#include <riscv_vector.h>
static inline void volk_32fc_x2_conjugate_dot_prod_32fc_rvvseg(lv_32fc_t* result,
                                                               const lv_32fc_t* input,
                                                               const lv_32fc_t* taps,
                                                               unsigned int num_points)
    vfloat32m2_t vsumr = __riscv_vfmv_v_f_f32m2(0, __riscv_vsetvlmax_e32m2());
    vfloat32m2_t vsumi = vsumr;
    size_t n = num_points;
    for (size_t vl; n > 0; n -= vl, input += vl, taps += vl) {
        vl = __riscv_vsetvl_e32m2(n);
        vfloat32m2x2_t va = __riscv_vlseg2e32_v_f32m2x2((const float*)input, vl);
        vfloat32m2x2_t vb = __riscv_vlseg2e32_v_f32m2x2((const float*)taps, vl);
        vfloat32m2_t var = __riscv_vget_f32m2(va, 0), vai = __riscv_vget_f32m2(va, 1);
        vfloat32m2_t vbr = __riscv_vget_f32m2(vb, 0), vbi = __riscv_vget_f32m2(vb, 1);
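
        /* Same arithmetic as the rvv variant above, but the segmented load
         * vlseg2e32 deinterleaves real and imaginary parts directly instead of
         * using 64-bit loads plus narrowing shifts.
         */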
        vbi = __riscv_vfneg(vbi, vl);
        vfloat32m2_t vr = __riscv_vfnmsac(__riscv_vfmul(var, vbr, vl), vai, vbi, vl);
        vfloat32m2_t vi = __riscv_vfmacc(__riscv_vfmul(var, vbi, vl), vai, vbr, vl);
        vsumr = __riscv_vfadd_tu(vsumr, vsumr, vr, vl);
        vsumi = __riscv_vfadd_tu(vsumi, vsumi, vi, vl);
    size_t vl = __riscv_vsetvlmax_e32m1();
    vfloat32m1_t z = __riscv_vfmv_s_f_f32m1(0, vl);
    *result = lv_cmake(__riscv_vfmv_f(__riscv_vfredusum(vr, z, vl)),
                       __riscv_vfmv_f(__riscv_vfredusum(vi, z, vl)));
#ifndef INCLUDED_volk_32fc_x2_conjugate_dot_prod_32fc_a_H
#define INCLUDED_volk_32fc_x2_conjugate_dot_prod_32fc_a_H