Vector Optimized Library of Kernels 3.3.0
Architecture-tuned implementations of math kernels
Loading...
Searching...
No Matches
volk_32fc_x2_multiply_32fc.h
Go to the documentation of this file.
/* -*- c++ -*- */
/*
 * Copyright 2012, 2014 Free Software Foundation, Inc.
 *
 * This file is part of VOLK
 *
 * SPDX-License-Identifier: LGPL-3.0-or-later
 */
9
56
57#ifndef INCLUDED_volk_32fc_x2_multiply_32fc_u_H
58#define INCLUDED_volk_32fc_x2_multiply_32fc_u_H
59
60#include <float.h>
61#include <inttypes.h>
62#include <stdio.h>
63#include <volk/volk_complex.h>
64
#if LV_HAVE_AVX2 && LV_HAVE_FMA
#include <immintrin.h>

/*!
  \brief Element-wise complex multiply, unaligned AVX2+FMA version.
  cVector[i] = aVector[i] * bVector[i] for i in [0, num_points).
  Processes 4 complex values (8 floats) per iteration.
*/
static inline void volk_32fc_x2_multiply_32fc_u_avx2_fma(lv_32fc_t* cVector,
                                                         const lv_32fc_t* aVector,
                                                         const lv_32fc_t* bVector,
                                                         unsigned int num_points)
{
    lv_32fc_t* out = cVector;
    const lv_32fc_t* inA = aVector;
    const lv_32fc_t* inB = bVector;

    const unsigned int simd_iters = num_points / 4;
    unsigned int i;

    for (i = 0; i < simd_iters; i++) {
        /* Interleaved loads: ar,ai,br,bi,... for each operand */
        const __m256 a = _mm256_loadu_ps((const float*)inA);
        const __m256 b = _mm256_loadu_ps((const float*)inB);

        const __m256 b_re = _mm256_moveldup_ps(b); /* duplicate real parts  */
        const __m256 b_im = _mm256_movehdup_ps(b); /* duplicate imag parts  */

        /* Swap re/im within each complex element of a: ai,ar,... */
        const __m256 a_swap = _mm256_permute_ps(a, 0xB1);

        /* Cross terms: ai*bi, ar*bi, ... */
        const __m256 cross = _mm256_mul_ps(a_swap, b_im);

        /* fmaddsub: even lanes subtract, odd lanes add
         * => ar*br - ai*bi (real), ai*br + ar*bi (imag) */
        const __m256 prod = _mm256_fmaddsub_ps(a, b_re, cross);

        _mm256_storeu_ps((float*)out, prod);

        inA += 4;
        inB += 4;
        out += 4;
    }

    /* Scalar tail: remaining 0-3 elements */
    for (i = simd_iters * 4; i < num_points; i++) {
        *out++ = (*inA++) * (*inB++);
    }
}
#endif /* LV_HAVE_AVX2 && LV_HAVE_FMA */
116
117
#ifdef LV_HAVE_AVX
#include <immintrin.h>
#include <volk/volk_avx_intrinsics.h> /* provides _mm256_complexmul_ps() */

/*!
  \brief Element-wise complex multiply, unaligned AVX version.
  cVector[i] = aVector[i] * bVector[i] for i in [0, num_points).
  Without the volk_avx_intrinsics.h include above, _mm256_complexmul_ps
  is undeclared and this file does not compile under LV_HAVE_AVX.
*/
static inline void volk_32fc_x2_multiply_32fc_u_avx(lv_32fc_t* cVector,
                                                    const lv_32fc_t* aVector,
                                                    const lv_32fc_t* bVector,
                                                    unsigned int num_points)
{
    unsigned int number = 0;
    const unsigned int quarterPoints = num_points / 4;

    __m256 x, y, z;
    lv_32fc_t* c = cVector;
    const lv_32fc_t* a = aVector;
    const lv_32fc_t* b = bVector;

    for (; number < quarterPoints; number++) {
        x = _mm256_loadu_ps((float*)a); /* ar,ai,br,bi,... */
        y = _mm256_loadu_ps((float*)b); /* cr,ci,dr,di,... */
        z = _mm256_complexmul_ps(x, y); /* 4 complex products */
        _mm256_storeu_ps((float*)c, z);

        a += 4;
        b += 4;
        c += 4;
    }

    /* Scalar tail: remaining 0-3 elements */
    number = quarterPoints * 4;
    for (; number < num_points; number++) {
        *c++ = (*a++) * (*b++);
    }
}
#endif /* LV_HAVE_AVX */
155
156
#ifdef LV_HAVE_SSE3
#include <pmmintrin.h>
#include <volk/volk_sse3_intrinsics.h> /* provides _mm_complexmul_ps() */

/*!
  \brief Element-wise complex multiply, unaligned SSE3 version.
  cVector[i] = aVector[i] * bVector[i] for i in [0, num_points).
  Processes 2 complex values (4 floats) per iteration.
*/
static inline void volk_32fc_x2_multiply_32fc_u_sse3(lv_32fc_t* cVector,
                                                     const lv_32fc_t* aVector,
                                                     const lv_32fc_t* bVector,
                                                     unsigned int num_points)
{
    unsigned int number = 0;
    const unsigned int halfPoints = num_points / 2;

    __m128 x, y, z;
    lv_32fc_t* c = cVector;
    const lv_32fc_t* a = aVector;
    const lv_32fc_t* b = bVector;

    for (; number < halfPoints; number++) {
        x = _mm_loadu_ps((float*)a); /* ar,ai,br,bi */
        y = _mm_loadu_ps((float*)b); /* cr,ci,dr,di */
        z = _mm_complexmul_ps(x, y); /* 2 complex products */
        _mm_storeu_ps((float*)c, z);

        a += 2;
        b += 2;
        c += 2;
    }

    /* Odd num_points: exactly one scalar element remains */
    if ((num_points % 2) != 0) {
        *c = (*a) * (*b);
    }
}
#endif /* LV_HAVE_SSE3 */
190
191
#ifdef LV_HAVE_GENERIC

/*!
  \brief Portable reference implementation.
  cVector[i] = aVector[i] * bVector[i] for i in [0, num_points).
*/
static inline void volk_32fc_x2_multiply_32fc_generic(lv_32fc_t* cVector,
                                                      const lv_32fc_t* aVector,
                                                      const lv_32fc_t* bVector,
                                                      unsigned int num_points)
{
    lv_32fc_t* cPtr = cVector;
    const lv_32fc_t* aPtr = aVector;
    const lv_32fc_t* bPtr = bVector;
    unsigned int number = 0;

    for (number = 0; number < num_points; number++) {
        *cPtr++ = (*aPtr++) * (*bPtr++);
    }
}
#endif /* LV_HAVE_GENERIC */
209
210
211#endif /* INCLUDED_volk_32fc_x2_multiply_32fc_u_H */
212#ifndef INCLUDED_volk_32fc_x2_multiply_32fc_a_H
213#define INCLUDED_volk_32fc_x2_multiply_32fc_a_H
214
215#include <float.h>
216#include <inttypes.h>
217#include <stdio.h>
218#include <volk/volk_complex.h>
219
#if LV_HAVE_AVX2 && LV_HAVE_FMA
#include <immintrin.h>

/*!
  \brief Element-wise complex multiply, aligned AVX2+FMA version.
  cVector[i] = aVector[i] * bVector[i] for i in [0, num_points).
  All pointers must be 32-byte aligned; processes 4 complex values per pass.
*/
static inline void volk_32fc_x2_multiply_32fc_a_avx2_fma(lv_32fc_t* cVector,
                                                         const lv_32fc_t* aVector,
                                                         const lv_32fc_t* bVector,
                                                         unsigned int num_points)
{
    lv_32fc_t* out = cVector;
    const lv_32fc_t* inA = aVector;
    const lv_32fc_t* inB = bVector;

    const unsigned int simd_iters = num_points / 4;
    unsigned int i;

    for (i = 0; i < simd_iters; i++) {
        /* Interleaved aligned loads: ar,ai,br,bi,... */
        const __m256 a = _mm256_load_ps((const float*)inA);
        const __m256 b = _mm256_load_ps((const float*)inB);

        const __m256 b_re = _mm256_moveldup_ps(b); /* duplicate real parts */
        const __m256 b_im = _mm256_movehdup_ps(b); /* duplicate imag parts */

        /* Swap re/im within each complex element of a: ai,ar,... */
        const __m256 a_swap = _mm256_permute_ps(a, 0xB1);

        /* Cross terms: ai*bi, ar*bi, ... */
        const __m256 cross = _mm256_mul_ps(a_swap, b_im);

        /* fmaddsub: even lanes subtract, odd lanes add
         * => ar*br - ai*bi (real), ai*br + ar*bi (imag) */
        const __m256 prod = _mm256_fmaddsub_ps(a, b_re, cross);

        _mm256_store_ps((float*)out, prod);

        inA += 4;
        inB += 4;
        out += 4;
    }

    /* Scalar tail: remaining 0-3 elements */
    for (i = simd_iters * 4; i < num_points; i++) {
        *out++ = (*inA++) * (*inB++);
    }
}
#endif /* LV_HAVE_AVX2 && LV_HAVE_FMA */
271
272
#ifdef LV_HAVE_AVX
#include <immintrin.h>
#include <volk/volk_avx_intrinsics.h> /* provides _mm256_complexmul_ps() */

/*!
  \brief Element-wise complex multiply, aligned AVX version.
  cVector[i] = aVector[i] * bVector[i] for i in [0, num_points).
  All pointers must be 32-byte aligned. Without the volk_avx_intrinsics.h
  include above, _mm256_complexmul_ps is undeclared.
*/
static inline void volk_32fc_x2_multiply_32fc_a_avx(lv_32fc_t* cVector,
                                                    const lv_32fc_t* aVector,
                                                    const lv_32fc_t* bVector,
                                                    unsigned int num_points)
{
    unsigned int number = 0;
    const unsigned int quarterPoints = num_points / 4;

    __m256 x, y, z;
    lv_32fc_t* c = cVector;
    const lv_32fc_t* a = aVector;
    const lv_32fc_t* b = bVector;

    for (; number < quarterPoints; number++) {
        x = _mm256_load_ps((float*)a); /* ar,ai,br,bi,... */
        y = _mm256_load_ps((float*)b); /* cr,ci,dr,di,... */
        z = _mm256_complexmul_ps(x, y); /* 4 complex products */
        _mm256_store_ps((float*)c, z);

        a += 4;
        b += 4;
        c += 4;
    }

    /* Scalar tail: remaining 0-3 elements */
    number = quarterPoints * 4;
    for (; number < num_points; number++) {
        *c++ = (*a++) * (*b++);
    }
}
#endif /* LV_HAVE_AVX */
308
#ifdef LV_HAVE_SSE3
#include <pmmintrin.h>
#include <volk/volk_sse3_intrinsics.h> /* provides _mm_complexmul_ps() */

/*!
  \brief Element-wise complex multiply, aligned SSE3 version.
  cVector[i] = aVector[i] * bVector[i] for i in [0, num_points).
  All pointers must be 16-byte aligned; processes 2 complex values per pass.
*/
static inline void volk_32fc_x2_multiply_32fc_a_sse3(lv_32fc_t* cVector,
                                                     const lv_32fc_t* aVector,
                                                     const lv_32fc_t* bVector,
                                                     unsigned int num_points)
{
    unsigned int number = 0;
    const unsigned int halfPoints = num_points / 2;

    __m128 x, y, z;
    lv_32fc_t* c = cVector;
    const lv_32fc_t* a = aVector;
    const lv_32fc_t* b = bVector;

    for (; number < halfPoints; number++) {
        x = _mm_load_ps((float*)a); /* ar,ai,br,bi */
        y = _mm_load_ps((float*)b); /* cr,ci,dr,di */
        z = _mm_complexmul_ps(x, y); /* 2 complex products */
        _mm_store_ps((float*)c, z);

        a += 2;
        b += 2;
        c += 2;
    }

    /* Odd num_points: exactly one scalar element remains */
    if ((num_points % 2) != 0) {
        *c = (*a) * (*b);
    }
}
#endif /* LV_HAVE_SSE3 */
342
343
#ifdef LV_HAVE_NEON
#include <arm_neon.h>

/*!
  \brief Element-wise complex multiply, NEON version.
  cVector[i] = aVector[i] * bVector[i] for i in [0, num_points).
  Uses vld2q/vst2q deinterleaved loads/stores: val[0] holds four real
  parts, val[1] holds four imaginary parts.
*/
static inline void volk_32fc_x2_multiply_32fc_neon(lv_32fc_t* cVector,
                                                   const lv_32fc_t* aVector,
                                                   const lv_32fc_t* bVector,
                                                   unsigned int num_points)
{
    const unsigned int quarter_points = num_points / 4;
    lv_32fc_t* out = cVector;
    const lv_32fc_t* inA = aVector;
    const lv_32fc_t* inB = bVector;
    unsigned int i;

    for (i = 0; i < quarter_points; ++i) {
        /* Deinterleave: a.val[0]=a0r..a3r, a.val[1]=a0i..a3i (b likewise) */
        float32x4x2_t a = vld2q_f32((const float*)inA);
        float32x4x2_t b = vld2q_f32((const float*)inB);
        __VOLK_PREFETCH(inA + 4);
        __VOLK_PREFETCH(inB + 4);

        float32x4x2_t prod;
        /* real part: ar*br - ai*bi */
        prod.val[0] = vsubq_f32(vmulq_f32(a.val[0], b.val[0]),
                                vmulq_f32(a.val[1], b.val[1]));
        /* imag part: ar*bi + ai*br */
        prod.val[1] = vaddq_f32(vmulq_f32(a.val[0], b.val[1]),
                                vmulq_f32(a.val[1], b.val[0]));

        vst2q_f32((float*)out, prod); /* re-interleave on store */

        inA += 4;
        inB += 4;
        out += 4;
    }

    /* Scalar tail: remaining 0-3 elements */
    for (i = quarter_points * 4; i < num_points; ++i) {
        *out++ = (*inA++) * (*inB++);
    }
}
#endif /* LV_HAVE_NEON */
392
393
394#ifdef LV_HAVE_NEONV7
395
396extern void volk_32fc_x2_multiply_32fc_a_neonasm(lv_32fc_t* cVector,
397 const lv_32fc_t* aVector,
398 const lv_32fc_t* bVector,
399 unsigned int num_points);
400#endif /* LV_HAVE_NEONV7 */
401
402
#ifdef LV_HAVE_NEONV8
#include <arm_neon.h>

/*!
  \brief Element-wise complex multiply, ARMv8 NEON version with FMA.
  cVector[i] = aVector[i] * bVector[i] for i in [0, num_points).
  Main loop handles 8 complex values per pass (2x unrolled deinterleaved
  4-wide multiplies), then a single 4-wide step, then a scalar tail.
*/
static inline void volk_32fc_x2_multiply_32fc_neonv8(lv_32fc_t* cVector,
                                                     const lv_32fc_t* aVector,
                                                     const lv_32fc_t* bVector,
                                                     unsigned int num_points)
{
    lv_32fc_t* out = cVector;
    const lv_32fc_t* pa = aVector;
    const lv_32fc_t* pb = bVector;
    unsigned int remaining = num_points;

    /* 2x-unrolled main loop: 8 complex values per iteration */
    while (remaining >= 8) {
        /* Deinterleaved loads: val[0] = reals, val[1] = imags */
        const float32x4x2_t a_lo = vld2q_f32((const float*)pa);
        const float32x4x2_t b_lo = vld2q_f32((const float*)pb);
        const float32x4x2_t a_hi = vld2q_f32((const float*)(pa + 4));
        const float32x4x2_t b_hi = vld2q_f32((const float*)(pb + 4));
        __VOLK_PREFETCH(pa + 8);
        __VOLK_PREFETCH(pb + 8);

        float32x4x2_t r_lo, r_hi;

        /* real = ar*br - ai*bi via fused multiply-subtract */
        r_lo.val[0] =
            vfmsq_f32(vmulq_f32(a_lo.val[0], b_lo.val[0]), a_lo.val[1], b_lo.val[1]);
        r_hi.val[0] =
            vfmsq_f32(vmulq_f32(a_hi.val[0], b_hi.val[0]), a_hi.val[1], b_hi.val[1]);

        /* imag = ar*bi + ai*br via fused multiply-add */
        r_lo.val[1] =
            vfmaq_f32(vmulq_f32(a_lo.val[0], b_lo.val[1]), a_lo.val[1], b_lo.val[0]);
        r_hi.val[1] =
            vfmaq_f32(vmulq_f32(a_hi.val[0], b_hi.val[1]), a_hi.val[1], b_hi.val[0]);

        vst2q_f32((float*)out, r_lo);
        vst2q_f32((float*)(out + 4), r_hi);

        pa += 8;
        pb += 8;
        out += 8;
        remaining -= 8;
    }

    /* One more 4-wide step if at least 4 values remain */
    if (remaining >= 4) {
        const float32x4x2_t a = vld2q_f32((const float*)pa);
        const float32x4x2_t b = vld2q_f32((const float*)pb);
        float32x4x2_t r;
        r.val[0] = vfmsq_f32(vmulq_f32(a.val[0], b.val[0]), a.val[1], b.val[1]);
        r.val[1] = vfmaq_f32(vmulq_f32(a.val[0], b.val[1]), a.val[1], b.val[0]);
        vst2q_f32((float*)out, r);
        pa += 4;
        pb += 4;
        out += 4;
        remaining -= 4;
    }

    /* Scalar tail: remaining 0-3 elements */
    while (remaining > 0) {
        *out++ = (*pa++) * (*pb++);
        remaining--;
    }
}

#endif /* LV_HAVE_NEONV8 */
470
471
#ifdef LV_HAVE_ORC

/* Implementation generated from ORC at build time. */
extern void volk_32fc_x2_multiply_32fc_a_orc_impl(lv_32fc_t* cVector,
                                                  const lv_32fc_t* aVector,
                                                  const lv_32fc_t* bVector,
                                                  int num_points);

/*!
  \brief Unaligned ORC entry point; forwards directly to the generated kernel.
  NOTE(review): num_points is implicitly narrowed from unsigned int to int
  here — presumably callers never exceed INT_MAX points; verify if in doubt.
*/
static inline void volk_32fc_x2_multiply_32fc_u_orc(lv_32fc_t* cVector,
                                                    const lv_32fc_t* aVector,
                                                    const lv_32fc_t* bVector,
                                                    unsigned int num_points)
{
    volk_32fc_x2_multiply_32fc_a_orc_impl(cVector, aVector, bVector, num_points);
}

#endif /* LV_HAVE_ORC */
488
#ifdef LV_HAVE_RVV
#include <riscv_vector.h>

/*!
  \brief Element-wise complex multiply, RISC-V Vector version.
  cVector[i] = aVector[i] * bVector[i] for i in [0, num_points).
  Loads each complex as one 64-bit word (low 32 bits = real, high 32 bits
  = imag), splits re/im with narrowing shifts, multiplies, and re-packs.
*/
static inline void volk_32fc_x2_multiply_32fc_rvv(lv_32fc_t* cVector,
                                                  const lv_32fc_t* aVector,
                                                  const lv_32fc_t* bVector,
                                                  unsigned int num_points)
{
    size_t remaining = num_points;
    while (remaining > 0) {
        size_t vl = __riscv_vsetvl_e32m4(remaining);

        /* One 64-bit lane per complex value */
        vuint64m8_t a_bits = __riscv_vle64_v_u64m8((const uint64_t*)aVector, vl);
        vuint64m8_t b_bits = __riscv_vle64_v_u64m8((const uint64_t*)bVector, vl);

        /* Narrowing shifts extract the low (real) and high (imag) halves */
        vfloat32m4_t ar = __riscv_vreinterpret_f32m4(__riscv_vnsrl(a_bits, 0, vl));
        vfloat32m4_t br = __riscv_vreinterpret_f32m4(__riscv_vnsrl(b_bits, 0, vl));
        vfloat32m4_t ai = __riscv_vreinterpret_f32m4(__riscv_vnsrl(a_bits, 32, vl));
        vfloat32m4_t bi = __riscv_vreinterpret_f32m4(__riscv_vnsrl(b_bits, 32, vl));

        /* re = ar*br - ai*bi ; im = ar*bi + ai*br */
        vfloat32m4_t re = __riscv_vfnmsac(__riscv_vfmul(ar, br, vl), ai, bi, vl);
        vfloat32m4_t im = __riscv_vfmacc(__riscv_vfmul(ar, bi, vl), ai, br, vl);

        /* Re-pack: widen re, then add im << 32 via widening multiply-add
         * (0xFFFFFFFF * im + im widened == im << 32 in the high half) */
        vuint32m4_t re_bits = __riscv_vreinterpret_u32m4(re);
        vuint32m4_t im_bits = __riscv_vreinterpret_u32m4(im);
        vuint64m8_t packed =
            __riscv_vwmaccu(__riscv_vwaddu_vv(re_bits, im_bits, vl), 0xFFFFFFFF, im_bits, vl);
        __riscv_vse64((uint64_t*)cVector, packed, vl);

        remaining -= vl;
        aVector += vl;
        bVector += vl;
        cVector += vl;
    }
}
#endif /*LV_HAVE_RVV*/
516
#ifdef LV_HAVE_RVVSEG
#include <riscv_vector.h>

/*!
  \brief Element-wise complex multiply, RISC-V Vector version using
  segment loads/stores. cVector[i] = aVector[i] * bVector[i].
  vlseg2e32 deinterleaves re/im planes directly; vsseg2e32 re-interleaves.
*/
static inline void volk_32fc_x2_multiply_32fc_rvvseg(lv_32fc_t* cVector,
                                                     const lv_32fc_t* aVector,
                                                     const lv_32fc_t* bVector,
                                                     unsigned int num_points)
{
    size_t remaining = num_points;
    while (remaining > 0) {
        size_t vl = __riscv_vsetvl_e32m4(remaining);

        /* Segment loads: field 0 = real plane, field 1 = imag plane */
        vfloat32m4x2_t a = __riscv_vlseg2e32_v_f32m4x2((const float*)aVector, vl);
        vfloat32m4x2_t b = __riscv_vlseg2e32_v_f32m4x2((const float*)bVector, vl);
        vfloat32m4_t ar = __riscv_vget_f32m4(a, 0);
        vfloat32m4_t ai = __riscv_vget_f32m4(a, 1);
        vfloat32m4_t br = __riscv_vget_f32m4(b, 0);
        vfloat32m4_t bi = __riscv_vget_f32m4(b, 1);

        /* re = ar*br - ai*bi ; im = ar*bi + ai*br */
        vfloat32m4_t re = __riscv_vfnmsac(__riscv_vfmul(ar, br, vl), ai, bi, vl);
        vfloat32m4_t im = __riscv_vfmacc(__riscv_vfmul(ar, bi, vl), ai, br, vl);

        __riscv_vsseg2e32_v_f32m4x2(
            (float*)cVector, __riscv_vcreate_v_f32m4x2(re, im), vl);

        remaining -= vl;
        aVector += vl;
        bVector += vl;
        cVector += vl;
    }
}
#endif /*LV_HAVE_RVVSEG*/
539
540#endif /* INCLUDED_volk_32fc_x2_multiply_32fc_a_H */