55#ifndef INCLUDED_volk_32f_tanh_32f_a_H
56#define INCLUDED_volk_32f_tanh_32f_a_H
69 unsigned int number = 0;
70 float* cPtr = cVector;
71 const float* aPtr = aVector;
72 for (; number < num_points; number++) {
73 *cPtr++ = tanhf(*aPtr++);
/*
 * Scalar tanh via a 7/6-order rational (continued-fraction) approximation:
 *   tanh(x) ~= x*(135135 + x2*(17325 + x2*(378 + x2)))
 *            / (135135 + x2*(62370 + x2*(3150 + 28*x2)))
 * The approximation diverges for large |x|, so inputs beyond +/-4.97 are
 * clamped to +/-1 before evaluating the polynomial.
 *
 * \param cVector    output buffer (num_points floats)
 * \param aVector    input buffer (num_points floats)
 * \param num_points number of elements to process
 */
static inline void
volk_32f_tanh_32f_series(float* cVector, const float* aVector, unsigned int num_points)
{
    float* cPtr = cVector;
    const float* aPtr = aVector;
    for (unsigned int number = 0; number < num_points; number++) {
        if (*aPtr > 4.97)
            *cPtr++ = 1;
        else if (*aPtr <= -4.97)
            *cPtr++ = -1;
        else {
            float x2 = (*aPtr) * (*aPtr);
            float a = (*aPtr) * (135135.0f + x2 * (17325.0f + x2 * (378.0f + x2)));
            float b = 135135.0f + x2 * (62370.0f + x2 * (3150.0f + x2 * 28.0f));
            *cPtr++ = a / b;
        }
        aPtr++;
    }
}
#ifdef LV_HAVE_SSE
#include <xmmintrin.h>

/*
 * SSE tanh: 4 aligned floats per iteration using the same 7/6-order rational
 * approximation as volk_32f_tanh_32f_series; the remainder (num_points % 4)
 * falls through to the scalar series.
 * NOTE(review): no clamping in the vector path — the rational diverges for
 * large |x|; confirm callers stay in the approximation's useful range.
 * Requires aVector/cVector 16-byte aligned (aligned load/store).
 */
static inline void
volk_32f_tanh_32f_a_sse(float* cVector, const float* aVector, unsigned int num_points)
{
    unsigned int number = 0;
    const unsigned int quarterPoints = num_points / 4;

    float* cPtr = cVector;
    const float* aPtr = aVector;

    __m128 aVal, cVal, x2, a, b;
    __m128 const1, const2, const3, const4, const5, const6;
    const1 = _mm_set_ps1(135135.0f);
    const2 = _mm_set_ps1(17325.0f);
    const3 = _mm_set_ps1(378.0f);
    const4 = _mm_set_ps1(62370.0f);
    const5 = _mm_set_ps1(3150.0f);
    const6 = _mm_set_ps1(28.0f);
    for (; number < quarterPoints; number++) {
        aVal = _mm_load_ps(aPtr);
        x2 = _mm_mul_ps(aVal, aVal);
        /* numerator: x * (c1 + x2*(c2 + x2*(c3 + x2))) */
        a = _mm_mul_ps(
            aVal,
            _mm_add_ps(
                const1,
                _mm_mul_ps(x2,
                           _mm_add_ps(const2, _mm_mul_ps(x2, _mm_add_ps(const3, x2))))));
        /* denominator: c1 + x2*(c4 + x2*(c5 + x2*c6)) */
        b = _mm_add_ps(
            const1,
            _mm_mul_ps(x2,
                       _mm_add_ps(const4,
                                  _mm_mul_ps(x2, _mm_add_ps(const5, _mm_mul_ps(x2, const6))))));
        cVal = _mm_div_ps(a, b);

        _mm_store_ps(cPtr, cVal);

        aPtr += 4;
        cPtr += 4;
    }

    /* scalar tail (also handles the clamp region correctly) */
    number = quarterPoints * 4;
    volk_32f_tanh_32f_series(cPtr, aPtr, num_points - number);
}
#endif /* LV_HAVE_SSE */
#ifdef LV_HAVE_AVX
#include <immintrin.h>

/*
 * AVX tanh: 8 aligned floats per iteration via the 7/6-order rational
 * approximation; remainder handled by volk_32f_tanh_32f_series.
 * Requires aVector/cVector 32-byte aligned.
 */
static inline void
volk_32f_tanh_32f_a_avx(float* cVector, const float* aVector, unsigned int num_points)
{
    unsigned int number = 0;
    const unsigned int eighthPoints = num_points / 8;

    float* cPtr = cVector;
    const float* aPtr = aVector;

    __m256 aVal, cVal, x2, a, b;
    __m256 const1, const2, const3, const4, const5, const6;
    const1 = _mm256_set1_ps(135135.0f);
    const2 = _mm256_set1_ps(17325.0f);
    const3 = _mm256_set1_ps(378.0f);
    const4 = _mm256_set1_ps(62370.0f);
    const5 = _mm256_set1_ps(3150.0f);
    const6 = _mm256_set1_ps(28.0f);
    for (; number < eighthPoints; number++) {
        aVal = _mm256_load_ps(aPtr);
        x2 = _mm256_mul_ps(aVal, aVal);
        /* numerator: x * (c1 + x2*(c2 + x2*(c3 + x2))) */
        a = _mm256_mul_ps(
            aVal,
            _mm256_add_ps(
                const1,
                _mm256_mul_ps(x2,
                              _mm256_add_ps(const2,
                                            _mm256_mul_ps(x2, _mm256_add_ps(const3, x2))))));
        /* denominator: c1 + x2*(c4 + x2*(c5 + x2*c6)) */
        b = _mm256_add_ps(
            const1,
            _mm256_mul_ps(
                x2,
                _mm256_add_ps(const4,
                              _mm256_mul_ps(x2,
                                            _mm256_add_ps(const5,
                                                          _mm256_mul_ps(x2, const6))))));
        cVal = _mm256_div_ps(a, b);

        _mm256_store_ps(cPtr, cVal);

        aPtr += 8;
        cPtr += 8;
    }

    /* scalar tail */
    number = eighthPoints * 8;
    volk_32f_tanh_32f_series(cPtr, aPtr, num_points - number);
}
#endif /* LV_HAVE_AVX */
#if LV_HAVE_AVX && LV_HAVE_FMA
#include <immintrin.h>

/*
 * AVX+FMA tanh: same rational approximation as the plain AVX kernel, with
 * the Horner steps fused into _mm256_fmadd_ps. Remainder handled by
 * volk_32f_tanh_32f_series. Requires 32-byte aligned buffers.
 */
static inline void
volk_32f_tanh_32f_a_avx_fma(float* cVector, const float* aVector, unsigned int num_points)
{
    unsigned int number = 0;
    const unsigned int eighthPoints = num_points / 8;

    float* cPtr = cVector;
    const float* aPtr = aVector;

    __m256 aVal, cVal, x2, a, b;
    __m256 const1, const2, const3, const4, const5, const6;
    const1 = _mm256_set1_ps(135135.0f);
    const2 = _mm256_set1_ps(17325.0f);
    const3 = _mm256_set1_ps(378.0f);
    const4 = _mm256_set1_ps(62370.0f);
    const5 = _mm256_set1_ps(3150.0f);
    const6 = _mm256_set1_ps(28.0f);
    for (; number < eighthPoints; number++) {
        aVal = _mm256_load_ps(aPtr);
        x2 = _mm256_mul_ps(aVal, aVal);
        /* numerator: x * fma(x2, fma(x2, c3 + x2, c2), c1) */
        a = _mm256_mul_ps(
            aVal,
            _mm256_fmadd_ps(
                x2, _mm256_fmadd_ps(x2, _mm256_add_ps(const3, x2), const2), const1));
        /* denominator: fma(x2, fma(x2, fma(x2, c6, c5), c4), c1) */
        b = _mm256_fmadd_ps(
            x2, _mm256_fmadd_ps(x2, _mm256_fmadd_ps(x2, const6, const5), const4), const1);
        cVal = _mm256_div_ps(a, b);

        _mm256_store_ps(cPtr, cVal);

        aPtr += 8;
        cPtr += 8;
    }

    /* scalar tail */
    number = eighthPoints * 8;
    volk_32f_tanh_32f_series(cPtr, aPtr, num_points - number);
}
#endif /* LV_HAVE_AVX && LV_HAVE_FMA */
#ifdef LV_HAVE_NEON
#include <arm_neon.h>

/*
 * ARMv7 NEON tanh: 4 floats per iteration. The denominator reciprocal is
 * computed with vrecpeq_f32 plus two vrecpsq_f32 Newton-Raphson refinement
 * steps (ARMv7 NEON has no vector divide). Remainder handled by
 * volk_32f_tanh_32f_series.
 */
static inline void
volk_32f_tanh_32f_neon(float* cVector, const float* aVector, unsigned int num_points)
{
    unsigned int number = 0;
    const unsigned int quarterPoints = num_points / 4;

    float* cPtr = cVector;
    const float* aPtr = aVector;

    const float32x4_t const1 = vdupq_n_f32(135135.0f);
    const float32x4_t const2 = vdupq_n_f32(17325.0f);
    const float32x4_t const3 = vdupq_n_f32(378.0f);
    const float32x4_t const4 = vdupq_n_f32(62370.0f);
    const float32x4_t const5 = vdupq_n_f32(3150.0f);
    const float32x4_t const6 = vdupq_n_f32(28.0f);

    for (; number < quarterPoints; number++) {
        float32x4_t aVal = vld1q_f32(aPtr);
        float32x4_t x2 = vmulq_f32(aVal, aVal);

        /* numerator: x * (c1 + x2*(c2 + x2*(c3 + x2))) via vmlaq Horner */
        float32x4_t inner_a = vaddq_f32(const3, x2);
        inner_a = vmlaq_f32(const2, x2, inner_a);
        inner_a = vmlaq_f32(const1, x2, inner_a);
        float32x4_t a = vmulq_f32(aVal, inner_a);

        /* denominator: c1 + x2*(c4 + x2*(c5 + x2*c6)) */
        float32x4_t inner_b = vmlaq_f32(const5, x2, const6);
        inner_b = vmlaq_f32(const4, x2, inner_b);
        float32x4_t b = vmlaq_f32(const1, x2, inner_b);

        /* reciprocal estimate + two Newton-Raphson refinements */
        float32x4_t b_recip = vrecpeq_f32(b);
        b_recip = vmulq_f32(b_recip, vrecpsq_f32(b, b_recip));
        b_recip = vmulq_f32(b_recip, vrecpsq_f32(b, b_recip));
        float32x4_t cVal = vmulq_f32(a, b_recip);

        vst1q_f32(cPtr, cVal);

        aPtr += 4;
        cPtr += 4;
    }

    /* scalar tail */
    number = quarterPoints * 4;
    volk_32f_tanh_32f_series(cPtr, aPtr, num_points - number);
}
#endif /* LV_HAVE_NEON */
#ifdef LV_HAVE_NEONV8
#include <arm_neon.h>

/*
 * AArch64 NEON tanh: 8 floats per iteration as two interleaved 4-lane
 * vectors (hides FMA latency), using vfmaq_f32 fused multiply-adds and the
 * native vdivq_f32 divide. Remainder handled by volk_32f_tanh_32f_series.
 */
static inline void
volk_32f_tanh_32f_neonv8(float* cVector, const float* aVector, unsigned int num_points)
{
    unsigned int number = 0;
    const unsigned int eighthPoints = num_points / 8;

    float* cPtr = cVector;
    const float* aPtr = aVector;

    const float32x4_t const1 = vdupq_n_f32(135135.0f);
    const float32x4_t const2 = vdupq_n_f32(17325.0f);
    const float32x4_t const3 = vdupq_n_f32(378.0f);
    const float32x4_t const4 = vdupq_n_f32(62370.0f);
    const float32x4_t const5 = vdupq_n_f32(3150.0f);
    const float32x4_t const6 = vdupq_n_f32(28.0f);

    for (; number < eighthPoints; number++) {
        /* two independent 4-lane streams */
        float32x4_t aVal0 = vld1q_f32(aPtr);
        float32x4_t aVal1 = vld1q_f32(aPtr + 4);
        float32x4_t x2_0 = vmulq_f32(aVal0, aVal0);
        float32x4_t x2_1 = vmulq_f32(aVal1, aVal1);

        /* numerator: x * (c1 + x2*(c2 + x2*(c3 + x2))) */
        float32x4_t inner_a0 = vaddq_f32(const3, x2_0);
        float32x4_t inner_a1 = vaddq_f32(const3, x2_1);
        inner_a0 = vfmaq_f32(const2, x2_0, inner_a0);
        inner_a1 = vfmaq_f32(const2, x2_1, inner_a1);
        inner_a0 = vfmaq_f32(const1, x2_0, inner_a0);
        inner_a1 = vfmaq_f32(const1, x2_1, inner_a1);
        float32x4_t a0 = vmulq_f32(aVal0, inner_a0);
        float32x4_t a1 = vmulq_f32(aVal1, inner_a1);

        /* denominator: c1 + x2*(c4 + x2*(c5 + x2*c6)) */
        float32x4_t inner_b0 = vfmaq_f32(const5, x2_0, const6);
        float32x4_t inner_b1 = vfmaq_f32(const5, x2_1, const6);
        inner_b0 = vfmaq_f32(const4, x2_0, inner_b0);
        inner_b1 = vfmaq_f32(const4, x2_1, inner_b1);
        float32x4_t b0 = vfmaq_f32(const1, x2_0, inner_b0);
        float32x4_t b1 = vfmaq_f32(const1, x2_1, inner_b1);

        /* AArch64 has a true vector divide */
        float32x4_t cVal0 = vdivq_f32(a0, b0);
        float32x4_t cVal1 = vdivq_f32(a1, b1);

        vst1q_f32(cPtr, cVal0);
        vst1q_f32(cPtr + 4, cVal1);

        aPtr += 8;
        cPtr += 8;
    }

    /* scalar tail */
    number = eighthPoints * 8;
    volk_32f_tanh_32f_series(cPtr, aPtr, num_points - number);
}
#endif /* LV_HAVE_NEONV8 */
371#ifndef INCLUDED_volk_32f_tanh_32f_u_H
372#define INCLUDED_volk_32f_tanh_32f_u_H
#ifdef LV_HAVE_SSE
#include <xmmintrin.h>

/*
 * Unaligned SSE tanh: identical math to volk_32f_tanh_32f_a_sse but uses
 * unaligned loads/stores. Remainder handled by volk_32f_tanh_32f_series.
 */
static inline void
volk_32f_tanh_32f_u_sse(float* cVector, const float* aVector, unsigned int num_points)
{
    unsigned int number = 0;
    const unsigned int quarterPoints = num_points / 4;

    float* cPtr = cVector;
    const float* aPtr = aVector;

    __m128 aVal, cVal, x2, a, b;
    __m128 const1, const2, const3, const4, const5, const6;
    const1 = _mm_set_ps1(135135.0f);
    const2 = _mm_set_ps1(17325.0f);
    const3 = _mm_set_ps1(378.0f);
    const4 = _mm_set_ps1(62370.0f);
    const5 = _mm_set_ps1(3150.0f);
    const6 = _mm_set_ps1(28.0f);
    for (; number < quarterPoints; number++) {
        aVal = _mm_loadu_ps(aPtr);
        x2 = _mm_mul_ps(aVal, aVal);
        /* numerator: x * (c1 + x2*(c2 + x2*(c3 + x2))) */
        a = _mm_mul_ps(
            aVal,
            _mm_add_ps(
                const1,
                _mm_mul_ps(x2,
                           _mm_add_ps(const2, _mm_mul_ps(x2, _mm_add_ps(const3, x2))))));
        /* denominator: c1 + x2*(c4 + x2*(c5 + x2*c6)) */
        b = _mm_add_ps(
            const1,
            _mm_mul_ps(x2,
                       _mm_add_ps(const4,
                                  _mm_mul_ps(x2, _mm_add_ps(const5, _mm_mul_ps(x2, const6))))));
        cVal = _mm_div_ps(a, b);

        _mm_storeu_ps(cPtr, cVal);

        aPtr += 4;
        cPtr += 4;
    }

    /* scalar tail */
    number = quarterPoints * 4;
    volk_32f_tanh_32f_series(cPtr, aPtr, num_points - number);
}
#endif /* LV_HAVE_SSE */
#ifdef LV_HAVE_AVX
#include <immintrin.h>

/*
 * Unaligned AVX tanh: identical math to volk_32f_tanh_32f_a_avx but uses
 * unaligned loads/stores. Remainder handled by volk_32f_tanh_32f_series.
 */
static inline void
volk_32f_tanh_32f_u_avx(float* cVector, const float* aVector, unsigned int num_points)
{
    unsigned int number = 0;
    const unsigned int eighthPoints = num_points / 8;

    float* cPtr = cVector;
    const float* aPtr = aVector;

    __m256 aVal, cVal, x2, a, b;
    __m256 const1, const2, const3, const4, const5, const6;
    const1 = _mm256_set1_ps(135135.0f);
    const2 = _mm256_set1_ps(17325.0f);
    const3 = _mm256_set1_ps(378.0f);
    const4 = _mm256_set1_ps(62370.0f);
    const5 = _mm256_set1_ps(3150.0f);
    const6 = _mm256_set1_ps(28.0f);
    for (; number < eighthPoints; number++) {
        aVal = _mm256_loadu_ps(aPtr);
        x2 = _mm256_mul_ps(aVal, aVal);
        /* numerator: x * (c1 + x2*(c2 + x2*(c3 + x2))) */
        a = _mm256_mul_ps(
            aVal,
            _mm256_add_ps(
                const1,
                _mm256_mul_ps(x2,
                              _mm256_add_ps(const2,
                                            _mm256_mul_ps(x2, _mm256_add_ps(const3, x2))))));
        /* denominator: c1 + x2*(c4 + x2*(c5 + x2*c6)) */
        b = _mm256_add_ps(
            const1,
            _mm256_mul_ps(
                x2,
                _mm256_add_ps(const4,
                              _mm256_mul_ps(x2,
                                            _mm256_add_ps(const5,
                                                          _mm256_mul_ps(x2, const6))))));
        cVal = _mm256_div_ps(a, b);

        _mm256_storeu_ps(cPtr, cVal);

        aPtr += 8;
        cPtr += 8;
    }

    /* scalar tail */
    number = eighthPoints * 8;
    volk_32f_tanh_32f_series(cPtr, aPtr, num_points - number);
}
#endif /* LV_HAVE_AVX */
#if LV_HAVE_AVX && LV_HAVE_FMA
#include <immintrin.h>

/*
 * Unaligned AVX+FMA tanh: identical math to volk_32f_tanh_32f_a_avx_fma but
 * uses unaligned loads/stores. Remainder handled by volk_32f_tanh_32f_series.
 */
static inline void
volk_32f_tanh_32f_u_avx_fma(float* cVector, const float* aVector, unsigned int num_points)
{
    unsigned int number = 0;
    const unsigned int eighthPoints = num_points / 8;

    float* cPtr = cVector;
    const float* aPtr = aVector;

    __m256 aVal, cVal, x2, a, b;
    __m256 const1, const2, const3, const4, const5, const6;
    const1 = _mm256_set1_ps(135135.0f);
    const2 = _mm256_set1_ps(17325.0f);
    const3 = _mm256_set1_ps(378.0f);
    const4 = _mm256_set1_ps(62370.0f);
    const5 = _mm256_set1_ps(3150.0f);
    const6 = _mm256_set1_ps(28.0f);
    for (; number < eighthPoints; number++) {
        aVal = _mm256_loadu_ps(aPtr);
        x2 = _mm256_mul_ps(aVal, aVal);
        /* numerator: x * fma(x2, fma(x2, c3 + x2, c2), c1) */
        a = _mm256_mul_ps(
            aVal,
            _mm256_fmadd_ps(
                x2, _mm256_fmadd_ps(x2, _mm256_add_ps(const3, x2), const2), const1));
        /* denominator: fma(x2, fma(x2, fma(x2, c6, c5), c4), c1) */
        b = _mm256_fmadd_ps(
            x2, _mm256_fmadd_ps(x2, _mm256_fmadd_ps(x2, const6, const5), const4), const1);
        cVal = _mm256_div_ps(a, b);

        _mm256_storeu_ps(cPtr, cVal);

        aPtr += 8;
        cPtr += 8;
    }

    /* scalar tail */
    number = eighthPoints * 8;
    volk_32f_tanh_32f_series(cPtr, aPtr, num_points - number);
}
#endif /* LV_HAVE_AVX && LV_HAVE_FMA */
530#include <riscv_vector.h>
533volk_32f_tanh_32f_rvv(
float* bVector,
const float* aVector,
unsigned int num_points)
535 size_t vlmax = __riscv_vsetvlmax_e32m2();
537 const vfloat32m2_t c1 = __riscv_vfmv_v_f_f32m2(135135.0f, vlmax);
538 const vfloat32m2_t c2 = __riscv_vfmv_v_f_f32m2(17325.0f, vlmax);
539 const vfloat32m2_t c3 = __riscv_vfmv_v_f_f32m2(378.0f, vlmax);
540 const vfloat32m2_t c4 = __riscv_vfmv_v_f_f32m2(62370.0f, vlmax);
541 const vfloat32m2_t c5 = __riscv_vfmv_v_f_f32m2(3150.0f, vlmax);
542 const vfloat32m2_t c6 = __riscv_vfmv_v_f_f32m2(28.0f, vlmax);
544 size_t n = num_points;
545 for (
size_t vl; n > 0; n -= vl, aVector += vl, bVector += vl) {
546 vl = __riscv_vsetvl_e32m2(n);
547 vfloat32m2_t x = __riscv_vle32_v_f32m2(aVector, vl);
548 vfloat32m2_t xx = __riscv_vfmul(x, x, vl);
550 a = __riscv_vfadd(xx, c3, vl);
551 a = __riscv_vfmadd(a, xx, c2, vl);
552 a = __riscv_vfmadd(a, xx, c1, vl);
553 a = __riscv_vfmul(a, x, vl);
555 b = __riscv_vfmadd(b, xx, c5, vl);
556 b = __riscv_vfmadd(b, xx, c4, vl);
557 b = __riscv_vfmadd(b, xx, c1, vl);
558 __riscv_vse32(bVector, __riscv_vfdiv(a, b, vl), vl);