[hamradio-commits] [gnss-sdr] 08/236: Added a new volk_gnsssdr kernel that integrates both the phase rotator and N dot_product kernels. Enabled in cpu_multicorrelator_16sc

Carles Fernandez carles_fernandez-guest at moszumanska.debian.org
Tue Apr 26 16:02:08 UTC 2016


This is an automated email from the git hooks/post-receive script.

carles_fernandez-guest pushed a commit to branch next
in repository gnss-sdr.

commit 9bf47106794d7b48b6415624b105d8a06ce7f0c5
Author: Javier Arribas <javiarribas at gmail.com>
Date:   Tue Feb 9 11:49:18 2016 +0100

    Added a new volk_gnsssdr kernel that integrates both the phase rotator
    and N dot_product kernels. Enabled in cpu_multicorrelator_16sc
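
    A minimal usage sketch of the fused kernel (hypothetical caller; the
    helper name, tap buffers and phase values are illustrative and not part
    of the library). The rotator phases must be unit complex exponentials:

        #include <math.h>
        #include <volk_gnsssdr/volk_gnsssdr_complex.h>
        #include "volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn.h"

        /* Correlate num_points samples against three local code replicas,
         * wiping off the carrier with the kernel's embedded phase rotator. */
        static void correlate_taps(lv_16sc_t corr_out[3], const lv_16sc_t* sig_in,
                const lv_16sc_t* early, const lv_16sc_t* prompt, const lv_16sc_t* late,
                float phi0, float dphi, unsigned int num_points)
        {
            const lv_16sc_t* codes[3] = { early, prompt, late };
            lv_32fc_t phase = lv_cmake(cosf(phi0), -sinf(phi0));      /* initial phase */
            lv_32fc_t phase_inc = lv_cmake(cosf(dphi), -sinf(dphi));  /* per-sample step */
            volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_generic(
                    corr_out, sig_in, phase_inc, &phase, codes, 3, num_points);
            /* phase now holds the rotator state after num_points samples */
        }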
---
 ...volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn.h | 398 +++++++++++++++++++++
 ..._gnsssdr_16ic_x2_rotator_dotprodxnpuppet_16ic.h | 134 +++++++
 .../volk_gnsssdr/lib/kernel_tests.h                |   1 +
 .../tracking/libs/cpu_multicorrelator_16sc.cc      |  41 ++-
 .../tracking/libs/cpu_multicorrelator_16sc.h       |   5 +-
 5 files changed, 556 insertions(+), 23 deletions(-)

diff --git a/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn.h b/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn.h
new file mode 100644
index 0000000..a958743
--- /dev/null
+++ b/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn.h
@@ -0,0 +1,398 @@
+/*!
+ * \file volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn.h
+ * \brief Volk protokernel: rotates the phase of a common 16-bit complex vector, multiplies it by N 16-bit complex vectors and accumulates the results into N 16-bit complex outputs.
+ * \authors <ul>
+ *          <li> Javier Arribas, 2015. jarribas(at)cttc.es
+ *          </ul>
+ *
+ * Volk protokernel that multiplies N 16-bit complex vectors by a common vector, which is phase-rotated according to an initial phase offset and a per-sample phase increment, and accumulates the results into N 16-bit complex outputs.
+ * It is optimized for the N-tap correlation process of GNSS receivers.
+ *
+ * -------------------------------------------------------------------------
+ *
+ * Copyright (C) 2010-2015  (see AUTHORS file for a list of contributors)
+ *
+ * GNSS-SDR is a software defined Global Navigation
+ *          Satellite Systems receiver
+ *
+ * This file is part of GNSS-SDR.
+ *
+ * GNSS-SDR is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * GNSS-SDR is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GNSS-SDR. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * -------------------------------------------------------------------------
+ */
+
+#ifndef INCLUDED_volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_H
+#define INCLUDED_volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_H
+
+
+#include <volk_gnsssdr/volk_gnsssdr_complex.h>
+#include <volk_gnsssdr/saturation_arithmetic.h>
+#include <math.h>  // rintf
+
+#ifdef LV_HAVE_GENERIC
+/*!
+ \brief Rotates the phase of the reference complex vector, multiplies it with multiple versions of another complex vector, accumulates the results and stores them in the output vector
+ \param[out]    result        Array of num_a_vectors components where the accumulated results are stored
+ \param[in]     in_common     Pointer to the reference vector, which is phase-rotated, multiplied and accumulated
+ \param[in]     phase_inc     Phase increment per sample, expressed as a unit complex exponential
+ \param[in,out] phase         On input, the initial phase of the rotator; on output, the phase after processing num_points samples
+ \param[in]     in_a          Pointer to an array of pointers to multiple versions of the other vector to be multiplied and accumulated
+ \param[in]     num_a_vectors Number of vectors to be multiplied by the reference vector and accumulated
+ \param[in]     num_points    Number of complex values to be multiplied together, accumulated and stored into result
+ */
+static inline void volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_generic(lv_16sc_t* result, const lv_16sc_t* in_common, const lv_32fc_t phase_inc, lv_32fc_t* phase, const lv_16sc_t** in_a, int num_a_vectors, unsigned int num_points)
+{
+	lv_16sc_t tmp16;
+	lv_32fc_t tmp32;
+	for (int n_vec = 0; n_vec < num_a_vectors; n_vec++)
+	{
+		result[n_vec] = lv_cmake(0,0);
+	}
+	for (unsigned int n = 0; n < num_points; n++)
+	{
+		tmp16 = *in_common++;
+		tmp32 = lv_cmake((float)lv_creal(tmp16), (float)lv_cimag(tmp16)) * (*phase);
+		tmp16 = lv_cmake((int16_t)rintf(lv_creal(tmp32)), (int16_t)rintf(lv_cimag(tmp32)));
+		(*phase) *= phase_inc;
+		for (int n_vec = 0; n_vec < num_a_vectors; n_vec++)
+		{
+			lv_16sc_t tmp = tmp16 * in_a[n_vec][n];
+			result[n_vec] = lv_cmake(sat_adds16i(lv_creal(result[n_vec]), lv_creal(tmp)), sat_adds16i(lv_cimag(result[n_vec]), lv_cimag(tmp)));
+		}
+	}
+}
+
+#endif /*LV_HAVE_GENERIC*/
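+
+/*
+ * Sketch of the per-sample operation implemented above (illustration only,
+ * not additional API). For each sample n and each vector v:
+ *
+ *   rotated    = round(in_common[n] * (*phase));  // carrier wipe-off in 32fc, rounded back to 16ic
+ *   (*phase)  *= phase_inc;                       // advance the rotator by one sample
+ *   result[v] += rotated * in_a[v][n];            // accumulation saturated to 16 bits
+ *                                                 // (the complex product itself wraps)
+ *
+ * i.e. result[v] = sum_n round(in_common[n] * phase_0 * phase_inc^n) * in_a[v][n].
+ */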
+
+
+#ifdef LV_HAVE_SSE3
+#include <pmmintrin.h>
+
+/*!
+ \brief Rotates the phase of the reference complex vector, multiplies it with multiple versions of another complex vector, accumulates the results and stores them in the output vector
+ \param[out]    out           Array of num_a_vectors components where the accumulated results are stored
+ \param[in]     in_common     Pointer to the reference vector, which is phase-rotated, multiplied and accumulated
+ \param[in]     phase_inc     Phase increment per sample, expressed as a unit complex exponential
+ \param[in,out] phase         On input, the initial phase of the rotator; on output, the phase after processing num_points samples
+ \param[in]     in_a          Pointer to an array of pointers to multiple versions of the other vector to be multiplied and accumulated
+ \param[in]     num_a_vectors Number of vectors to be multiplied by the reference vector and accumulated
+ \param[in]     num_points    Number of complex values to be multiplied together, accumulated and stored into out
+ */
+static inline void volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_a_sse3(lv_16sc_t* out, const lv_16sc_t* in_common, const lv_32fc_t phase_inc, lv_32fc_t* phase, const lv_16sc_t** in_a, int num_a_vectors, unsigned int num_points)
+{
+	lv_16sc_t dotProduct = lv_cmake(0,0);
+
+	const unsigned int sse_iters = num_points / 4;
+
+	const lv_16sc_t** _in_a = in_a;
+	const lv_16sc_t* _in_common = in_common;
+	lv_16sc_t* _out = out;
+
+	__VOLK_ATTR_ALIGNED(16) lv_16sc_t dotProductVector[4];
+
+	//todo dyn mem reg
+
+	__m128i* realcacc;
+	__m128i* imagcacc;
+
+	realcacc = (__m128i*)calloc(num_a_vectors, sizeof(__m128i)); //calloc also sets memory to 0
+	imagcacc = (__m128i*)calloc(num_a_vectors, sizeof(__m128i)); //calloc also sets memory to 0
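+	// note: calloc does not guarantee the 16-byte alignment that __m128i
+	// accesses assume here; this relies on the allocator returning suitably
+	// aligned blocks (typical on x86-64)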
+
+	__m128i a, b, c, c_sr, mask_imag, mask_real, real, imag, imag1, imag2, b_sl, a_sl, result;
+
+	mask_imag = _mm_set_epi8(255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0);
+	mask_real = _mm_set_epi8(0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255);
+
+	// phase rotation registers
+	__m128 pa, pb, two_phase_acc_reg, two_phase_inc_reg;
+	__m128i pc1, pc2;
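+	// the SIMD rotator advances two samples per update, so its increment
+	// register holds phase_inc squared in both lanes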
+	__VOLK_ATTR_ALIGNED(16) lv_32fc_t two_phase_inc[2];
+	two_phase_inc[0] = phase_inc * phase_inc;
+	two_phase_inc[1] = phase_inc * phase_inc;
+	two_phase_inc_reg = _mm_load_ps((float*) two_phase_inc);
+	__VOLK_ATTR_ALIGNED(16) lv_32fc_t two_phase_acc[2];
+	two_phase_acc[0] = (*phase);
+	two_phase_acc[1] = (*phase) * phase_inc;
+	two_phase_acc_reg = _mm_load_ps((float*)two_phase_acc);
+	__m128 yl, yh, tmp1, tmp2, tmp3;
+	lv_16sc_t tmp16;
+	lv_32fc_t tmp32;
+
+	for(unsigned int number = 0; number < sse_iters; number++)
+	{
+		// Phase rotation on operand in_common starts here:
+
+		pa = _mm_set_ps((float)(lv_cimag(_in_common[1])), (float)(lv_creal(_in_common[1])), (float)(lv_cimag(_in_common[0])), (float)(lv_creal(_in_common[0]))); // load two complex samples (2-byte imag, 2-byte real each) as floats into the 128-bit reg
+		//complex 32fc multiplication b=a*two_phase_acc_reg
+		yl = _mm_moveldup_ps(two_phase_acc_reg); // Load yl with cr,cr,dr,dr
+		yh = _mm_movehdup_ps(two_phase_acc_reg); // Load yh with ci,ci,di,di
+		tmp1 = _mm_mul_ps(pa, yl); // tmp1 = ar*cr,ai*cr,br*dr,bi*dr
+		pa = _mm_shuffle_ps(pa, pa, 0xB1); // Re-arrange x to be ai,ar,bi,br
+		tmp2 = _mm_mul_ps(pa, yh); // tmp2 = ai*ci,ar*ci,bi*di,br*di
+		pb = _mm_addsub_ps(tmp1, tmp2); // ar*cr-ai*ci, ai*cr+ar*ci, br*dr-bi*di, bi*dr+br*di
+		pc1 = _mm_cvtps_epi32(pb); // convert from 32fc to 32ic
+
+		//complex 32fc multiplication two_phase_acc_reg=two_phase_acc_reg*two_phase_inc_reg
+		yl = _mm_moveldup_ps(two_phase_acc_reg); // Load yl with cr,cr,dr,dr
+		yh = _mm_movehdup_ps(two_phase_acc_reg); // Load yh with ci,ci,di,di
+		tmp1 = _mm_mul_ps(two_phase_inc_reg, yl); // tmp1 = ar*cr,ai*cr,br*dr,bi*dr
+		tmp3 = _mm_shuffle_ps(two_phase_inc_reg, two_phase_inc_reg, 0xB1); // Re-arrange x to be ai,ar,bi,br
+		tmp2 = _mm_mul_ps(tmp3, yh); // tmp2 = ai*ci,ar*ci,bi*di,br*di
+		two_phase_acc_reg = _mm_addsub_ps(tmp1, tmp2); // ar*cr-ai*ci, ai*cr+ar*ci, br*dr-bi*di, bi*dr+br*di
+
+		//next two samples
+		_in_common += 2;
+		pa = _mm_set_ps((float)(lv_cimag(_in_common[1])), (float)(lv_creal(_in_common[1])), (float)(lv_cimag(_in_common[0])), (float)(lv_creal(_in_common[0]))); // load two complex samples (2-byte imag, 2-byte real each) as floats into the 128-bit reg
+		//complex 32fc multiplication b=a*two_phase_acc_reg
+		yl = _mm_moveldup_ps(two_phase_acc_reg); // Load yl with cr,cr,dr,dr
+		yh = _mm_movehdup_ps(two_phase_acc_reg); // Load yh with ci,ci,di,di
+		tmp1 = _mm_mul_ps(pa, yl); // tmp1 = ar*cr,ai*cr,br*dr,bi*dr
+		pa = _mm_shuffle_ps(pa, pa, 0xB1); // Re-arrange x to be ai,ar,bi,br
+		tmp2 = _mm_mul_ps(pa, yh); // tmp2 = ai*ci,ar*ci,bi*di,br*di
+		pb = _mm_addsub_ps(tmp1, tmp2); // ar*cr-ai*ci, ai*cr+ar*ci, br*dr-bi*di, bi*dr+br*di
+		pc2 = _mm_cvtps_epi32(pb); // convert from 32fc to 32ic
+
+		//complex 32fc multiplication two_phase_acc_reg=two_phase_acc_reg*two_phase_inc_reg
+		yl = _mm_moveldup_ps(two_phase_acc_reg); // Load yl with cr,cr,dr,dr
+		yh = _mm_movehdup_ps(two_phase_acc_reg); // Load yh with ci,ci,di,di
+		tmp1 = _mm_mul_ps(two_phase_inc_reg, yl); // tmp1 = ar*cr,ai*cr,br*dr,bi*dr
+		tmp3 = _mm_shuffle_ps(two_phase_inc_reg, two_phase_inc_reg, 0xB1); // Re-arrange x to be ai,ar,bi,br
+		tmp2 = _mm_mul_ps(tmp3, yh); // tmp2 = ai*ci,ar*ci,bi*di,br*di
+		two_phase_acc_reg = _mm_addsub_ps(tmp1, tmp2); // ar*cr-ai*ci, ai*cr+ar*ci, br*dr-bi*di, bi*dr+br*di
+
+		// store four rotated in_common samples in the register b
+		b = _mm_packs_epi32(pc1, pc2); // convert from 32ic to 16ic with saturation
+
+		//next two samples
+		_in_common += 2;
+
+		for (int n_vec = 0; n_vec < num_a_vectors; n_vec++)
+		{
+			a = _mm_load_si128((__m128i*)&(_in_a[n_vec][number*4])); //load (2 byte imag, 2 byte real) x 4 into 128 bits reg
+
+			c = _mm_mullo_epi16 (a, b); // a3.i*b3.i, a3.r*b3.r, ....
+
+			c_sr = _mm_srli_si128 (c, 2); // shift c right by 2 bytes, shifting in zeros
+			real = _mm_subs_epi16 (c, c_sr);
+
+			b_sl = _mm_slli_si128(b, 2); // b3.r, b2.i ....
+			a_sl = _mm_slli_si128(a, 2); // a3.r, a2.i ....
+
+			imag1 = _mm_mullo_epi16(a, b_sl); // a3.i*b3.r, ....
+			imag2 = _mm_mullo_epi16(b, a_sl); // b3.i*a3.r, ....
+
+			imag = _mm_adds_epi16(imag1, imag2);
+
+			realcacc[n_vec] = _mm_adds_epi16 (realcacc[n_vec], real);
+			imagcacc[n_vec] = _mm_adds_epi16 (imagcacc[n_vec], imag);
+
+		}
+	}
+
+	for (int n_vec = 0; n_vec < num_a_vectors; n_vec++)
+	{
+		realcacc[n_vec] = _mm_and_si128 (realcacc[n_vec], mask_real);
+		imagcacc[n_vec] = _mm_and_si128 (imagcacc[n_vec], mask_imag);
+
+		result = _mm_or_si128 (realcacc[n_vec], imagcacc[n_vec]);
+
+		_mm_store_si128((__m128i*)dotProductVector, result); // Store the results back into the dot product vector
+		dotProduct = lv_cmake(0,0);
+		for (int i = 0; i < 4; ++i)
+		{
+			dotProduct = lv_cmake(sat_adds16i(lv_creal(dotProduct), lv_creal(dotProductVector[i])),
+					sat_adds16i(lv_cimag(dotProduct), lv_cimag(dotProductVector[i])));
+		}
+		_out[n_vec] = dotProduct;
+	}
+	free(realcacc);
+	free(imagcacc);
+
+	_mm_store_ps((float*)two_phase_acc, two_phase_acc_reg);
+	(*phase) = two_phase_acc[0]; // update the caller's phase with the final rotator state
+
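+	// process the remaining (num_points % 4) samples with the scalar tail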
+	for (unsigned int n = sse_iters * 4; n < num_points; n++)
+	{
+		tmp16 = in_common[n];
+		tmp32 = lv_cmake((float)lv_creal(tmp16), (float)lv_cimag(tmp16)) * (*phase);
+		tmp16 = lv_cmake((int16_t)rintf(lv_creal(tmp32)), (int16_t)rintf(lv_cimag(tmp32)));
+		(*phase) *= phase_inc;
+		for (int n_vec = 0; n_vec < num_a_vectors; n_vec++)
+		{
+			lv_16sc_t tmp = tmp16 * in_a[n_vec][n];
+			_out[n_vec] = lv_cmake(sat_adds16i(lv_creal(_out[n_vec]), lv_creal(tmp)),
+					sat_adds16i(lv_cimag(_out[n_vec]), lv_cimag(tmp)));
+		}
+	}
+
+}
+#endif /* LV_HAVE_SSE3 */
+
+#ifdef LV_HAVE_SSE3
+#include <pmmintrin.h>
+
+/*!
+ \brief Rotates the phase of the reference complex vector, multiplies it with multiple versions of another complex vector, accumulates the results and stores them in the output vector
+ \param[out]    out           Array of num_a_vectors components where the accumulated results are stored
+ \param[in]     in_common     Pointer to the reference vector, which is phase-rotated, multiplied and accumulated
+ \param[in]     phase_inc     Phase increment per sample, expressed as a unit complex exponential
+ \param[in,out] phase         On input, the initial phase of the rotator; on output, the phase after processing num_points samples
+ \param[in]     in_a          Pointer to an array of pointers to multiple versions of the other vector to be multiplied and accumulated
+ \param[in]     num_a_vectors Number of vectors to be multiplied by the reference vector and accumulated
+ \param[in]     num_points    Number of complex values to be multiplied together, accumulated and stored into out
+ */
+static inline void volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_u_sse3(lv_16sc_t* out, const lv_16sc_t* in_common, const lv_32fc_t phase_inc, lv_32fc_t* phase, const lv_16sc_t** in_a, int num_a_vectors, unsigned int num_points)
+{
+	lv_16sc_t dotProduct = lv_cmake(0,0);
+
+	const unsigned int sse_iters = num_points / 4;
+
+	const lv_16sc_t** _in_a = in_a;
+	const lv_16sc_t* _in_common = in_common;
+	lv_16sc_t* _out = out;
+
+	__VOLK_ATTR_ALIGNED(16) lv_16sc_t dotProductVector[4];
+
+	//todo dyn mem reg
+
+	__m128i* realcacc;
+	__m128i* imagcacc;
+
+	realcacc = (__m128i*)calloc(num_a_vectors, sizeof(__m128i)); //calloc also sets memory to 0
+	imagcacc = (__m128i*)calloc(num_a_vectors, sizeof(__m128i)); //calloc also sets memory to 0
+
+	__m128i a, b, c, c_sr, mask_imag, mask_real, real, imag, imag1, imag2, b_sl, a_sl, result;
+
+	mask_imag = _mm_set_epi8(255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0);
+	mask_real = _mm_set_epi8(0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255, 0, 0, 255, 255);
+
+	// phase rotation registers
+	__m128 pa, pb, two_phase_acc_reg, two_phase_inc_reg;
+	__m128i pc1, pc2;
+	__VOLK_ATTR_ALIGNED(16) lv_32fc_t two_phase_inc[2];
+	two_phase_inc[0] = phase_inc * phase_inc;
+	two_phase_inc[1] = phase_inc * phase_inc;
+	two_phase_inc_reg = _mm_load_ps((float*) two_phase_inc);
+	__VOLK_ATTR_ALIGNED(16) lv_32fc_t two_phase_acc[2];
+	two_phase_acc[0] = (*phase);
+	two_phase_acc[1] = (*phase) * phase_inc;
+	two_phase_acc_reg = _mm_load_ps((float*)two_phase_acc);
+	__m128 yl, yh, tmp1, tmp2, tmp3;
+	lv_16sc_t tmp16;
+	lv_32fc_t tmp32;
+
+	for(unsigned int number = 0; number < sse_iters; number++)
+	{
+		// Phase rotation on operand in_common starts here:
+
+		pa = _mm_set_ps((float)(lv_cimag(_in_common[1])), (float)(lv_creal(_in_common[1])), (float)(lv_cimag(_in_common[0])), (float)(lv_creal(_in_common[0]))); // load two complex samples (2-byte imag, 2-byte real each) as floats into the 128-bit reg
+		//complex 32fc multiplication b=a*two_phase_acc_reg
+		yl = _mm_moveldup_ps(two_phase_acc_reg); // Load yl with cr,cr,dr,dr
+		yh = _mm_movehdup_ps(two_phase_acc_reg); // Load yh with ci,ci,di,di
+		tmp1 = _mm_mul_ps(pa, yl); // tmp1 = ar*cr,ai*cr,br*dr,bi*dr
+		pa = _mm_shuffle_ps(pa, pa, 0xB1); // Re-arrange x to be ai,ar,bi,br
+		tmp2 = _mm_mul_ps(pa, yh); // tmp2 = ai*ci,ar*ci,bi*di,br*di
+		pb = _mm_addsub_ps(tmp1, tmp2); // ar*cr-ai*ci, ai*cr+ar*ci, br*dr-bi*di, bi*dr+br*di
+		pc1 = _mm_cvtps_epi32(pb); // convert from 32fc to 32ic
+
+		//complex 32fc multiplication two_phase_acc_reg=two_phase_acc_reg*two_phase_inc_reg
+		yl = _mm_moveldup_ps(two_phase_acc_reg); // Load yl with cr,cr,dr,dr
+		yh = _mm_movehdup_ps(two_phase_acc_reg); // Load yh with ci,ci,di,di
+		tmp1 = _mm_mul_ps(two_phase_inc_reg, yl); // tmp1 = ar*cr,ai*cr,br*dr,bi*dr
+		tmp3 = _mm_shuffle_ps(two_phase_inc_reg, two_phase_inc_reg, 0xB1); // Re-arrange x to be ai,ar,bi,br
+		tmp2 = _mm_mul_ps(tmp3, yh); // tmp2 = ai*ci,ar*ci,bi*di,br*di
+		two_phase_acc_reg = _mm_addsub_ps(tmp1, tmp2); // ar*cr-ai*ci, ai*cr+ar*ci, br*dr-bi*di, bi*dr+br*di
+
+		//next two samples
+		_in_common += 2;
+		pa = _mm_set_ps((float)(lv_cimag(_in_common[1])), (float)(lv_creal(_in_common[1])), (float)(lv_cimag(_in_common[0])), (float)(lv_creal(_in_common[0]))); // load two complex samples (2-byte imag, 2-byte real each) as floats into the 128-bit reg
+		//complex 32fc multiplication b=a*two_phase_acc_reg
+		yl = _mm_moveldup_ps(two_phase_acc_reg); // Load yl with cr,cr,dr,dr
+		yh = _mm_movehdup_ps(two_phase_acc_reg); // Load yh with ci,ci,di,di
+		tmp1 = _mm_mul_ps(pa, yl); // tmp1 = ar*cr,ai*cr,br*dr,bi*dr
+		pa = _mm_shuffle_ps(pa, pa, 0xB1); // Re-arrange x to be ai,ar,bi,br
+		tmp2 = _mm_mul_ps(pa, yh); // tmp2 = ai*ci,ar*ci,bi*di,br*di
+		pb = _mm_addsub_ps(tmp1, tmp2); // ar*cr-ai*ci, ai*cr+ar*ci, br*dr-bi*di, bi*dr+br*di
+		pc2 = _mm_cvtps_epi32(pb); // convert from 32fc to 32ic
+
+		//complex 32fc multiplication two_phase_acc_reg=two_phase_acc_reg*two_phase_inc_reg
+		yl = _mm_moveldup_ps(two_phase_acc_reg); // Load yl with cr,cr,dr,dr
+		yh = _mm_movehdup_ps(two_phase_acc_reg); // Load yh with ci,ci,di,di
+		tmp1 = _mm_mul_ps(two_phase_inc_reg, yl); // tmp1 = ar*cr,ai*cr,br*dr,bi*dr
+		tmp3 = _mm_shuffle_ps(two_phase_inc_reg, two_phase_inc_reg, 0xB1); // Re-arrange x to be ai,ar,bi,br
+		tmp2 = _mm_mul_ps(tmp3, yh); // tmp2 = ai*ci,ar*ci,bi*di,br*di
+		two_phase_acc_reg = _mm_addsub_ps(tmp1, tmp2); // ar*cr-ai*ci, ai*cr+ar*ci, br*dr-bi*di, bi*dr+br*di
+
+		// store four rotated in_common samples in the register b
+		b = _mm_packs_epi32(pc1, pc2); // convert from 32ic to 16ic with saturation
+
+		//next two samples
+		_in_common += 2;
+
+		for (int n_vec = 0; n_vec < num_a_vectors; n_vec++)
+		{
+			a = _mm_loadu_si128((__m128i*)&(_in_a[n_vec][number*4])); //load (2 byte imag, 2 byte real) x 4 into 128 bits reg
+
+			c = _mm_mullo_epi16 (a, b); // a3.i*b3.i, a3.r*b3.r, ....
+
+			c_sr = _mm_srli_si128 (c, 2); // shift c right by 2 bytes, shifting in zeros
+			real = _mm_subs_epi16 (c, c_sr);
+
+			b_sl = _mm_slli_si128(b, 2); // b3.r, b2.i ....
+			a_sl = _mm_slli_si128(a, 2); // a3.r, a2.i ....
+
+			imag1 = _mm_mullo_epi16(a, b_sl); // a3.i*b3.r, ....
+			imag2 = _mm_mullo_epi16(b, a_sl); // b3.i*a3.r, ....
+
+			imag = _mm_adds_epi16(imag1, imag2);
+
+			realcacc[n_vec] = _mm_adds_epi16 (realcacc[n_vec], real);
+			imagcacc[n_vec] = _mm_adds_epi16 (imagcacc[n_vec], imag);
+
+		}
+	}
+
+	for (int n_vec = 0; n_vec < num_a_vectors; n_vec++)
+	{
+		realcacc[n_vec] = _mm_and_si128 (realcacc[n_vec], mask_real);
+		imagcacc[n_vec] = _mm_and_si128 (imagcacc[n_vec], mask_imag);
+
+		result = _mm_or_si128 (realcacc[n_vec], imagcacc[n_vec]);
+
+		_mm_storeu_si128((__m128i*)dotProductVector, result); // Store the results back into the dot product vector
+		dotProduct = lv_cmake(0,0);
+		for (int i = 0; i < 4; ++i)
+		{
+			dotProduct = lv_cmake(sat_adds16i(lv_creal(dotProduct), lv_creal(dotProductVector[i])),
+					sat_adds16i(lv_cimag(dotProduct), lv_cimag(dotProductVector[i])));
+		}
+		_out[n_vec] = dotProduct;
+	}
+	free(realcacc);
+	free(imagcacc);
+
+	_mm_store_ps((float*)two_phase_acc, two_phase_acc_reg);
+	(*phase) = two_phase_acc[0]; // update the caller's phase with the final rotator state
+
+	// process the remaining (num_points % 4) samples with the scalar tail
+	for (unsigned int n = sse_iters * 4; n < num_points; n++)
+	{
+		tmp16 = in_common[n];
+		tmp32 = lv_cmake((float)lv_creal(tmp16), (float)lv_cimag(tmp16)) * (*phase);
+		tmp16 = lv_cmake((int16_t)rintf(lv_creal(tmp32)), (int16_t)rintf(lv_cimag(tmp32)));
+		(*phase) *= phase_inc;
+		for (int n_vec = 0; n_vec < num_a_vectors; n_vec++)
+		{
+			lv_16sc_t tmp = tmp16 * in_a[n_vec][n];
+			_out[n_vec] = lv_cmake(sat_adds16i(lv_creal(_out[n_vec]), lv_creal(tmp)),
+					sat_adds16i(lv_cimag(_out[n_vec]), lv_cimag(tmp)));
+		}
+	}
+
+}
+#endif /* LV_HAVE_SSE3 */
+#endif /* INCLUDED_volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_H */
diff --git a/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_x2_rotator_dotprodxnpuppet_16ic.h b/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_x2_rotator_dotprodxnpuppet_16ic.h
new file mode 100644
index 0000000..57ee851
--- /dev/null
+++ b/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/kernels/volk_gnsssdr/volk_gnsssdr_16ic_x2_rotator_dotprodxnpuppet_16ic.h
@@ -0,0 +1,134 @@
+/*!
+ * \file volk_gnsssdr_16ic_x2_rotator_dotprodxnpuppet_16ic.h
+ * \brief Volk puppet for the phase rotator + multiple 16-bit complex dot product kernel
+ * \authors <ul>
+ *          <li> Carles Fernandez Prades 2016 cfernandez at cttc dot cat
+ *          </ul>
+ *
+ * Volk puppet for integrating the rotator + dot product kernel into VOLK's test system
+ *
+ * -------------------------------------------------------------------------
+ *
+ * Copyright (C) 2010-2015  (see AUTHORS file for a list of contributors)
+ *
+ * GNSS-SDR is a software defined Global Navigation
+ *          Satellite Systems receiver
+ *
+ * This file is part of GNSS-SDR.
+ *
+ * GNSS-SDR is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * GNSS-SDR is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GNSS-SDR. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * -------------------------------------------------------------------------
+ */
+
+#ifndef INCLUDED_volk_gnsssdr_16ic_x2_rotator_dotprodxnpuppet_16ic_H
+#define INCLUDED_volk_gnsssdr_16ic_x2_rotator_dotprodxnpuppet_16ic_H
+
+#include "volk_gnsssdr/volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn.h"
+#include <volk_gnsssdr/volk_gnsssdr_malloc.h>
+#include <volk_gnsssdr/volk_gnsssdr_complex.h>
+#include <volk_gnsssdr/volk_gnsssdr.h>
+#include <math.h>
+#include <string.h>
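+
+// Each puppet below adapts the kernel's seven-argument signature to the
+// (result, local_code, in, num_points) form expected by the QA test system,
+// fixing the phase, the phase increment and the number of vectors internally.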
+
+#ifdef LV_HAVE_GENERIC
+static inline void volk_gnsssdr_16ic_x2_rotator_dotprodxnpuppet_16ic_generic(lv_16sc_t* result, const lv_16sc_t* local_code, const lv_16sc_t* in, unsigned int num_points)
+{
+    // the phase and phase increment must have unit magnitude: the rotator expects complex exponential inputs
+    float rem_carrier_phase_in_rad = 0.345;
+    float phase_step_rad = 0.123;
+    lv_32fc_t phase[1];
+    phase[0] = lv_cmake(cos(rem_carrier_phase_in_rad), -sin(rem_carrier_phase_in_rad));
+    lv_32fc_t phase_inc[1];
+    phase_inc[0] = lv_cmake(cos(phase_step_rad), -sin(phase_step_rad));
+
+    int num_a_vectors = 3;
+    lv_16sc_t** in_a = (lv_16sc_t**)volk_gnsssdr_malloc(sizeof(lv_16sc_t*) * num_a_vectors, volk_gnsssdr_get_alignment());
+    for (int n = 0; n < num_a_vectors; n++)
+    {
+       in_a[n] = (lv_16sc_t*)volk_gnsssdr_malloc(sizeof(lv_16sc_t) * num_points, volk_gnsssdr_get_alignment());
+       memcpy(in_a[n], in, sizeof(lv_16sc_t) * num_points);
+    }
+    volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_generic(result, local_code, phase_inc[0], phase, (const lv_16sc_t**) in_a, num_a_vectors, num_points);
+
+    for (int n = 0; n < num_a_vectors; n++)
+    {
+        volk_gnsssdr_free(in_a[n]);
+    }
+    volk_gnsssdr_free(in_a);
+}
+
+#endif  // Generic
+
+#ifdef LV_HAVE_SSE3
+static inline void volk_gnsssdr_16ic_x2_rotator_dotprodxnpuppet_16ic_a_sse3(lv_16sc_t* result, const lv_16sc_t* local_code, const lv_16sc_t* in, unsigned int num_points)
+{
+    // the phase and phase increment must have unit magnitude: the rotator expects complex exponential inputs
+    float rem_carrier_phase_in_rad = 0.345;
+    float phase_step_rad = 0.123;
+    lv_32fc_t phase[1];
+    phase[0] = lv_cmake(cos(rem_carrier_phase_in_rad), -sin(rem_carrier_phase_in_rad));
+    lv_32fc_t phase_inc[1];
+    phase_inc[0] = lv_cmake(cos(phase_step_rad), -sin(phase_step_rad));
+
+    int num_a_vectors = 3;
+    lv_16sc_t** in_a = (lv_16sc_t**)volk_gnsssdr_malloc(sizeof(lv_16sc_t*) * num_a_vectors, volk_gnsssdr_get_alignment());
+    for (int n = 0; n < num_a_vectors; n++)
+    {
+       in_a[n] = (lv_16sc_t*)volk_gnsssdr_malloc(sizeof(lv_16sc_t) * num_points, volk_gnsssdr_get_alignment());
+       memcpy(in_a[n], in, sizeof(lv_16sc_t) * num_points);
+    }
+    volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_a_sse3(result, local_code, phase_inc[0], phase, (const lv_16sc_t**) in_a, num_a_vectors, num_points);
+
+    for (int n = 0; n < num_a_vectors; n++)
+    {
+        volk_gnsssdr_free(in_a[n]);
+    }
+    volk_gnsssdr_free(in_a);
+}
+
+#endif // SSE3
+
+#ifdef LV_HAVE_SSE3
+
+static inline void volk_gnsssdr_16ic_x2_rotator_dotprodxnpuppet_16ic_u_sse3(lv_16sc_t* result, const lv_16sc_t* local_code, const lv_16sc_t* in, unsigned int num_points)
+{
+    // the phase and phase increment must have unit magnitude: the rotator expects complex exponential inputs
+    float rem_carrier_phase_in_rad = 0.345;
+    float phase_step_rad = 0.123;
+    lv_32fc_t phase[1];
+    phase[0] = lv_cmake(cos(rem_carrier_phase_in_rad), -sin(rem_carrier_phase_in_rad));
+    lv_32fc_t phase_inc[1];
+    phase_inc[0] = lv_cmake(cos(phase_step_rad), -sin(phase_step_rad));
+
+    int num_a_vectors = 3;
+    lv_16sc_t** in_a = (lv_16sc_t**)volk_gnsssdr_malloc(sizeof(lv_16sc_t*) * num_a_vectors, volk_gnsssdr_get_alignment());
+    for (int n = 0; n < num_a_vectors; n++)
+    {
+       in_a[n] = (lv_16sc_t*)volk_gnsssdr_malloc(sizeof(lv_16sc_t) * num_points, volk_gnsssdr_get_alignment());
+       memcpy(in_a[n], in, sizeof(lv_16sc_t) * num_points);
+    }
+    volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn_u_sse3(result, local_code, phase_inc[0], phase, (const lv_16sc_t**) in_a, num_a_vectors, num_points);
+
+    for (int n = 0; n < num_a_vectors; n++)
+    {
+        volk_gnsssdr_free(in_a[n]);
+    }
+    volk_gnsssdr_free(in_a);
+}
+
+#endif // SSE3
+
+#endif  // INCLUDED_volk_gnsssdr_16ic_x2_rotator_dotprodxnpuppet_16ic_H
+
+
diff --git a/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/lib/kernel_tests.h b/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/lib/kernel_tests.h
index 0235218..97bf1ee 100644
--- a/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/lib/kernel_tests.h
+++ b/src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/lib/kernel_tests.h
@@ -81,6 +81,7 @@ std::vector<volk_gnsssdr_test_case_t> init_test_list(volk_gnsssdr_test_params_t
         (VOLK_INIT_PUPP(volk_gnsssdr_16ic_resamplerpuppet_16ic, volk_gnsssdr_16ic_resampler_16ic, test_params))
         (VOLK_INIT_PUPP(volk_gnsssdr_16ic_resamplerxnpuppet_16ic, volk_gnsssdr_16ic_xn_resampler_16ic_xn, test_params))
         (VOLK_INIT_PUPP(volk_gnsssdr_16ic_x2_dotprodxnpuppet_16ic, volk_gnsssdr_16ic_x2_dot_prod_16ic_xn, test_params))
+        (VOLK_INIT_PUPP(volk_gnsssdr_16ic_x2_rotator_dotprodxnpuppet_16ic, volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn, test_params))
         ;
 
     return test_cases;
diff --git a/src/algorithms/tracking/libs/cpu_multicorrelator_16sc.cc b/src/algorithms/tracking/libs/cpu_multicorrelator_16sc.cc
index 05a4f20..8cbc574 100644
--- a/src/algorithms/tracking/libs/cpu_multicorrelator_16sc.cc
+++ b/src/algorithms/tracking/libs/cpu_multicorrelator_16sc.cc
@@ -44,18 +44,18 @@ bool cpu_multicorrelator_16sc::init(
     // ALLOCATE MEMORY FOR INTERNAL vectors
     size_t size = max_signal_length_samples * sizeof(lv_16sc_t);
 
-    // NCO signal
-    d_nco_in = static_cast<lv_16sc_t*>(volk_gnsssdr_malloc(size, volk_gnsssdr_get_alignment()));
-
-    // Doppler-free signal
-    d_sig_doppler_wiped = static_cast<lv_16sc_t*>(volk_gnsssdr_malloc(size, volk_gnsssdr_get_alignment()));
+    // NCO signal (not needed if the rotator+dot_product kernel is used)
+    //d_nco_in = static_cast<lv_16sc_t*>(volk_gnsssdr_malloc(size, volk_gnsssdr_get_alignment()));
+    // Doppler-free signal (not needed if the rotator+dot_product kernel is used)
+    //d_sig_doppler_wiped = static_cast<lv_16sc_t*>(volk_gnsssdr_malloc(size, volk_gnsssdr_get_alignment()));
 
+    d_n_correlators = n_correlators;
+    d_tmp_code_phases_chips = static_cast<float*>(volk_gnsssdr_malloc(n_correlators * sizeof(float), volk_gnsssdr_get_alignment()));
     d_local_codes_resampled = new lv_16sc_t*[n_correlators];
     for (int n = 0; n < n_correlators; n++)
         {
             d_local_codes_resampled[n] = static_cast<lv_16sc_t*>(volk_gnsssdr_malloc(size, volk_gnsssdr_get_alignment()));
         }
-    d_n_correlators = n_correlators;
     return true;
 }
 
@@ -81,26 +81,21 @@ bool cpu_multicorrelator_16sc::set_input_output_vectors(lv_16sc_t* corr_out, con
     return true;
 }
 
-
-
 void cpu_multicorrelator_16sc::update_local_code(int correlator_length_samples,float rem_code_phase_chips, float code_phase_step_chips)
 {
-	float *tmp_code_phases_chips;
-	tmp_code_phases_chips = static_cast<float*>(volk_gnsssdr_malloc(d_n_correlators*sizeof(float), volk_gnsssdr_get_alignment()));
+
 	for (int n = 0; n < d_n_correlators; n++)
 	{
-		tmp_code_phases_chips[n] = d_shifts_chips[n] - rem_code_phase_chips;
+		d_tmp_code_phases_chips[n] = d_shifts_chips[n] - rem_code_phase_chips;
 	}
 
 	volk_gnsssdr_16ic_xn_resampler_16ic_xn(d_local_codes_resampled,
 			d_local_code_in,
-			tmp_code_phases_chips,
+			d_tmp_code_phases_chips,
 			code_phase_step_chips,
 			correlator_length_samples,
 			d_n_correlators,
 			d_code_length_chips);
-
-	volk_gnsssdr_free(tmp_code_phases_chips);
 }
 
 
@@ -111,11 +106,14 @@ bool cpu_multicorrelator_16sc::Carrier_wipeoff_multicorrelator_resampler(
         float code_phase_step_chips,
         int signal_length_samples)
 {
+    update_local_code(signal_length_samples, rem_code_phase_chips, code_phase_step_chips);
     lv_32fc_t phase_offset_as_complex[1];
     phase_offset_as_complex[0] = lv_cmake(std::cos(rem_carrier_phase_in_rad), -std::sin(rem_carrier_phase_in_rad));
-    volk_gnsssdr_16ic_s32fc_x2_rotator_16ic(d_sig_doppler_wiped, d_sig_in, std::exp(lv_32fc_t(0, -phase_step_rad)), phase_offset_as_complex, signal_length_samples);
-    update_local_code(signal_length_samples, rem_code_phase_chips, code_phase_step_chips);
-    volk_gnsssdr_16ic_x2_dot_prod_16ic_xn(d_corr_out, d_sig_doppler_wiped, (const lv_16sc_t**)d_local_codes_resampled, d_n_correlators,  signal_length_samples);
+    // replaced by the integrated rotator + dot_product kernel:
+    //volk_gnsssdr_16ic_s32fc_x2_rotator_16ic(d_sig_doppler_wiped, d_sig_in, std::exp(lv_32fc_t(0, -phase_step_rad)), phase_offset_as_complex, signal_length_samples);
+    //volk_gnsssdr_16ic_x2_dot_prod_16ic_xn(d_corr_out, d_sig_doppler_wiped, (const lv_16sc_t**)d_local_codes_resampled, d_n_correlators,  signal_length_samples);
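+    // std::exp(lv_32fc_t(0, -phase_step_rad)) is the unit phasor exp(-j*phase_step_rad)
+    // that the kernel applies as the per-sample phase increment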
+    volk_gnsssdr_16ic_x2_rotator_dot_prod_16ic_xn(d_corr_out, d_sig_in, std::exp(lv_32fc_t(0, -phase_step_rad)), phase_offset_as_complex, (const lv_16sc_t**)d_local_codes_resampled, d_n_correlators, signal_length_samples);
+
     return true;
 }
 
@@ -123,8 +121,8 @@ bool cpu_multicorrelator_16sc::Carrier_wipeoff_multicorrelator_resampler(
 cpu_multicorrelator_16sc::cpu_multicorrelator_16sc()
 {
     d_sig_in = NULL;
-    d_nco_in = NULL;
-    d_sig_doppler_wiped = NULL;
+    //d_nco_in = NULL;
+    //d_sig_doppler_wiped = NULL;
     d_local_code_in = NULL;
     d_shifts_chips = NULL;
     d_corr_out = NULL;
@@ -136,8 +134,9 @@ cpu_multicorrelator_16sc::cpu_multicorrelator_16sc()
 bool cpu_multicorrelator_16sc::free()
 {
     // Free memory
-    if (d_sig_doppler_wiped != NULL) volk_gnsssdr_free(d_sig_doppler_wiped);
-    if (d_nco_in != NULL) volk_gnsssdr_free(d_nco_in);
+    //if (d_sig_doppler_wiped != NULL) volk_gnsssdr_free(d_sig_doppler_wiped);
+    //if (d_nco_in != NULL) volk_gnsssdr_free(d_nco_in);
+    if (d_tmp_code_phases_chips != NULL) volk_gnsssdr_free(d_tmp_code_phases_chips);
     for (int n = 0; n < d_n_correlators; n++)
         {
             volk_gnsssdr_free(d_local_codes_resampled[n]);
diff --git a/src/algorithms/tracking/libs/cpu_multicorrelator_16sc.h b/src/algorithms/tracking/libs/cpu_multicorrelator_16sc.h
index 1c5163c..6a60821 100644
--- a/src/algorithms/tracking/libs/cpu_multicorrelator_16sc.h
+++ b/src/algorithms/tracking/libs/cpu_multicorrelator_16sc.h
@@ -55,9 +55,10 @@ public:
 private:
     // Allocate the device input vectors
     const lv_16sc_t *d_sig_in;
-    lv_16sc_t *d_nco_in;
+    //lv_16sc_t *d_nco_in;
+    float *d_tmp_code_phases_chips;
     lv_16sc_t **d_local_codes_resampled;
-    lv_16sc_t *d_sig_doppler_wiped;
+    //lv_16sc_t *d_sig_doppler_wiped;
     const lv_16sc_t *d_local_code_in;
     lv_16sc_t *d_corr_out;
     float *d_shifts_chips;

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/pkg-hamradio/gnss-sdr.git


