22 #include "libavutil/mem.h"
54 #if ARCH_X86_32 && defined(__INTEL_COMPILER)
59 #if HAVE_SSE_INLINE && HAVE_7REGS
/*
 * MIX5(mono, stereo) — SSE inline-asm kernel for the special-cased
 * 5-input-channel downmixes (5->1 and 5->2). The mono()/stereo()
 * parameters are macros expected to expand their argument either to
 * itself or to nothing, so this single template generates both variants.
 *
 * NOTE(review): this chunk appears to be an incomplete extraction of the
 * original file (stray embedded line numbers; the "__asm__ volatile ("
 * opener, the loop label/branch and part of the operand list — e.g. the
 * input that %1 refers to — are not visible here). Consult the complete
 * file before editing; comments below describe only the visible lines.
 */
64 #define MIX5(mono, stereo) \
    /* Load three scalar coefficients at byte offsets 0, 8 and 24 from %1
     * (presumably the downmix matrix — operand line missing, confirm),
     * then broadcast each across all 4 lanes via shufps $0. */ \
66 "movss 0(%1), %%xmm5 \n" \
67 "movss 8(%1), %%xmm6 \n" \
68 "movss 24(%1), %%xmm7 \n" \
69 "shufps $0, %%xmm5, %%xmm5 \n" \
70 "shufps $0, %%xmm6, %%xmm6 \n" \
71 "shufps $0, %%xmm7, %%xmm7 \n" \
    /* Load 4 floats from each of five channel base pointers (%2..%6),
     * all indexed by the shared byte-offset counter in %0. */ \
73 "movaps (%0, %2), %%xmm0 \n" \
74 "movaps (%0, %3), %%xmm1 \n" \
75 "movaps (%0, %4), %%xmm2 \n" \
76 "movaps (%0, %5), %%xmm3 \n" \
77 "movaps (%0, %6), %%xmm4 \n" \
    /* Scale: xmm5 applied to channels loaded into xmm0 and xmm2,
     * xmm6 to xmm1, xmm7 to xmm3 and xmm4. */ \
78 "mulps %%xmm5, %%xmm0 \n" \
79 "mulps %%xmm6, %%xmm1 \n" \
80 "mulps %%xmm5, %%xmm2 \n" \
81 "mulps %%xmm7, %%xmm3 \n" \
82 "mulps %%xmm7, %%xmm4 \n" \
    /* Accumulate. Lines wrapped in stereo()/mono() are emitted only for
     * the corresponding variant of the kernel. */ \
83 stereo("addps %%xmm1, %%xmm0 \n") \
84 "addps %%xmm1, %%xmm2 \n" \
85 "addps %%xmm3, %%xmm0 \n" \
86 "addps %%xmm4, %%xmm2 \n" \
87 mono("addps %%xmm2, %%xmm0 \n") \
    /* Store the mixed block back in place over the first channel, and
     * over the second channel too in the stereo variant. */ \
88 "movaps %%xmm0, (%0, %2) \n" \
89 stereo("movaps %%xmm2, (%0, %3) \n") \
    /* Input operands: end-of-buffer pointers for the five channels
     * (indexed by a negative offset in %0, per the addressing above).
     * Earlier operand/label lines are missing from this chunk. */ \
94 "r"(samples[0] + len), \
95 "r"(samples[1] + len), \
96 "r"(samples[2] + len), \
97 "r"(samples[3] + len), \
98 "r"(samples[4] + len) \
    /* Clobbers: XMM_CLOBBERS emits the xmm list only when the compiler
     * supports xmm registers in clobber lists. */ \
99 : XMM_CLOBBERS("%xmm0", "%xmm1", "%xmm2", "%xmm3", \
100 "%xmm4", "%xmm5", "%xmm6", "%xmm7",) \
/*
 * MIX_MISC(stereo) — generic SSE downmix kernel used when the input does
 * not match one of the MIX5 fast paths. Walks a table of per-channel
 * sample pointers and a per-channel coefficient table, accumulating all
 * inputs into one (mono) or two (stereo) output channels.
 *
 * NOTE(review): incomplete extraction — the "__asm__ volatile (" opener,
 * loop labels/branches, the initial xmm4/xmm5 coefficient loads and at
 * least one operand line are not visible in this chunk. The %cN operand
 * mapping below is therefore partly inferred — confirm against the full
 * file.
 */
104 #define MIX_MISC(stereo) \
    /* Fetch the first channel's sample pointer from the pointer table at
     * %6 (index %2, scaled by a %cN immediate from the operand list),
     * load 4 samples, and scale by xmm4/xmm5 — presumably the first
     * channel's broadcast coefficients, loaded in lines missing here. */ \
108 "mov -%c7(%6, %2, %c8), %3 \n" \
109 "movaps (%3, %0), %%xmm0 \n" \
110 stereo("movaps %%xmm0, %%xmm1 \n") \
111 "mulps %%xmm4, %%xmm0 \n" \
112 stereo("mulps %%xmm5, %%xmm1 \n") \
    /* Per remaining input channel: load its pointer, load 4 samples, and
     * multiply by the interleaved coefficient vectors at (%4, %2, 8)
     * (second output's coefficients 16 bytes further), accumulating into
     * xmm0/xmm1. The loop label/branch lines are missing here. */ \
114 "mov (%6, %2, %c8), %1 \n" \
115 "movaps (%1, %0), %%xmm2 \n" \
116 stereo("movaps %%xmm2, %%xmm3 \n") \
117 "mulps (%4, %2, 8), %%xmm2 \n" \
118 stereo("mulps 16(%4, %2, 8), %%xmm3 \n") \
119 "addps %%xmm2, %%xmm0 \n" \
120 stereo("addps %%xmm3, %%xmm1 \n") \
    /* Store the accumulated block(s) in place over the output
     * channel(s). */ \
124 stereo("mov (%6, %2, %c8), %1 \n") \
125 "movaps %%xmm0, (%3, %0) \n" \
126 stereo("movaps %%xmm1, (%1, %0) \n") \
    /* Outputs: sample-offset counter (i, read/write) plus three scratch
     * registers. Inputs include the coefficient table end pointer and a
     * negative channel-count offset; the two "i" immediates are
     * sizeof(float *) and sizeof(float *)/4, referenced as %c7/%c8
     * above. One operand line (original line 132) is missing here. */ \
129 : "+&r"(i), "=&r"(j), "=&r"(k), "=&r"(m) \
130 : "r"(matrix_simd + in_ch), \
131 "g"((intptr_t) - 4 * (in_ch - 1)), \
133 "i"(sizeof(float *)), "i"(sizeof(float *)/4) \
/*
 * ac3_downmix_sse() — downmix in_ch planar float channels into out_ch
 * channels in place, using the coefficient matrix (matrix[in][out]).
 * Dispatches to one of three SSE kernels: two fast paths for common
 * 5->2 / 5->1 layouts (MIX5) and a generic path (MIX_MISC).
 *
 * NOTE(review): incomplete extraction — the opening brace, local
 * variable declarations, the MIX5()/MIX_MISC() invocations, the asm
 * statement openers and the closing brace are not visible in this chunk.
 */
137 static void ac3_downmix_sse(
float **samples,
float (*matrix)[2],
138 int out_ch,
int in_ch,
int len)
/* Reinterpret the float coefficients as int bit patterns so the layout
 * checks below are exact bitwise compares, with no float arithmetic. */
140 int (*matrix_cmp)[2] = (int(*)[2])matrix;
/* Negative byte offset: the kernels address samples[ch] + len with this
 * counter, which presumably runs up toward zero (increment lines are
 * missing from this chunk — confirm). */
143 i = -len *
sizeof(float);
/* Fast path 1 (5->2): requires ch0 and ch3 to feed only output 0, ch2
 * and ch4 only output 1 (their cross coefficients are all-zero bits),
 * ch1 to feed both outputs with bit-identical gain, and ch0's gain to
 * equal ch2's. The MIX5 invocation itself is missing from this chunk. */
144 if (in_ch == 5 && out_ch == 2 &&
145 !(matrix_cmp[0][1] | matrix_cmp[2][0] |
146 matrix_cmp[3][1] | matrix_cmp[4][0] |
147 (matrix_cmp[1][0] ^ matrix_cmp[1][1]) |
148 (matrix_cmp[0][0] ^ matrix_cmp[2][1]))) {
150 }
/* Fast path 2 (5->1): ch0/ch2 share one gain and ch3/ch4 share
 * another. */
else if (in_ch == 5 && out_ch == 1 &&
151 matrix_cmp[0][0] == matrix_cmp[2][0] &&
152 matrix_cmp[3][0] == matrix_cmp[4][0]) {
/* Generic path: build the per-channel end-pointer table consumed by
 * MIX_MISC (samp's declaration is missing from this chunk). */
158 for (j = 0; j < in_ch; j++)
159 samp[j] = samples[j] + len;
/* Byte size of the in_ch x 2 coefficient matrix; used as the loop
 * counter for the broadcast below. */
161 j = 2 * in_ch *
sizeof(float);
/* Broadcast each scalar coefficient pair matrix[c][0]/matrix[c][1] into
 * two 4-lane vectors stored 16 bytes apart in matrix_simd, ready for the
 * mulps accesses in MIX_MISC. Loop label/branch and the asm opener are
 * missing from this chunk. */
165 "movss (%2, %0), %%xmm4 \n"
166 "movss 4(%2, %0), %%xmm5 \n"
167 "shufps $0, %%xmm4, %%xmm4 \n"
168 "shufps $0, %%xmm5, %%xmm5 \n"
169 "movaps %%xmm4, (%1, %0, 4) \n"
170 "movaps %%xmm5, 16(%1, %0, 4) \n"
173 :
"r"(matrix_simd),
"r"(matrix)
226 #if HAVE_SSE_INLINE && HAVE_7REGS
void(* ac3_rshift_int32)(int32_t *src, unsigned int len, unsigned int shift)
Right-shift each value in an array of int32_t by a specified amount.
#define EXTERNAL_SSE(flags)
void(* float_to_fixed24)(int32_t *dst, const float *src, unsigned int len)
Convert an array of float in range [-1.0,1.0] to int32_t with range [-(1<<24),(1<<24)].
static int shift(int a, int b)
int ff_ac3_max_msb_abs_int16_sse2(const int16_t *src, int len)
int ff_ac3_max_msb_abs_int16_mmx(const int16_t *src, int len)
#define DECLARE_ALIGNED(n, t, v)
void ff_float_to_fixed24_3dnow(int32_t *dst, const float *src, unsigned int len)
void ff_float_to_fixed24_sse2(int32_t *dst, const float *src, unsigned int len)
void ff_ac3_extract_exponents_ssse3(uint8_t *exp, int32_t *coef, int nb_coefs)
int(* ac3_max_msb_abs_int16)(const int16_t *src, int len)
Calculate the maximum MSB of the absolute value of each element in an array of int16_t.
void ff_ac3_rshift_int32_mmx(int32_t *src, unsigned int len, unsigned int shift)
void ff_ac3_lshift_int16_mmx(int16_t *src, unsigned int len, unsigned int shift)
void ff_ac3_lshift_int16_sse2(int16_t *src, unsigned int len, unsigned int shift)
#define EXTERNAL_SSSE3(flags)
int av_get_cpu_flags(void)
Return the flags which specify extensions supported by the CPU.
void ff_ac3_exponent_min_sse2(uint8_t *exp, int num_reuse_blocks, int nb_coefs)
void ff_ac3dsp_init_x86(AC3DSPContext *c, int bit_exact)
#define AC3_MAX_CHANNELS
maximum number of channels, including coupling channel
void ff_ac3_rshift_int32_sse2(int32_t *src, unsigned int len, unsigned int shift)
void ff_ac3_extract_exponents_3dnow(uint8_t *exp, int32_t *coef, int nb_coefs)
int ff_ac3_max_msb_abs_int16_ssse3(const int16_t *src, int len)
#define EXTERNAL_MMX(flags)
#define EXTERNAL_AMD3DNOW(flags)
#define AV_CPU_FLAG_ATOM
Atom processor, some SSSE3 instructions are slower.
void(* extract_exponents)(uint8_t *exp, int32_t *coef, int nb_coefs)
void ff_ac3_exponent_min_mmx(uint8_t *exp, int num_reuse_blocks, int nb_coefs)
void ff_ac3_exponent_min_mmxext(uint8_t *exp, int num_reuse_blocks, int nb_coefs)
int ff_ac3_max_msb_abs_int16_mmxext(const int16_t *src, int len)
void ff_float_to_fixed24_sse(int32_t *dst, const float *src, unsigned int len)
int(* compute_mantissa_size)(uint16_t mant_cnt[6][16])
Calculate the number of bits needed to encode a set of mantissas.
#define EXTERNAL_SSE2(flags)
#define EXTERNAL_MMXEXT(flags)
void(* ac3_lshift_int16)(int16_t *src, unsigned int len, unsigned int shift)
Left-shift each value in an array of int16_t by a specified amount.
void(* ac3_exponent_min)(uint8_t *exp, int num_reuse_blocks, int nb_coefs)
Set each encoded exponent in a block to the minimum of itself and the exponent in the same frequency bin of a following block.
int ff_ac3_compute_mantissa_size_sse2(uint16_t mant_cnt[6][16])
void(* downmix)(float **samples, float(*matrix)[2], int out_ch, int in_ch, int len)
#define AV_CPU_FLAG_SSE2SLOW
SSE2 supported, but usually not faster.
#define INLINE_SSE(flags)
void ff_ac3_extract_exponents_sse2(uint8_t *exp, int32_t *coef, int nb_coefs)
Common code between the AC-3 encoder and decoder.