/********************************************************************
* *
* THIS FILE IS PART OF THE OggVorbis 'TREMOR' CODEC SOURCE CODE. *
* *
* USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS *
* GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE *
* IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING. *
* *
* THE OggVorbis 'TREMOR' SOURCE CODE IS (C) COPYRIGHT 1994-2002 *
* BY THE Xiph.Org FOUNDATION http://www.xiph.org/ *
* *
********************************************************************
function: arm7 and later wide math functions
********************************************************************/
#ifdef _ARM_ASSEM_
#if ARM_ARCH < 6
#define INCL_OPTIMIZED_VECTOR_FMUL_WINDOW
static inline void ff_vector_fmul_window_c(ogg_int32_t *dst, const ogg_int32_t *src0,
const ogg_int32_t *src1, const ogg_int32_t *win, int len)
{
/* len is always a power of 2 and always >= 16, so the loop can safely be
   unrolled twice.  dst0/win0 walk forward from the start of the
   2*len-sample output region while dst1/win1 walk backward from its end. */
ogg_int32_t *dst0 = dst, *dst1 = dst + 2*len;
const ogg_int32_t *win0 = win, *win1 = win + 2*len;
src1 += len;
asm volatile ("cmp %[len], #0\n\t"
"b 1f\n\t"
"0:\n\t"
"ldr r0, [%[src0]], #4\n\t"
"ldr r1, [%[src1], #-4]!\n\t"
"ldr r2, [%[win0]], #4\n\t"
"ldr r3, [%[win1], #-4]!\n\t"
"smull r4, r5, r0, r2\n\t"
"smlal r4, r5, r1, r3\n\t"
"rsb r2, r2, #0\n\t"
"lsl r5, r5, #1\n\t"
"str r5, [%[dst1], #-4]!\n\t"
"smull r4, r5, r0, r3\n\t"
"smlal r4, r5, r1, r2\n\t"
"ldr r0, [%[src0]], #4\n\t"
"ldr r1, [%[src1], #-4]!\n\t"
"lsl r5, r5, #1\n\t"
"str r5, [%[dst0]], #4\n\t"
"ldr r2, [%[win0]], #4\n\t"
"ldr r3, [%[win1], #-4]!\n\t"
"smull r4, r5, r0, r2\n\t"
"smlal r4, r5, r1, r3\n\t"
"rsb r2, r2, #0\n\t"
"lsl r5, r5, #1\n\t"
"str r5, [%[dst1], #-4]!\n\t"
"smull r4, r5, r0, r3\n\t"
"smlal r4, r5, r1, r2\n\t"
"subs %[len], %[len], #2\n\t"
"lsl r5, r5, #1\n\t"
"str r5, [%[dst0]], #4\n\t"
"1:\n\t"
"bgt 0b\n\t"
: [dst0] "+r" (dst0), [dst1] "+r" (dst1),
[src0] "+r" (src0), [src1] "+r" (src1),
[win0] "+r" (win0), [win1] "+r" (win1),
[len] "+r" (len)
:: "r0", "r1", "r2", "r3", "r4", "r5", "cc", "memory");
}
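
/* For reference, a plain-C sketch of what the asm loop above computes.
   This is an illustration only, kept out of the build with #if 0; the name
   vector_fmul_window_ref and the 64-bit intermediates are this sketch's own
   assumptions, not part of Tremor. */
#if 0
static void vector_fmul_window_ref(ogg_int32_t *dst, const ogg_int32_t *src0,
                                   const ogg_int32_t *src1, const ogg_int32_t *win,
                                   int len)
{
  int i;
  for(i=0;i<len;i++){
    long long s0 = src0[i];              /* walks forward              */
    long long s1 = src1[len-1-i];        /* walks backward             */
    long long w0 = win[i];               /* rising half of the window  */
    long long w1 = win[2*len-1-i];       /* falling half of the window */
    /* smull/smlal keep the top 32 bits of each 64-bit product; the
       final lsl #1 restores the Q31 scaling of the window coefficients */
    dst[2*len-1-i] = (ogg_int32_t)(((ogg_uint32_t)((s0*w0 + s1*w1)>>32))<<1);
    dst[i]         = (ogg_int32_t)(((ogg_uint32_t)((s0*w1 - s1*w0)>>32))<<1);
  }
}
#endif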
#endif
#ifndef _V_LSP_MATH_ASM
#define _V_LSP_MATH_ASM
static inline void lsp_loop_asm(ogg_uint32_t *qip,ogg_uint32_t *pip,
ogg_int32_t *qexpp,
ogg_int32_t *ilsp,ogg_int32_t wi,
ogg_int32_t m){
ogg_uint32_t qi=*qip,pi=*pip;
ogg_int32_t qexp=*qexpp;
asm("mov r0,%3;"
"movs r1,%5,asr#1;"
"add r0,r0,r1,lsl#3;"
"beq 2f;\n"
"1:"
"ldmdb r0!,{r1,r3};"
"subs r1,r1,%4;" //ilsp[j]-wi
"rsbmi r1,r1,#0;" //labs(ilsp[j]-wi)
"umull %0,r2,r1,%0;" //qi*=labs(ilsp[j]-wi)
"subs r1,r3,%4;" //ilsp[j+1]-wi
"rsbmi r1,r1,#0;" //labs(ilsp[j+1]-wi)
"umull %1,r3,r1,%1;" //pi*=labs(ilsp[j+1]-wi)
"cmn r2,r3;" // shift down 16?
"beq 0f;"
"add %2,%2,#16;"
"mov %0,%0,lsr #16;"
"orr %0,%0,r2,lsl #16;"
"mov %1,%1,lsr #16;"
"orr %1,%1,r3,lsl #16;"
"0:"
"cmp r0,%3;\n"
"bhi 1b;\n"
"2:"
// odd filter asymmetry
"ands r0,%5,#1;\n"
"beq 3f;\n"
"add r0,%3,%5,lsl#2;\n"
"ldr r1,[r0,#-4];\n"
"mov r0,#0x4000;\n"
"subs r1,r1,%4;\n" //ilsp[j]-wi
"rsbmi r1,r1,#0;\n" //labs(ilsp[j]-wi)
"umull %0,r2,r1,%0;\n" //qi*=labs(ilsp[j]-wi)
"umull %1,r3,r0,%1;\n" //pi*=labs(ilsp[j+1]-wi)
"cmn r2,r3;\n" // shift down 16?
"beq 3f;\n"
"add %2,%2,#16;\n"
"mov %0,%0,lsr #16;\n"
"orr %0,%0,r2,lsl #16;\n"
"mov %1,%1,lsr #16;\n"
"orr %1,%1,r3,lsl #16;\n"
//qi=(qi>>shift)*labs(ilsp[j]-wi);
//pi=(pi>>shift)*labs(ilsp[j+1]-wi);
//qexp+=shift;
//}
/* normalize to at most 16 significant bits */
"3:"
"mov r2,#0;"
"orr r1,%0,%1;"
"tst r1,#0xff000000;"
"addne r2,r2,#8;"
"movne r1,r1,lsr #8;"
"tst r1,#0x00f00000;"
"addne r2,r2,#4;"
"movne r1,r1,lsr #4;"
"tst r1,#0x000c0000;"
"addne r2,r2,#2;"
"movne r1,r1,lsr #2;"
"tst r1,#0x00020000;"
"addne r2,r2,#1;"
"movne r1,r1,lsr #1;"
"tst r1,#0x00010000;"
"addne r2,r2,#1;"
"mov %0,%0,lsr r2;"
"mov %1,%1,lsr r2;"
"add %2,%2,r2;"
: "+r"(qi),"+r"(pi),"+r"(qexp)
: "r"(ilsp),"r"(wi),"r"(m)
: "r0","r1","r2","r3","cc");
*qip=qi;
*pip=pi;
*qexpp=qexp;
}
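
/* For reference, a plain-C sketch of lsp_loop_asm (illustration only, kept
   out of the build with #if 0; lsp_loop_ref and the unsigned long long
   intermediates are this sketch's own assumptions).  The cmn r2,r3 test
   above fires when either 64-bit product has spilled past 32 bits, which
   is what the (q>>32)|(p>>32) test models here. */
#if 0
#include <stdlib.h> /* labs */
static void lsp_loop_ref(ogg_uint32_t *qip,ogg_uint32_t *pip,
                         ogg_int32_t *qexpp,
                         ogg_int32_t *ilsp,ogg_int32_t wi,
                         ogg_int32_t m){
  ogg_uint32_t qi=*qip,pi=*pip;
  ogg_int32_t qexp=*qexpp;
  int j;

  for(j=0;j+1<m;j+=2){
    unsigned long long q=(unsigned long long)qi*labs(ilsp[j]-wi);
    unsigned long long p=(unsigned long long)pi*labs(ilsp[j+1]-wi);
    if((q>>32)|(p>>32)){ /* shift down 16 */
      q>>=16;p>>=16;qexp+=16;
    }
    qi=(ogg_uint32_t)q;
    pi=(ogg_uint32_t)p;
  }

  if(m&1){ /* odd filter asymmetry: one leftover root; pi gets 1<<14 */
    unsigned long long q=(unsigned long long)qi*labs(ilsp[m-1]-wi);
    unsigned long long p=(unsigned long long)pi<<14;
    if((q>>32)|(p>>32)){
      q>>=16;p>>=16;qexp+=16;
    }
    qi=(ogg_uint32_t)q;
    pi=(ogg_uint32_t)p;
  }

  /* renormalize so (qi|pi) fits in 16 significant bits */
  while((qi|pi)&0xffff0000UL){
    qi>>=1;pi>>=1;qexp++;
  }

  *qip=qi;
  *pip=pi;
  *qexpp=qexp;
}
#endif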
static inline void lsp_norm_asm(ogg_uint32_t *qip,ogg_int32_t *qexpp){
ogg_uint32_t qi=*qip;
ogg_int32_t qexp=*qexpp;
asm("tst %0,#0x0000ff00;"
"moveq %0,%0,lsl #8;"
"subeq %1,%1,#8;"
"tst %0,#0x0000f000;"
"moveq %0,%0,lsl #4;"
"subeq %1,%1,#4;"
"tst %0,#0x0000c000;"
"moveq %0,%0,lsl #2;"
"subeq %1,%1,#2;"
"tst %0,#0x00008000;"
"moveq %0,%0,lsl #1;"
"subeq %1,%1,#1;"
: "+r"(qi),"+r"(qexp)
:
: "cc");
*qip=qi;
*qexpp=qexp;
}
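
/* For reference, a plain-C sketch of lsp_norm_asm (illustration only, kept
   out of the build with #if 0; lsp_norm_ref is this sketch's own name).
   It left-justifies qi into 16 bits in binary steps, adjusting qexp so
   that qi*2^qexp is unchanged. */
#if 0
static void lsp_norm_ref(ogg_uint32_t *qip,ogg_int32_t *qexpp){
  ogg_uint32_t qi=*qip;
  ogg_int32_t qexp=*qexpp;
  if(!(qi&0xff00)){qi<<=8;qexp-=8;}
  if(!(qi&0xf000)){qi<<=4;qexp-=4;}
  if(!(qi&0xc000)){qi<<=2;qexp-=2;}
  if(!(qi&0x8000)){qi<<=1;qexp-=1;}
  *qip=qi;
  *qexpp=qexp;
}
#endif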
#endif
#endif