summaryrefslogtreecommitdiff
path: root/apps/plugins/mpegplayer/idct_armv6.S
blob: 73feed4785785f7bb5bc6c5cc21608a518e43dbb (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2009 by Jens Arnold
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/


    .global     mpeg2_idct_copy
    .type       mpeg2_idct_copy, %function
    .global     mpeg2_idct_add
    .type       mpeg2_idct_add, %function

/* Custom calling convention:
 * r0 contains block pointer and is non-volatile
 * all non-volatile c context saved and restored on its behalf
 *
 * .idct: two-pass 8x8 inverse DCT on the 16-bit coefficient block at [r0].
 * Pass 1 (rows) reads the block and writes transposed results into a
 * scratch 8x8 buffer at [r0+128]; pass 2 (columns) reads that buffer and
 * writes transposed again into the original block, restoring orientation.
 * Each 8-halfword line is stored even/odd interleaved in memory:
 * f0 f2 f4 f6 f1 f3 f5 f7 (presumably arranged by the caller's
 * dequantize tables -- TODO confirm against mpegplayer's zigzag setup).
 * Weights Wn are the classic mpeg2dec fixed-point constants (see the
 * literal pools below); pass 1 results are >>12, pass 2 results >>17.
 * Returns with r0 pointing back at the block; clobbers r1-r12.
 */
.idct:
    str     lr, [sp, #-4]!      @ save lr; r14 is reused as loop counter
    add     r1, r0, #128        @ r1 = secondary, transposed temp buffer
    mov     r14, #8             @ loop counter: 8 rows

.row_loop:
    ldmia   r0!, {r2, r3, r10, r11} @ fetch f0, f2, f4, f6, f1, f3, f5, f7
    ldrd    r4, L_W1357         @ r4 = W1|W3<<16, r5 = W5|W7<<16

    @ Odd part: b0..b3 from f1, f3, f5, f7 (r10 = f1|f3, r11 = f5|f7)
    smuad   r6, r4, r10         @ b0 = W1 * f1 + W3 * f3
    smlad   r6, r5, r11, r6     @    + W5 * f5 + W7 * f7

    smultt  r7, r5, r10         @ b1 = -W7 * f3  (sum negated terms first,
    smlabb  r7, r4, r11, r7     @    + -W1 * f5
    smlabt  r7, r5, r11, r7     @    + -W5 * f7
    rsb     r7, r7, #0          @     then flip the sign of the sum,
    smlatb  r7, r4, r10, r7     @    + W3 * f1   and add the positive term)

    smulbt  r8, r4, r10         @ b2 = -W1 * f3  (same negate-then-add trick)
    rsb     r8, r8, #0
    smlabb  r8, r5, r10, r8     @    + W5 * f1
    smlatb  r8, r5, r11, r8     @    + W7 * f5
    smlatt  r8, r4, r11, r8     @    + W3 * f7

    smusdx  r9, r10, r5         @ b3 = f1 * W7 - f3 * W5
    smlsdx  r9, r11, r4, r9     @    + f5 * W3 - f7 * W1

    ldrd    r4, L_W0246         @ r4 = W0|W2<<16, r5 = W4|W6<<16
    add     r2, r2, #1          @ f0 += 1: rounding bias (1*W0 = 2048 = 0.5<<12)

    @ Even part: a0..a3 from f0, f2, f4, f6 (r2 = f0|f2, r3 = f4|f6)
    smulbb  r10, r4, r2         @ a0' = W0 * f0
    smlabb  r10, r5, r3, r10    @     + W4 * f4
    smultt  r12, r4, r2         @ a3' = W2 * f2
    smlatt  r12, r5, r3, r12    @     + W6 * f6
    add     r10, r10, r12       @ a0  = a0' + a3'
    sub     r12, r10, r12, lsl #1  @ a3  = a0 - 2 * a3'

    smulbb  r11, r5, r3         @ a1' = -W4 * f4
    rsb     r11, r11, #0
    smlabb  r11, r4, r2, r11    @     + W0 * f0
    smultt  r3, r4, r3          @ a2' = -W2 * f6
    rsb     r3, r3, #0
    smlatt  r3, r5, r2, r3      @     + W6 * f2
    add     r11, r11, r3        @ a1  = a1' + a2'
    sub     r3, r11, r3, lsl #1 @ a2  = a1 - 2 * a2'

    @ Butterfly outputs, written transposed: element k of this row goes
    @ to [r1 + k*16], i.e. down a column of the temp buffer (row pitch
    @ of the 8x8 int16 buffer is 16 bytes).
    sub     r2, r10, r6         @ block[7] = (a0 - b0)
    mov     r2, r2, asr #12     @            >> 12
    strh    r2, [r1, #7*16]
    sub     r2, r11, r7         @ block[6] = (a1 - b1)
    mov     r2, r2, asr #12     @            >> 12
    strh    r2, [r1, #6*16]
    sub     r2, r3, r8          @ block[5] = (a2 - b2)
    mov     r2, r2, asr #12     @            >> 12
    strh    r2, [r1, #5*16]
    sub     r2, r12, r9         @ block[4] = (a3 - b3)
    mov     r2, r2, asr #12     @            >> 12
    strh    r2, [r1, #4*16]
    add     r2, r12, r9         @ block[3] = (a3 + b3)
    mov     r2, r2, asr #12     @            >> 12
    strh    r2, [r1, #3*16]
    add     r2, r3, r8          @ block[2] = (a2 + b2)
    mov     r2, r2, asr #12     @            >> 12
    strh    r2, [r1, #2*16]
    add     r2, r11, r7         @ block[1] = (a1 + b1)
    mov     r2, r2, asr #12     @            >> 12
    strh    r2, [r1, #1*16]
    add     r2, r10, r6         @ block[0] = (a0 + b0)
    mov     r2, r2, asr #12     @            >> 12
    strh    r2, [r1], #2        @ advance to next temp column

    subs    r14, r14, #1
    bne     .row_loop
    b       .col_start

    @ placed here because of ldrd's offset limit
L_W1357:                        @ odd weights: Wn for n = 1, 3, 5, 7
    .short  2841                @ W1
    .short  2408                @ W3
    .short  1609                @ W5
    .short   565                @ W7

L_W0246:                        @ even weights: Wn for n = 0, 2, 4, 6
    .short  2048                @ W0
    .short  2676                @ W2
    .short  2048                @ W4
    .short  1108                @ W6

.col_start:
    @ r0 now points to the temp buffer, where we need it.
    sub     r1, r1, #128+16     @ point r1 back to the input block
    mov     r14, #8             @ loop counter: 8 columns

    @ Second pass: identical butterfly, but rounding offset 32 (= 0.5 in
    @ the pass-2 fixed-point scale) and final shift >>17 instead of >>12.
.col_loop:
    ldmia   r0!, {r2, r3, r10, r11} @ fetch f0, f2, f4, f6, f1, f3, f5, f7
    ldrd    r4, L_W1357         @ r4 = W1|W3<<16, r5 = W5|W7<<16

    smuad   r6, r4, r10         @ b0 = W1 * f1 + W3 * f3
    smlad   r6, r5, r11, r6     @    + W5 * f5 + W7 * f7

    smultt  r7, r5, r10         @ b1 = -W7 * f3
    smlabb  r7, r4, r11, r7     @    + -W1 * f5
    smlabt  r7, r5, r11, r7     @    + -W5 * f7
    rsb     r7, r7, #0
    smlatb  r7, r4, r10, r7     @    + W3 * f1

    smulbt  r8, r4, r10         @ b2 = -W1 * f3
    rsb     r8, r8, #0
    smlabb  r8, r5, r10, r8     @    + W5 * f1
    smlatb  r8, r5, r11, r8     @    + W7 * f5
    smlatt  r8, r4, r11, r8     @    + W3 * f7

    smusdx  r9, r10, r5         @ b3 = f1 * W7 - f3 * W5
    smlsdx  r9, r11, r4, r9     @    + f5 * W3 - f7 * W1

    ldrd    r4, L_W0246         @ r4 = W0|W2<<16, r5 = W4|W6<<16
    add     r2, r2, #32         @ DC offset: 0.5 (32*W0 = 65536 = 0.5<<17)

    smulbb  r10, r4, r2         @ a0' = W0 * f0
    smlabb  r10, r5, r3, r10    @     + W4 * f4
    smultt  r12, r4, r2         @ a3' = W2 * f2
    smlatt  r12, r5, r3, r12    @     + W6 * f6
    add     r10, r10, r12       @ a0  = a0' + a3'
    sub     r12, r10, r12, lsl #1  @ a3  = a0 - 2 * a3'

    smulbb  r11, r5, r3         @ a1' = -W4 * f4
    rsb     r11, r11, #0
    smlabb  r11, r4, r2, r11    @     + W0 * f0
    smultt  r3, r4, r3          @ a2' = -W2 * f6
    rsb     r3, r3, #0
    smlatt  r3, r5, r2, r3      @     + W6 * f2
    add     r11, r11, r3        @ a1  = a1' + a2'
    sub     r3, r11, r3, lsl #1 @ a2  = a1 - 2 * a2'

    @ Write transposed once more, back into the original block buffer.
    sub     r2, r10, r6         @ block[7] = (a0 - b0)
    mov     r2, r2, asr #17     @            >> 17
    strh    r2, [r1, #7*16]
    sub     r2, r11, r7         @ block[6] = (a1 - b1)
    mov     r2, r2, asr #17     @            >> 17
    strh    r2, [r1, #6*16]
    sub     r2, r3, r8          @ block[5] = (a2 - b2)
    mov     r2, r2, asr #17     @            >> 17
    strh    r2, [r1, #5*16]
    sub     r2, r12, r9         @ block[4] = (a3 - b3)
    mov     r2, r2, asr #17     @            >> 17
    strh    r2, [r1, #4*16]
    add     r2, r12, r9         @ block[3] = (a3 + b3)
    mov     r2, r2, asr #17     @            >> 17
    strh    r2, [r1, #3*16]
    add     r2, r3, r8          @ block[2] = (a2 + b2)
    mov     r2, r2, asr #17     @            >> 17
    strh    r2, [r1, #2*16]
    add     r2, r11, r7         @ block[1] = (a1 + b1)
    mov     r2, r2, asr #17     @            >> 17
    strh    r2, [r1, #1*16]
    add     r2, r10, r6         @ block[0] = (a0 + b0)
    mov     r2, r2, asr #17     @            >> 17
    strh    r2, [r1], #2        @ advance to next column

    subs    r14, r14, #1
    bne     .col_loop

    sub     r0, r0, #256        @ point r0 back to the input block
    ldr     pc, [sp], #4        @ pop saved lr and return


/* void mpeg2_idct_copy(int16_t *block [r0], uint8_t *dest [r1],
 *                      const int stride [r2])
 * Full IDCT of block, then saturate each result to 0..255 and copy the
 * 8x8 pixels to dest (row pitch = stride bytes), clearing block to zero
 * behind itself as it goes.
 */
mpeg2_idct_copy:
    stmfd  sp!, {r1-r2, r4-r12, lr}  @ keep dest/stride across .idct
    bl     .idct
    ldmfd  sp!, {r1-r2}         @ r1 = dest, r2 = stride

    add    r12, r0, #128        @ r12 = end of block (loop limit)
    ldrd   r4, [r0]             @ preload first 4 coefficients
    mov    r8, #0               @ r8-r11 = all-zero quad used to wipe block
    mov    r9, #0
    mov    r10, #0
    mov    r11, #0
1:
    ldrd   r6, [r0, #8]         @ fetch 2nd half of row while 1st is clamped
    usat16 r4, #8, r4           @ clamp both halfwords to 0..255
    strb   r4, [r1, #0]
    mov    r4, r4, lsr #16
    strb   r4, [r1, #1]
    usat16 r5, #8, r5
    strb   r5, [r1, #2]
    mov    r5, r5, lsr #16
    strb   r5, [r1, #3]
    ldrd   r4, [r0, #16]        @ preload next row
    usat16 r6, #8, r6
    strb   r6, [r1, #4]
    mov    r6, r6, lsr #16
    strb   r6, [r1, #5]
    usat16 r7, #8, r7
    strb   r7, [r1, #6]
    mov    r7, r7, lsr #16
    strb   r7, [r1, #7]
    stmia  r0!, {r8-r11}        @ zero the row just consumed, advance r0
    add    r1, r1, r2           @ dest += stride
    cmp    r0, r12
    blo    1b

    ldmfd  sp!, {r4-r12, pc}

/* void mpeg2_idct_add(const int last [r0], int16_t *block [r1],
 *                     uint8_t *dest [r2], const int stride [r3])
 * Add the IDCT of block to the 8x8 pixel area at dest with unsigned
 * saturation, clearing block behind itself. Shortcut: when last == 129
 * and (block[0] & 0x70) != 0x40, the block is treated as DC-only and
 * handled without a full transform (label 3), matching the mpeg2dec
 * coefficient-count convention -- TODO confirm against the caller.
 */
mpeg2_idct_add:
    cmp    r0, #129             @ last == 129 ? (candidate for DC shortcut)
    mov    r0, r1               @ r0 = block (flags unaffected)
    ldreqsh r1, [r0, #0]        @ only then peek at block[0]
    bne    1f                   @ last != 129 -> full IDCT path
    and    r1, r1, #0x70
    cmp    r1, #0x40
    bne    3f                   @ DC-only path unless (block[0]&0x70)==0x40
1:
    @ Full path: transform, then add rows to dest while zeroing block.
    stmfd  sp!, {r2-r12, lr}    @ keep dest/stride across .idct
    bl     .idct
    ldmfd  sp!, {r1-r2}         @ r1 = dest, r2 = stride
    mov    r11, #0              @ zero word for wiping block
    add    r12, r0, #128        @ r12 = end of block (loop limit)
2:
    ldmia  r0, {r3-r6}          @ one row: 8 coefficients
    ldrb   r7, [r1, #0]         @ load 8 dest pixels
    ldrb   r8, [r1, #1]
    ldrb   r9, [r1, #2]
    ldrb   r10, [r1, #3]
    str    r11, [r0], #4        @ zero coefficients just loaded
    orr    r7, r7, r8, lsl #16  @ pack 2 pixels into halfword lanes
    sadd16 r3, r3, r7           @ pixel + residual, per 16-bit lane
    usat16 r3, #8, r3           @ clamp both lanes to 0..255
    strb   r3, [r1, #0]
    mov    r3, r3, lsr #16
    strb   r3, [r1, #1]
    str    r11, [r0], #4
    orr    r9, r9, r10, lsl #16
    sadd16 r4, r4, r9
    usat16 r4, #8, r4
    strb   r4, [r1, #2]
    mov    r4, r4, lsr #16
    strb   r4, [r1, #3]
    ldrb   r7, [r1, #4]
    ldrb   r8, [r1, #5]
    ldrb   r9, [r1, #6]
    ldrb   r10, [r1, #7]
    str    r11, [r0], #4
    orr    r7, r7, r8, lsl #16
    sadd16 r5, r5, r7
    usat16 r5, #8, r5
    strb   r5, [r1, #4]
    mov    r5, r5, lsr #16
    strb   r5, [r1, #5]
    str    r11, [r0], #4
    orr    r9, r9, r10, lsl #16
    sadd16 r6, r6, r9
    usat16 r6, #8, r6
    strb   r6, [r1, #6]
    mov    r6, r6, lsr #16
    strb   r6, [r1, #7]
    add    r1, r1, r2           @ dest += stride
    cmp    r0, r12
    blo    2b
    ldmfd  sp!, {r4-r12, pc}

3:
    @ DC-only path: add the same rounded DC value to all 64 pixels.
    stmfd  sp!, {r4-r7}
    ldrsh  r1, [r0, #0]           /* r1 = block[0] */
    mov    r11, #0
    strh   r11, [r0, #0]          /* block[0] = 0 */
    strh   r11, [r0, #126]        /* block[63] = 0 (presumably the mismatch-
                                     control coefficient -- verify) */
    add    r1, r1, #64            /* r1 = DC*128 + rounding; DC = r1 >> 7 */
    add    r0, r2, r3, asl #3     /* r0 = dest + 8*stride = end pointer */
4:
    ldrb   r4, [r2, #0]
    ldrb   r5, [r2, #1]
    ldrb   r6, [r2, #2]
    ldrb   r7, [r2, #3]
    add    r4, r4, r1, asr #7     /* pixel += DC */
    usat   r4, #8, r4             /* clamp to 0..255 */
    strb   r4, [r2, #0]
    add    r5, r5, r1, asr #7
    usat   r5, #8, r5
    strb   r5, [r2, #1]
    add    r6, r6, r1, asr #7
    usat   r6, #8, r6
    strb   r6, [r2, #2]
    add    r7, r7, r1, asr #7
    usat   r7, #8, r7
    strb   r7, [r2, #3]
    ldrb   r4, [r2, #4]
    ldrb   r5, [r2, #5]
    ldrb   r6, [r2, #6]
    ldrb   r7, [r2, #7]
    add    r4, r4, r1, asr #7
    usat   r4, #8, r4
    strb   r4, [r2, #4]
    add    r5, r5, r1, asr #7
    usat   r5, #8, r5
    strb   r5, [r2, #5]
    add    r6, r6, r1, asr #7
    usat   r6, #8, r6
    strb   r6, [r2, #6]
    add    r7, r7, r1, asr #7
    usat   r7, #8, r7
    strb   r7, [r2, #7]
    add    r2, r2, r3             /* dest row += stride */
    cmp    r2, r0
    blo    4b
    ldmfd  sp!, {r4-r7}
    bx     lr