summaryrefslogtreecommitdiff
path: root/apps/dsp.h
diff options
context:
space:
mode:
authorMaurus Cuelenaere <mcuelenaere@gmail.com>2009-07-04 13:17:58 +0000
committerMaurus Cuelenaere <mcuelenaere@gmail.com>2009-07-04 13:17:58 +0000
commitc3bc8fda8019c69c1bf9cd74539df07db527eebc (patch)
tree7bab3843bfe24cbdbb5153baba12827bcd755a72 /apps/dsp.h
parent861b8d8606059de2f7527e9429dc109e8b89c03c (diff)
downloadrockbox-c3bc8fda8019c69c1bf9cd74539df07db527eebc.zip
rockbox-c3bc8fda8019c69c1bf9cd74539df07db527eebc.tar.gz
rockbox-c3bc8fda8019c69c1bf9cd74539df07db527eebc.tar.bz2
rockbox-c3bc8fda8019c69c1bf9cd74539df07db527eebc.tar.xz
Revert "Consolidate all fixed point math routines in one library (FS#10400) by Jeffrey Goode"
git-svn-id: svn://svn.rockbox.org/rockbox/trunk@21635 a1c6a512-1295-4272-9138-f99709370657
Diffstat (limited to 'apps/dsp.h')
-rw-r--r--apps/dsp.h80
1 file changed, 80 insertions, 0 deletions
diff --git a/apps/dsp.h b/apps/dsp.h
index 3d24b24..8c23c30 100644
--- a/apps/dsp.h
+++ b/apps/dsp.h
@@ -64,6 +64,86 @@ enum {
DSP_CALLBACK_SET_STEREO_WIDTH
};
+/* A bunch of fixed point assembler helper macros */
+#if defined(CPU_COLDFIRE)
+/* These macros use the Coldfire EMAC extension and need the MACSR flags set
+ * to fractional mode with no rounding.
+ */
+
/* Multiply two S.31 fractional integers and return the sign bit and the
 * 31 most significant bits of the result.
 *
 * Requires MACSR to be in fractional mode with rounding disabled (see the
 * note above): in that mode mac.l produces the S.31 product directly in
 * acc0, so no post-shift is needed here.
 */
#define FRACMUL(x, y) \
({ \
    long t; \
    /* mac.l: fractional multiply-accumulate of a*b into acc0 */ \
    /* movclr.l: fetch the result and clear acc0 for the next use */ \
    asm ("mac.l %[a], %[b], %%acc0\n\t" \
         "movclr.l %%acc0, %[t]\n\t" \
         : [t] "=r" (t) : [a] "r" (x), [b] "r" (y)); \
    t; \
})
+
/* Multiply two S.31 fractional integers, and return the 32 most significant
 * bits after a shift left by the constant z. NOTE: Only works for shifts of
 * 1 to 8 on Coldfire!
 *
 * The top z bits shifted out of acc0 must be recovered from the EMAC
 * accumulator extension byte (presumably the low 8 bits of accext01 extend
 * acc0 -- see the ColdFire EMAC documentation), which is why z is limited
 * to 8. The recovered bits are OR-ed back in below the shifted result.
 */
#define FRACMUL_SHL(x, y, z) \
({ \
    long t, t2; \
    /* product -> acc0 (fractional mode, as for FRACMUL) */ \
    asm ("mac.l %[a], %[b], %%acc0\n\t" \
         /* t = 8 - z: right-shift count for the extension bits */ \
         "moveq.l %[d], %[t]\n\t" \
         /* t2 = extension register; keep only acc0's 8 extension bits */ \
         "move.l %%accext01, %[t2]\n\t" \
         "and.l %[mask], %[t2]\n\t" \
         /* align the z extension bits that belong in the result */ \
         "lsr.l %[t], %[t2]\n\t" \
         /* fetch product (clearing acc0), shift left by z */ \
         "movclr.l %%acc0, %[t]\n\t" \
         "asl.l %[c], %[t]\n\t" \
         /* merge the recovered high bits into the low end */ \
         "or.l %[t2], %[t]\n\t" \
         : [t] "=&d" (t), [t2] "=&d" (t2) \
         : [a] "r" (x), [b] "r" (y), [mask] "d" (0xff), \
           [c] "i" ((z)), [d] "i" (8 - (z))); \
    t; \
})
+
+#elif defined(CPU_ARM)
+
/* Multiply two S.31 fractional integers and return the sign bit and the
 * 31 most significant bits of the result.
 *
 * smull yields the full 64-bit signed product in t (low) / t2 (high);
 * for S.31 * S.31 the useful result is bits 62..31, i.e. (hi << 1) with
 * the top bit of lo shifted in -- which is what the two shifts build.
 */
#define FRACMUL(x, y) \
({ \
    long t, t2; \
    asm ("smull %[t], %[t2], %[a], %[b]\n\t" \
         "mov %[t2], %[t2], asl #1\n\t"      /* high part << 1 */ \
         "orr %[t], %[t2], %[t], lsr #31\n\t" /* | low part >> 31 */ \
         : [t] "=&r" (t), [t2] "=&r" (t2) \
         : [a] "r" (x), [b] "r" (y)); \
    t; \
})
+
/* Multiply two S.31 fractional integers, and return the 32 most significant
 * bits after a shift left by the constant z.
 *
 * Same scheme as FRACMUL above, but the split point moves by z: result is
 * (hi << (z + 1)) | (lo >> (31 - z)). z must be a compile-time constant --
 * the "M" constraints require immediates valid as ARM shift amounts.
 */
#define FRACMUL_SHL(x, y, z) \
({ \
    long t, t2; \
    asm ("smull %[t], %[t2], %[a], %[b]\n\t" \
         "mov %[t2], %[t2], asl %[c]\n\t"    /* high part << (z + 1) */ \
         "orr %[t], %[t2], %[t], lsr %[d]\n\t" /* | low part >> (31 - z) */ \
         : [t] "=&r" (t), [t2] "=&r" (t2) \
         : [a] "r" (x), [b] "r" (y), \
           [c] "M" ((z) + 1), [d] "M" (31 - (z))); \
    t; \
})
+
+#else
+
/* Portable fallback: multiply two S.31 fractional integers and keep the
 * sign bit plus the 31 most significant bits of the 64-bit product. */
#define FRACMUL(x, y) \
    ((long)(((long long)(x) * (long long)(y)) >> 31))
/* Portable fallback: multiply two S.31 fractional integers and return the
 * 32 most significant bits of the product shifted left by the constant z. */
#define FRACMUL_SHL(x, y, z) \
    ((long)(((long long)(x) * (long long)(y)) >> (31 - (z))))
+
+#endif
+
/* Divide (x * 2^z) by y in 64-bit precision and truncate to long.
 *
 * Implemented as a multiplication by (1LL << (z)) rather than a left shift
 * of x: left-shifting a negative value is undefined behavior in C
 * (C11 6.5.7p4), while the multiplication is well-defined for negative x
 * (as long as the scaled value fits in 64 bits) and produces identical
 * results for all in-range inputs. y must be nonzero.
 */
#define DIV64(x, y, z) ((long)(((long long)(x) * (1LL << (z))) / (y)))
+
struct dsp_config;
int dsp_process(struct dsp_config *dsp, char *dest,