Solve FS#12396 by rolling back r26592. This fixes distortions during mpc playback on Coldfire targets with GCC 4.5.2.

git-svn-id: svn://svn.rockbox.org/rockbox/trunk@31054 a1c6a512-1295-4272-9138-f99709370657
This commit is contained in:
Andree Buschmann 2011-11-25 19:42:26 +00:00
parent 65c46e8d77
commit fabbeba59a

View file

@ -57,49 +57,63 @@
#define MPC_SHR_RND(X, Y) ((X+(1<<(Y-1)))>>Y)
#if defined(CPU_COLDFIRE)
/* Calculate: result = (X*Y)>>14 */
#define MPC_MULTIPLY(X,Y) \
({ \
MPC_SAMPLE_FORMAT t1; \
MPC_SAMPLE_FORMAT t2; \
asm volatile ( \
"mac.l %[x],%[y],%%acc0\n\t" /* multiply */ \
"mulu.l %[y],%[x] \n\t" /* get lower half, avoid emac stall */ \
"movclr.l %%acc0,%[t1] \n\t" /* get higher half */ \
"moveq.l #17,%[t2] \n\t" \
"asl.l %[t2],%[t1] \n\t" /* hi <<= 17, plus one free */ \
"moveq.l #14,%[t2] \n\t" \
"lsr.l %[t2],%[x] \n\t" /* (unsigned)lo >>= 14 */ \
"or.l %[x],%[t1] \n" /* combine result */ \
: [t1]"=&d"(t1), [t2]"=&d"(t2) \
: [x]"d"((X)), [y] "d"((Y))); \
t1; \
})
/* Calculate: result = (X*Y)>>Z */ #define MPC_MULTIPLY(X,Y) mpc_multiply((X), (Y))
#define MPC_MULTIPLY_EX(X,Y,Z) \ #define MPC_MULTIPLY_EX(X,Y,Z) mpc_multiply_ex((X), (Y), (Z))
({ \
MPC_SAMPLE_FORMAT t1; \ static inline MPC_SAMPLE_FORMAT mpc_multiply(MPC_SAMPLE_FORMAT x,
MPC_SAMPLE_FORMAT t2; \ MPC_SAMPLE_FORMAT y)
asm volatile ( \ {
"mac.l %[x],%[y],%%acc0\n\t" /* multiply */ \ MPC_SAMPLE_FORMAT t1, t2;
"mulu.l %[y],%[x] \n\t" /* get lower half, avoid emac stall */ \ asm volatile (
"movclr.l %%acc0,%[t1] \n\t" /* get higher half */ \ "mac.l %[x],%[y],%%acc0\n" /* multiply */
"moveq.l #31,%[t2] \n\t" \ "mulu.l %[y],%[x] \n" /* get lower half, avoid emac stall */
"sub.l %[sh],%[t2] \n\t" /* t2 = 31 - shift */ \ "movclr.l %%acc0,%[t1] \n" /* get higher half */
"ble.s 1f \n\t" \ "moveq.l #17,%[t2] \n"
"asl.l %[t2],%[t1] \n\t" /* hi <<= 31 - shift */ \ "asl.l %[t2],%[t1] \n" /* hi <<= 17, plus one free */
"lsr.l %[sh],%[x] \n\t" /* (unsigned)lo >>= shift */ \ "moveq.l #14,%[t2] \n"
"or.l %[x],%[t1] \n\t" /* combine result */ \ "lsr.l %[t2],%[x] \n" /* (unsigned)lo >>= 14 */
"bra.s 2f \n\t" \ "or.l %[x],%[t1] \n" /* combine result */
"1: \n\t" \ : /* outputs */
"neg.l %[t2] \n\t" /* t2 = shift - 31 */ \ [t1]"=&d"(t1),
"asr.l %[t2],%[t1] \n\t" /* hi >>= t2 */ \ [t2]"=&d"(t2),
"2: \n" \ [x] "+d" (x)
: [t1]"=&d"(t1), [t2]"=&d"(t2) \ : /* inputs */
: [x] "d"((X)), [y] "d"((Y)), [sh]"d"((Z))); \ [y] "d" (y)
t1; \ );
}) return t1;
}
/* Compute (x*y) >> shift with a variable shift amount.
 *
 * The 64-bit product is assembled from two halves: the EMAC accumulator
 * (high part, read via movclr.l) and a plain mulu.l (low 32 bits).  For
 * 31 - shift > 0 both halves contribute and are OR-ed together; otherwise
 * only the high half matters and is arithmetically shifted right.
 *
 * NOTE(review): per the "31 - shift" arithmetic and the sibling
 * mpc_multiply ("plus one free"), acc0 appears to hold the product
 * pre-shifted left by 1, i.e. bits [62:31] — confirm against the EMAC
 * documentation.  Presumably callers pass 1 <= shift <= 62; shift == 0
 * would take the lo>>0 | hi<<31 path, which looks unintended — verify
 * against call sites.
 */
static inline MPC_SAMPLE_FORMAT mpc_multiply_ex(MPC_SAMPLE_FORMAT x,
                                                MPC_SAMPLE_FORMAT y,
                                                unsigned shift)
{
    MPC_SAMPLE_FORMAT t1, t2;
    asm volatile (
        "mac.l %[x],%[y],%%acc0\n" /* multiply */
        "mulu.l %[y],%[x] \n" /* get lower half, avoid emac stall */
        "movclr.l %%acc0,%[t1] \n" /* get higher half */
        "moveq.l #31,%[t2] \n"
        "sub.l %[sh],%[t2] \n" /* t2 = 31 - shift */
        "ble.s 1f \n" /* shift >= 31: high half only */
        "asl.l %[t2],%[t1] \n" /* hi <<= 31 - shift */
        "lsr.l %[sh],%[x] \n" /* (unsigned)lo >>= shift */
        "or.l %[x],%[t1] \n" /* combine result */
        "bra.s 2f \n"
        "1: \n"
        "neg.l %[t2] \n" /* t2 = shift - 31 */
        "asr.l %[t2],%[t1] \n" /* hi >>= t2 */
        "2: \n"
        : /* outputs */
        [t1]"=&d"(t1),
        [t2]"=&d"(t2),
        [x] "+d" (x) /* read-write: mulu.l/lsr.l destroy x */
        : /* inputs */
        [y] "d" (y),
        [sh]"d" (shift)
    );
    return t1;
}
#elif defined(CPU_ARM)
/* Calculate: result = (X*Y)>>14 */
#define MPC_MULTIPLY(X,Y) \