More bugs killed. Now it may even work.

git-svn-id: svn://svn.rockbox.org/rockbox/trunk@230 a1c6a512-1295-4272-9138-f99709370657
This commit is contained in:
Linus Nielsen Feltzing 2002-04-25 13:20:43 +00:00
parent 69d9911175
commit 46daf2b0b3

View file

@ -24,15 +24,12 @@ typedef union
{ {
unsigned int r[7]; /* Registers r8 thru r14 */ unsigned int r[7]; /* Registers r8 thru r14 */
void *sp; /* Stack pointer (r15) */ void *sp; /* Stack pointer (r15) */
unsigned int mach, unsigned int mach;
macl; unsigned int macl;
unsigned int sr; /* Status register */ unsigned int sr; /* Status register */
#if 0
void* gbr; /* Global base register */
#endif
void* pr; /* Procedure register */ void* pr; /* Procedure register */
} regs; } regs;
unsigned int mem[32]; unsigned int mem[12];
} ctx_t; } ctx_t;
typedef struct typedef struct
@ -50,68 +47,8 @@ static thread_t threads = {1, 0};
*/ */
static inline void stctx(void* addr) static inline void stctx(void* addr)
{ {
unsigned int tmp; asm volatile ("add #48, %0\n\t"
"sts.l pr, @-%0\n\t"
/*
[Alkorr] sorry, this code is totally wrong.
Why ?
"mov.l %0,@(imm,%1)"
must be interpreted as :
"%0 = ((long *)%1)[imm]"
not as :
"%0 = *((long *)(((char *)%1) + imm))"
real offset = "imm" x 1 if byte access (.b)
= "imm" x 2 if 16-bit word access (.w)
= "imm" x 4 if 32-bit word access (.l)
Don't forget, SH doesn't like misaligned address, so
remember it doesn't make any sense to have an odd
offset ;).
*/
#if 0
asm volatile ("mov.l r8, @(0, %1)\n\t"
"mov.l r9, @(4, %1)\n\t"
"mov.l r10, @(8, %1)\n\t"
"mov.l r11, @(12, %1)\n\t"
"add #16,%1\n\t"
"mov.l r12, @(0, %1)\n\t"
"mov.l r13, @(4, %1)\n\t"
"mov.l r14, @(8, %1)\n\t"
"mov.l r15, @(12, %1)\n\t"
"add #16,%1\n\t"
"stc sr, %0\n\t"
"mov.l %0, @(0, %1)\n\t"
"stc gbr, %0\n\t"
"mov.l %0, @(4, %1)\n\t"
"sts pr, %0\n\t"
"mov.l %0, @(8, %1)" : "=r&" (tmp) : "r" (addr));
#endif
#if 0
/* here is the correct code */
asm volatile ("mov.l r8, @(0,%1)\n\t"
"mov.l r9, @(1,%1)\n\t"
"mov.l r10, @(2,%1)\n\t"
"mov.l r11, @(3,%1)\n\t"
"mov.l r12, @(4,%1)\n\t"
"mov.l r13, @(5,%1)\n\t"
"mov.l r14, @(6,%1)\n\t"
"mov.l r15, @(7,%1)\n\t"
"stc.l sr, %0\n\t"
"mov.l %0, @(8,%1)\n\t"
"sts pr, %0\n\t"
"mov.l %0, @(9,%1)" : "=r&" (tmp) : "r" (addr));
#endif
/* here is a far better version */
asm volatile ("sts.l pr, @-%0\n\t"
"stc.l sr, @-%0\n\t" "stc.l sr, @-%0\n\t"
"sts.l macl,@-%0\n\t" "sts.l macl,@-%0\n\t"
"sts.l mach,@-%0\n\t" "sts.l mach,@-%0\n\t"
@ -122,8 +59,7 @@ static inline void stctx(void* addr)
"mov.l r11, @-%0\n\t" "mov.l r11, @-%0\n\t"
"mov.l r10, @-%0\n\t" "mov.l r10, @-%0\n\t"
"mov.l r9, @-%0\n\t" "mov.l r9, @-%0\n\t"
"mov.l r8, @-%0" : : "r" (addr+4*10)); "mov.l r8, @-%0" : : "r" (addr));
} }
/*--------------------------------------------------------------------------- /*---------------------------------------------------------------------------
@ -132,29 +68,6 @@ static inline void stctx(void* addr)
*/ */
static inline void ldctx(void* addr) static inline void ldctx(void* addr)
{ {
unsigned int tmp;
/* same remarks as above */
#if 0
asm volatile ("mov.l @(0, %1), r8\n\t"
"mov.l @(4, %1), r9\n\t"
"mov.l @(8, %1), r10\n\t"
"mov.l @(12, %1), r11\n\t"
"add #16,%1\n\t"
"mov.l @(0, %1), r12\n\t"
"mov.l @(4, %1), r13\n\t"
"mov.l @(8, %1), r14\n\t"
"mov.l @(12, %1), r15\n\t"
"add #16,%1\n\t"
"mov.l @(0, %1), r0\n\t"
"ldc %0, sr\n\t"
"mov.l @(4, %1), %0\n\t"
"ldc %0, gbr\n\t"
"mov.l @(8, %1), %0\n\t"
"lds %0, pr\n\t"
"mov.l %0, @(0, r15)" : "=r&" (tmp) : "r" (addr));
#endif
asm volatile ("mov.l @%0+,r8\n\t" asm volatile ("mov.l @%0+,r8\n\t"
"mov.l @%0+,r9\n\t" "mov.l @%0+,r9\n\t"
"mov.l @%0+,r10\n\t" "mov.l @%0+,r10\n\t"
@ -166,8 +79,9 @@ static inline void ldctx(void* addr)
"lds.l @%0+,mach\n\t" "lds.l @%0+,mach\n\t"
"lds.l @%0+,macl\n\t" "lds.l @%0+,macl\n\t"
"ldc.l @%0+,sr\n\t" "ldc.l @%0+,sr\n\t"
"lds.l @%0+,pr" : : "r" (addr)); "mov.l @%0,%0\n\t"
"lds %0,pr\n\t"
"mov.l %0, @(0, r15)" : "+r" (addr));
} }
/*--------------------------------------------------------------------------- /*---------------------------------------------------------------------------