Fix unified syntax in ARM inline assembly
GCC 4.9 always emits assembly in divided syntax. Setting unified syntax in inline assembly causes the assembler to complain about GCC's generated code, because the directive extends past the scope of the inline asm. Fix this by setting divided mode at the end of the inline assembly block. The assembler directives are hidden behind macros because later versions of GCC won't need this workaround: they can be told to use the unified syntax with -masm-syntax-unified.

Change-Id: Ic09e729e5bbb6fd44d08dac348daf6f55c75d7d8
parent 86429dbf1e
commit 58b2e45782

7 changed files with 28 additions and 9 deletions
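As a minimal sketch of the pattern this commit applies throughout: the macro names and directive strings are taken from the diff below, but the wrapper function is a hypothetical illustration, not code from the tree.

```c
/* The workaround: GCC 4.9 emits divided-syntax assembly, so inline asm
 * that wants unified syntax must switch it on, and switch back to
 * divided before control returns to compiler-generated code. */
#define BEGIN_ARM_ASM_SYNTAX_UNIFIED ".syntax unified\n"
#define END_ARM_ASM_SYNTAX_UNIFIED   ".syntax divided\n"

/* Hypothetical example: compare two bytes, using conditional+S-suffix
 * instructions whose spelling differs between the syntaxes (unified
 * "eorseq" / "ldrbeq" vs. divided "eoreqs" / "ldreqb"). */
static inline int bytes_match2(const unsigned char *p,
                               unsigned char b0, unsigned char b1)
{
    int x;
    asm volatile (
        BEGIN_ARM_ASM_SYNTAX_UNIFIED
        "ldrb   %[x], [%[p], #0]   \n"
        "eors   %[x], %[x], %[b0]  \n"
        "ldrbeq %[x], [%[p], #1]   \n" /* unified-only spelling */
        "eorseq %[x], %[x], %[b1]  \n" /* unified-only spelling */
        END_ARM_ASM_SYNTAX_UNIFIED
        : [x] "=&r" (x)
        : [p] "r" (p), [b0] "r" (b0), [b1] "r" (b1)
        : "cc");
    return x == 0;
}
```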
@@ -53,13 +53,14 @@ enum state_enum
 #define CMP_3_CONST(_a, _b) \
 ({ int _x; \
     asm volatile ( \
-        ".syntax unified            \n" \
+        BEGIN_ARM_ASM_SYNTAX_UNIFIED \
         "ldrb   %[x], [%[a], #0]    \n" \
         "eors   %[x], %[x], %[b0]   \n" \
         "ldrbeq %[x], [%[a], #1]    \n" \
         "eorseq %[x], %[x], %[b1]   \n" \
         "ldrbeq %[x], [%[a], #2]    \n" \
         "eorseq %[x], %[x], %[b2]   \n" \
+        END_ARM_ASM_SYNTAX_UNIFIED \
         : [x]"=&r"(_x) \
         : [a]"r"(_a), \
           [b0]"i"(((_b) >> 24) & 0xff), \
@@ -71,7 +72,7 @@ enum state_enum
 #define CMP_4_CONST(_a, _b) \
 ({ int _x; \
     asm volatile ( \
-        ".syntax unified            \n" \
+        BEGIN_ARM_ASM_SYNTAX_UNIFIED \
         "ldrb   %[x], [%[a], #0]    \n" \
         "eors   %[x], %[x], %[b0]   \n" \
         "ldrbeq %[x], [%[a], #1]    \n" \
@@ -80,6 +81,7 @@ enum state_enum
         "eorseq %[x], %[x], %[b2]   \n" \
         "ldrbeq %[x], [%[a], #3]    \n" \
         "eorseq %[x], %[x], %[b3]   \n" \
+        END_ARM_ASM_SYNTAX_UNIFIED \
         : [x]"=&r"(_x) \
         : [a]"r"(_a), \
           [b0]"i"(((_b) >> 24) & 0xff), \
@@ -61,7 +61,7 @@ int corelock_try_lock(struct corelock *cl)
 
     /* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa) */
     asm volatile (
-        ".syntax unified                \n"
+        BEGIN_ARM_ASM_SYNTAX_UNIFIED
         "mov    r1, %[id]               \n" /* r1 = PROCESSOR_ID */
         "ldrb   r1, [r1]                \n"
         "strb   r1, [%[cl], r1, lsr #7] \n" /* cl->myl[core] = core */
@@ -74,6 +74,7 @@ int corelock_try_lock(struct corelock *cl)
         "ands   %[rv], %[rv], r1        \n"
         "strbeq %[rv], [%[cl], r1, lsr #7] \n" /* if not, cl->myl[core] = 0 */
     "1:                                 \n" /* Done */
+        END_ARM_ASM_SYNTAX_UNIFIED
         : [rv] "=r"(rval)
         : [id] "i" (&PROCESSOR_ID), [cl] "r" (cl)
         : "r1","r2","cc"
@@ -73,7 +73,7 @@ static inline void store_context(void* addr)
 static inline void load_context(const void* addr)
 {
     asm volatile(
-        ".syntax unified                \n"
+        BEGIN_ARM_ASM_SYNTAX_UNIFIED
         "ldr     r0, [%0, #40]          \n" /* Load start pointer */
         "cmp     r0, #0                 \n" /* Check for NULL */
 
@@ -86,6 +86,7 @@ static inline void load_context(const void* addr)
 #endif
 
         "ldmia   %0, { r4-r11, sp, lr } \n" /* Load regs r4 to r14 from context */
+        END_ARM_ASM_SYNTAX_UNIFIED
         : : "r" (addr) : "r0" /* only! */
     );
 }
@@ -1005,6 +1005,16 @@ Lyre prototype 1 */
 #define ROCKBOX_STRICT_ALIGN 1
 #endif
 
+/*
+ * These macros are for switching on unified syntax in inline assembly.
+ * Older versions of GCC emit assembly in divided syntax with no option
+ * to enable unified syntax.
+ *
+ * FIXME: This needs to be looked at after the toolchain bump
+ */
+#define BEGIN_ARM_ASM_SYNTAX_UNIFIED ".syntax unified\n"
+#define END_ARM_ASM_SYNTAX_UNIFIED   ".syntax divided\n"
+
 #if defined(CPU_ARM) && defined(__ASSEMBLER__)
 .syntax unified
 /* ARMv4T doesn't switch the T bit when popping pc directly, we must use BX */
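One way the FIXME in the hunk above might later be resolved, sketched under the assumption that the bumped toolchain is driven with -masm-syntax-unified; the commit message names that option but pins no GCC version, so the cutoff below is an assumption, not part of this commit:

```c
/* Hypothetical post-bump variant: with -masm-syntax-unified the
 * compiler's own output is already unified syntax, so the bracketing
 * directives can collapse to nothing and inline asm stays in unified
 * syntax throughout. */
#if defined(__GNUC__) && __GNUC__ >= 6   /* assumed version cutoff */
#define BEGIN_ARM_ASM_SYNTAX_UNIFIED
#define END_ARM_ASM_SYNTAX_UNIFIED
#else
#define BEGIN_ARM_ASM_SYNTAX_UNIFIED ".syntax unified\n"
#define END_ARM_ASM_SYNTAX_UNIFIED   ".syntax divided\n"
#endif
```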
@@ -218,7 +218,7 @@ void fiq_handler(void)
  * r0-r3 and r12 is a working register.
  */
     asm volatile (
-        ".syntax unified              \n"
+        BEGIN_ARM_ASM_SYNTAX_UNIFIED
         "sub     lr, lr, #4           \n"
         "stmfd   sp!, { r0-r3, lr }   \n" /* stack scratch regs and lr */
         "mov     r14, #0              \n" /* Was the callback called? */
@@ -274,6 +274,7 @@ void fiq_handler(void)
         "bhi     .fill_fifo           \n" /* not stop and enough? refill */
         "ldmfd   sp!, { r0-r3, pc }^  \n" /* exit */
         ".ltorg                       \n"
+        END_ARM_ASM_SYNTAX_UNIFIED
         : : "i"(PCM_DMAST_OK), "i"(PCM_DMAST_STARTED)
     );
 }
@@ -327,7 +327,7 @@ void fiq_playback(void)
  */
     asm volatile (
         /* No external calls */
-        ".syntax unified              \n"
+        BEGIN_ARM_ASM_SYNTAX_UNIFIED
         "sub     lr, lr, #4           \n" /* Prepare return address */
         "stmfd   sp!, { lr }          \n" /* stack lr so we can use it */
         "ldr     r12, =0xcf001040     \n" /* Some magic from iPodLinux ... */
@@ -395,6 +395,7 @@ void fiq_playback(void)
         "bne     3b                   \n" /* no? -> go return */
         "b       2b                   \n" /* yes -> get even more */
         ".ltorg                       \n"
+        END_ARM_ASM_SYNTAX_UNIFIED
         : /* These must only be integers! No regs */
         : "i"(PCM_DMAST_OK), "i"(PCM_DMAST_STARTED));
 }
@@ -45,7 +45,7 @@ static inline int32_t vector_sp_add(int16_t* v1, int16_t* f2, int16_t* s2)
 #endif
 
     asm volatile (
-        ".syntax unified             \n"
+        BEGIN_ARM_ASM_SYNTAX_UNIFIED
 #if ORDER > 32
         "mov     %[res], #0          \n"
 #endif
@@ -186,6 +186,7 @@ static inline int32_t vector_sp_add(int16_t* v1, int16_t* f2, int16_t* s2)
 #endif
 
         "99:                         \n"
+        END_ARM_ASM_SYNTAX_UNIFIED
         : /* outputs */
 #if ORDER > 32
         [cnt]"+r"(cnt),
@@ -215,7 +216,7 @@ static inline int32_t vector_sp_sub(int16_t* v1, int16_t* f2, int16_t* s2)
 #endif
 
     asm volatile (
-        ".syntax unified             \n"
+        BEGIN_ARM_ASM_SYNTAX_UNIFIED
 #if ORDER > 32
         "mov     %[res], #0          \n"
 #endif
@@ -356,6 +357,7 @@ static inline int32_t vector_sp_sub(int16_t* v1, int16_t* f2, int16_t* s2)
 #endif
 
         "99:                         \n"
+        END_ARM_ASM_SYNTAX_UNIFIED
         : /* outputs */
 #if ORDER > 32
         [cnt]"+r"(cnt),
@@ -383,7 +385,7 @@ static inline int32_t scalarproduct(int16_t* v1, int16_t* v2)
 #endif
 
     asm volatile (
-        ".syntax unified             \n"
+        BEGIN_ARM_ASM_SYNTAX_UNIFIED
 #if ORDER > 32
         "mov     %[res], #0          \n"
 #endif
@@ -477,6 +479,7 @@ static inline int32_t scalarproduct(int16_t* v1, int16_t* v2)
 #endif
 
         "99:                         \n"
+        END_ARM_ASM_SYNTAX_UNIFIED
         : /* outputs */
 #if ORDER > 32
         [cnt]"+r"(cnt),