ARM Headers to Get Proper Call Stacks - gcc

I am currently optimizing a piece of Linux-based software running on an ARM processor. The optimizations mostly take the form of hand-written ARM and ARM NEON assembly functions.
To profile the software I use perf record and flame graphs; however, once I introduce the assembler functions, they do not stack on top of the functions that call them but instead show up in seemingly random places.
My question, therefore, is: what should I include in my functions for them to appear properly in the call stacks?
There is a slightly related topic, but no good answer was given: How to get call graph profiling working with gcc compiled code and ARM Cortex A8 target?. I use the same flags plus -mapcs-frame.
Below, I give an example of C code translated to ARM by GCC. These functions seem to produce decent stacks, but I would like to understand why.
int half(int in);
int sum(int in1, int in2);
int mean(int in1, int in2);
int half(int i)
{
    return i / 2;
}
int sum(int i, int j)
{
    return i + j;
}
int mean(int i, int j)
{
    int s = sum(i, j);
    int m = half(s);
    return m;
}
int main()
{
    int a = 1;
    int b = 5;
    int i;
    int result;
    for (i = 0; i < 10000000; i++) {
        result = mean(a, b);
    }
    return 0;
}
.cpu cortex-a9
.eabi_attribute 27, 3
.eabi_attribute 28, 1
.fpu neon
.eabi_attribute 20, 1
.eabi_attribute 21, 1
.eabi_attribute 23, 3
.eabi_attribute 24, 1
.eabi_attribute 25, 1
.eabi_attribute 26, 2
.eabi_attribute 30, 6
.eabi_attribute 34, 1
.eabi_attribute 18, 4
.file "a.c"
.text
.align 2
.global half
.type half, %function
half:
# args = 0, pretend = 0, frame = 8
# frame_needed = 1, uses_anonymous_args = 0
mov ip, sp
stmfd sp!, {fp, ip, lr, pc}
sub fp, ip, #4
sub sp, sp, #8
str r0, [fp, #-16]
ldr r3, [fp, #-16]
mov r2, r3, lsr #31
add r3, r2, r3
mov r3, r3, asr #1
mov r0, r3
sub sp, fp, #12
ldmfd sp, {fp, sp, pc}
.size half, .-half
.align 2
.global sum
.type sum, %function
sum:
# args = 0, pretend = 0, frame = 8
# frame_needed = 1, uses_anonymous_args = 0
mov ip, sp
stmfd sp!, {fp, ip, lr, pc}
sub fp, ip, #4
sub sp, sp, #8
str r0, [fp, #-16]
str r1, [fp, #-20]
ldr r2, [fp, #-16]
ldr r3, [fp, #-20]
add r3, r2, r3
mov r0, r3
sub sp, fp, #12
ldmfd sp, {fp, sp, pc}
.size sum, .-sum
.align 2
.global mean
.type mean, %function
mean:
# args = 0, pretend = 0, frame = 16
# frame_needed = 1, uses_anonymous_args = 0
mov ip, sp
stmfd sp!, {fp, ip, lr, pc}
sub fp, ip, #4
sub sp, sp, #16
str r0, [fp, #-24]
str r1, [fp, #-28]
ldr r1, [fp, #-28]
ldr r0, [fp, #-24]
bl sum
str r0, [fp, #-16]
ldr r0, [fp, #-16]
bl half
str r0, [fp, #-20]
ldr r3, [fp, #-20]
mov r0, r3
sub sp, fp, #12
ldmfd sp, {fp, sp, pc}
.size mean, .-mean
.align 2
.global main
.type main, %function
main:
# args = 0, pretend = 0, frame = 16
# frame_needed = 1, uses_anonymous_args = 0
mov ip, sp
stmfd sp!, {fp, ip, lr, pc}
sub fp, ip, #4
sub sp, sp, #16
mov r3, #1
str r3, [fp, #-20]
mov r3, #5
str r3, [fp, #-24]
mov r3, #0
str r3, [fp, #-16]
b .L8
.L9:
ldr r1, [fp, #-24]
ldr r0, [fp, #-20]
bl mean
str r0, [fp, #-28]
ldr r3, [fp, #-16]
add r3, r3, #1
str r3, [fp, #-16]
.L8:
ldr r2, [fp, #-16]
movw r3, #38527
movt r3, 152
cmp r2, r3
ble .L9
mov r3, #0
mov r0, r3
sub sp, fp, #12
ldmfd sp, {fp, sp, pc}
.size main, .-main
.ident "GCC: (crosstool-NG linaro-1.13.1-4.9-2014.09 - Linaro GCC 4.9-2014.09) 4.9.2 20140904 (prerelease)"
.section .note.GNU-stack,"",%progbits
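A plausible explanation for why this output unwinds cleanly, assuming perf record is using its default frame-pointer based unwinding on ARM: the -mapcs-frame flag makes GCC build an APCS frame record that the kernel's user-space backtracer knows how to walk. Annotated, the prologue/epilogue pattern shared by every function above is:
@ APCS frame prologue, as emitted with -mapcs-frame
mov ip, sp                    @ ip = caller's sp
stmfd sp!, {fp, ip, lr, pc}   @ push caller's fp, caller's sp, return address, current pc
sub fp, ip, #4                @ fp now points at the saved pc, so the frame record is:
                              @   [fp, #-4]  return address (lr)
                              @   [fp, #-8]  caller's sp
                              @   [fp, #-12] caller's fp
@ ... function body, fp left untouched ...
sub sp, fp, #12               @ epilogue: rewind sp to the frame record
ldmfd sp, {fp, sp, pc}        @ restore caller's fp and sp, then return
The unwinder can hop from frame to frame by reading the record below each fp, which is presumably why these compiler-generated functions stack correctly, while a hand-written routine that never maintains fp gives the frame-pointer unwinder nothing to follow.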
-------------------EDIT-------------------
Here is an example of the kind of function I am trying to integrate. In terms of linkage, all it does is push the working registers and the link register onto the stack at the beginning and restore them at the end. What should I add to it?
.section .text
.global ARM_smoothing
ARM_smoothing:
STMFD sp!, {r4-r12,lr} //move used registers on stack (avoid segmentation fault)
MOV r5, r0
ADD r0, r0, r2
ADD r0, r0, r2
MOV r8, r0
ADD r8, r8, r2
ADD r8, r8, r2 //the 6 instructions create 3 pointers to the row above and below as well as the current one
ADD r1, r1, r2
ADD r1, r1, r2
ADD r1, r1, #2 //move destination pointer to first element (1 row down, 1 element left)
SUB r2, r2, #2
SUB r3, r3, #2 //counters decremented because smoothing function works with a margin of 1 on every side
LDR r9, =0x1C71C71D //(1/9)*2^32 to perform the division by 9
LDR r10, =0x2
LDR r11, =0xC //shifts for pointers to data
VLDR.U64 d20, =0x1C71C71D //(1/9)*2^32 to perform the division by 9
VLDR.U64 d22, =0x0 //initialization of zeros to be used (not necessarily needed)
VLDR.U64 d23, =0x0
VDUP.32 d20, d20[0] //initialize vector for multiplication
height_loop:
MOV r4, r2 //reset width counter
CMP r4, #8
BLGE width_loop_eight_smoothing //use neon while more than 8 elements in row need smoothing
CMP r4, #1
BLGE width_loop_rest //use normal ARM for remaining elements, can't do in NEON because of margin
ADD r0, r0, #4 //skip margin
ADD r1, r1, #4
ADD r5, r5, #4
ADD r8, r8, #4
SUBS r3, r3, #1 //decrement row counter
BNE height_loop //loop while there still are rows
LDMFD sp!, {r4-r12,pc} //restore stack and return to calling function
width_loop_eight_smoothing:
SUB r4, r4, #8 //decrement width counter
VLD1.16 {d0, d1}, [r5], r10 //load upper left elements
VLD1.16 {d2, d3}, [r5], r10 //load upper middle elements
VADDL.S16 q2, d0, d2 //long addition of elements to be sure to not lose any data
VADDL.S16 q3, d1, d3
VLD1.16 {d0, d1}, [r5], r11 //load upper right elements
VLD1.16 {d2, d3}, [r0], r10 //load middle left elements
VADDL.S16 q4, d0, d2
VADDL.S16 q5, d1, d3
VADD.S32 q2, q4 //add to grand total
VADD.S32 q3, q5
VLD1.16 {d0, d1}, [r0], r10 //load current elements
VLD1.16 {d2, d3}, [r0], r11 //load middle right elements
VADDL.S16 q4, d0, d2
VADDL.S16 q5, d1, d3
VADD.S32 q2, q4
VADD.S32 q3, q5
VLD1.16 {d0, d1}, [r8], r10 //load lower left elements
VLD1.16 {d2, d3}, [r8], r10 //load lower middle elements
VADDL.S16 q4, d0, d2
VADDL.S16 q5, d1, d3
VADD.S32 q2, q4
VADD.S32 q3, q5
VLD1.16 {d0, d1}, [r8], r11 //load lower right elements
VADDL.S16 q4, d0, d22
VADDL.S16 q5, d1, d23
VADD.S32 q2, q4
VADD.S32 q3, q5
VMULL.S32 q6, d4, d20 //divide by 9 (upper element is total divided by 9)
VMULL.S32 q7, d5, d20
VMULL.S32 q8, d6, d20
VMULL.S32 q9, d7, d20
VUZP.32 q6, q7 //pack results into less registers and smaller elements
VUZP.32 q8, q9
VUZP.16 q7, q9
VSHR.U16 q8, q7, #15 //when multiplied element is negative, result is always one under
VADD.S16 q7, q8 //rectifying by adding sign bit to total
VST1.16 {d14, d15}, [r1]! //store results
CMP r4, #8 //check if there are enough elements to do 8 more in NEON
BCS width_loop_eight_smoothing //if yes, loop neon code
MOV PC, LR //return to ARM_smoothing if not
width_loop_rest: //works similarly to NEON but one element at a time
LDRSH r6, [r0], #2 //converts loaded half words to signed full words
LDRSH r7, [r0] //main difference is with the way increments are done since there is an overlap
ADD r6, r7, r6
LDRSH r7, [r0, #2]
ADD r6, r7, r6
LDRSH r7, [r5], #2
ADD r6, r7, r6
LDRSH r7, [r5]
ADD r6, r7, r6
LDRSH r7, [r5, #2]
ADD r6, r7, r6
LDRSH r7, [r8], #2
ADD r6, r7, r6
LDRSH r7, [r8]
ADD r6, r7, r6
LDRSH r7, [r8, #2]
ADD r6, r7, r6
SMULLS r6, r7, r6, r9
ADDMI r7, #1
STRH r7, [r1], #2
SUBS r4, #1 //decrement width counter and check if there's any left
BNE width_loop_rest
MOV PC, LR

You can clearly see how the compiler annotates the assembly with some pseudo-ops...
.global mean
.type mean, %function
...
.size mean, .-mean
These are put in the ELF object file and need to make it into the build so that the call-graph tools can know what PC range belongs to your assembler function.
.global ARM_smoothing
+ .type ARM_smoothing, %function
...
+ .size ARM_smoothing, .-ARM_smoothing
Other pseudo-ops depend on the debug information needed.
.func
.endfunc
.size
ARM CFI question
.cantunwind
Others are .fnend, .fnstart, .movsp, .save, .setfp, etc.
It depends on the debug/object format expected by the tool. There are also two types of data:
code extent information
stack and frame use
Both are typically needed for unwinding (or a stack backtrace), but a sampling performance tool might get away with only the first. Exception-handling code that does object clean-up requires the most information.
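Applied to the ARM_smoothing routine from the edit, a minimal annotated skeleton could look like the sketch below. This is only a sketch: the .type/.size pair is what gives the tools the symbol's PC range, while the .fnstart/.save/.fnend directives are optional and only needed if you also want EABI unwind tables emitted for the function.
.text
.align 2
.global ARM_smoothing
.type ARM_smoothing, %function          @ mark the symbol as code
ARM_smoothing:
.fnstart                                @ optional: start of EABI unwind information
STMFD sp!, {r4-r12,lr}                  @ same register save as before...
.save {r4-r12,lr}                       @ ...described to the unwinder
@ ... body of the routine, unchanged ...
LDMFD sp!, {r4-r12,pc}                  @ restore and return
.fnend                                  @ optional: end of unwind information
.size ARM_smoothing, .-ARM_smoothing    @ lets tools compute the function's extent
If perf still mis-parents the samples after that, adding the -mapcs-frame style prologue/epilogue shown earlier (so that fp keeps pointing at a valid frame record) should give the frame-pointer unwinder something to follow as well.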
Related: ARM Link and frame register

Related

gcc ARM produces incorrect code - how to correct

gcc ARM for STM32F407 micro
The following function is used as a sanity check in FreeRTOS+TCP:
UBaseType_t bIsValidNetworkDescriptor( const NetworkBufferDescriptor_t * pxDesc )
{
    uint32_t offset = ( uint32_t ) ( ((const char *)pxDesc) - ((const char *)xNetworkBuffers) );
    if( ( offset >= (uint32_t)(sizeof( xNetworkBuffers )) ) || ( ( offset % sizeof( xNetworkBuffers[0] ) ) != 0 ) )
        return pdFALSE;
    return (UBaseType_t) (pxDesc - xNetworkBuffers) + 1;
}
The line in question is ---> offset >= (uint32_t)(sizeof( xNetworkBuffers ))
gcc produces a bhi instruction after the cmp instead of a bhs.
I tried casting both as shown in the code above, but nothing seems to get the bhs instruction to be used.
Any help appreciated.
Thanks.
Joe
Well, knowing the exact size of the xNetworkBuffers array, the compiler can simply optimize the comparison. Being curious, I gave it a try. Following is the code with small modifications, the asm output, and the explanation:
#include <stdint.h>
typedef struct abc {
    char data[10];
} NetworkBufferDescriptor_t;
NetworkBufferDescriptor_t xNetworkBuffers[5];
int bIsValidNetworkDescriptor( const NetworkBufferDescriptor_t * pxDesc )
{
    uint32_t offset = ( uint32_t ) ( ((const char *)pxDesc) - ((const char *)xNetworkBuffers) );
    if( ( offset >= (uint32_t)(sizeof( xNetworkBuffers )) ) || ( ( offset % sizeof( xNetworkBuffers[0] ) ) != 0 ) )
        return 0;
    return (int) (pxDesc - xNetworkBuffers) + 1;
}
and the asm output is:
bIsValidNetworkDescriptor:
# Function supports interworking.
# args = 0, pretend = 0, frame = 16
# frame_needed = 1, uses_anonymous_args = 0
# link register save eliminated.
str fp, [sp, #-4]!
add fp, sp, #0
sub sp, sp, #20
str r0, [fp, #-16]
ldr r3, [fp, #-16]
ldr r2, .L5
sub r3, r3, r2
str r3, [fp, #-8]
ldr r3, [fp, #-8]
cmp r3, #49
bhi .L2
ldr r1, [fp, #-8]
ldr r3, .L5+4
umull r2, r3, r1, r3
lsr r2, r3, #3
mov r3, r2
lsl r3, r3, #2
add r3, r3, r2
lsl r3, r3, #1
sub r2, r1, r3
cmp r2, #0
beq .L3
.L2:
mov r3, #0
b .L4
.L3:
ldr r3, [fp, #-16]
ldr r2, .L5
sub r3, r3, r2
asr r2, r3, #1
mov r3, r2
lsl r3, r3, #1
add r3, r3, r2
lsl r1, r3, #4
add r3, r3, r1
lsl r1, r3, #8
add r3, r3, r1
lsl r1, r3, #16
add r3, r3, r1
lsl r3, r3, #2
add r3, r3, r2
add r3, r3, #1
.L4:
mov r0, r3
add sp, fp, #0
# sp needed
ldr fp, [sp], #4
bx lr
.L6:
.align 2
.L5:
In the quoted asm code you can see that it is comparing with 49, not 50 (which is the actual size of xNetworkBuffers), so the conclusion I reached is that
offset >= (uint32_t)(sizeof( xNetworkBuffers ))
is equivalent to
offset > (uint32_t)(sizeof( xNetworkBuffers ) - 1)
and in that case the compiler can use BHI, producing the same result.
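For an unsigned comparison the two forms really are interchangeable. A minimal sketch (hypothetical fragment, with r3 holding offset and xNetworkBuffers occupying 50 bytes as in this example):
@ the form written in the C source:
cmp r3, #50
bhs .Lreject        @ unsigned "higher or same": taken when offset >= 50
@ the form GCC chose, which is the same test:
cmp r3, #49
bhi .Lreject        @ unsigned "higher": taken when offset > 49, i.e. offset >= 50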
I think the code generated by GCC is correct, technically speaking. offset cannot be larger than INT_MAX, because this is the maximum value representable in ptrdiff_t on this architecture.
You can compute the difference like this:
uintptr_t offset = (uintptr_t)pxDesc - (uintptr_t)xNetworkBuffers;
This is still implementation-defined, but it will avoid the overflow problem.

ARM assembly quicksort and recursion

I'm trying to convert C quicksort code to ARM assembly code.
Firstly, I apologize for the long question. Just in case, I included the whole code, but the only part causing problems is the function recursion.
I have almost finished this work, except for the recursion part.
I've also tried converting the C code to ARM assembly with a GCC cross compiler to see how the compiler does it, but there are still a few things that I can't understand well.
This is the C code:
void quicksort_c(int array[8], int first, int last)
{
    int i, j, pivot, temp;
    if(first < last)
    {
        pivot = first;
        i = first;
        j = last;
        while(i < j)
        {
            while(array[i] <= array[pivot] && i < last)
            {
                i++;
            }
            while(array[j] > array[pivot])
            {
                j--;
            }
            if(i < j)
            {
                temp = array[i];
                array[i] = array[j];
                array[j] = temp;
            }
        }
        temp = array[pivot];
        array[pivot] = array[j];
        array[j] = temp;
        quicksort_c(array, first, j-1);
        quicksort_c(array, j+1, last);
    }
}
And this is the ARM assembly code that I wrote from scratch:
quicksort
STMFD sp!, {r0-r8,lr}
quicksort_recursion
CMP r1, r2 ;// if(first < last)
BGE out_of_if
outer_if
MOV r5, r1 ;// pivot = first
MOV r3, r1 ;// i = first;
MOV r4, r2 ;// j = last;
outer_while_loop
CMP r3, r4 ;// while(i < j)
BGE end_of_outer_while
outer_while
inner_while_1_loop
;//while(array[i] <= array[pivot] && i < last)
LDR r7, [r0, r3, lsl #2] ;//r7 = array[i]
LDR r8, [r0, r5, lsl #2] ;//r8 = array[pivot]
CMP r7, r8 ;//array[i] <= array[pivot]
BGT inner_while_2_loop
CMP r3, r2 ;// i < last
BGE inner_while_2_loop
inner_while_1
ADD r3, #1 ;//i++
B inner_while_1_loop
inner_while_2_loop
;//while(array[j] > array[pivot])
LDR r7, [r0, r4, lsl #2] ;// r7 = array[j]
LDR r8, [r0, r5, lsl #2] ;//r8 = array[pivot]
CMP r7, r8 ;// (array[j] > array[pivot])
BLE inner_if_cmp
inner_while_2
SUB r4, #1
B inner_while_2_loop
inner_if_cmp
CMP r3, r4;// if(i < j)
BGE outer_while_loop
inner_if
LDR r7, [r0, r3, lsl #2] ;// r7 = array[i]
MOV r6, r7 ;// temp = array[i]
LDR r8, [r0, r4, lsl #2] ;// r8 = array[j]
STR r8, [r0, r3, lsl #2] ;// array[i] = array[j]
STR r6, [r0, r4, lsl #2] ;// array[j] = temp;
B outer_while_loop
end_of_outer_while
LDR r7, [r0, r5, lsl #2] ;// r7 = array[pivot]
MOV r6, r7 ;// temp = array[pivot]
LDR r8, [r0, r4, lsl #2] ;// r8 = array[j]
STR r8, [r0, r5, lsl #2] ;// array[pivot] = array[j]
STR r6, [r0, r4, lsl #2] ;// array[j] = temp
;;//quicksort(array,first,j-1)
STMFD sp!, {r0-r8,lr}
SUBS r2, r4, #1
BL quicksort_recursion
;//LDMFD sp!, {r0-r8,pc}
;//quicksort(array, j+1, last)
STMFD sp!, {r0-r8,lr}
ADDS r1, r4, #1
BL quicksort_recursion
;//LDMFD sp!, {r0-r8,pc}
out_of_if
end_function
LDMFD sp!, {r0-r8,pc}
END
And this is how I use the registers:
r0: array pointer
r1: first index
r2: last index
r3: i
r4: j
r5: pivot
r6: temp
r7: array[?] value
r8: array[?] value
I confirmed that every part of my code except the recursion works well.
At first, I did the recursion this way:
;;//quicksort(array,first,j-1)
SUBS r2, r4, #1
BL quicksort
;//quicksort(array, j+1, last)
ADDS r1, r4, #1
BL quicksort
But this code only does the first recursive call.
ex)
Array before quicksorting : 5 1 4 7 8 3 6
Array after quicksorting : 1 2 4 3 5 8 7 6
I think it's because, after the function call, it doesn't go back to where it branched, so I added a push and pop before and after the function call like this:
;;//quicksort(array,first,j-1)
STMFD sp!, {r0-r8,lr}
SUBS r2, r4, #1
BL quicksort_recursion
;//quicksort(array, j+1, last)
STMFD sp!, {r0-r8,lr}
ADDS r1, r4, #1
BL quicksort_recursion
But this code falls into an infinite loop.
Here is the GCC-compiled code:
quicksort_linux
cmp r1, r2
blt L16
bx lr
L16
push {r4, r5, r6, r7, r8, r9, r10, lr}
mov r10, r1
add ip, r0, r1, lsl #2
mov r5, r2
mov r4, r1
L3
lsls r3, r4, #2
add lr, r0, r3
ldr r7, [r0, r4, lsl #2]
ldr r6, [ip]
cmp r2, r4
ite le
movle r8, #0
movgt r8, #1
cmp r7, r6
it gt
movgt r8, #0
adds r3, r3, #4
add r3, r3, r0
cmp r8, #0
beq L9
L4
adds r4, r4, #1
mov lr, r3
ldr r7, [r3], #4
cmp r2, r4
ite le
movle r1, #0
movgt r1, #1
cmp r7, r6
it gt
movgt r1, #0
cmp r1, #0
bne L4
L9
lsls r3, r5, #2
add r9, r0, r3
ldr r1, [r0, r5, lsl #2]
cmp r1, r6
ble L5
subs r3, r3, #4
add r3, r3, r0
L6
subs r5, r5, #1
mov r9, r3
ldr r1, [r3], #-4
cmp r1, r6
bgt L6
L5
cmp r4, r5
bge L7
str r1, [lr]
str r7, [r9]
b L3
L7
mov r7, r2
mov r1, r10
mov r4, r0
ldr r3, [r0, r5, lsl #2]
str r3, [r0, r10, lsl #2]
str r6, [r0, r5, lsl #2]
subs r2, r5, #1
bl quicksort_linux
mov r2, r7
adds r1, r5, #1
mov r0, r4
bl quicksort_linux
pop {r4, r5, r6, r7, r8, r9, r10, pc}
END
It seems like the compiled code doesn't do any push or pop before or after the recursive branches, yet it works.
So, my questions are:
1. What is the problem with my function recursion code?
2. How can I fix it?
3. How can the GCC-compiled code work without any push and pop before and after the recursive calls?
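For reference, the call/return pattern the compiler output above follows is roughly the sketch below. It is only a sketch, not a drop-in replacement for the code in the question (the register choices for the stashed values are illustrative): lr is saved once in the prologue and restored straight into pc in the epilogue, nothing is pushed or popped around the individual BL instructions, and anything still needed after a BL is kept in callee-saved registers.
quicksort
CMP r1, r2 ;// if(first >= last) there is nothing to do
BXGE lr
STMFD sp!, {r4-r8,lr} ;// save callee-saved registers and the return address once, at entry
;// ... partition exactly as in the question, leaving j in r4 ...
MOV r6, r0 ;// r0-r3 are argument registers and may be clobbered by the callee,
MOV r7, r2 ;// so stash the array pointer and 'last' in callee-saved registers
SUB r2, r4, #1
BL quicksort ;// quicksort(array, first, j-1): BL overwrites lr, but our own return address is safe on the stack
MOV r0, r6 ;// r4, r6 and r7 survived the call because every invocation saves/restores r4-r8 itself
ADD r1, r4, #1
MOV r2, r7
BL quicksort ;// quicksort(array, j+1, last)
LDMFD sp!, {r4-r8,pc} ;// restore and return in one instruction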

Why does _exit() jump to _etext?

I am running a project using the GNU ARM Embedded Toolchain on an STM32 microcontroller, which uses newlib.
I called assert(false) to test the assert output and ended up in a hard fault exception. I debugged into the assembly of assert(...) and found out that a subsequent call to _exit(1) jumps to an address labelled _etext. A look at the man page of _etext shows that _etext is the address of the end of the .text section.
I am really confused. I had assumed that _exit() calls __exit() (which is declared as a global symbol by newlib), which I implemented in a file named syscalls.c.
Why does _exit() jump to _etext?
Here are some code snippets for a better understanding:
The subsequent call to _exit() from abort(), taken from newlib 2.5:
_VOID
_DEFUN_VOID (abort)
{
#ifdef ABORT_MESSAGE
  write (2, "Abort called\n", sizeof ("Abort called\n")-1);
#endif
  while (1)
    {
      raise (SIGABRT);
      _exit (1);
    }
}
The disassembly of abort and assert. Take a special look at address 0808a10a, where the jump to 80a5198 (_etext) is performed:
abort:
0808a100: push {r3, lr}
0808a102: movs r0, #6
0808a104: bl 0x808bfdc <raise>
0808a108: movs r0, #1
0808a10a: bl 0x80a51d8
0808a10e: nop
__assert_func:
0808a110: push {lr}
0808a112: ldr r4, [pc, #40] ; (0x808a13c <__assert_func+44>)
0808a114: ldr r6, [r4, #0]
0808a116: mov r5, r0
0808a118: sub sp, #20
0808a11a: mov r4, r3
0808a11c: ldr r0, [r6, #12]
0808a11e: cbz r2, 0x808a136 <__assert_func+38>
0808a120: ldr r3, [pc, #28] ; (0x808a140 <__assert_func+48>)
0808a122: str r2, [sp, #8]
0808a124: stmia.w sp, {r1, r3}
0808a128: mov r2, r4
0808a12a: mov r3, r5
0808a12c: ldr r1, [pc, #20] ; (0x808a144 <__assert_func+52>)
0808a12e: bl 0x808a5f4 <fiprintf>
0808a132: bl 0x808a100 <abort>
0808a136: ldr r3, [pc, #16] ; (0x808a148 <__assert_func+56>)
0808a138: mov r2, r3
0808a13a: b.n 0x808a122 <__assert_func+18>
0808a13c: str r0, [r3, #120] ; 0x78
0808a13e: movs r0, #0
0808a140: add r12, r11
0808a142: lsrs r2, r1, #32
0808a144: add r12, sp
0808a146: lsrs r2, r1, #32
0808a148: add r8, sp
0808a14a: lsrs r2, r1, #32
The lss file, which shows that 80a5198 is the address of _etext:
0808a0c0 <abort>:
808a0c0: b508 push {r3, lr}
808a0c2: 2006 movs r0, #6
808a0c4: f001 ff6a bl 808bf9c <raise>
808a0c8: 2001 movs r0, #1
808a0ca: f01b f865 bl 80a5198 <_etext>
808a0ce: bf00 nop

arm-none-eabi won't assemble floating point instructions

I am programming for a hardware project. I need an implementation of sin, cos, tan, sqrt, etc. for this project. I do not have any standard libraries available to me, and the results must retain floating point. Whenever I try to assemble the file (shown below the error output), the assembler gives me the errors
_core_math_arm.s:30: Error: selected processor does not support ARM mode `fltd f0,r3`
_core_math_arm.s:31: Error: selected processor does not support ARM mode `sindz f0,f0`
_core_math_arm.s:32: Error: selected processor does not support ARM mode `fix r3 ,f0`
I am assembling with arm-none-eabi-as -o_core_math_arm.bin _core_math_arm.s -march=armv7-a -mcpu=cortex-a5 -mfpu=vfpv4 -mfloat=hard.
All the options match the target processor that I want.
I downloaded this toolchain from https://launchpad.net/gcc-arm-embedded/+download.
The file that is being assembled is as shown below:
.cpu arm7tdmi
.fpu softvfp
.eabi_attribute 20, 1
.eabi_attribute 21, 1
.eabi_attribute 23, 3
.eabi_attribute 24, 1
.eabi_attribute 25, 1
.eabi_attribute 26, 1
.eabi_attribute 30, 6
.eabi_attribute 34, 0
.eabi_attribute 18, 4
.file "core_math.c"
.text
.align 2
.global sin
.type sin, %function
sin:
# Function supports interworking.
# args = 0, pretend = 0, frame = 8
# frame_needed = 1, uses_anonymous_args = 0
# link register save eliminated.
stmfd sp!, {r4, fp}
add fp, sp, #4
sub sp, sp, #8
str r0, [fp, #-12]
str r1, [fp, #-8]
sub r4, fp, #12
ldmia r4, {r3-r4}
# 6 "core_math.c" 1
fltd f0, r3
sindz f0, f0
fix r3, f0
# 0 "" 2
str r3, [fp, #-12]
str r4, [fp, #-8]
sub r4, fp, #12
ldmia r4, {r3-r4}
mov r0, r3
mov r1, r4
sub sp, fp, #4
# sp needed
ldmfd sp!, {r4, fp}
bx lr
.size sin, .-sin
.align 2
.global cos
.type cos, %function
cos:
# Function supports interworking.
# args = 0, pretend = 0, frame = 8
# frame_needed = 1, uses_anonymous_args = 0
# link register save eliminated.
stmfd sp!, {r4, fp}
add fp, sp, #4
sub sp, sp, #8
str r0, [fp, #-12]
str r1, [fp, #-8]
sub r4, fp, #12
ldmia r4, {r3-r4}
mov r0, r3
mov r1, r4
sub sp, fp, #4
# sp needed
ldmfd sp!, {r4, fp}
bx lr
.size cos, .-cos
.align 2
.global tan
.type tan, %function
tan:
# Function supports interworking.
# args = 0, pretend = 0, frame = 8
# frame_needed = 1, uses_anonymous_args = 0
# link register save eliminated.
stmfd sp!, {r4, fp}
add fp, sp, #4
sub sp, sp, #8
str r0, [fp, #-12]
str r1, [fp, #-8]
sub r4, fp, #12
ldmia r4, {r3-r4}
mov r0, r3
mov r1, r4
sub sp, fp, #4
# sp needed
ldmfd sp!, {r4, fp}
bx lr
.size tan, .-tan
.align 2
.global csc
.type csc, %function
csc:
# Function supports interworking.
# args = 0, pretend = 0, frame = 8
# frame_needed = 1, uses_anonymous_args = 0
# link register save eliminated.
stmfd sp!, {r4, fp}
add fp, sp, #4
sub sp, sp, #8
str r0, [fp, #-12]
str r1, [fp, #-8]
sub r4, fp, #12
ldmia r4, {r3-r4}
mov r0, r3
mov r1, r4
sub sp, fp, #4
# sp needed
ldmfd sp!, {r4, fp}
bx lr
.size csc, .-csc
.align 2
.global sec
.type sec, %function
sec:
# Function supports interworking.
# args = 0, pretend = 0, frame = 8
# frame_needed = 1, uses_anonymous_args = 0
# link register save eliminated.
stmfd sp!, {r4, fp}
add fp, sp, #4
sub sp, sp, #8
str r0, [fp, #-12]
str r1, [fp, #-8]
sub r4, fp, #12
ldmia r4, {r3-r4}
mov r0, r3
mov r1, r4
sub sp, fp, #4
# sp needed
ldmfd sp!, {r4, fp}
bx lr
.size sec, .-sec
.align 2
.global cot
.type cot, %function
cot:
# Function supports interworking.
# args = 0, pretend = 0, frame = 8
# frame_needed = 1, uses_anonymous_args = 0
# link register save eliminated.
stmfd sp!, {r4, fp}
add fp, sp, #4
sub sp, sp, #8
str r0, [fp, #-12]
str r1, [fp, #-8]
sub r4, fp, #12
ldmia r4, {r3-r4}
mov r0, r3
mov r1, r4
sub sp, fp, #4
# sp needed
ldmfd sp!, {r4, fp}
bx lr
.size cot, .-cot
.align 2
.global sqrt
.type sqrt, %function
sqrt:
# Function supports interworking.
# args = 0, pretend = 0, frame = 8
# frame_needed = 1, uses_anonymous_args = 0
# link register save eliminated.
stmfd sp!, {r4, fp}
add fp, sp, #4
sub sp, sp, #8
str r0, [fp, #-12]
str r1, [fp, #-8]
sub r4, fp, #12
ldmia r4, {r3-r4}
mov r0, r3
mov r1, r4
sub sp, fp, #4
# sp needed
ldmfd sp!, {r4, fp}
bx lr
.size sqrt, .-sqrt
.align 2
.global invsqrt
.type invsqrt, %function
invsqrt:
# Function supports interworking.
# args = 0, pretend = 0, frame = 8
# frame_needed = 1, uses_anonymous_args = 0
# link register save eliminated.
stmfd sp!, {r4, fp}
add fp, sp, #4
sub sp, sp, #8
str r0, [fp, #-12]
str r1, [fp, #-8]
sub r4, fp, #12
ldmia r4, {r3-r4}
mov r0, r3
mov r1, r4
sub sp, fp, #4
# sp needed
ldmfd sp!, {r4, fp}
bx lr
.size invsqrt, .-invsqrt
.global __aeabi_dadd
.align 2
.global pow
.type pow, %function
pow:
# Function supports interworking.
# args = 0, pretend = 0, frame = 16
# frame_needed = 1, uses_anonymous_args = 0
stmfd sp!, {r4, fp, lr}
add fp, sp, #8
sub sp, sp, #20
str r0, [fp, #-20]
str r1, [fp, #-16]
str r2, [fp, #-28]
str r3, [fp, #-24]
sub r1, fp, #20
ldmia r1, {r0-r1}
sub r3, fp, #28
ldmia r3, {r2-r3}
bl __aeabi_dadd
mov r3, r0
mov r4, r1
mov r0, r3
mov r1, r4
sub sp, fp, #8
# sp needed
ldmfd sp!, {r4, fp, lr}
bx lr
.size pow, .-pow
.align 2
.global ln
.type ln, %function
ln:
# Function supports interworking.
# args = 0, pretend = 0, frame = 8
# frame_needed = 1, uses_anonymous_args = 0
# link register save eliminated.
stmfd sp!, {r4, fp}
add fp, sp, #4
sub sp, sp, #8
str r0, [fp, #-12]
str r1, [fp, #-8]
sub r4, fp, #12
ldmia r4, {r3-r4}
mov r0, r3
mov r1, r4
sub sp, fp, #4
# sp needed
ldmfd sp!, {r4, fp}
bx lr
.size ln, .-ln
.align 2
.global log10
.type log10, %function
log10:
# Function supports interworking.
# args = 0, pretend = 0, frame = 8
# frame_needed = 1, uses_anonymous_args = 0
# link register save eliminated.
stmfd sp!, {r4, fp}
add fp, sp, #4
sub sp, sp, #8
str r0, [fp, #-12]
str r1, [fp, #-8]
sub r4, fp, #12
ldmia r4, {r3-r4}
mov r0, r3
mov r1, r4
sub sp, fp, #4
# sp needed
ldmfd sp!, {r4, fp}
bx lr
.size log10, .-log10
.align 2
.global logn
.type logn, %function
logn:
# Function supports interworking.
# args = 0, pretend = 0, frame = 16
# frame_needed = 1, uses_anonymous_args = 0
stmfd sp!, {r4, fp, lr}
add fp, sp, #8
sub sp, sp, #20
str r0, [fp, #-20]
str r1, [fp, #-16]
str r2, [fp, #-28]
str r3, [fp, #-24]
sub r1, fp, #20
ldmia r1, {r0-r1}
sub r3, fp, #28
ldmia r3, {r2-r3}
bl __aeabi_dadd
mov r3, r0
mov r4, r1
mov r0, r3
mov r1, r4
sub sp, fp, #8
# sp needed
ldmfd sp!, {r4, fp, lr}
bx lr
.size logn, .-logn
.ident "GCC: (GNU Tools for ARM Embedded Processors) 4.9.3 20141119 (release) [ARM/embedded-4_9-branch revision 218278]"
This file IS generated by the -S option of arm-none-eabi-gcc.
The assembler does not seem to recognize these as valid FPU instructions.
Thanks in advance.
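For what it's worth, fltd, sindz and fix (and the f0 register they operate on) are mnemonics from the old FPA floating-point architecture, not VFP, so an assembler targeting vfpv4 will reject them; VFP has no sine instruction at all, though it does have a hardware square root. As a rough illustration only (vfp_sqrt is a made-up name, and it assumes the soft-float calling convention implied by the file's .fpu softvfp, with the double passed and returned in r0/r1), the body of the sqrt routine could be expressed with VFP instructions like this:
.syntax unified
.cpu cortex-a5
.fpu vfpv4
.text
.align 2
.global vfp_sqrt
.type vfp_sqrt, %function
vfp_sqrt:
vmov d0, r0, r1      @ move the incoming double from r0:r1 into d0
vsqrt.f64 d0, d0     @ VFP hardware square root
vmov r0, r1, d0      @ move the result back to r0:r1
bx lr
.size vfp_sqrt, .-vfp_sqrt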

Hardfault on STM32F030 startup, __libc_init_array

I'm trying to get an STM32Cube project compiled using arm-none-eabi-gcc and a Makefile.
I have specified:
CFLAGS = -mthumb\
-march=armv6-m\
-mlittle-endian\
-mcpu=cortex-m0\
-ffunction-sections\
-fdata-sections\
-MMD\
-std=c99\
-Wall\
-g\
-D$(PART)\
-c
and:
LDFLAGS = -Wl,--gc-sections\
-Wl,-T$(LDFILE)\
-Wl,-v
The firmware builds without problems, but when I boot the MCU I get stuck in the hard fault handler.
The stack trace is:
#0 HardFault_Handler () at ./Src/main.c:156
#1 <signal handler called>
#2 0x0800221c in ____libc_init_array_from_thumb ()
#3 0x080021be in LoopFillZerobss () at Src/startup_stm32f030x8.s:103
#4 0x080021be in LoopFillZerobss () at Src/startup_stm32f030x8.s:103
Backtrace stopped: previous frame identical to this frame (corrupt stack?)
and I go straight to the hard fault when stepping to bl __libc_init_array in the startup file.
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system initialization function.*/
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
Any ideas what could be wrong?
My arm-none-eabi-gcc version is 4.8.4 20140725 (release)
[edit]
The disassembly of the calls
08002218 <____libc_init_array_from_thumb>:
8002218: 4778 bx pc
800221a: 46c0 nop ; (mov r8, r8)
800221c: eafff812 b 800026c <__libc_init_array>
0800026c <__libc_init_array>:
800026c: e92d4070 push {r4, r5, r6, lr}
8000270: e59f506c ldr r5, [pc, #108] ; 80002e4 <__libc_init_array+0x78>
8000274: e59f606c ldr r6, [pc, #108] ; 80002e8 <__libc_init_array+0x7c>
8000278: e0656006 rsb r6, r5, r6
800027c: e1b06146 asrs r6, r6, #2
8000280: 12455004 subne r5, r5, #4
8000284: 13a04000 movne r4, #0
8000288: 0a000005 beq 80002a4 <__libc_init_array+0x38>
800028c: e2844001 add r4, r4, #1
8000290: e5b53004 ldr r3, [r5, #4]!
8000294: e1a0e00f mov lr, pc
8000298: e12fff13 bx r3
800029c: e1560004 cmp r6, r4
80002a0: 1afffff9 bne 800028c <__libc_init_array+0x20>
80002a4: e59f5040 ldr r5, [pc, #64] ; 80002ec <__libc_init_array+0x80>
80002a8: e59f6040 ldr r6, [pc, #64] ; 80002f0 <__libc_init_array+0x84>
80002ac: e0656006 rsb r6, r5, r6
80002b0: eb0007ca bl 80021e0 <_init>
80002b4: e1b06146 asrs r6, r6, #2
80002b8: 12455004 subne r5, r5, #4
80002bc: 13a04000 movne r4, #0
80002c0: 0a000005 beq 80002dc <__libc_init_array+0x70>
80002c4: e2844001 add r4, r4, #1
80002c8: e5b53004 ldr r3, [r5, #4]!
80002cc: e1a0e00f mov lr, pc
80002d0: e12fff13 bx r3
80002d4: e1560004 cmp r6, r4
80002d8: 1afffff9 bne 80002c4 <__libc_init_array+0x58>
80002dc: e8bd4070 pop {r4, r5, r6, lr}
80002e0: e12fff1e bx lr
80002e4: 08002258 .word 0x08002258
80002e8: 08002258 .word 0x08002258
80002ec: 08002258 .word 0x08002258
80002f0: 08002260 .word 0x08002260
[edit 2]
The register values from gdb:
(gdb) info reg
r0 0x20000000 536870912
r1 0x1 1
r2 0x0 0
r3 0x40021000 1073876992
r4 0xffffffff -1
r5 0xffffffff -1
r6 0xffffffff -1
r7 0x20001fd0 536879056
r8 0xffffffff -1
r9 0xffffffff -1
r10 0xffffffff -1
r11 0xffffffff -1
r12 0xffffffff -1
sp 0x20001fd0 0x20001fd0
lr 0xfffffff9 -7
pc 0x800067c 0x800067c <HardFault_Handler+4>
xPSR 0x61000003 1627389955
That __libc_init_array is ARM code, not Thumb, hence the M0 will fall over trying to execute some nonsense it doesn't understand (actually, it never quite gets there since it faults on the attempt to switch to ARM state in the bx, but hey, same difference...)
You'll need to make sure you use pure-Thumb versions of any libraries - a Cortex-M-specific toolchain might be a better bet than a generic ARM one. If you have a multilib toolchain, I'd suggest checking the output of arm-none-eabi-gcc --print-multi-lib to make sure you've specified all the relevant options to get proper Cortex-M libraries, and if you're using a separate link step, make sure you invoke it with LD=arm-none-eabi-gcc (plus the relevant multilib options), rather than LD=arm-none-eabi-ld.

Resources