author		Bert Lange <b.lange@fzd.de>	2011-11-07 14:55:53 +0100
committer	Bert Lange <b.lange@fzd.de>	2011-11-07 14:55:53 +0100
commit		898dc855c3d93a237edfbf7686565e35a0e67f3e (patch)
tree		351a5edb6107a4d6262780705aa1790fe3e36f3e
parent		83f5520b62663857ce8e4813ee8de2bb3cbd07b0 (diff)
download	zpu-898dc855c3d93a237edfbf7686565e35a0e67f3e.zip
		zpu-898dc855c3d93a237edfbf7686565e35a0e67f3e.tar.gz
initial commit
-rw-r--r--	hw_s3estarter/software/test/crt0.S		963
-rw-r--r--	hw_s3estarter/software/test/crt0_phi.S		179
-rw-r--r--	hw_s3estarter/software/test/linker_script.x	193
-rw-r--r--	hw_s3estarter/software/test/test.ld		 44
4 files changed, 1379 insertions, 0 deletions
diff --git a/hw_s3estarter/software/test/crt0.S b/hw_s3estarter/software/test/crt0.S
new file mode 100644
index 0000000..66af76d
--- /dev/null
+++ b/hw_s3estarter/software/test/crt0.S
@@ -0,0 +1,963 @@
+/* Startup code for ZPU
+ Copyright (C) 2005 Free Software Foundation, Inc.
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 2, or (at your option) any
+later version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file with other programs, and to distribute
+those programs without any restriction coming from the use of this
+file. (The General Public License restrictions do apply in other
+respects; for example, they cover modification of the file, and
+distribution when not linked into another program.)
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+ .file "crt0.S"
+
+
+
+
+; .section ".fixed_vectors","ax"
+; KLUDGE!!! we remove the executable bit to avoid relaxation
+ .section ".fixed_vectors","a"
+
+; DANGER!!!!
+; we need to align these code sections to 32 bytes, which
+; means we must not use any assembler instructions that are relaxed
+; at linker time
+; DANGER!!!!
+
+ .macro fixedim value
+ im \value
+ .endm
+
+ .macro jsr address
+ im _memreg ; save R0
+ load
+ im _memreg+4 ; save R1
+ load
+ im _memreg+8 ; save R2
+ load
+ fixedim \address
+ call
+ im _memreg+8
+ store ; restore R2
+ im _memreg+4
+ store ; restore R1
+ im _memreg
+ store ; restore R0
+ .endm
+
+
+ .macro jmp address
+ fixedim \address
+ poppc
+ .endm
+
+
+ .macro fast_neg
+ not
+ im 1
+ add
+ .endm
+
+ .macro cimpl funcname
+ ; save R0
+ im _memreg
+ load
+
+ ; save R1
+ im _memreg+4
+ load
+
+ ; save R2
+ im _memreg+8
+ load
+
+ loadsp 20
+ loadsp 20
+
+ fixedim \funcname
+ call
+
+ ; destroy arguments on stack
+ storesp 0
+ storesp 0
+
+ im _memreg
+ load
+
+ ; poke the result into the right slot
+ storesp 24
+
+ ; restore R2
+ im _memreg+8
+ store
+
+ ; restore R1
+ im _memreg+4
+ store
+
+ ; restore r0
+ im _memreg
+ store
+
+
+ storesp 4
+ poppc
+ .endm
+
+ .macro mult1bit
+ ; create mask of lowest bit in A
+ loadsp 8 ; A
+ im 1
+ and
+ im -1
+ add
+ not
+ loadsp 8 ; B
+ and
+ add ; accumulate in C
+
+ ; shift B left 1 bit
+ loadsp 4 ; B
+ addsp 0
+ storesp 8 ; B
+
+ ; shift A right 1 bit
+ loadsp 8 ; A
+ flip
+ addsp 0
+ flip
+ storesp 12 ; A
+ .endm
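
The mult1bit macro is one step of a classic shift-and-add multiply: conditionally accumulate B when the low bit of A is set, then shift B left and A right. The _slowmultImpl routine later in this file loops it until A is exhausted. A C sketch of the whole loop, for reference:

    #include <stdint.h>

    /* Shift-and-add multiply, one low bit of a per round,
       as mult1bit/_slowmultImpl compute it. */
    uint32_t slow_mult(uint32_t a, uint32_t b)
    {
        uint32_t c = 0;                      /* accumulator */
        while (a != 0) {                     /* the neqbranch cutoff in _slowmultImpl */
            uint32_t mask = ~((a & 1) - 1);  /* all-ones iff the low bit of a is set */
            c += b & mask;                   /* branch-free conditional add */
            b <<= 1;                         /* loadsp/addsp 0/storesp: b += b */
            a >>= 1;                         /* flip/addsp 0/flip: logical right shift */
        }
        return c;
    }
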
+
+
+
+/* vectors */
+ .balign 32,0
+# offset 0x0000 0000
+ .globl _start
+ .globl _memreg
+ .weak _memreg
+_start:
+_memreg:
+ ; intSp must be 0 when we jump to _premain
+
+ im ZPU_ID
+ loadsp 0
+ im _cpu_config
+ store
+ config
+ jmp _premain
+
+
+
+ .balign 32,0
+# offset 0x0000 0020
+ .globl _zpu_interrupt_vector
+_zpu_interrupt_vector:
+ jmp ___zpu_interrupt_vector
+
+/* instruction emulation code */
+
+# opcode 34
+# offset 0x0000 0040
+ .balign 32,0
+_loadh:
+ loadsp 4
+ ; by not masking out bit 0, we cause a memory access error
+ ; on unaligned access
+ im ~0x2
+ and
+ load
+
+ ; mult 8
+ loadsp 8
+ im 3
+ and
+ fast_neg
+ im 2
+ add
+ im 3
+ ashiftleft
+ ; shift right addr&3 * 8
+ lshiftright
+ im 0xffff
+ and
+ storesp 8
+
+ poppc
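
In C terms, _loadh fetches the enclosing word and shifts the wanted halfword down; the shift count (2 - (addr & 3)) * 8 implies big-endian byte lanes. A sketch, with load_word standing in for the word-wide bus access (a hypothetical helper, not part of this file):

    #include <stdint.h>

    extern uint32_t load_word(uint32_t addr);  /* hypothetical word-wide bus read */

    uint32_t loadh(uint32_t addr)
    {
        /* bit 0 is deliberately left set so an odd address faults;
           addr & 3 is therefore 0 or 2 for any access that gets here */
        uint32_t word  = load_word(addr & ~(uint32_t)0x2);
        uint32_t shift = (2 - (addr & 3)) * 8;   /* 16 for offset 0, 0 for offset 2 */
        return (word >> shift) & 0xffff;
    }
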
+
+# opcode 35
+# offset 0x0000 0060
+ .balign 32,0
+_storeh:
+ loadsp 4
+ ; by not masking out bit 0, we cause a memory access error
+ ; on unaligned access
+ im ~0x2
+ and
+ load
+
+ ; mask
+ im 0xffff
+ loadsp 12
+ im 3
+ and
+ fast_neg
+ im 2
+ add
+ im 3
+ ashiftleft
+ ashiftleft
+ not
+
+ and
+
+ loadsp 12
+ im 0xffff
+
+ nop
+
+ fixedim _storehtail
+ poppc
+
+
+# opcode 36
+# offset 0x0000 0080
+ .balign 32,0
+_lessthan:
+ loadsp 8
+ fast_neg
+ loadsp 8
+ add
+
+ ; DANGER!!!!
+	; 0x80000000 will overflow when negated, so we must mask
+	; the result above against the positive-versus-negative
+	; comparison case
+ loadsp 12
+ loadsp 12
+ not
+ and
+ not
+ and
+
+
+	; handle the case where we compare a negative number
+	; against a positive one. This can underflow. E.g. consider 0x80000000 < 0x1000
+ loadsp 12
+ not
+ loadsp 12
+ and
+
+ or
+
+
+
+ flip
+ im 1
+ and
+
+
+ storesp 12
+ storesp 4
+ poppc
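
_lessthan takes the sign of a - b but patches up the two mixed-sign cases, where the subtraction can overflow, exactly as the comments above describe. An equivalent branch-free C sketch:

    #include <stdint.h>

    uint32_t signed_less_than(int32_t a, int32_t b)
    {
        uint32_t ua = (uint32_t)a, ub = (uint32_t)b;
        uint32_t t = ((ua - ub) & ~(~ua & ub))   /* drop the diff's sign for a >= 0, b < 0 */
                   | (ua & ~ub);                 /* force the sign for a < 0, b >= 0 */
        return t >> 31;                          /* the flip / im 1 / and sequence above */
    }
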
+
+
+# opcode 37
+# offset 0x0000 00a0
+ .balign 32,0
+_lessthanorequal:
+ loadsp 8
+ loadsp 8
+ lessthan
+ loadsp 12
+ loadsp 12
+ eq
+ or
+
+ storesp 12
+ storesp 4
+ poppc
+
+
+# opcode 38
+# offset 0x0000 00c0
+ .balign 32,0
+_ulessthan:
+ ; fish up arguments
+ loadsp 4
+ loadsp 12
+
+	/* low: -1 if the low-bit diff is negative, 0 otherwise: neg(not(x&1) and (y&1))
+	   x&1  y&1  neg(not(x&1) and (y&1))
+	    1    1     0
+	    1    0     0
+	    0    1    -1
+	    0    0     0
+
+	*/
+ loadsp 4
+ not
+ loadsp 4
+ and
+ im 1
+ and
+ neg
+
+
+ /* high: upper 31-bit diff is only wrong when diff is 0 and low=-1
+ high=x>>1 - y>>1 + low
+
+ extremes
+
+ 0000 - 1111:
+ low= neg(not 0 and 1) = 1111 (-1)
+ high=000+ neg(111) +low = 000 + 1001 + low = 1000
+ OK
+
+ 1111 - 0000
+ low=neg(not 1 and 0) = 0
+ high=111+neg(000) + low = 0111
+ OK
+
+
+ */
+ loadsp 8
+
+ flip
+ addsp 0
+ flip
+
+ loadsp 8
+
+ flip
+ addsp 0
+ flip
+
+ sub
+
+ ; if they are equal, then the last bit decides...
+ add
+
+ /* test if negative: result = flip(diff) & 1 */
+ flip
+ im 1
+ and
+
+ ; destroy a&b which are on stack
+ storesp 4
+ storesp 4
+
+ storesp 12
+ storesp 4
+ poppc
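
So the routine reduces the unsigned compare to a subtraction of the upper 31 bits, which cannot overflow, with the low bits folded in as a -1 borrow. As a C sketch:

    #include <stdint.h>

    uint32_t unsigned_less_than(uint32_t a, uint32_t b)
    {
        uint32_t low  = -((~a & b) & 1);           /* -1 only when the low bits say a < b */
        uint32_t high = (a >> 1) - (b >> 1) + low; /* 31-bit halves: no overflow possible */
        return high >> 31;                         /* sign bit = (a < b) */
    }
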
+
+# opcode 39
+# offset 0x0000 00e0
+ .balign 32,0
+_ulessthanorequal:
+ loadsp 8
+ loadsp 8
+ ulessthan
+ loadsp 12
+ loadsp 12
+ eq
+ or
+
+ storesp 12
+ storesp 4
+ poppc
+
+
+# opcode 40
+# offset 0x0000 0100
+ .balign 32,0
+ .globl _swap
+_swap:
+ breakpoint ; tbd
+
+# opcode 41
+# offset 0x0000 0120
+ .balign 32,0
+_slowmult:
+ im _slowmultImpl
+ poppc
+
+# opcode 42
+# offset 0x0000 0140
+ .balign 32,0
+_lshiftright:
+ loadsp 8
+ flip
+
+ loadsp 8
+ ashiftleft
+ flip
+
+ storesp 12
+ storesp 4
+
+ poppc
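
_lshiftright builds the logical right shift it lacks from the left shift it has, by bit-reversing around it with flip. A C sketch, with flip32 as a plain bit reversal standing in for the flip instruction:

    #include <stdint.h>

    static uint32_t flip32(uint32_t x)             /* the ZPU flip instruction */
    {
        x = ((x & 0x55555555u) << 1) | ((x >> 1) & 0x55555555u);
        x = ((x & 0x33333333u) << 2) | ((x >> 2) & 0x33333333u);
        x = ((x & 0x0f0f0f0fu) << 4) | ((x >> 4) & 0x0f0f0f0fu);
        x = ((x & 0x00ff00ffu) << 8) | ((x >> 8) & 0x00ff00ffu);
        return (x << 16) | (x >> 16);
    }

    uint32_t lshiftright(uint32_t x, uint32_t n)
    {
        return flip32(flip32(x) << (n & 0x1f));    /* reverse, shift left, reverse */
    }
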
+
+
+# opcode 43
+# offset 0x0000 0160
+ .balign 32,0
+_ashiftleft:
+ loadsp 8
+
+ loadsp 8
+ im 0x1f
+ and
+ fast_neg
+ im _ashiftleftEnd
+ add
+ poppc
+
+
+
+# opcode 44
+# offset 0x0000 0180
+ .balign 32,0
+_ashiftright:
+ loadsp 8
+ loadsp 8
+ lshiftright
+
+ ; handle signed value
+ im -1
+ loadsp 12
+ im 0x1f
+ and
+ lshiftright
+	not	; now we have an integer on the stack with the sign
+		; bits in the right position
+
+	; mask these bits with the sign bit.
+ loadsp 16
+ not
+ flip
+ im 1
+ and
+ im -1
+ add
+
+ and
+
+	; stuff in the sign bits...
+ or
+
+ ; store result into correct stack slot
+ storesp 12
+
+ ; move up return value
+ storesp 4
+ poppc
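
_ashiftright likewise derives the arithmetic shift from the logical one: shift logically, then OR the vacated high bits back in when the sign bit was set. In C:

    #include <stdint.h>

    uint32_t ashiftright(uint32_t x, uint32_t n)
    {
        n &= 0x1f;
        uint32_t signbits = ~(0xffffffffu >> n);   /* bits vacated by the shift */
        uint32_t negmask  = -(x >> 31);            /* all-ones when x is negative */
        return (x >> n) | (signbits & negmask);
    }
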
+
+# opcode 45
+# offset 0x0000 01a0
+ .balign 32,0
+_call:
+ ; fn
+ loadsp 4
+
+ ; return address
+ loadsp 4
+
+ ; store return address
+ storesp 12
+
+ ; fn to call
+ storesp 4
+
+ pushsp ; flush internal stack
+ popsp
+
+ poppc
+
+_storehtail:
+
+ and
+ loadsp 12
+ im 3
+ and
+ fast_neg
+ im 2
+ add
+ im 3
+ ashiftleft
+ nop
+ ashiftleft
+
+ or
+
+ loadsp 8
+ im ~0x3
+ and
+
+ store
+
+ storesp 4
+ storesp 4
+ poppc
+
+
+# opcode 46
+# offset 0x0000 01c0
+ .balign 32,0
+_eq:
+ loadsp 8
+ fast_neg
+ loadsp 8
+ add
+
+ not
+ loadsp 0
+ im 1
+ add
+ not
+ and
+ flip
+ im 1
+ and
+
+ storesp 12
+ storesp 4
+ poppc
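
The zero test in _eq is branch-free: for d = a - b, both ~d and d - 1 have their sign bit set only when d is zero, so the MSB of their AND is the equality flag. As a C sketch:

    #include <stdint.h>

    uint32_t equals(uint32_t a, uint32_t b)
    {
        uint32_t d = a - b;
        return (~d & (d - 1)) >> 31;   /* 1 iff d == 0 */
    }
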
+
+# opcode 47
+# offset 0x0000 01e0
+ .balign 32,0
+_neq:
+ loadsp 8
+ fast_neg
+ loadsp 8
+ add
+
+ not
+ loadsp 0
+ im 1
+ add
+ not
+ and
+ flip
+
+ not
+
+ im 1
+ and
+
+ storesp 12
+ storesp 4
+ poppc
+
+
+# opcode 48
+# offset 0x0000 0200
+ .balign 32,0
+_neg:
+ loadsp 4
+ not
+ im 1
+ add
+ storesp 8
+
+ poppc
+
+
+# opcode 49
+# offset 0x0000 0220
+ .balign 32,0
+_sub:
+ loadsp 8
+ loadsp 8
+ fast_neg
+ add
+ storesp 12
+
+ storesp 4
+
+ poppc
+
+
+# opcode 50
+# offset 0x0000 0240
+ .balign 32,0
+_xor:
+ loadsp 8
+ not
+ loadsp 8
+ and
+
+ loadsp 12
+ loadsp 12
+ not
+ and
+
+ or
+
+ storesp 12
+ storesp 4
+ poppc
+
+# opcode 51
+# offset 0x0000 0260
+ .balign 32,0
+_loadb:
+ loadsp 4
+ im ~0x3
+ and
+ load
+
+ loadsp 8
+ im 3
+ and
+ fast_neg
+ im 3
+ add
+ ; x8
+ addsp 0
+ addsp 0
+ addsp 0
+
+ lshiftright
+
+ im 0xff
+ and
+ storesp 8
+
+ poppc
+
+
+# opcode 52
+# offset 0x0000 0280
+ .balign 32,0
+_storeb:
+ loadsp 4
+ im ~0x3
+ and
+ load
+
+ ; mask away destination
+ im _mask
+ loadsp 12
+ im 3
+ and
+ addsp 0
+ addsp 0
+ add
+ load
+
+ and
+
+
+ im _storebtail
+ poppc
+
+# opcode 53
+# offset 0x0000 02a0
+ .balign 32,0
+_div:
+ jmp ___div
+
+# opcode 54
+# offset 0x0000 02c0
+ .balign 32,0
+_mod:
+ jmp ___mod
+
+# opcode 55
+# offset 0x0000 02e0
+ .balign 32,0
+ .globl _eqbranch
+_eqbranch:
+ loadsp 8
+
+ ; eq
+
+ not
+ loadsp 0
+ im 1
+ add
+ not
+ and
+ flip
+ im 1
+ and
+
+ ; mask
+ im -1
+ add
+ loadsp 0
+ storesp 16
+
+ ; no branch address
+ loadsp 4
+
+ and
+
+ ; fetch boolean & neg mask
+ loadsp 12
+ not
+
+ ; calc address & mask for branch
+ loadsp 8
+ loadsp 16
+ add
+ ; subtract 1 to find PC of branch instruction
+ im -1
+ add
+
+ and
+
+ or
+
+ storesp 4
+ storesp 4
+ storesp 4
+ poppc
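
_eqbranch (and _neqbranch below) then picks the next PC without branching: the boolean is turned into an all-ones/all-zeros mask that blends the fall-through address with the branch target. Roughly, in C:

    #include <stdint.h>

    uint32_t next_pc(uint32_t taken, uint32_t branch_pc, uint32_t fallthrough_pc)
    {
        uint32_t mask = taken - 1;     /* taken is 0 or 1: 1 -> zeros, 0 -> all-ones */
        return (branch_pc & ~mask) | (fallthrough_pc & mask);
    }
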
+
+
+# opcode 56
+# offset 0x0000 0300
+ .balign 32,0
+ .globl _neqbranch
+_neqbranch:
+ loadsp 8
+
+ ; neq
+
+ not
+ loadsp 0
+ im 1
+ add
+ not
+ and
+ flip
+
+ not
+
+ im 1
+ and
+
+ ; mask
+ im -1
+ add
+ loadsp 0
+ storesp 16
+
+ ; no branch address
+ loadsp 4
+
+ and
+
+ ; fetch boolean & neg mask
+ loadsp 12
+ not
+
+ ; calc address & mask for branch
+ loadsp 8
+ loadsp 16
+ add
+ ; find address of branch instruction
+ im -1
+ add
+
+ and
+
+ or
+
+ storesp 4
+ storesp 4
+ storesp 4
+ poppc
+
+# opcode 57
+# offset 0x0000 0320
+ .balign 32,0
+ .globl _poppcrel
+_poppcrel:
+ add
+ ; address of poppcrel
+ im -1
+ add
+ poppc
+
+# opcode 58
+# offset 0x0000 0340
+ .balign 32,0
+ .globl _config
+_config:
+ im 1
+ nop
+ im _hardware
+ store
+ storesp 4
+ poppc
+
+# opcode 59
+# offset 0x0000 0360
+ .balign 32,0
+_pushpc:
+ loadsp 4
+ im 1
+ add
+ storesp 8
+ poppc
+
+# opcode 60
+# offset 0x0000 0380
+ .balign 32,0
+_syscall_emulate:
+ .byte 0
+
+# opcode 61
+# offset 0x0000 03a0
+ .balign 32,0
+_pushspadd:
+ pushsp
+ im 4
+ add
+ loadsp 8
+ addsp 0
+ addsp 0
+ add
+ storesp 8
+
+ poppc
+
+# opcode 62
+# offset 0x0000 03c0
+ .balign 32,0
+_halfmult:
+ breakpoint
+
+# opcode 63
+# offset 0x0000 03e0
+ .balign 32,0
+_callpcrel:
+ loadsp 4
+ loadsp 4
+ add
+ im -1
+ add
+ loadsp 4
+
+ storesp 12 ; return address
+ storesp 4
+ pushsp ; this will flush the internal stack.
+ popsp
+ poppc
+
+ .text
+
+
+
+
+_ashiftleftBegin:
+ .rept 0x1f
+ addsp 0
+ .endr
+_ashiftleftEnd:
+ storesp 12
+ storesp 4
+ poppc
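
_ashiftleft above implements the variable shift with a computed jump: it lands (n & 0x1f) one-byte addsp 0 instructions before _ashiftleftEnd, so exactly n doublings execute before control reaches the epilogue here. The C equivalent of the ladder is simply:

    #include <stdint.h>

    uint32_t ashiftleft(uint32_t x, uint32_t n)
    {
        for (n &= 0x1f; n != 0; n--)
            x += x;                    /* each addsp 0 doubles the top of stack */
        return x;
    }
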
+
+_storebtail:
+ loadsp 12
+ im 0xff
+ and
+ loadsp 12
+ im 3
+ and
+
+ fast_neg
+ im 3
+ add
+ ; x8
+ addsp 0
+ addsp 0
+ addsp 0
+
+ ashiftleft
+
+ or
+
+ loadsp 8
+ im ~0x3
+ and
+
+ store
+
+ storesp 4
+ storesp 4
+ poppc
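
_storeb/_storebtail together do a read-modify-write byte store on the word-wide bus: clear the destination byte lane using the _mask table (defined in .data below), then OR the new byte in. A C sketch with hypothetical load_word/store_word bus helpers:

    #include <stdint.h>

    extern uint32_t load_word(uint32_t addr);          /* hypothetical bus read */
    extern void store_word(uint32_t addr, uint32_t v); /* hypothetical bus write */

    static const uint32_t byte_mask[4] = {             /* the _mask table: big-endian lanes */
        0x00ffffff, 0xff00ffff, 0xffff00ff, 0xffffff00
    };

    void storeb(uint32_t addr, uint32_t value)
    {
        uint32_t word  = load_word(addr & ~(uint32_t)3) & byte_mask[addr & 3];
        uint32_t shift = (3 - (addr & 3)) * 8;         /* lane 0 sits in bits 31..24 */
        store_word(addr & ~(uint32_t)3, word | ((value & 0xff) << shift));
    }
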
+
+
+
+
+; NB! this is not an EMULATE instruction. It is a varargs fn.
+ .globl _syscall
+_syscall:
+ syscall
+ poppc
+
+_slowmultImpl:
+
+ loadsp 8 ; A
+ loadsp 8 ; B
+ im 0 ; C
+
+.LmoreMult:
+ mult1bit
+
+ ; cutoff
+ loadsp 8
+ .byte (.LmoreMult-.Lbranch)&0x7f+0x80
+.Lbranch:
+ neqbranch
+
+ storesp 4
+ storesp 4
+ storesp 12
+ storesp 4
+ poppc
+
+___mod:
+ cimpl __modsi3
+___div:
+ cimpl __divsi3
+___zpu_interrupt_vector:
+ jsr _zpu_interrupt
+ poppc
+
+ .data
+ .balign 4,0
+_mask:
+ .long 0x00ffffff
+ .long 0xff00ffff
+ .long 0xffff00ff
+ .long 0xffffff00
+
+
+ .globl _hardware
+_hardware:
+ .long 0
+ .globl _cpu_config
+_cpu_config:
+ .long 0
+
diff --git a/hw_s3estarter/software/test/crt0_phi.S b/hw_s3estarter/software/test/crt0_phi.S
new file mode 100644
index 0000000..240edf3
--- /dev/null
+++ b/hw_s3estarter/software/test/crt0_phi.S
@@ -0,0 +1,179 @@
+/* Startup code for ZPU
+ Copyright (C) 2005 Free Software Foundation, Inc.
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 2, or (at your option) any
+later version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file with other programs, and to distribute
+those programs without any restriction coming from the use of this
+file. (The General Public License restrictions do apply in other
+respects; for example, they cover modification of the file, and
+distribution when not linked into another program.)
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+ .file "crt0.S"
+
+
+
+
+; .section ".fixed_vectors","ax"
+; KLUDGE!!! we remove the executable bit to avoid relaxation
+ .section ".fixed_vectors","a"
+
+; DANGER!!!!
+; we need to align these code sections to 32 bytes, which
+; means we must not use any assembler instructions that are relaxed
+; at linker time
+; DANGER!!!!
+
+ .macro fixedim value
+ im \value
+ .endm
+
+ .macro jsr address
+
+ im 0 ; save R0
+ load
+ im 4 ; save R1
+ load
+ im 8 ; save R2
+ load
+
+ fixedim \address
+ call
+
+ im 8
+ store ; restore R2
+ im 4
+ store ; restore R1
+ im 0
+ store ; restore R0
+ .endm
+
+
+ .macro jmp address
+ fixedim \address
+ poppc
+ .endm
+
+
+ .macro fast_neg
+ not
+ im 1
+ add
+ .endm
+
+ .macro cimpl funcname
+ ; save R0
+ im 0
+ load
+
+ ; save R1
+ im 4
+ load
+
+ ; save R2
+ im 8
+ load
+
+ loadsp 20
+ loadsp 20
+
+ fixedim \funcname
+ call
+
+ ; destroy arguments on stack
+ storesp 0
+ storesp 0
+
+ im 0
+ load
+
+ ; poke the result into the right slot
+ storesp 24
+
+ ; restore R2
+ im 8
+ store
+
+ ; restore R1
+ im 4
+ store
+
+ ; restore r0
+ im 0
+ store
+
+
+ storesp 4
+ poppc
+ .endm
+
+ .macro mult1bit
+ ; create mask of lowest bit in A
+ loadsp 8 ; A
+ im 1
+ and
+ im -1
+ add
+ not
+ loadsp 8 ; B
+ and
+ add ; accumulate in C
+
+ ; shift B left 1 bit
+ loadsp 4 ; B
+ addsp 0
+ storesp 8 ; B
+
+ ; shift A right 1 bit
+ loadsp 8 ; A
+ flip
+ addsp 0
+ flip
+ storesp 12 ; A
+ .endm
+
+
+
+/* vectors */
+ .balign 32,0
+# offset 0x0000 0000
+ .globl _start
+_start:
+ ; intSp must be 0 when we jump to _premain
+
+ im ZPU_ID
+ loadsp 0
+ im _cpu_config
+ store
+ config
+ /*jmp _premain BLa*/
+ jmp main
+
+
+
+/* instruction emulation code */
+
+ .data
+
+
+ .globl _hardware
+_hardware:
+ .long 0
+ .globl _cpu_config
+_cpu_config:
+ .long 0
+
diff --git a/hw_s3estarter/software/test/linker_script.x b/hw_s3estarter/software/test/linker_script.x
new file mode 100644
index 0000000..f704521
--- /dev/null
+++ b/hw_s3estarter/software/test/linker_script.x
@@ -0,0 +1,193 @@
+/* Default linker script, for normal executables */
+OUTPUT_FORMAT("elf32-zpu", "elf32-zpu",
+ "elf32-zpu")
+OUTPUT_ARCH(zpu)
+SEARCH_DIR(.);
+ENTRY(_start)
+SEARCH_DIR("/home/oyvind/toolchain/toolchain/build/../install/zpu-elf/lib");
+/* Do we need any of these for elf?
+ __DYNAMIC = 0; */
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ PROVIDE (__executable_start = 0x0); . = 0x0;
+ .interp : { *(.interp) }
+ .hash : { *(.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .rel.init : { *(.rel.init) }
+ .rela.init : { *(.rela.init) }
+ .rel.text : { *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*) }
+ .rela.text : { *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*) }
+ .rel.fini : { *(.rel.fini) }
+ .rela.fini : { *(.rela.fini) }
+ .rel.rodata : { *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*) }
+ .rela.rodata : { *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*) }
+ .rel.data : { *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*) }
+ .rela.data : { *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) }
+ .rel.tdata : { *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*) }
+ .rela.tdata : { *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*) }
+ .rel.tbss : { *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) }
+ .rela.tbss : { *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) }
+ .rel.ctors : { *(.rel.ctors) }
+ .rela.ctors : { *(.rela.ctors) }
+ .rel.dtors : { *(.rel.dtors) }
+ .rela.dtors : { *(.rela.dtors) }
+ .rel.got : { *(.rel.got) }
+ .rela.got : { *(.rela.got) }
+ .rel.bss : { *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) }
+ .rela.bss : { *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) }
+ .rel.plt : { *(.rel.plt) }
+ .rela.plt : { *(.rela.plt) }
+ .fixed_vectors :
+ {
+ . = .;
+ KEEP (*(.fixed_vectors))
+ } =0
+ .init :
+ {
+ KEEP (*(.init))
+ } =0
+ .plt : { *(.plt) }
+ .text :
+ {
+ *(.text .stub .text.* .gnu.linkonce.t.*)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ *(.glue_7t) *(.glue_7)
+ } =0
+ .fini :
+ {
+ KEEP (*(.fini))
+ } =0
+ PROVIDE (__etext = .);
+ PROVIDE (_etext = .);
+ PROVIDE (etext = .);
+ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+ .rodata1 : { *(.rodata1) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ /* Adjust the address for the data segment. We want to adjust up to
+ the same address within the page on the next page up. */
+ . = ALIGN(1) + (. & (1 - 1));
+ /* Ensure the __preinit_array_start label is properly aligned. We
+ could instead move the label definition inside the section, but
+ the linker would then create the section even if it turns out to
+ be empty, which isn't pretty. */
+ . = ALIGN(32 / 8);
+ PROVIDE (__preinit_array_start = .);
+ .preinit_array : { *(.preinit_array) }
+ PROVIDE (__preinit_array_end = .);
+ PROVIDE (__init_array_start = .);
+ .init_array : { *(.init_array) }
+ PROVIDE (__init_array_end = .);
+ PROVIDE (__fini_array_start = .);
+ .fini_array : { *(.fini_array) }
+ PROVIDE (__fini_array_end = .);
+ .data :
+ {
+ __data_start = . ;
+ *(.data .data.* .gnu.linkonce.d.*)
+ SORT(CONSTRUCTORS)
+ }
+ .data1 : { *(.data1) }
+ .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
+ .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
+ .eh_frame : { KEEP (*(.eh_frame)) }
+ .gcc_except_table : { *(.gcc_except_table) }
+ .dynamic : { *(.dynamic) }
+ .ctors :
+ {
+ ___ctors = .;
+ /* gcc uses crtbegin.o to find the start of
+ the constructors, so we make sure it is
+ first. Because this is a wildcard, it
+ doesn't matter if the user does not
+ actually link against crtbegin.o; the
+ linker won't look for a file to match a
+ wildcard. The wildcard also means that it
+ doesn't matter which directory crtbegin.o
+ is in. */
+ KEEP (*crtbegin*.o(.ctors))
+     /* We don't want to include the .ctor section
+        from the crtend.o file until after the sorted ctors.
+ The .ctor section from the crtend file contains the
+ end of ctors marker and it must be last */
+ KEEP (*(EXCLUDE_FILE (*crtend*.o ) .ctors))
+ KEEP (*(SORT(.ctors.*)))
+ KEEP (*(.ctors))
+ ___ctors_end = .;
+ }
+ .dtors :
+ {
+ ___dtors = .;
+ KEEP (*crtbegin*.o(.dtors))
+ KEEP (*(EXCLUDE_FILE (*crtend*.o ) .dtors))
+ KEEP (*(SORT(.dtors.*)))
+ KEEP (*(.dtors))
+ ___dtors_end = .;
+ }
+ .jcr : { KEEP (*(.jcr)) }
+ .got : { *(.got.plt) *(.got) }
+ _edata = .;
+ PROVIDE (edata = .);
+ __bss_start = .;
+ __bss_start__ = .;
+ .bss :
+ {
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections. */
+ . = ALIGN(32 / 8);
+ }
+ . = ALIGN(32 / 8);
+ _end = .;
+ _bss_end__ = . ; __bss_end__ = . ; __end__ = . ;
+ PROVIDE (end = .);
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ .stack 0xa0001000-0x04 :
+ {
+ _stack = .;
+ *(.stack)
+ }
+ .note.gnu.zpu.ident 0 : { KEEP (*(.note.gnu.zpu.ident)) }
+ /DISCARD/ : { *(.note.GNU-stack) }
+}
+
diff --git a/hw_s3estarter/software/test/test.ld b/hw_s3estarter/software/test/test.ld
new file mode 100644
index 0000000..6018e32
--- /dev/null
+++ b/hw_s3estarter/software/test/test.ld
@@ -0,0 +1,44 @@
+OUTPUT_FORMAT("elf32-zpu", "elf32-zpu", "elf32-zpu")
+OUTPUT_ARCH(zpu)
+SEARCH_DIR(.);
+
+ENTRY(_start)
+
+MEMORY
+{
+ ahbrom (rx) : ORIGIN = 0x00000000, LENGTH = 8K
+ ahbram (rwx) : ORIGIN = 0x00100000, LENGTH = 4K
+}
+
+/* for sbrk.c:37 */
+/*_stack = ORIGIN(ahbram)+LENGTH(ahbram); */
+_stack = 0x00101000;
+
+SECTIONS
+{
+ .text :
+ {
+ *(.fixed_vectors)
+ *(.text .text.*)
+ *(.rodata .rodata.*)
+ } > ahbrom
+
+
+ .data :
+ AT ( ADDR(.text) + SIZEOF(.text) )
+ {
+ *(.data .data.* )
+ } >ahbram
+
+ .bss :
+ {
+ *(.bss .bss.*)
+ . = ALIGN(4);
+ } >ahbram
+ . = ALIGN(4);
+ _bss_end = . ;
+ _end = . ; /* for sbrk.c:33 */
+
+}
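
Because .data runs in ahbram but its AT() clause places the load image in ahbrom directly behind .text, startup code must copy it across (and clear .bss) before main runs. A minimal sketch; note that this test.ld exports only _stack, _bss_end and _end, so the _data_load/_data_start/_data_end/_bss_start symbols below are hypothetical and would have to be added to the script (e.g. via LOADADDR(.data)):

    #include <stdint.h>

    extern uint32_t _data_load[];                /* hypothetical: LOADADDR(.data) in ahbrom */
    extern uint32_t _data_start[], _data_end[];  /* hypothetical: .data bounds in ahbram */
    extern uint32_t _bss_start[], _bss_end[];    /* _bss_start hypothetical; _bss_end is in the script */

    void init_memory(void)
    {
        const uint32_t *src = _data_load;
        for (uint32_t *dst = _data_start; dst < _data_end; )
            *dst++ = *src++;                     /* copy initialised data ROM -> RAM */
        for (uint32_t *dst = _bss_start; dst < _bss_end; )
            *dst++ = 0;                          /* zero the BSS */
    }
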
+
+