Re: [MiNT] MiNTLib for ColdFire
Now here is a patch adding ColdFire support to most assembler files of the
MiNTLib (patches for the floating-point routines are still missing, as is,
maybe, one for osbind.S).
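All the new code sits inside #ifdef __mcoldfire__ blocks (GCC predefines
that macro whenever it targets a ColdFire CPU), so plain 680x0 builds
compile exactly the same code as before. The changes all follow the same
few patterns, because ColdFire drops several 680x0 features. A rough sketch
of the three recurring rewrites (illustrative register lists, not lines
taken from the patch):

| 1. No movem with predecrement/postincrement addressing:
|    adjust the pointer with lea, then access it plainly.
	lea	sp@(-24),sp	| instead of: moveml d2-d7,sp@-
	moveml	d2-d7,sp@
	...
	moveml	sp@,d2-d7	| instead of: moveml sp@+,d2-d7
	lea	sp@(24),sp

| 2. Most arithmetic and logical instructions exist only in
|    32-bit form: word operations become long operations.
	orl	#1,d0		| instead of: orw #1,d0

| 3. No dbra: decrement and test explicitly.
loop:	...
	subql	#1,d0
	bpl	loop		| instead of: dbra d0,loop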
Once again, this patch is clean and will not break the existing code. I
have checked the new ColdFire code carefully, and it looks OK to me. Any
review will be appreciated.
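For reviewers, the part that deserves the closest look is the computed jump
into the unrolled loops of bcopy.S and bzero.S. Each movl in the unrolled
body assembles to a single 2-byte word, so adding 2*n to the jump offset
enters the loop with n moves skipped. That trick is unchanged; apart from
the usual word-to-long widening, the only difference is the index register
size, because ColdFire indexed addressing takes only a 32-bit index. A
condensed sketch of the bcopy case:

	addl	d2,d2		| d2 = 2 * number of movl's to skip
	jmp	pc@(2,d2:l)	| was d2:w on plain 680x0
copy16:
	movl	a0@+,a1@+	| each movl is one 2-byte word
	movl	a0@+,a1@+
	...
	subql	#1,d0		| replaces: dbra d0,copy16
	bpl	copy16

Note that subql #1,dN / bpl runs a loop dN+1 times, exactly like the dbra
it replaces; dbra only counted in the low word of its register, but all the
counts here are far below 65536, so the two are equivalent.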
Alan, could you commit, please?
--
Vincent Rivière
diff -aurN -x CVS mintlib.orig/ChangeLog mintlib/ChangeLog
--- mintlib.orig/ChangeLog 2009-05-18 22:31:48.796875000 +0200
+++ mintlib/ChangeLog 2009-05-21 11:02:22.703125000 +0200
@@ -1,3 +1,10 @@
+2009-05-21 Thursday 11:02 Vincent Riviere <vincent.riviere@freesbee.fr>
+
+ * mintlib/libc_exit.S, mintlib/setjmp.S, string/bcopy.S,
+ string/bzero.S, unix/vfork.S
+
+ Added ColdFire support to most assembler files (except FPU).
+
2009-05-18 Monday 20:51 Vincent Riviere <vincent.riviere@freesbee.fr>
* mintlib/include/compiler.h, mintlib/include/macros.h,
diff -aurN -x CVS mintlib.orig/mintlib/libc_exit.S mintlib/mintlib/libc_exit.S
--- mintlib.orig/mintlib/libc_exit.S 2000-10-12 13:30:47.000000000 +0200
+++ mintlib/mintlib/libc_exit.S 2009-05-18 23:00:27.281250000 +0200
@@ -18,7 +18,12 @@
___libc_exit:
link a6,#0
- moveml #0x3020,sp@-
+#ifdef __mcoldfire__
+ lea sp@(-12),sp
+ moveml d5/a4/a5,sp@
+#else
+ moveml d5/a4/a5,sp@-
+#endif
#ifndef __MSHORT__
movel a6@(8),d3
#else
diff -aurN -x CVS mintlib.orig/mintlib/setjmp.S mintlib/mintlib/setjmp.S
--- mintlib.orig/mintlib/setjmp.S 2009-05-18 20:54:50.671875000 +0200
+++ mintlib/mintlib/setjmp.S 2009-05-20 23:20:59.921875000 +0200
@@ -42,7 +42,11 @@
movel sp@+, a0 | restore register a0
nomint:
+#ifdef __mcoldfire__
+ orl #1,d0 | make it != 0 (SIGNULL is unmaskable)
+#else
orw #1,d0 | make it != 0 (SIGNULL is unmaskable)
+#endif
movel d0, a0@(52) | save signal mask
jra SETJMP | call common code
@@ -73,7 +77,11 @@
movel sp@(4),a0 | address of jmp_buf[]
movel a0@(52),d0 | want to restore sigmask?
jeq NORESTORE | no -- skip restore code
+#ifdef __mcoldfire__
+ andl #-2,d0
+#else
andw #-2,d0
+#endif
movel d0, Sigmask | restore tos emulation signal mask
#ifdef __MSHORT__
tstw Mint | see if MiNT is active
diff -aurN -x CVS mintlib.orig/string/bcopy.S mintlib/string/bcopy.S
--- mintlib.orig/string/bcopy.S 2000-10-12 12:56:59.000000000 +0200
+++ mintlib/string/bcopy.S 2009-05-21 10:57:25.171875000 +0200
@@ -67,9 +67,15 @@
cmpl a0,a1
jgt top_down
+#ifdef __mcoldfire__
+ movl a0,d1 | test for alignment
+ movl a1,d2
+ eorl d2,d1
+#else
movw a0,d1 | test for alignment
movw a1,d2
eorw d2,d1
+#endif
btst #0,d1 | one odd one even ?
jne slow_copy
btst #0,d2 | both even ?
@@ -77,10 +83,29 @@
movb a0@+,a1@+ | copy one byte, now we are both even
subql #1,d0
both_even:
- clrw d1 | save length less 256
+ movq #0,d1 | save length less 256
movb d0,d1
lsrl #8,d0 | number of 256 bytes blocks
jeq less256
+#ifdef __mcoldfire__
+ lea sp@(-40),sp
+ movml d1/d3-d7/a2/a3/a5/a6,sp@ | d2 is already saved
+ | exclude a4 because of -mbaserel
+copy256:
+ movml a0@,d1-d7/a2/a3/a5/a6 | copy 5*44+36=256 bytes
+ movml d1-d7/a2/a3/a5/a6,a1@
+ movml a0@(44),d1-d7/a2/a3/a5/a6
+ movml d1-d7/a2/a3/a5/a6,a1@(44)
+ movml a0@(88),d1-d7/a2/a3/a5/a6
+ movml d1-d7/a2/a3/a5/a6,a1@(88)
+ movml a0@(132),d1-d7/a2/a3/a5/a6
+ movml d1-d7/a2/a3/a5/a6,a1@(132)
+ movml a0@(176),d1-d7/a2/a3/a5/a6
+ movml d1-d7/a2/a3/a5/a6,a1@(176)
+ movml a0@(220),d1-d7/a2-a3
+ movml d1-d7/a2-a3,a1@(220)
+ lea a0@(256),a0
+#else
movml d1/d3-d7/a2/a3/a5/a6,sp@- | d2 is already saved
| exclude a4 because of -mbaserel
copy256:
@@ -96,9 +121,25 @@
movml d1-d7/a2/a3/a5/a6,a1@(176)
movml a0@+,d1-d7/a2-a3
movml d1-d7/a2-a3,a1@(220)
+#endif
lea a1@(256),a1 | increment dest, src is already
subql #1,d0
jne copy256 | next, please
+#ifdef __mcoldfire__
+ movml sp@,d1/d3-d7/a2/a3/a5/a6
+ lea sp@(40),sp
+less256: | copy 16 bytes blocks
+ movl d1,d0
+ lsrl #2,d0 | number of 4 bytes blocks
+ jeq less4 | less than 4 bytes left
+ movl d0,d2
+ negl d2
+ andil #3,d2 | d2 = number of bytes below 16 (-n)&3
+ subql #1,d0
+ lsrl #2,d0 | number of 16 bytes blocks minus 1, if d2==0
+ addl d2,d2 | offset in code (movl two bytes)
+ jmp pc@(2,d2:l) | jmp into loop
+#else
movml sp@+,d1/d3-d7/a2/a3/a5/a6
less256: | copy 16 bytes blocks
movw d1,d0
@@ -111,12 +152,18 @@
lsrw #2,d0 | number of 16 bytes blocks minus 1, if d2==0
addw d2,d2 | offset in code (movl two bytes)
jmp pc@(2,d2:w) | jmp into loop
+#endif
copy16:
movl a0@+,a1@+
movl a0@+,a1@+
movl a0@+,a1@+
movl a0@+,a1@+
+#ifdef __mcoldfire__
+ subql #1,d0
+ bpl copy16
+#else
dbra d0,copy16
+#endif
less4:
btst #1,d1
jeq less2
@@ -133,6 +180,15 @@
rts
slow_copy: | byte by bytes copy
+#ifdef __mcoldfire__
+ movl d0,d1
+ negl d1
+ andil #7,d1 | d1 = number of bytes below 8 (-n)&7
+ addql #7,d0
+ lsrl #3,d0 | number of 8 bytes block plus 1, if d1!=0
+ addl d1,d1 | offset in code (movb two bytes)
+ jmp pc@(2,d1:l) | jump into loop
+#else
movw d0,d1
negw d1
andiw #7,d1 | d1 = number of bytes blow 8 (-n)&7
@@ -140,6 +196,7 @@
lsrl #3,d0 | number of 8 bytes block plus 1, if d1!=0
addw d1,d1 | offset in code (movb two bytes)
jmp pc@(2,d1:w) | jump into loop
+#endif
scopy:
movb a0@+,a1@+
movb a0@+,a1@+
@@ -157,9 +214,15 @@
addl d0,a0 | a0 byte after end of src
addl d0,a1 | a1 byte after end of dest
+#ifdef __mcoldfire__
+ movl a0,d1 | exactly the same as above, only with predec
+ movl a1,d2
+ eorl d2,d1
+#else
movw a0,d1 | exact the same as above, only with predec
movw a1,d2
eorw d2,d1
+#endif
btst #0,d1
jne slow_copy_d
@@ -168,10 +231,28 @@
movb a0@-,a1@-
subql #1,d0
both_even_d:
- clrw d1
+ movq #0,d1
movb d0,d1
lsrl #8,d0
jeq less256_d
+#ifdef __mcoldfire__
+ lea sp@(-40),sp
+ movml d1/d3-d7/a2/a3/a5/a6,sp@
+copy256_d:
+ movml a0@(-44),d1-d7/a2/a3/a5/a6
+ movml d1-d7/a2/a3/a5/a6,a1@(-44)
+ movml a0@(-88),d1-d7/a2/a3/a5/a6
+ movml d1-d7/a2/a3/a5/a6,a1@(-88)
+ movml a0@(-132),d1-d7/a2/a3/a5/a6
+ movml d1-d7/a2/a3/a5/a6,a1@(-132)
+ movml a0@(-176),d1-d7/a2/a3/a5/a6
+ movml d1-d7/a2/a3/a5/a6,a1@(-176)
+ movml a0@(-220),d1-d7/a2/a3/a5/a6
+ movml d1-d7/a2/a3/a5/a6,a1@(-220)
+ movml a0@(-256),d1-d7/a2-a3
+ movml d1-d7/a2-a3,a1@(-256)
+ lea a1@(-256),a1
+#else
movml d1/d3-d7/a2/a3/a5/a6,sp@-
copy256_d:
movml a0@(-44),d1-d7/a2/a3/a5/a6
@@ -186,9 +267,25 @@
movml d1-d7/a2/a3/a5/a6,a1@-
movml a0@(-256),d1-d7/a2-a3
movml d1-d7/a2-a3,a1@-
+#endif
lea a0@(-256),a0
subql #1,d0
jne copy256_d
+#ifdef __mcoldfire__
+ movml sp@,d1/d3-d7/a2/a3/a5/a6
+ lea sp@(40),sp
+less256_d:
+ movl d1,d0
+ lsrl #2,d0
+ jeq less4_d
+ movl d0,d2
+ negl d2
+ andil #3,d2
+ subql #1,d0
+ lsrl #2,d0
+ addl d2,d2
+ jmp pc@(2,d2:l)
+#else
movml sp@+,d1/d3-d7/a2/a3/a5/a6
less256_d:
movw d1,d0
@@ -201,12 +298,18 @@
lsrw #2,d0
addw d2,d2
jmp pc@(2,d2:w)
+#endif
copy16_d:
movl a0@-,a1@-
movl a0@-,a1@-
movl a0@-,a1@-
movl a0@-,a1@-
+#ifdef __mcoldfire__
+ subql #1,d0
+ bpl copy16_d
+#else
dbra d0,copy16_d
+#endif
less4_d:
btst #1,d1
jeq less2_d
@@ -217,6 +320,15 @@
movb a0@-,a1@-
jra exit_d2
slow_copy_d:
+#ifdef __mcoldfire__
+ movl d0,d1
+ negl d1
+ andil #7,d1
+ addql #7,d0
+ lsrl #3,d0
+ addl d1,d1
+ jmp pc@(2,d1:l)
+#else
movw d0,d1
negw d1
andiw #7,d1
@@ -224,6 +336,7 @@
lsrl #3,d0
addw d1,d1
jmp pc@(2,d1:w)
+#endif
scopy_d:
movb a0@-,a1@-
movb a0@-,a1@-
diff -aurN -x CVS mintlib.orig/string/bzero.S mintlib/string/bzero.S
--- mintlib.orig/string/bzero.S 2000-10-12 12:56:59.000000000 +0200
+++ mintlib/string/bzero.S 2009-05-21 10:21:07.156250000 +0200
@@ -74,17 +74,26 @@
subql #1,d1
areeven:
movb d0,d2
+#ifdef __mcoldfire__
+ lsll #8,d0
+#else
lslw #8,d0
+#endif
movb d2,d0
movw d0,d2
swap d2
movw d0,d2 | d2 has byte now four times
- clrw d0 | save length less 256
+ movq #0,d0 | save length less 256
movb d1,d0
lsrl #8,d1 | number of 256 bytes blocks
jeq less256
+#ifdef __mcoldfire__
+ lea sp@(-40),sp
+ movml d0/d3-d7/a2/a3/a5/a6,sp@ | d2 is already saved
+#else
movml d0/d3-d7/a2/a3/a5/a6,sp@- | d2 is already saved
+#endif
| exclude a4 because of -mbaserel
movl d2,d0
movl d2,d3
@@ -97,17 +106,43 @@
movl d2,a5
movl d2,a6
set256:
+#ifdef __mcoldfire__
+ lea a0@(-256),a0
+ movml d0/d2-d7/a2/a3/a5/a6,a0@(212) | set 5*44+36=256 bytes
+ movml d0/d2-d7/a2/a3/a5/a6,a0@(168)
+ movml d0/d2-d7/a2/a3/a5/a6,a0@(124)
+ movml d0/d2-d7/a2/a3/a5/a6,a0@(80)
+ movml d0/d2-d7/a2/a3/a5/a6,a0@(36)
+ movml d0/d2-d7/a2-a3,a0@
+#else
movml d0/d2-d7/a2/a3/a5/a6,a0@- | set 5*44+36=256 bytes
movml d0/d2-d7/a2/a3/a5/a6,a0@-
movml d0/d2-d7/a2/a3/a5/a6,a0@-
movml d0/d2-d7/a2/a3/a5/a6,a0@-
movml d0/d2-d7/a2/a3/a5/a6,a0@-
movml d0/d2-d7/a2-a3,a0@-
+#endif
subql #1,d1
jne set256 | next, please
+#ifdef __mcoldfire__
+ movml sp@,d0/d3-d7/a2/a3/a5/a6
+ lea sp@(40),sp
+#else
movml sp@+,d0/d3-d7/a2/a3/a5/a6
+#endif
less256: | set 16 bytes blocks
movw d0,sp@- | save length below 256 for last 3 bytes
+#ifdef __mcoldfire__
+ lsrl #2,d0
+ jeq less4 | less than 4 bytes left
+ movl d0,d1
+ negl d1
+ andil #3,d1 | d1 = number of bytes below 16 (-n)&3
+ subql #1,d0
+ lsrl #2,d0 | number of 16 bytes blocks minus 1, if d1==0
+ addl d1,d1 | offset in code (movl two bytes)
+ jmp pc@(2,d1:l) | jmp into loop
+#else
lsrw #2,d0 | number of 4 bytes blocks
jeq less4 | less that 4 bytes left
movw d0,d1
@@ -117,12 +152,18 @@
lsrw #2,d0 | number of 16 bytes blocks minus 1, if d1==0
addw d1,d1 | offset in code (movl two bytes)
jmp pc@(2,d1:w) | jmp into loop
+#endif
set16:
movl d2,a0@-
movl d2,a0@-
movl d2,a0@-
movl d2,a0@-
+#ifdef __mcoldfire__
+ subql #1,d0
+ bpl set16
+#else
dbra d0,set16
+#endif
less4:
movw sp@+,d0
btst #1,d0
diff -aurN -x CVS mintlib.orig/unix/vfork.S mintlib/unix/vfork.S
--- mintlib.orig/unix/vfork.S 2001-07-19 23:01:01.000000000 +0200
+++ mintlib/unix/vfork.S 2009-05-20 23:23:42.828125000 +0200
@@ -37,12 +37,22 @@
jmi L_err
jmp a1@ | return
L_TOS:
+#ifdef __mcoldfire__
+ lea Vfsav, a0
+ moveml d2-d7/a1-a6, a0@ | save registers
+#else
moveml d2-d7/a1-a6, Vfsav | save registers
+#endif
pea Vfsav
pea pc@(L_newprog)
jbsr _tfork | tfork(L_newprog, L_vfsav)
addql #8, sp
+#ifdef __mcoldfire__
+ lea Vfsav, a0
+ moveml a0@, d2-d7/a1-a6 | restore reggies
+#else
moveml Vfsav, d2-d7/a1-a6 | restore reggies
+#endif
tstl d0 | fork went OK??
jmi L_err | no -- error
jmp a1@ | return to caller