Posted to commits@harmony.apache.org by gs...@apache.org on 2007/05/28 18:20:59 UTC

svn commit: r542258 - /harmony/enhanced/drlvm/trunk/vm/interpreter/src/invokeJNI_em64t.asm

Author: gshimansky
Date: Mon May 28 09:20:59 2007
New Revision: 542258

URL: http://svn.apache.org/viewvc?view=rev&rev=542258
Log:
The bug appears to be in the interpreter. It is not documented, but it looks like the x86_64 calling convention requires rsp to always be aligned on 16 bytes. When the number of stack arguments is odd, rsp ends up misaligned. The same bug was fixed for Windows x86_64 in revision 514264. Now the bug has appeared on Linux too, because gcc generates movaps instructions for code that uses va_args, and movaps requires 16-byte alignment. One such native function in the EHWA scenario is FcPatternBuild. So I've made a patch for Linux similar to the one I made for Windows x86_64.
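As a rough illustration (not part of the commit), any native callee that uses va_args can hit this. On x86_64 Linux, gcc's prologue for a variadic function spills the xmm argument registers into the register save area with movaps, which assumes the caller kept rsp 16-byte aligned at the call site. The following minimal variadic function is a hypothetical stand-in for a callee like FcPatternBuild:

    /* Hypothetical stand-in for a va_args callee such as FcPatternBuild.
     * gcc's prologue for this function saves the xmm argument registers
     * to the register save area with movaps, which faults if the caller
     * left %rsp misaligned by 8. */
    #include <stdarg.h>

    double sum_doubles(int count, ...)
    {
        va_list ap;
        double total = 0.0;
        int i;

        va_start(ap, count);
        for (i = 0; i < count; i++)
            total += va_arg(ap, double);
        va_end(ap);
        return total;
    }

Called from hand-written assembly such as invokeJNI with an odd number of 8-byte pushes and no padding, rsp is left 8 bytes off a 16-byte boundary and the movaps in the prologue raises a fault.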


Modified:
    harmony/enhanced/drlvm/trunk/vm/interpreter/src/invokeJNI_em64t.asm

Modified: harmony/enhanced/drlvm/trunk/vm/interpreter/src/invokeJNI_em64t.asm
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/interpreter/src/invokeJNI_em64t.asm?view=diff&rev=542258&r1=542257&r2=542258
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/interpreter/src/invokeJNI_em64t.asm (original)
+++ harmony/enhanced/drlvm/trunk/vm/interpreter/src/invokeJNI_em64t.asm Mon May 28 09:20:59 2007
@@ -15,18 +15,18 @@
 //
 //   Author: Ivan Volosyuk
 //
-	.text
-	.align 2
+    .text
+    .align 2
 .globl invokeJNI
-	.type	invokeJNI, @function
+    .type    invokeJNI, @function
 invokeJNI:
 //  rdi - memory
 //  rsi - n fp args
 //  rdx - n mem args
 //  rcx - function ptr
 
-	push	%rbp
-	mov	%rsp, %rbp
+    push %rbp
+    mov %rsp, %rbp
 
 // cycle to fill all fp args
     movq 8(%rdi), %xmm0
@@ -38,17 +38,26 @@
     movq 56(%rdi), %xmm6
     movq 64(%rdi), %xmm7
 
+    mov %rsp, %r10 // Check that stack is aligned on
+    and $8, %r10   // 16 bytes. This code may be removed
+    jz no_abort    // when we are sure that compiler always
+    int3           // calls us with aligned stack
+no_abort:
+    mov %rdx, %r10 // Align stack on 16 bytes before pushing
+    and $1, %r10   // stack arguments in case we have an odd
+    shl $3, %r10   // number of stack arguments
+    sub %r10, %rsp
 // store memory args
-	movq %rcx, %r10 // func ptr
-	movq %rdx, %rcx // counter
-	lea	8+64+48-8(%rdi,%rcx,8), %rdx
-	sub	%rsp, %rdx
+    movq %rcx, %r10 // func ptr
+    movq %rdx, %rcx // counter
+    lea 8+64+48-8(%rdi,%rcx,8), %rdx
+    sub %rsp, %rdx
     cmpq $0, %rcx
-    jz 2f
-1:
-	push	0(%rsp,%rdx)
-	loop 1b
-2:
+    jz cycle_end
+cycle:
+    push 0(%rsp,%rdx)
+    loop cycle
+cycle_end:
     movq 80(%rdi), %rsi
     movq 88(%rdi), %rdx
     movq 96(%rdi), %rcx
@@ -57,9 +66,9 @@
 
     movq 72(%rdi), %rdi
 
-	call	*%r10
-	leave
-	ret
+    call *%r10
+    leave
+    ret
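For readers less familiar with AT&T assembly, the and/shl/sub sequence added above boils down to the following C sketch (the function name and parameter are assumptions for illustration, not part of the commit):

    #include <stddef.h>

    /* Hedged sketch (not the committed code) of what the new %r10
     * arithmetic computes: with an odd number of 8-byte stack arguments,
     * %rsp must be dropped by an extra 8 bytes before the push loop so
     * that it is 16-byte aligned again once all arguments are pushed. */
    static size_t alignment_padding(size_t n_mem_args)
    {
        return (n_mem_args & 1) * 8;   /* 0 or 8; matches and $1 / shl $3 */
    }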