* grub-core/kern/x86_64/efi/callwrap.S (efi_wrap_0): Preserve 16-byte
	stack alignment.
	(efi_wrap_1): Likewise.
	(efi_wrap_2): Likewise.
	(efi_wrap_3): Likewise.
	(efi_wrap_4): Likewise.
	(efi_wrap_5): Likewise.
	(efi_wrap_6): Likewise.
	(efi_wrap_10): Likewise.
	Based on information by: Red Hat/Peter Jones.
This commit is contained in:
Vladimir 'phcoder' Serbinenko 2011-03-31 16:48:36 +02:00
parent a8afc1d12c
commit 91dc607330
2 changed files with 35 additions and 22 deletions

View file

@@ -1,3 +1,16 @@
2011-03-31 Vladimir Serbinenko <phcoder@gmail.com>
* grub-core/kern/x86_64/efi/callwrap.S (efi_wrap_0): Preserve 16-byte
stack alignment.
(efi_wrap_1): Likewise.
(efi_wrap_2): Likewise.
(efi_wrap_3): Likewise.
(efi_wrap_4): Likewise.
(efi_wrap_5): Likewise.
(efi_wrap_6): Likewise.
(efi_wrap_10): Likewise.
Based on information by: Red Hat/Peter Jones.
2011-03-31 Colin Watson <cjwatson@ubuntu.com>
* grub-core/mmap/efi/mmap.c (grub_mmap_unregister): Remove

View file

@@ -37,80 +37,80 @@
.text
FUNCTION(efi_wrap_0)
subq $40, %rsp
subq $48, %rsp
call *%rdi
addq $40, %rsp
addq $48, %rsp
ret
FUNCTION(efi_wrap_1)
subq $40, %rsp
subq $48, %rsp
mov %rsi, %rcx
call *%rdi
addq $40, %rsp
addq $48, %rsp
ret
FUNCTION(efi_wrap_2)
subq $40, %rsp
subq $48, %rsp
mov %rsi, %rcx
call *%rdi
addq $40, %rsp
addq $48, %rsp
ret
FUNCTION(efi_wrap_3)
subq $40, %rsp
subq $48, %rsp
mov %rcx, %r8
mov %rsi, %rcx
call *%rdi
addq $40, %rsp
addq $48, %rsp
ret
FUNCTION(efi_wrap_4)
subq $40, %rsp
subq $48, %rsp
mov %r8, %r9
mov %rcx, %r8
mov %rsi, %rcx
call *%rdi
addq $40, %rsp
addq $48, %rsp
ret
FUNCTION(efi_wrap_5)
subq $40, %rsp
subq $48, %rsp
mov %r9, 32(%rsp)
mov %r8, %r9
mov %rcx, %r8
mov %rsi, %rcx
call *%rdi
addq $40, %rsp
addq $48, %rsp
ret
FUNCTION(efi_wrap_6)
subq $56, %rsp
mov 56+8(%rsp), %rax
subq $64, %rsp
mov 64+8(%rsp), %rax
mov %rax, 40(%rsp)
mov %r9, 32(%rsp)
mov %r8, %r9
mov %rcx, %r8
mov %rsi, %rcx
call *%rdi
addq $56, %rsp
addq $64, %rsp
ret
FUNCTION(efi_wrap_10)
subq $88, %rsp
mov 88+40(%rsp), %rax
subq $96, %rsp
mov 96+40(%rsp), %rax
mov %rax, 72(%rsp)
mov 88+32(%rsp), %rax
mov 96+32(%rsp), %rax
mov %rax, 64(%rsp)
mov 88+24(%rsp), %rax
mov 96+24(%rsp), %rax
mov %rax, 56(%rsp)
mov 88+16(%rsp), %rax
mov 96+16(%rsp), %rax
mov %rax, 48(%rsp)
mov 88+8(%rsp), %rax
mov 96+8(%rsp), %rax
mov %rax, 40(%rsp)
mov %r9, 32(%rsp)
mov %r8, %r9
mov %rcx, %r8
mov %rsi, %rcx
call *%rdi
addq $88, %rsp
addq $96, %rsp
ret