/* callwrap.S - wrapper for x86_64 efi calls */
/*
* GRUB -- GRand Unified Bootloader
* Copyright (C) 2006,2007,2009 Free Software Foundation, Inc.
*
* GRUB is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* GRUB is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
 * along with GRUB.  If not, see <http://www.gnu.org/licenses/>.
*/
#include <config.h>
#include <grub/symbol.h>
/*
 * x86_64 uses registers to pass parameters.  Unfortunately, gcc and EFI use
 * different calling conventions, so we need to translate between the two.
 *
 * gcc (System V AMD64 ABI):
 *   %rdi, %rsi, %rdx, %rcx, %r8, %r9, 8(%rsp), 16(%rsp), ...
 *
 * efi (Microsoft x64 ABI):
 *   %rcx, %rdx, %r8, %r9, 32(%rsp), 40(%rsp), 48(%rsp), ...
 *
 * Each efi_wrap_N receives the EFI function pointer in %rdi and the N
 * call arguments in the remaining System V slots, shifts the arguments
 * into the Microsoft x64 slots, and calls through %rdi.  The Microsoft
 * ABI also requires the caller to reserve a 32-byte "shadow space"
 * below the stack arguments and to keep %rsp 16-byte aligned at the
 * call; since %rsp is congruent to 8 (mod 16) on entry, every wrapper
 * subtracts an amount of the form 16n + 8.
 */
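
/*
 * For reference, a minimal sketch (in C) of how the rest of GRUB reaches
 * these wrappers.  The declaration and macro shapes below are modelled on
 * GRUB's <grub/efi/api.h> but are illustrative, not a verbatim copy:
 *
 *   grub_uint64_t EXPORT_FUNC (efi_wrap_1) (void *func, grub_uint64_t a);
 *
 *   #define efi_call_1(func, a) \
 *     efi_wrap_1 (func, (grub_uint64_t) (a))
 *
 * so C code can invoke an EFI service such as Stall like this:
 *
 *   efi_call_1 (grub_efi_system_table->boot_services->stall, 1000);
 */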
        .file "callwrap.S"
        .text
FUNCTION(efi_wrap_0)
        /* No arguments to shuffle; just reserve the 32-byte shadow
           space and keep %rsp 16-byte aligned (40 = 32 + 8).  */
        subq $40, %rsp
        call *%rdi
        addq $40, %rsp
        ret
FUNCTION(efi_wrap_1)
        subq $40, %rsp
        mov %rsi, %rcx          /* arg1: System V -> Microsoft x64 */
        call *%rdi
        addq $40, %rsp
        ret
FUNCTION(efi_wrap_2)
        subq $40, %rsp
        mov %rsi, %rcx          /* arg1; arg2 is already in %rdx */
        call *%rdi
        addq $40, %rsp
        ret
FUNCTION(efi_wrap_3)
        subq $40, %rsp
        mov %rcx, %r8           /* arg3 */
        mov %rsi, %rcx          /* arg1; arg2 stays in %rdx */
        call *%rdi
        addq $40, %rsp
        ret
FUNCTION(efi_wrap_4)
        subq $40, %rsp
        mov %r8, %r9            /* arg4 */
        mov %rcx, %r8           /* arg3 */
        mov %rsi, %rcx          /* arg1; arg2 stays in %rdx */
        call *%rdi
        addq $40, %rsp
        ret
FUNCTION(efi_wrap_5)
        subq $40, %rsp
        mov %r9, 32(%rsp)       /* arg5: first Microsoft x64 stack slot,
                                   just above the shadow space */
        mov %r8, %r9            /* arg4 */
        mov %rcx, %r8           /* arg3 */
        mov %rsi, %rcx          /* arg1; arg2 stays in %rdx */
        call *%rdi
        addq $40, %rsp
        ret
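
/*
 * Five-argument calls are common; for example, GRUB's memory-map code
 * issues a call shaped like the sketch below (modelled on
 * grub-core/kern/efi/mm.c; the variable names here are illustrative):
 *
 *   grub_efi_boot_services_t *b = grub_efi_system_table->boot_services;
 *   grub_efi_uintn_t size = 0, map_key, desc_size;
 *   grub_efi_uint32_t desc_version;
 *   grub_efi_status_t status;
 *
 *   status = efi_call_5 (b->get_memory_map, &size, 0,
 *                        &map_key, &desc_size, &desc_version);
 */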
FUNCTION(efi_wrap_6)
        subq $56, %rsp
        mov 56+8(%rsp), %rax    /* arg6: System V stack slot, 8(%rsp)
                                   on entry, displaced by our subq */
        mov %rax, 40(%rsp)      /* arg6: Microsoft x64 stack slot */
        mov %r9, 32(%rsp)       /* arg5 */
        mov %r8, %r9            /* arg4 */
        mov %rcx, %r8           /* arg3 */
        mov %rsi, %rcx          /* arg1; arg2 stays in %rdx */
        call *%rdi
        addq $56, %rsp
        ret
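
/*
 * Stack frame of efi_wrap_6 just before the indirect call, with the
 * 56-byte frame above:
 *
 *   56+8(%rsp)   incoming arg6 (System V caller's stack slot)
 *   56(%rsp)     return address into our caller
 *   48(%rsp)     unused padding
 *   40(%rsp)     arg6 (Microsoft x64 stack slot)
 *   32(%rsp)     arg5 (Microsoft x64 stack slot)
 *   0(%rsp)      32-byte shadow space for %rcx, %rdx, %r8, %r9
 */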
FUNCTION(efi_wrap_7)
        subq $88, %rsp
        mov 88+16(%rsp), %rax   /* arg7 */
        mov %rax, 48(%rsp)
        mov 88+8(%rsp), %rax    /* arg6 */
        mov %rax, 40(%rsp)
        mov %r9, 32(%rsp)       /* arg5 */
        mov %r8, %r9            /* arg4 */
        mov %rcx, %r8           /* arg3 */
        mov %rsi, %rcx          /* arg1; arg2 stays in %rdx */
        call *%rdi
        addq $88, %rsp
        ret
FUNCTION(efi_wrap_10)
        subq $88, %rsp
        mov 88+40(%rsp), %rax   /* arg10 */
        mov %rax, 72(%rsp)
        mov 88+32(%rsp), %rax   /* arg9 */
        mov %rax, 64(%rsp)
        mov 88+24(%rsp), %rax   /* arg8 */
        mov %rax, 56(%rsp)
        mov 88+16(%rsp), %rax   /* arg7 */
        mov %rax, 48(%rsp)
        mov 88+8(%rsp), %rax    /* arg6 */
        mov %rax, 40(%rsp)
        mov %r9, 32(%rsp)       /* arg5 */
        mov %r8, %r9            /* arg4 */
        mov %rcx, %r8           /* arg3 */
        mov %rsi, %rcx          /* arg1; arg2 stays in %rdx */
        call *%rdi
        addq $88, %rsp
        ret
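
/*
 * The ten-argument wrapper exists for wide EFI services such as the
 * Graphics Output Protocol's Blt.  A hedged sketch: the type and enum
 * names are taken from GRUB's <grub/efi/graphics_output.h>, while gop,
 * buffer, and the coordinates are illustrative variables:
 *
 *   struct grub_efi_gop *gop;
 *   ...
 *   efi_call_10 (gop->blt, gop, buffer,
 *                GRUB_EFI_BLT_BUFFER_TO_VIDEO,
 *                0, 0, x, y, width, height, 0);
 */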