[grub] Fix stack alignment on UEFI machines (replaces patch in -74) Resolves: rhbz#677468 Allow booting from higher drive numbers

Peter Jones pjones at fedoraproject.org
Wed Sep 28 23:32:12 UTC 2011


commit b9fed5420d3319f1cdf0e15b72d45ba432c69753
Author: Peter Jones <pjones at redhat.com>
Date:   Wed Aug 17 14:24:32 2011 -0400

    Fix stack alignment on UEFI machines (replaces patch in -74)
    Resolves: rhbz#677468
    Allow booting from higher drive numbers
    Resolves: rhbz#671355
    Conflict with grub2
    Resolves: rhbz#731226

 grub-0.97-128-drives-support.patch            |   73 +++
 grub-0.97-bz677468-properly-align-stack.patch |  716 +++++++++++++++++++++++++
 grub.spec                                     |   13 +-
 3 files changed, 801 insertions(+), 1 deletions(-)
---
diff --git a/grub-0.97-128-drives-support.patch b/grub-0.97-128-drives-support.patch
new file mode 100644
index 0000000..114d93d
--- /dev/null
+++ b/grub-0.97-128-drives-support.patch
@@ -0,0 +1,73 @@
+From 4f9000267ec45b611efd3c90f875cde09e71d511 Mon Sep 17 00:00:00 2001
+From: Jan Holcapek <holcapek at gmail.com>
+Date: Wed, 7 Oct 2009 07:23:13 -0400
+Subject: [PATCH] Allow GRUB to recognize up to 128 hard drives.
+
+GRUB can't recognize, i.e. can't be installed on and load the stage2 loader
+from, the 9th or later hard drive. This patch allows it to do so.
+---
+ stage2/builtins.c |    2 +-
+ stage2/disk_io.c  |    4 ++--
+ stage2/shared.h   |    4 +++-
+ 3 files changed, 6 insertions(+), 4 deletions(-)
+
+diff --git a/stage2/builtins.c b/stage2/builtins.c
+index 6c6e3fa..02a35a9 100644
+--- a/stage2/builtins.c
++++ b/stage2/builtins.c
+@@ -1508,7 +1508,7 @@ find_func (char *arg, int flags)
+     }
+ 
+   /* Hard disks.  */
+-  for (drive = 0x80; drive < 0x88; drive++)
++  for (drive = 0x80; drive < (0x80 + MAX_HD_NUM); drive++)
+     {
+       unsigned long part = 0xFFFFFF;
+       unsigned long start, len, offset, ext_offset, gpt_offset;
+diff --git a/stage2/disk_io.c b/stage2/disk_io.c
+index e07ca77..dbf95f5 100644
+--- a/stage2/disk_io.c
++++ b/stage2/disk_io.c
+@@ -411,7 +411,7 @@ sane_partition (void)
+     return 1;
+   
+   if (!(current_partition & 0xFF000000uL)
+-      && ((current_drive & 0xFFFFFF7F) < 8
++      && ((current_drive & 0xFFFFFF7F) < MAX_HD_NUM
+ 	  || current_drive == cdrom_drive)
+       && (current_partition & 0xFF) == 0xFF
+       && ((current_partition & 0xFF00) == 0xFF00
+@@ -1495,7 +1495,7 @@ print_completions (int is_filename, int is_completion)
+ 		       i < (ptr && (*(ptr-1) == 'd' && *(ptr-2) == 'f') ? 1:2);
+ 		       i++)
+ 		    {
+-		      for (j = 0; j < 8; j++)
++		      for (j = 0; j < MAX_HD_NUM; j++)
+ 			{
+ 			  disk_no = (i * 0x80) + j;
+ 			  if ((disk_choice || disk_no == current_drive)
+diff --git a/stage2/shared.h b/stage2/shared.h
+index 587f9a1..3d1735c 100644
+--- a/stage2/shared.h
++++ b/stage2/shared.h
+@@ -80,6 +80,8 @@ extern void *grub_scratch_mem;
+ #define BIOSDISK_FLAG_LBA_EXTENSION	0x1
+ #define BIOSDISK_FLAG_CDROM		0x2
+ 
++#define MAX_HD_NUM	128
++
+ /*
+  *  This is the filesystem (not raw device) buffer.
+  *  It is 32K in size, do not overrun!
+@@ -128,7 +130,7 @@ extern void *grub_scratch_mem;
+ #define MENU_BUFLEN		(0x8000 + PASSWORD_BUF - MENU_BUF)
+ 
+ /* The size of the drive map.  */
+-#define DRIVE_MAP_SIZE		8
++#define DRIVE_MAP_SIZE		128
+ 
+ /* The size of the key map.  */
+ #define KEY_MAP_SIZE		128
+-- 
+1.7.6
+
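A quick illustration of the convention the hunks above rely on (this is context,
not part of the patch): the BIOS numbers hard disks starting at 0x80, so raising
the probe bound from 0x88 to 0x80 + MAX_HD_NUM widens the scan from 8 to 128
disks. A minimal standalone C sketch of that mapping:

#include <stdio.h>

#define MAX_HD_NUM 128  /* same value the patch adds to stage2/shared.h */

int main(void)
{
	/* BIOS drive numbering: hard disks start at 0x80, so the probe
	 * loop now covers 0x80 .. 0x80 + MAX_HD_NUM - 1 instead of
	 * stopping after the eighth disk (0x87). */
	for (int drive = 0x80; drive < 0x80 + MAX_HD_NUM; drive++)
		printf("(hd%d) -> BIOS drive 0x%02x\n", drive - 0x80, drive);
	return 0;
}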
diff --git a/grub-0.97-bz677468-properly-align-stack.patch b/grub-0.97-bz677468-properly-align-stack.patch
new file mode 100644
index 0000000..11cf5fe
--- /dev/null
+++ b/grub-0.97-bz677468-properly-align-stack.patch
@@ -0,0 +1,716 @@
+From 0dec9e9cf6f2a4ca1fe1bda49203b2d619073d2a Mon Sep 17 00:00:00 2001
+From: Peter Jones <pjones at redhat.com>
+Date: Fri, 12 Aug 2011 16:29:26 -0400
+Subject: [PATCH] Use gnu-efi's callwrap
+
+---
+ efi/Makefile.am       |    2 +-
+ efi/x86_64/callwrap.S |  277 ++++++++++++++++++++++++++++++++++
+ efi/x86_64/callwrap.c |  395 -------------------------------------------------
+ 3 files changed, 278 insertions(+), 396 deletions(-)
+ delete mode 100644 efi/x86_64/callwrap.c
+
+diff --git a/efi/Makefile.am b/efi/Makefile.am
+index 4cffe7d..1d5bd15 100644
+--- a/efi/Makefile.am
++++ b/efi/Makefile.am
+@@ -67,7 +67,7 @@ RELOC_FLAGS = $(STAGE2_CFLAGS) -I$(top_srcdir)/stage1 \
+ 	$(HERCULES_FLAGS) $(GRAPHICS_FLAGS)
+ 
+ noinst_LIBRARIES = libgrubefi.a
+-libgrubefi_a_SOURCES = $(EFI_ARCH)/callwrap.c eficore.c efimm.c efimisc.c \
++libgrubefi_a_SOURCES = $(EFI_ARCH)/callwrap.S eficore.c efimm.c efimisc.c \
+ 	eficon.c efidisk.c graphics.c efigraph.c efiuga.c efidp.c \
+ 	font_8x16.c efiserial.c $(EFI_ARCH)/loader/linux.c efichainloader.c \
+ 	xpm.c pxe.c efitftp.c
+diff --git a/efi/x86_64/callwrap.S b/efi/x86_64/callwrap.S
+index e69de29..ea86efb 100644
+--- a/efi/x86_64/callwrap.S
++++ b/efi/x86_64/callwrap.S
+@@ -0,0 +1,277 @@
++/*
++ * Function calling ABI conversion from Linux to EFI for x86_64
++ *
++ * Copyright (C) 2007 Intel Corp
++ *	Bibo Mao <bibo.mao at intel.com>
++ *	Huang Ying <ying.huang at intel.com>
++ */
++
++/*
++ * EFI calling conventions are documented at:
++ *   http://msdn.microsoft.com/en-us/library/ms235286%28v=vs.80%29.aspx
++ * ELF calling conventions are documented at:
++ *   http://www.x86-64.org/documentation/abi.pdf
++ *
++ * Basically here are the conversion rules:
++ * a) our function pointer is in %rdi
++ * b) ELF gives us 8-byte aligned %rsp, so we need to pad out to 16-byte
++ *    alignment.
++ * c) inside each call thunker, we can only adjust the stack by
++ *    multiples of 16 bytes. "offset" below refers to however much
++ *    we allocate inside a thunker.
++ * d) rsi through r8 (elf) aka rcx through r9 (ms) require stack space
++ *    on the MS side even though it's not getting used at all.
++ * e) arguments are as follows: (elf -> ms)
++ *   1) rdi -> rcx (32 saved)
++ *   2) rsi -> rdx (32 saved)
++ *   3) rdx -> r8 ( 32 saved)
++ *   4) rcx -> r9 (32 saved)
++ *   5) r8 -> 32(%rsp) (48 saved)
++ *   6) r9 -> 40(%rsp) (48 saved)
++ *   7) pad+offset+0(%rsp) -> 48(%rsp) (64 saved)
++ *   8) pad+offset+8(%rsp) -> 56(%rsp) (64 saved)
++ *   9) pad+offset+16(%rsp) -> 64(%rsp) (80 saved)
++ *  10) pad+offset+24(%rsp) -> 72(%rsp) (80 saved)
++ *  11) pad+offset+32(%rsp) -> 80(%rsp) (96 saved)
++ *  12) pad+offset+40(%rsp) -> 88(%rsp) (96 saved)
++ * f) because the first argument we receive in a thunker is actually the
++ *    function to be called, arguments are offset as such:
++ *   0) rdi -> caller
++ *   1) rsi -> rcx (32 saved)
++ *   2) rdx -> rdx (32 saved)
++ *   3) rcx -> r8 (32 saved)
++ *   4) r8 -> r9 (32 saved)
++ *   5) r9 -> 32(%rsp) (48 saved)
++ *   6) pad+offset+0(%rsp) -> 40(%rsp) (48 saved)
++ *   7) pad+offset+8(%rsp) -> 48(%rsp) (64 saved)
++ *   8) pad+offset+16(%rsp) -> 56(%rsp) (64 saved)
++ *   9) pad+offset+24(%rsp) -> 64(%rsp) (80 saved)
++ *  10) pad+offset+32(%rsp) -> 72(%rsp) (80 saved)
++ *  11) pad+offset+40(%rsp) -> 80(%rsp) (96 saved)
++ *  12) pad+offset+48(%rsp) -> 88(%rsp) (96 saved)
++ * g) arguments need to be moved in opposite order to avoid clobbering
++ * h) pad_stack leaves the amount of padding it added in %r11 for functions
++ *    to use
++ * i) efi -> elf calls don't need to pad the stack, because the 16-byte
++ *    alignment is also always 8-byte aligned.
++ */
++
++#define ENTRY(name)	\
++	.globl name;	\
++	name:
++
++#define out(val)		\
++	push %rax ;		\
++	mov val, %rax ;		\
++	out %al, $128 ;		\
++	pop %rax
++
++#define pad_stack							\
++	subq $8, %rsp ; /* must be a multiple of 16 - sizeof(%rip) */	\
++	/* stash some handy integers */					\
++	mov $0x8, %rax ;						\
++	mov $0x10, %r10 ;						\
++	mov $0xf, %r11 ;						\
++	/* see if we need padding */					\
++	and %rsp, %rax ;						\
++	/* store the pad amount in %r11 */				\
++	cmovnz %rax, %r11 ;						\
++	cmovz %r10, %r11 ;						\
++	/* insert the padding */					\
++	subq %r11, %rsp ;						\
++	/* add the $8 we saved above in %r11 */				\
++	addq $8, %r11 ;							\
++	/* store the pad amount */					\
++	mov %r11, (%rsp) ;						\
++	/* compensate for %rip being stored on the stack by call */	\
++	addq $8, %r11
++
++#define unpad_stack							\
++	/* fetch the pad amount	we saved (%r11 has been clobbered) */	\
++	mov (%rsp), %r11 ;						\
++	/* remove the padding */					\
++	addq %r11, %rsp
++
++ENTRY(x64_call0)
++	pad_stack
++	subq $32, %rsp
++	call *%rdi
++	addq $32, %rsp
++	unpad_stack
++	ret
++
++ENTRY(x64_call1)
++	pad_stack
++	subq $32, %rsp
++	mov  %rsi, %rcx
++	call *%rdi
++	addq $32, %rsp
++	unpad_stack
++	ret
++
++ENTRY(x64_call2)
++	pad_stack
++	subq $32, %rsp
++	/* mov %rdx, %rdx */
++	mov  %rsi, %rcx
++	call *%rdi
++	addq $32, %rsp
++	unpad_stack
++	ret
++
++ENTRY(x64_call3)
++	pad_stack
++	subq $32, %rsp
++	mov  %rcx, %r8
++	/* mov %rdx, %rdx */
++	mov  %rsi, %rcx
++	call *%rdi
++	addq $32, %rsp
++	unpad_stack
++	ret
++
++ENTRY(x64_call4)
++	pad_stack
++	subq $32, %rsp
++	mov %r8, %r9
++	mov %rcx, %r8
++	/* mov %rdx, %rdx */
++	mov %rsi, %rcx
++	call *%rdi
++	addq $32, %rsp
++	unpad_stack
++	ret
++
++ENTRY(x64_call5)
++	pad_stack
++	subq $48, %rsp
++	mov %r9, 32(%rsp)
++	mov %r8, %r9
++	mov %rcx, %r8
++	/* mov %rdx, %rdx */
++	mov %rsi, %rcx
++	call *%rdi
++	addq $48, %rsp
++	unpad_stack
++	ret
++
++ENTRY(x64_call6)
++	pad_stack
++	subq $48, %rsp
++	addq $48, %r11
++	addq %rsp, %r11
++	mov (%r11), %rax
++	mov %rax, 40(%rsp)
++	mov %r9, 32(%rsp)
++	mov %r8, %r9
++	mov %rcx, %r8
++	/* mov %rdx, %rdx */
++	mov %rsi, %rcx
++	call *%rdi
++	addq $48, %rsp
++	unpad_stack
++	ret
++
++ENTRY(x64_call7)
++	pad_stack
++	subq $64, %rsp
++	addq $64, %r11
++	addq $8, %r11
++	addq %rsp, %r11
++	mov (%r11), %rax
++	mov %rax, 48(%rsp)
++	subq $8, %r11
++	mov (%r11), %rax
++	mov %rax, 40(%rsp)
++	mov %r9, 32(%rsp)
++	mov %r8, %r9
++	mov %rcx, %r8
++	/* mov %rdx, %rdx */
++	mov %rsi, %rcx
++	call *%rdi
++	addq $64, %rsp
++	unpad_stack
++	ret
++
++ENTRY(x64_call8)
++	pad_stack
++	subq $64, %rsp
++	addq $64, %r11
++	addq $16, %r11
++	addq %rsp, %r11
++	mov (%r11), %rax
++	mov %rax, 56(%rsp)
++	subq $8, %r11
++	mov (%r11), %rax
++	mov %rax, 48(%rsp)
++	subq $8, %r11
++	mov (%r11), %rax
++	mov %rax, 40(%rsp)
++	mov %r9, 32(%rsp)
++	mov %r8, %r9
++	mov %rcx, %r8
++	/* mov %rdx, %rdx */
++	mov %rsi, %rcx
++	call *%rdi
++	addq $64, %rsp
++	unpad_stack
++	ret
++
++ENTRY(x64_call9)
++	pad_stack
++	subq $80, %rsp
++	addq $80, %r11
++	addq $24, %r11
++	addq %rsp, %r11
++	mov (%r11), %rax
++	mov %rax, 64(%rsp)
++	subq $8, %r11
++	mov (%r11), %rax
++	mov %rax, 56(%rsp)
++	subq $8, %r11
++	mov (%r11), %rax
++	mov %rax, 48(%rsp)
++	subq $8, %r11
++	mov (%r11), %rax
++	mov %rax, 40(%rsp)
++	mov %r9, 32(%rsp)
++	mov %r8, %r9
++	mov %rcx, %r8
++	/* mov %rdx, %rdx */
++	mov %rsi, %rcx
++	call *%rdi
++	addq $80, %rsp
++	unpad_stack
++	ret
++
++ENTRY(x64_call10)
++	pad_stack
++	subq $80, %rsp
++	addq $80, %r11
++	addq $32, %r11
++	addq %rsp, %r11
++	mov (%r11), %rax
++	mov %rax, 72(%rsp)
++	subq $8, %r11
++	mov (%r11), %rax
++	mov %rax, 64(%rsp)
++	subq $8, %r11
++	mov (%r11), %rax
++	mov %rax, 56(%rsp)
++	subq $8, %r11
++	mov (%r11), %rax
++	mov %rax, 48(%rsp)
++	subq $8, %r11
++	mov (%r11), %rax
++	mov %rax, 40(%rsp)
++	mov %r9, 32(%rsp)
++	mov %r8, %r9
++	mov %rcx, %r8
++	/* mov %rdx, %rdx */
++	mov %rsi, %rcx
++	call *%rdi
++	addq $80, %rsp
++	unpad_stack
++	ret
++
++
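To make the comment block at the top of callwrap.S concrete, this is the shape
of a caller on the grub side; the real call sites are not part of this patch,
so the names below (efi_output_string, output_string_fn, con_out) are only
placeholders. The EFI function pointer goes first, every argument is widened to
unsigned long long, and the thunk shuffles them from the SysV registers into
the MS x64 registers and stack slots:

/* Hypothetical wrapper around a two-argument EFI call such as
 * EFI_SIMPLE_TEXT_OUTPUT_PROTOCOL.OutputString(This, String). */
extern unsigned long long x64_call2(void *func,
				    unsigned long long arg1,
				    unsigned long long arg2);

static unsigned long long
efi_output_string(void *output_string_fn, void *con_out, void *utf16_str)
{
	return x64_call2(output_string_fn,
			 (unsigned long long)con_out,
			 (unsigned long long)utf16_str);
}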
+diff --git a/efi/x86_64/callwrap.c b/efi/x86_64/callwrap.c
+deleted file mode 100644
+index eb8fd25..0000000
+--- a/efi/x86_64/callwrap.c
++++ /dev/null
+@@ -1,395 +0,0 @@
+-/*
+- *  Copyright (C) 2006 Giridhar Pemmasani
+- *  Copyright (C) 2007-2010 Intel Corp
+- *  	Contributed by Chandramouli Narayanan<mouli at linux.intel.com>
+- *	Adapted wrapper macros for Linux to windows calls from
+- *	NDIS wrapper project (http:/ndiswrapper.sourceforge.net)
+- *	
+- *
+- *  This program is free software; you can redistribute it and/or modify
+- *  it under the terms of the GNU General Public License as published by
+- *  the Free Software Foundation; either version 2 of the License, or
+- *  (at your option) any later version.
+- *
+- *  This program is distributed in the hope that it will be useful,
+- *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- *  GNU General Public License for more details.
+- *
+- */
+-
+-#define alloc_win_stack_frame(argc)		\
+-	"subq $" #argc "*8, %%rsp\n\t"
+-#define free_win_stack_frame(argc)		\
+-	"addq $" #argc "*8, %%rsp\n\t"
+-
+-/* m is index of Windows arg required, n is total number of args to
+- * function Windows arg 1 should be at 0(%rsp), arg 2 at 8(%rsp) and
+- * so on, after stack frame is allocated, which starts at -n*8(%rsp)
+- * when stack frame is allocated. 4 > m >= n.
+-*/
+-
+-#define lin2win_win_arg(m,n) "(" #m "-1-" #n ")*8(%%rsp)"
+-
+-/* volatile args for Windows function must be in clobber / output list */
+-extern unsigned long long x64_call0(void *func);
+-extern unsigned long long x64_call1(void *func, unsigned long long arg1);
+-extern unsigned long long x64_call2(void *func, unsigned long long arg1, unsigned long long arg2);
+-extern unsigned long long x64_call3(
+-	void *func,
+-	unsigned long long arg1,
+-	unsigned long long arg2,
+-	unsigned long long arg3);
+-extern unsigned long long x64_call4(
+-	void *func,
+-	unsigned long long arg1,
+-	unsigned long long arg2,
+-	unsigned long long arg3,
+-	unsigned long long arg4);
+-extern unsigned long long x64_call5(
+-	void *func,
+-	unsigned long long arg1,
+-	unsigned long long arg2,
+-	unsigned long long arg3,
+-	unsigned long long arg4,
+-	unsigned long long arg5);
+-extern unsigned long long x64_call6(
+-	void *func,
+-	unsigned long long arg1,
+-	unsigned long long arg2,
+-	unsigned long long arg3,
+-	unsigned long long arg4,
+-	unsigned long long arg5,
+-	unsigned long long arg6);
+-extern unsigned long long x64_call7(
+-	void *func,
+-	unsigned long long arg1,
+-	unsigned long long arg2,
+-	unsigned long long arg3,
+-	unsigned long long arg4,
+-	unsigned long long arg5,
+-	unsigned long long arg6,
+-	unsigned long long arg7);
+-extern unsigned long long x64_call8(
+-	void *func,
+-	unsigned long long arg1,
+-	unsigned long long arg2,
+-	unsigned long long arg3,
+-	unsigned long long arg4,
+-	unsigned long long arg5,
+-	unsigned long long arg6,
+-	unsigned long long arg7,
+-	unsigned long long arg8);
+-extern unsigned long long x64_call9(
+-	void *func,
+-	unsigned long long arg1,
+-	unsigned long long arg2,
+-	unsigned long long arg3,
+-	unsigned long long arg4,
+-	unsigned long long arg5,
+-	unsigned long long arg6,
+-	unsigned long long arg7,
+-	unsigned long long arg8,
+-	unsigned long long arg9);
+-extern unsigned long long x64_call10(
+-	void *func,
+-	unsigned long long arg1,
+-	unsigned long long arg2,
+-	unsigned long long arg3,
+-	unsigned long long arg4,
+-	unsigned long long arg5,
+-	unsigned long long arg6,
+-	unsigned long long arg7,
+-	unsigned long long arg8,
+-	unsigned long long arg9,
+-	unsigned long long arg10);
+-
+-
+-unsigned long long x64_call0(void *func)
+-{									
+-	unsigned long long ret, dummy;
+-	register unsigned long long r8 __asm__("r8");
+-	register unsigned long long r9 __asm__("r9");
+-	register unsigned long long r10 __asm__("r10");
+-	register unsigned long long r11 __asm__("r11");
+-	__asm__ __volatile__(					
+-		alloc_win_stack_frame(4)				
+-		"call *%[fptr]\n\t"					
+-		free_win_stack_frame(4)					
+-		: "=a" (ret), "=c" (dummy), "=d" (dummy),		
+-		  "=r" (r8), "=r" (r9), "=r" (r10), "=r" (r11)		
+-		: [fptr] "r" (func));					
+-	return ret;								
+-}
+-
+-unsigned long long x64_call1(void *func, unsigned long long arg1)
+-{
+-	unsigned long long ret, dummy;
+-	register unsigned long long r8 __asm__("r8");
+-	register unsigned long long r9 __asm__("r9");
+-	register unsigned long long r10 __asm__("r10");
+-	register unsigned long long r11 __asm__("r11");
+-	__asm__ __volatile__(
+-		alloc_win_stack_frame(4)
+-		"call *%[fptr]\n\t"
+-		free_win_stack_frame(4)	
+-		: "=a" (ret), "=c" (dummy), "=d" (dummy),
+-		  "=r" (r8), "=r" (r9), "=r" (r10), "=r" (r11)
+-		: "c" (arg1),
+-		  [fptr] "r" (func));
+-	return ret;
+-}
+-
+-unsigned long long x64_call2(void *func, unsigned long long arg1, unsigned long long arg2)
+-{
+-	unsigned long long ret, dummy;
+-	register unsigned long long r8 __asm__("r8");
+-	register unsigned long long r9 __asm__("r9");
+-	register unsigned long long r10 __asm__("r10");
+-	register unsigned long long r11 __asm__("r11");
+-	__asm__ __volatile__(
+-		alloc_win_stack_frame(4)
+-		"call *%[fptr]\n\t"
+-		free_win_stack_frame(4)
+-		: "=a" (ret), "=c" (dummy), "=d" (dummy),
+-		  "=r" (r8), "=r" (r9), "=r" (r10), "=r" (r11)
+-		: "c" (arg1), "d" (arg2),
+-		  [fptr] "r" (func));
+-	return ret;
+-}
+-
+-unsigned long long x64_call3(
+-	void *func,
+-	unsigned long long arg1,
+-	unsigned long long arg2,
+-	unsigned long long arg3)
+-{
+-	unsigned long long ret, dummy;
+-	register unsigned long long r8 __asm__("r8") = (unsigned long long)arg3;
+-	register unsigned long long r9 __asm__("r9");
+-	register unsigned long long r10 __asm__("r10");
+-	register unsigned long long r11 __asm__("r11");
+-	__asm__ __volatile__(
+-		alloc_win_stack_frame(4)
+-		"call *%[fptr]\n\t"
+-		free_win_stack_frame(4)
+-		: "=a" (ret), "=c" (dummy), "=d" (dummy),
+-		  "=r" (r8), "=r" (r9), "=r" (r10), "=r" (r11)
+-		: "c" (arg1), "d" (arg2), "r" (r8),
+-		  [fptr] "r" (func));
+-	return ret;
+-}
+-
+-unsigned long long x64_call4(
+-	void *func,
+-	unsigned long long arg1,
+-	unsigned long long arg2,
+-	unsigned long long arg3,
+-	unsigned long long arg4)
+-{
+-	unsigned long long ret, dummy;
+-	register unsigned long long r8 __asm__("r8") = (unsigned long long)arg3;
+-	register unsigned long long r9 __asm__("r9") = (unsigned long long)arg4;
+-	register unsigned long long r10 __asm__("r10");
+-	register unsigned long long r11 __asm__("r11");
+-	__asm__ __volatile__(
+-		alloc_win_stack_frame(4)
+-		"call *%[fptr]\n\t"
+-		free_win_stack_frame(4)
+-		: "=a" (ret), "=c" (dummy), "=d" (dummy),
+-		  "=r" (r8), "=r" (r9), "=r" (r10), "=r" (r11)
+-		: "c" (arg1), "d" (arg2), "r" (r8), "r" (r9),
+-		  [fptr] "r" (func));
+-	return ret;
+-}
+-
+-unsigned long long x64_call5(
+-	void *func,
+-	unsigned long long arg1,
+-	unsigned long long arg2,
+-	unsigned long long arg3,
+-	unsigned long long arg4,
+-	unsigned long long arg5)
+-{
+-	unsigned long long ret, dummy;
+-	register unsigned long long r8 __asm__("r8") = (unsigned long long)arg3;
+-	register unsigned long long r9 __asm__("r9") = (unsigned long long)arg4;
+-	register unsigned long long r10 __asm__("r10");
+-	register unsigned long long r11 __asm__("r11");
+-	__asm__ __volatile__(
+-		"mov %[rarg5], " lin2win_win_arg(5,6) "\n\t"
+-		alloc_win_stack_frame(6)
+-		"call *%[fptr]\n\t"
+-		free_win_stack_frame(6)
+-		: "=a" (ret), "=c" (dummy), "=d" (dummy),
+-		  "=r" (r8), "=r" (r9), "=r" (r10), "=r" (r11)
+-		: "c" (arg1), "d" (arg2), "r" (r8), "r" (r9),
+-		  [rarg5] "r" ((unsigned long long)arg5),
+-		  [fptr] "r" (func));
+-	return ret;
+-}
+-
+-unsigned long long x64_call6(
+-	void *func,
+-	unsigned long long arg1,
+-	unsigned long long arg2,
+-	unsigned long long arg3,
+-	unsigned long long arg4,
+-	unsigned long long arg5,
+-	unsigned long long arg6)
+-{
+-	unsigned long long ret, dummy;
+-	register unsigned long long r8 __asm__("r8") = (unsigned long long)arg3;
+-	register unsigned long long r9 __asm__("r9") = (unsigned long long)arg4;
+-	register unsigned long long r10 __asm__("r10");
+-	register unsigned long long r11 __asm__("r11");
+-	__asm__ __volatile__(
+-		"movq %[rarg5], " lin2win_win_arg(5,6) "\n\t"
+-		"movq %[rarg6], " lin2win_win_arg(6,6) "\n\t"
+-		alloc_win_stack_frame(6)
+-		"call *%[fptr]\n\t"
+-		free_win_stack_frame(6)
+-		: "=a" (ret), "=c" (dummy), "=d" (dummy),
+-		  "=r" (r8), "=r" (r9), "=r" (r10), "=r" (r11)
+-		: "c" (arg1), "d" (arg2), "r" (r8), "r" (r9),
+-		  [rarg5] "r" ((unsigned long long)arg5), [rarg6] "r" ((unsigned long long)arg6),
+-		  [fptr] "r" (func));
+-	return ret;
+-}
+-
+-unsigned long long x64_call7(
+-	void *func,
+-	unsigned long long arg1,
+-	unsigned long long arg2,
+-	unsigned long long arg3,
+-	unsigned long long arg4,
+-	unsigned long long arg5,
+-	unsigned long long arg6,
+-	unsigned long long arg7)
+-{
+-	unsigned long long ret, dummy;
+-	register unsigned long long r8 __asm__("r8") = (unsigned long long)arg3;
+-	register unsigned long long r9 __asm__("r9") = (unsigned long long)arg4;
+-	register unsigned long long r10 __asm__("r10");
+-	register unsigned long long r11 __asm__("r11");
+-	__asm__ __volatile__(
+-		"movq %[rarg5], " lin2win_win_arg(5,7) "\n\t"
+-		"movq %[rarg6], " lin2win_win_arg(6,7) "\n\t"
+-		"movq %[rarg7], " lin2win_win_arg(7,7) "\n\t"
+-		alloc_win_stack_frame(7)
+-		"call *%[fptr]\n\t"
+-		free_win_stack_frame(7)
+-		: "=a" (ret), "=c" (dummy), "=d" (dummy),
+-		  "=r" (r8), "=r" (r9), "=r" (r10), "=r" (r11)
+-		: "c" (arg1), "d" (arg2), "r" (r8), "r" (r9),
+-		  [rarg5] "r" ((unsigned long long)arg5), [rarg6] "r" ((unsigned long long)arg6),
+-		  [rarg7] "r" ((unsigned long long)arg7), [fptr] "r" (func));
+-	return ret;
+-}
+-
+-unsigned long long x64_call8(
+-	void *func,
+-	unsigned long long arg1,
+-	unsigned long long arg2,
+-	unsigned long long arg3,
+-	unsigned long long arg4,
+-	unsigned long long arg5,
+-	unsigned long long arg6,
+-	unsigned long long arg7,
+-	unsigned long long arg8)
+-{
+-	unsigned long long ret, dummy;
+-	register unsigned long long r8 __asm__("r8") = (unsigned long long)arg3;
+-	register unsigned long long r9 __asm__("r9") = (unsigned long long)arg4;
+-	register unsigned long long r10 __asm__("r10");
+-	register unsigned long long r11 __asm__("r11");
+-	__asm__ __volatile__(
+-		"movq %[rarg5], " lin2win_win_arg(5,8) "\n\t"
+-		"movq %[rarg6], " lin2win_win_arg(6,8) "\n\t"
+-		"movq %[rarg7], " lin2win_win_arg(7,8) "\n\t"
+-		"movq %[rarg8], " lin2win_win_arg(8,8) "\n\t"
+-		alloc_win_stack_frame(8)
+-		"call *%[fptr]\n\t"
+-		free_win_stack_frame(8)
+-		: "=a" (ret), "=c" (dummy), "=d" (dummy),
+-		  "=r" (r8), "=r" (r9), "=r" (r10), "=r" (r11)
+-		: "c" (arg1), "d" (arg2), "r" (r8), "r" (r9),
+-		  [rarg5] "r" ((unsigned long long)arg5), [rarg6] "r" ((unsigned long long)arg6),
+-		  [rarg7] "r" ((unsigned long long)arg7), [rarg8] "r" ((unsigned long long)arg8),
+-		  [fptr] "r" (func));
+-	return ret;
+-}
+-
+-unsigned long long x64_call9(
+-	void *func,
+-	unsigned long long arg1,
+-	unsigned long long arg2,
+-	unsigned long long arg3,
+-	unsigned long long arg4,
+-	unsigned long long arg5,
+-	unsigned long long arg6,
+-	unsigned long long arg7,
+-	unsigned long long arg8,
+-	unsigned long long arg9)
+-{
+-	unsigned long long ret, dummy;
+-	register unsigned long long r8 __asm__("r8") = (unsigned long long)arg3;
+-	register unsigned long long r9 __asm__("r9") = (unsigned long long)arg4;
+-	register unsigned long long r10 __asm__("r10");
+-	register unsigned long long r11 __asm__("r11");
+-	__asm__ __volatile__(
+-		"movq %[rarg5], " lin2win_win_arg(5,9) "\n\t"
+-		"movq %[rarg6], " lin2win_win_arg(6,9) "\n\t"
+-		"movq %[rarg7], " lin2win_win_arg(7,9) "\n\t"
+-		"movq %[rarg8], " lin2win_win_arg(8,9) "\n\t"
+-		"movq %[rarg9], " lin2win_win_arg(9,9) "\n\t"
+-		alloc_win_stack_frame(9)
+-		"call *%[fptr]\n\t"
+-		free_win_stack_frame(9)
+-		: "=a" (ret), "=c" (dummy), "=d" (dummy),
+-		  "=r" (r8), "=r" (r9), "=r" (r10), "=r" (r11)
+-		: "c" (arg1), "d" (arg2), "r" (r8), "r" (r9),
+-		  [rarg5] "r" ((unsigned long long)arg5), [rarg6] "r" ((unsigned long long)arg6),
+-		  [rarg7] "r" ((unsigned long long)arg7), [rarg8] "r" ((unsigned long long)arg8),
+-		  [rarg9] "r" ((unsigned long long)arg9), [fptr] "r" (func));
+-	return ret;
+-}
+-
+-unsigned long long x64_call10(
+-	void *func,
+-	unsigned long long arg1,
+-	unsigned long long arg2,
+-	unsigned long long arg3,
+-	unsigned long long arg4,
+-	unsigned long long arg5,
+-	unsigned long long arg6,
+-	unsigned long long arg7,
+-	unsigned long long arg8,
+-	unsigned long long arg9,
+-	unsigned long long arg10)
+-{
+-	unsigned long long ret, dummy;
+-	register unsigned long long r8 __asm__("r8") = (unsigned long long)arg3;
+-	register unsigned long long r9 __asm__("r9") = (unsigned long long)arg4;
+-	register unsigned long long r10 __asm__("r10");
+-	register unsigned long long r11 __asm__("r11");
+-	__asm__ __volatile__(
+-		"movq %[rarg5], " lin2win_win_arg(5,10) "\n\t"
+-		"movq %[rarg6], " lin2win_win_arg(6,10) "\n\t"
+-		"movq %[rarg7], " lin2win_win_arg(7,10) "\n\t"
+-		"movq %[rarg8], " lin2win_win_arg(8,10) "\n\t"
+-		"movq %[rarg9], " lin2win_win_arg(9,10) "\n\t"
+-		"movq %[rarg10], " lin2win_win_arg(10,10) "\n\t"
+-		alloc_win_stack_frame(10)
+-		"call *%[fptr]\n\t"
+-		free_win_stack_frame(10)
+-		: "=a" (ret), "=c" (dummy), "=d" (dummy),
+-		  "=r" (r8), "=r" (r9), "=r" (r10), "=r" (r11)
+-		: "c" (arg1), "d" (arg2), "r" (r8), "r" (r9),
+-		  [rarg5] "r" ((unsigned long long)arg5), [rarg6] "r" ((unsigned long long)arg6),
+-		  [rarg7] "r" ((unsigned long long)arg7), [rarg8] "r" ((unsigned long long)arg8),
+-		  [rarg9] "r" ((unsigned long long)arg9), [rarg10] "r" ((unsigned long long)arg10),
+-		  [fptr] "r" (func));
+-	return ret;
+-}
+-
+diff --git a/efi/ia32/callwrap.S b/efi/ia32/callwrap.S
+--- /dev/null
++++ b/efi/ia32/callwrap.S
+@@ -0,0 +1,1 @@
++/* This file is a stub for ia32 building */
+-- 
+1.7.6
+
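Background on why the alignment matters (context, not taken from the patch):
both the SysV and the Microsoft x86_64 calling conventions promise that the
stack pointer is 16-byte aligned at the point of a call, and compiled code may
depend on that, for example by using aligned SSE moves for 16-byte stack
objects. If grub enters firmware code with %rsp only 8-byte aligned, such an
access can fault. A minimal C sketch of the kind of object that carries this
assumption:

#include <stdint.h>
#include <string.h>

/* A 16-byte object on the stack.  An optimizing compiler may access it
 * with movaps/movdqa, which require 16-byte alignment and fault when the
 * ABI's alignment guarantee was broken by the caller. */
void make_zero_guid(uint8_t out[16])
{
	uint8_t guid[16] __attribute__((aligned(16)));

	memset(guid, 0, sizeof(guid));
	memcpy(out, guid, sizeof(guid));
}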
diff --git a/grub.spec b/grub.spec
index 4ffc78c..17548b8 100644
--- a/grub.spec
+++ b/grub.spec
@@ -1,6 +1,6 @@
 Name: grub
 Version: 0.97
-Release: 75%{?dist}
+Release: 76%{?dist}
 Epoch: 1
 Summary: Grand Unified Boot Loader.
 Group: System Environment/Base
@@ -17,6 +17,7 @@ Requires(preun): /sbin/install-info
 Requires: mktemp
 Requires: system-logos
 BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n)
+Conflicts: grub2
 
 URL: http://www.gnu.org/software/%{name}/
 Source0: ftp://alpha.gnu.org/gnu/%{name}/%{name}-%{version}.tar.gz
@@ -26,6 +27,8 @@ Source0: ftp://alpha.gnu.org/gnu/%{name}/%{name}-%{version}.tar.gz
 Patch0: grub-fedora-16.patch
 Patch1: 0001-Revert-Added-support-for-btrfs-file-system.patch
 Patch2: 0002-Revert-Make-changes-in-the-following-files-for-btrfs.patch
+Patch3: grub-0.97-128-drives-support.patch
+Patch4: grub-0.97-bz677468-properly-align-stack.patch
 
 %description
 GRUB (Grand Unified Boot Loader) is an experimental boot loader
@@ -106,6 +109,14 @@ fi
 %{_datadir}/grub
 
 %changelog
+* Wed Aug 17 2011 Peter Jones <pjones at redhat.com> - 0.97-76
+- Fix stack alignment on UEFI machines (replaces patch in -74)
+  Resolves: rhbz#677468
+- Allow booting from higher drive numbers
+  Resolves: rhbz#671355
+- Conflict with grub2
+  Resolves: rhbz#731226
+
 * Mon Jun 06 2011 Peter Jones <pjones at redhat.com> - 0.97-75
 - Update to Fedora-16 branch.
 - This has the commit from 0.97-74 reverted.

