author	Eric W. Biederman <ebiederm@xmission.com>	2005-07-29 13:02:09 -0600
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-07-29 12:17:27 -0700
commit	36c4fd23cc06f81d68ee968c4c1bf1cebb3dcea5 (patch)
tree	53bbd9c8fef8b84e26e0ae0471eb6c912e7ba553 /arch/x86_64
parent	e7b47ccaf655cbaf336745a9b65cf7b22a536fca (diff)
[PATCH] x86_64 machine_kexec: Cleanup inline assembly.
In an unreviewed copy of code from i386 to x86_64 I wound up with inline assembly that had the wrong constraints.  Use input constraints instead of output constraints, since lidt and lgdt only read their memory operand.  So that the assembler will do the right thing, specify the size of the operand explicitly: lidtq and lgdtq instead of just lidt and lgdt.

Make load_segments use an input constraint, and delete the macro fun.  Without having to reload %cs as on i386, this code is noticeably simpler.

Signed-off-by: Eric W. Biederman <ebiederm@xmission.com>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
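To make the constraint bug concrete, here is a minimal user-space sketch (not kernel code; the struct and function names are illustrative) of why "m" is right and "=m" was wrong: an output constraint tells GCC the asm writes the operand, so the compiler is free to delete the stores that fill in the descriptor before the lidtq.

	#include <stdint.h>

	/* Illustrative layout matching what lidtq/lgdtq expect:
	 * a 16-bit limit immediately followed by a 64-bit base. */
	struct descriptor_ptr {
		uint16_t size;
		uint64_t address;
	} __attribute__((packed));

	static void set_idt_sketch(void *newidt, uint16_t limit)
	{
		struct descriptor_ptr curidt;

		curidt.size    = limit;
		curidt.address = (uint64_t)newidt;

		/* "m" (input): the asm only reads curidt, so GCC must keep
		 * the two stores above.  The old "=m" (output) claimed the
		 * asm writes curidt, which licenses GCC to drop those
		 * stores.  The q suffix pins the operand to the 64-bit
		 * form of the instruction. */
		__asm__ __volatile__ ("lidtq %0" : : "m" (curidt));
	}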
Diffstat (limited to 'arch/x86_64')
-rw-r--r--	arch/x86_64/kernel/machine_kexec.c	34
1 file changed, 16 insertions(+), 18 deletions(-)
diff --git a/arch/x86_64/kernel/machine_kexec.c b/arch/x86_64/kernel/machine_kexec.c
index 60d1eff..717f7db 100644
--- a/arch/x86_64/kernel/machine_kexec.c
+++ b/arch/x86_64/kernel/machine_kexec.c
@@ -122,45 +122,43 @@ static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
 
 static void set_idt(void *newidt, u16 limit)
 {
-	unsigned char curidt[10];
+	struct desc_ptr curidt;
 
 	/* x86-64 supports unaliged loads & stores */
-	(*(u16 *)(curidt)) = limit;
-	(*(u64 *)(curidt +2)) = (unsigned long)(newidt);
+	curidt.size    = limit;
+	curidt.address = (unsigned long)newidt;
 
 	__asm__ __volatile__ (
-		"lidt %0\n"
-		: "=m" (curidt)
+		"lidtq %0\n"
+		: : "m" (curidt)
 		);
 };
 
 static void set_gdt(void *newgdt, u16 limit)
 {
-	unsigned char curgdt[10];
+	struct desc_ptr curgdt;
 
 	/* x86-64 supports unaligned loads & stores */
-	(*(u16 *)(curgdt)) = limit;
-	(*(u64 *)(curgdt +2)) = (unsigned long)(newgdt);
+	curgdt.size    = limit;
+	curgdt.address = (unsigned long)newgdt;
 
 	__asm__ __volatile__ (
-		"lgdt %0\n"
-		: "=m" (curgdt)
+		"lgdtq %0\n"
+		: : "m" (curgdt)
		);
 };
 
 static void load_segments(void)
 {
 	__asm__ __volatile__ (
-		"\tmovl $"STR(__KERNEL_DS)",%eax\n"
-		"\tmovl %eax,%ds\n"
-		"\tmovl %eax,%es\n"
-		"\tmovl %eax,%ss\n"
-		"\tmovl %eax,%fs\n"
-		"\tmovl %eax,%gs\n"
+		"\tmovl %0,%%ds\n"
+		"\tmovl %0,%%es\n"
+		"\tmovl %0,%%ss\n"
+		"\tmovl %0,%%fs\n"
+		"\tmovl %0,%%gs\n"
+		: : "a" (__KERNEL_DS)
 		);
-#undef STR
-#undef __STR
 }
 
 typedef NORET_TYPE void (*relocate_new_kernel_t)(unsigned long indirection_page,
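Two details the new code leans on come from the surrounding kernel headers rather than this patch, so take this as a hedged note: struct desc_ptr (from asm/desc.h) packs a 16-bit size in front of a 64-bit address, exactly the memory image lidtq/lgdtq consume, and the "a" constraint in load_segments asks GCC to place __KERNEL_DS in the a-register before the asm body runs, which is what makes the STR()/__STR() stringification macros and their #undefs unnecessary. A standalone sketch of that pattern (the selector value and function name are illustrative, not from the patch):

	/* Sketch only: mirrors the patch's input-constraint pattern. */
	static void load_segments_sketch(void)
	{
		unsigned int selector = 0x10; /* hypothetical data-segment selector */

		/* "a" is an input constraint: GCC materializes 'selector'
		 * in %eax before the asm executes, so %0 expands to %eax
		 * and no hand-written "movl $...,%eax" (nor any macro
		 * stringification of the constant) is needed. */
		__asm__ __volatile__ (
			"movl %0,%%ds\n\t"
			"movl %0,%%es\n\t"
			: : "a" (selector));
	}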