kernel-ark/include/asm-i386/suspend.h
Rusty Russell d3561b7fa0 [PATCH] paravirt: header and stubs for paravirtualisation
Create a paravirt.h header for all the critical operations which need to be
replaced with hypervisor calls, and include that instead of defining native
operations, when CONFIG_PARAVIRT.
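Schematically, that arrangement looks like the following; the header name follows the description above, but the native body shown is only an illustrative placeholder, not the actual asm-i386 code:

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>	/* operations become indirect calls */
#else
/* native case: talk to the hardware directly (placeholder body) */
static inline void halt(void)
{
	asm volatile("hlt");
}
#endif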

This patch does the dumbest possible replacement of paravirtualized
instructions: calls through a "paravirt_ops" structure.  Currently these are
function implementations of native hardware: hypervisors will override the ops
structure with their own variants.
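Condensed, that indirection is roughly the pattern below; the structure in the patch has many more members, and the member and function names here are shortened for illustration:

void native_irq_disable(void);
void native_irq_enable(void);
void native_halt(void);

struct paravirt_ops {
	void (*irq_disable)(void);
	void (*irq_enable)(void);
	void (*halt)(void);
	/* ... many more operations ... */
};

/* default table: plain native implementations */
struct paravirt_ops paravirt_ops = {
	.irq_disable	= native_irq_disable,
	.irq_enable	= native_irq_enable,
	.halt		= native_halt,
};

/* callers go through the table, so a hypervisor can replace the entries */
#define local_irq_disable()	(paravirt_ops.irq_disable())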

All the pv-ops functions are declared "fastcall" so that a specific
register-based ABI is used, to make inlining assembler easier.
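On i386 that annotation boils down to GCC's regparm attribute, so the first arguments travel in registers rather than on the stack; roughly:

/* roughly what the i386 linkage header provides */
#define fastcall	__attribute__((regparm(3)))

/* a pv-op with a known register ABI: arguments land in %eax/%edx/%ecx,
   which is what makes replacing the call with inline assembler tractable */
fastcall void native_irq_disable(void);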

And:

From: Andy Whitcroft <apw@shadowen.org>

The paravirt ops introduce a 'weak' attribute on memory_setup().
The code ordering leads to the following warning on x86:

    arch/i386/kernel/setup.c:651: warning: weak declaration of
                `memory_setup' after first use results in unspecified behavior

Move memory_setup() to avoid this.
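A minimal stand-alone illustration of that class of warning (not the actual setup.c code): the weak attribute has to be visible before the symbol's first use, so the fix is to move the definition (or a weak prototype) above the caller.

char *memory_setup(void);

void setup_arch(void)
{
	memory_setup();			/* first use */
}

/* weak only appears here -> "weak declaration ... after first use" */
char * __attribute__((weak)) memory_setup(void)
{
	return 0;
}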

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Chris Wright <chrisw@sous-sol.org>
Signed-off-by: Andi Kleen <ak@suse.de>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Cc: Zachary Amsden <zach@vmware.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Andy Whitcroft <apw@shadowen.org>
2006-12-07 02:14:07 +01:00


/*
 * Copyright 2001-2002 Pavel Machek <pavel@suse.cz>
 * Based on code
 * Copyright 2001 Patrick Mochel <mochel@osdl.org>
 */
#include <asm/desc.h>
#include <asm/i387.h>

static inline int
arch_prepare_suspend(void)
{
	/* If you want to make non-PSE machine work, turn off paging
	   in swsusp_arch_suspend. swsusp_pg_dir should have identity mapping, so
	   it could work... */
	if (!cpu_has_pse) {
		printk(KERN_ERR "PSE is required for swsusp.\n");
		return -EPERM;
	}
	return 0;
}

/* image of the saved processor state */
struct saved_context {
	u16 es, fs, gs, ss;
	unsigned long cr0, cr2, cr3, cr4;
	struct Xgt_desc_struct gdt;
	struct Xgt_desc_struct idt;
	u16 ldt;
	u16 tss;
	unsigned long tr;
	unsigned long safety;
	unsigned long return_address;
} __attribute__((packed));

#ifdef CONFIG_ACPI_SLEEP
extern unsigned long saved_eip;
extern unsigned long saved_esp;
extern unsigned long saved_ebp;
extern unsigned long saved_ebx;
extern unsigned long saved_esi;
extern unsigned long saved_edi;

static inline void acpi_save_register_state(unsigned long return_point)
{
	saved_eip = return_point;
	asm volatile ("movl %%esp,%0" : "=m" (saved_esp));
	asm volatile ("movl %%ebp,%0" : "=m" (saved_ebp));
	asm volatile ("movl %%ebx,%0" : "=m" (saved_ebx));
	asm volatile ("movl %%edi,%0" : "=m" (saved_edi));
	asm volatile ("movl %%esi,%0" : "=m" (saved_esi));
}

#define acpi_restore_register_state() do {} while (0)

/* routines for saving/restoring kernel state */
extern int acpi_save_state_mem(void);
#endif