Diffstat (limited to 'arch/x86_64/kernel')
-rw-r--r--  arch/x86_64/kernel/i8259.c      8
-rw-r--r--  arch/x86_64/kernel/pci-gart.c   4
-rw-r--r--  arch/x86_64/kernel/pci-nommu.c  2
-rw-r--r--  arch/x86_64/kernel/setup.c      2
-rw-r--r--  arch/x86_64/kernel/suspend.c   95
-rw-r--r--  arch/x86_64/kernel/time.c      32
6 files changed, 47 insertions, 96 deletions
diff --git a/arch/x86_64/kernel/i8259.c b/arch/x86_64/kernel/i8259.c
index b2a238b..c6c9791 100644
--- a/arch/x86_64/kernel/i8259.c
+++ b/arch/x86_64/kernel/i8259.c
@@ -494,7 +494,7 @@ void invalidate_interrupt7(void);
void thermal_interrupt(void);
void i8254_timer_resume(void);
-static void setup_timer(void)
+static void setup_timer_hardware(void)
{
outb_p(0x34,0x43); /* binary, mode 2, LSB/MSB, ch 0 */
udelay(10);
@@ -505,13 +505,13 @@ static void setup_timer(void)
static int timer_resume(struct sys_device *dev)
{
- setup_timer();
+ setup_timer_hardware();
return 0;
}
void i8254_timer_resume(void)
{
- setup_timer();
+ setup_timer_hardware();
}
static struct sysdev_class timer_sysclass = {
@@ -594,7 +594,7 @@ void __init init_IRQ(void)
* Set the clock to HZ Hz, we already have a valid
* vector now:
*/
- setup_timer();
+ setup_timer_hardware();
if (!acpi_ioapic)
setup_irq(2, &irq2);
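
The i8259.c hunks only rename the PIT helper from setup_timer() to setup_timer_hardware(); its body, which reprograms 8254 channel 0 as a rate generator firing at HZ, is untouched. For context, a minimal sketch of what the helper programs, assuming the conventional LATCH divisor from <linux/timex.h> (the hunk above only shows the mode byte being written):

#include <linux/delay.h>	/* udelay() */
#include <linux/timex.h>	/* LATCH = (CLOCK_TICK_RATE + HZ/2) / HZ */
#include <asm/io.h>		/* outb(), outb_p() */

static void setup_timer_hardware(void)
{
	outb_p(0x34, 0x43);		/* binary, mode 2 (rate generator), LSB/MSB, channel 0 */
	udelay(10);
	outb_p(LATCH & 0xff, 0x40);	/* low byte of the divisor */
	udelay(10);
	outb(LATCH >> 8, 0x40);		/* high byte of the divisor */
}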
diff --git a/arch/x86_64/kernel/pci-gart.c b/arch/x86_64/kernel/pci-gart.c
index cf0a031..88be97c 100644
--- a/arch/x86_64/kernel/pci-gart.c
+++ b/arch/x86_64/kernel/pci-gart.c
@@ -187,7 +187,7 @@ static void flush_gart(struct device *dev)
/* Allocate DMA memory on node near device */
noinline
-static void *dma_alloc_pages(struct device *dev, unsigned gfp, unsigned order)
+static void *dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
{
struct page *page;
int node;
@@ -204,7 +204,7 @@ static void *dma_alloc_pages(struct device *dev, unsigned gfp, unsigned order)
*/
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
- unsigned gfp)
+ gfp_t gfp)
{
void *memory;
unsigned long dma_mask = 0;
diff --git a/arch/x86_64/kernel/pci-nommu.c b/arch/x86_64/kernel/pci-nommu.c
index 67d90b8..5a981dc 100644
--- a/arch/x86_64/kernel/pci-nommu.c
+++ b/arch/x86_64/kernel/pci-nommu.c
@@ -24,7 +24,7 @@ EXPORT_SYMBOL(iommu_sac_force);
*/
void *dma_alloc_coherent(struct device *hwdev, size_t size,
- dma_addr_t *dma_handle, unsigned gfp)
+ dma_addr_t *dma_handle, gfp_t gfp)
{
void *ret;
u64 mask;
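
Both pci-gart.c and pci-nommu.c make the same change: the allocation-flags parameter of the DMA allocation paths goes from a plain unsigned to gfp_t. A standalone sketch of the idea, not the kernel's exact definitions (the annotation used for gfp_t has varied between __nocast and __bitwise):

#include <stddef.h>

#ifdef __CHECKER__			/* sparse sees a restricted type */
#define __bitwise __attribute__((bitwise))
#else					/* a normal build sees a plain unsigned int */
#define __bitwise
#endif

typedef unsigned int __bitwise gfp_t;

struct device;

/* Hypothetical prototype mirroring the signatures changed above;
 * unsigned long long stands in for dma_addr_t in this sketch. */
void *dma_alloc_coherent_sketch(struct device *dev, size_t size,
				unsigned long long *dma_handle, gfp_t gfp);

With the restricted type in place, a sparse run (make C=1) reports callers that pass a bare integer or swap the size and flags arguments, where a plain unsigned would have compiled silently.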
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index cb28df1..da0bc3e 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -1213,7 +1213,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/* Intel-defined (#2) */
- "pni", NULL, NULL, "monitor", "ds_cpl", NULL, NULL, "est",
+ "pni", NULL, NULL, "monitor", "ds_cpl", "vmx", NULL, "est",
"tm2", NULL, "cid", NULL, NULL, "cx16", "xtpr", NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
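
The setup.c hunk names bit 5 of the Intel-defined feature word (CPUID leaf 1, ECX) so that VT-capable processors show "vmx" in /proc/cpuinfo; the neighbouring entries ("pni", "monitor", "ds_cpl", "est", ...) come from the same register. A small user-space sketch that tests the same bit directly:

#include <stdio.h>
#include <cpuid.h>		/* GCC's __get_cpuid() helper */

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 1;		/* CPUID leaf 1 not available */
	/* ECX bit 5 of leaf 1 is the VMX feature bit named in the hunk above */
	printf("vmx: %s\n", (ecx & (1u << 5)) ? "yes" : "no");
	return 0;
}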
diff --git a/arch/x86_64/kernel/suspend.c b/arch/x86_64/kernel/suspend.c
index f066c6a..fd2bef7 100644
--- a/arch/x86_64/kernel/suspend.c
+++ b/arch/x86_64/kernel/suspend.c
@@ -63,13 +63,12 @@ void save_processor_state(void)
__save_processor_state(&saved_context);
}
-static void
-do_fpu_end(void)
+static void do_fpu_end(void)
{
- /* restore FPU regs if necessary */
- /* Do it out of line so that gcc does not move cr0 load to some stupid place */
- kernel_fpu_end();
- mxcsr_feature_mask_init();
+ /*
+ * Restore FPU regs if necessary
+ */
+ kernel_fpu_end();
}
void __restore_processor_state(struct saved_context *ctxt)
@@ -148,57 +147,7 @@ extern int restore_image(void);
pgd_t *temp_level4_pgt;
-static void **pages;
-
-static inline void *__add_page(void)
-{
- void **c;
-
- c = (void **)get_usable_page(GFP_ATOMIC);
- if (c) {
- *c = pages;
- pages = c;
- }
- return c;
-}
-
-static inline void *__next_page(void)
-{
- void **c;
-
- c = pages;
- if (c) {
- pages = *c;
- *c = NULL;
- }
- return c;
-}
-
-/*
- * Try to allocate as many usable pages as needed and daisy chain them.
- * If one allocation fails, free the pages allocated so far
- */
-static int alloc_usable_pages(unsigned long n)
-{
- void *p;
-
- pages = NULL;
- do
- if (!__add_page())
- break;
- while (--n);
- if (n) {
- p = __next_page();
- while (p) {
- free_page((unsigned long)p);
- p = __next_page();
- }
- return -ENOMEM;
- }
- return 0;
-}
-
-static void res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
+static int res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
{
long i, j;
@@ -212,7 +161,9 @@ static void res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long e
if (paddr >= end)
break;
- pmd = (pmd_t *)__next_page();
+ pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
+ if (!pmd)
+ return -ENOMEM;
set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
for (j = 0; j < PTRS_PER_PMD; pmd++, j++, paddr += PMD_SIZE) {
unsigned long pe;
@@ -224,13 +175,17 @@ static void res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long e
set_pmd(pmd, __pmd(pe));
}
}
+ return 0;
}
-static void set_up_temporary_mappings(void)
+static int set_up_temporary_mappings(void)
{
unsigned long start, end, next;
+ int error;
- temp_level4_pgt = (pgd_t *)__next_page();
+ temp_level4_pgt = (pgd_t *)get_safe_page(GFP_ATOMIC);
+ if (!temp_level4_pgt)
+ return -ENOMEM;
/* It is safe to reuse the original kernel mapping */
set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map),
@@ -241,29 +196,27 @@ static void set_up_temporary_mappings(void)
end = (unsigned long)pfn_to_kaddr(end_pfn);
for (; start < end; start = next) {
- pud_t *pud = (pud_t *)__next_page();
+ pud_t *pud = (pud_t *)get_safe_page(GFP_ATOMIC);
+ if (!pud)
+ return -ENOMEM;
next = start + PGDIR_SIZE;
if (next > end)
next = end;
- res_phys_pud_init(pud, __pa(start), __pa(next));
+ if ((error = res_phys_pud_init(pud, __pa(start), __pa(next))))
+ return error;
set_pgd(temp_level4_pgt + pgd_index(start),
mk_kernel_pgd(__pa(pud)));
}
+ return 0;
}
int swsusp_arch_resume(void)
{
- unsigned long n;
+ int error;
- n = ((end_pfn << PAGE_SHIFT) + PUD_SIZE - 1) >> PUD_SHIFT;
- n += (n + PTRS_PER_PUD - 1) / PTRS_PER_PUD + 1;
- pr_debug("swsusp_arch_resume(): pages needed = %lu\n", n);
- if (alloc_usable_pages(n)) {
- free_eaten_memory();
- return -ENOMEM;
- }
/* We have got enough memory and from now on we cannot recover */
- set_up_temporary_mappings();
+ if ((error = set_up_temporary_mappings()))
+ return error;
restore_image();
return 0;
}
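
The suspend.c change deletes the arch-private page pool (__add_page(), __next_page(), alloc_usable_pages()) and takes page-table pages from the swsusp core's get_safe_page(GFP_ATOMIC) instead; res_phys_pud_init() and set_up_temporary_mappings() now return int, so an allocation failure propagates as -ENOMEM out of swsusp_arch_resume() rather than the page count being estimated up front. For reference, a user-space sketch of the daisy-chain pattern the removed helpers implemented, with malloc()/free() standing in for get_usable_page()/free_page():

#include <stdlib.h>

static void **pages;			/* head of the chain */

/* Each spare page stores, in its first word, a pointer to the page chained
 * before it, so the pool needs no separate bookkeeping memory. */
static void *chain_add_page(size_t page_size)
{
	void **c = malloc(page_size);	/* stand-in for get_usable_page(GFP_ATOMIC) */

	if (c) {
		*c = pages;		/* link the old head into the new page */
		pages = c;
	}
	return c;
}

static void *chain_take_page(void)
{
	void **c = pages;

	if (c) {
		pages = *c;		/* the stored pointer becomes the new head */
		*c = NULL;
	}
	return c;
}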
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index 703acde..fdaddc4 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -42,10 +42,6 @@
#include <asm/apic.h>
#endif
-u64 jiffies_64 = INITIAL_JIFFIES;
-
-EXPORT_SYMBOL(jiffies_64);
-
#ifdef CONFIG_CPU_FREQ
static void cpufreq_delayed_get(void);
#endif
@@ -481,9 +477,9 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
static unsigned int cyc2ns_scale;
#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
-static inline void set_cyc2ns_scale(unsigned long cpu_mhz)
+static inline void set_cyc2ns_scale(unsigned long cpu_khz)
{
- cyc2ns_scale = (1000 << CYC2NS_SCALE_FACTOR)/cpu_mhz;
+ cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR)/cpu_khz;
}
static inline unsigned long long cycles_2_ns(unsigned long long cyc)
@@ -655,7 +651,7 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
vxtime.tsc_quot = (1000L << 32) / cpu_khz;
}
- set_cyc2ns_scale(cpu_khz_ref / 1000);
+ set_cyc2ns_scale(cpu_khz_ref);
return 0;
}
@@ -939,7 +935,7 @@ void __init time_init(void)
rdtscll_sync(&vxtime.last_tsc);
setup_irq(0, &irq0);
- set_cyc2ns_scale(cpu_khz / 1000);
+ set_cyc2ns_scale(cpu_khz);
#ifndef CONFIG_SMP
time_init_gtod();
@@ -1093,6 +1089,7 @@ static unsigned long PIE_freq = DEFAULT_RTC_INT_FREQ;
static unsigned long PIE_count;
static unsigned long hpet_rtc_int_freq; /* RTC interrupt frequency */
+static unsigned int hpet_t1_cmp; /* cached comparator register */
int is_hpet_enabled(void)
{
@@ -1129,10 +1126,12 @@ int hpet_rtc_timer_init(void)
cnt = hpet_readl(HPET_COUNTER);
cnt += ((hpet_tick*HZ)/hpet_rtc_int_freq);
hpet_writel(cnt, HPET_T1_CMP);
+ hpet_t1_cmp = cnt;
local_irq_restore(flags);
cfg = hpet_readl(HPET_T1_CFG);
- cfg |= HPET_TN_ENABLE | HPET_TN_SETVAL | HPET_TN_32BIT;
+ cfg &= ~HPET_TN_PERIODIC;
+ cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
hpet_writel(cfg, HPET_T1_CFG);
return 1;
@@ -1142,8 +1141,12 @@ static void hpet_rtc_timer_reinit(void)
{
unsigned int cfg, cnt;
- if (!(PIE_on | AIE_on | UIE_on))
+ if (unlikely(!(PIE_on | AIE_on | UIE_on))) {
+ cfg = hpet_readl(HPET_T1_CFG);
+ cfg &= ~HPET_TN_ENABLE;
+ hpet_writel(cfg, HPET_T1_CFG);
return;
+ }
if (PIE_on && (PIE_freq > DEFAULT_RTC_INT_FREQ))
hpet_rtc_int_freq = PIE_freq;
@@ -1151,15 +1154,10 @@ static void hpet_rtc_timer_reinit(void)
hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ;
/* It is more accurate to use the comparator value than current count.*/
- cnt = hpet_readl(HPET_T1_CMP);
+ cnt = hpet_t1_cmp;
cnt += hpet_tick*HZ/hpet_rtc_int_freq;
hpet_writel(cnt, HPET_T1_CMP);
-
- cfg = hpet_readl(HPET_T1_CFG);
- cfg |= HPET_TN_ENABLE | HPET_TN_SETVAL | HPET_TN_32BIT;
- hpet_writel(cfg, HPET_T1_CFG);
-
- return;
+ hpet_t1_cmp = cnt;
}
/*
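
Besides dropping the local jiffies_64 definition, time.c changes two things: set_cyc2ns_scale() now takes cpu_khz directly instead of a frequency already truncated to whole MHz, and the HPET RTC-emulation path caches the last value written to the T1 comparator in hpet_t1_cmp (and disables the timer when no RTC interrupt source is active) instead of reading the register back. A standalone sketch of the cyc2ns arithmetic, assuming the usual conversion ns = (cycles * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR that the scale feeds (the example frequency is made up):

#include <stdio.h>

#define CYC2NS_SCALE_FACTOR 10			/* 2^10, as in the hunk above */

int main(void)
{
	unsigned long cpu_khz = 999999;		/* made-up 999.999 MHz part */
	unsigned long old_scale = (1000UL << CYC2NS_SCALE_FACTOR) / (cpu_khz / 1000);
	unsigned long new_scale = (1000000UL << CYC2NS_SCALE_FACTOR) / cpu_khz;
	unsigned long long cyc = 1000000000ULL;	/* about one second of TSC ticks */

	printf("old scale %lu -> %llu ns, new scale %lu -> %llu ns\n",
	       old_scale, (cyc * old_scale) >> CYC2NS_SCALE_FACTOR,
	       new_scale, (cyc * new_scale) >> CYC2NS_SCALE_FACTOR);
	return 0;
}

For this frequency the MHz-truncated scale overshoots by roughly 1 ms per second of TSC time, while the kHz-based scale stays within about a microsecond.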