summaryrefslogtreecommitdiffstats
path: root/sys/ia64
diff options
context:
space:
mode:
Diffstat (limited to 'sys/ia64')
-rw-r--r--sys/ia64/conf/GENERIC2
-rw-r--r--sys/ia64/conf/NOTES2
-rw-r--r--sys/ia64/ia64/busdma_machdep.c167
-rw-r--r--sys/ia64/ia64/clock.c193
-rw-r--r--sys/ia64/ia64/db_machdep.c12
-rw-r--r--sys/ia64/ia64/efi.c55
-rw-r--r--sys/ia64/ia64/exception.S225
-rw-r--r--sys/ia64/ia64/interrupt.c63
-rw-r--r--sys/ia64/ia64/machdep.c13
-rw-r--r--sys/ia64/ia64/mp_machdep.c34
-rw-r--r--sys/ia64/ia64/pmap.c194
-rw-r--r--sys/ia64/include/efi.h3
-rw-r--r--sys/ia64/include/ia64_cpu.h13
-rw-r--r--sys/ia64/include/pcpu.h4
-rw-r--r--sys/ia64/include/pmap.h10
-rw-r--r--sys/ia64/include/sf_buf.h16
-rw-r--r--sys/ia64/include/smp.h2
17 files changed, 591 insertions, 417 deletions
diff --git a/sys/ia64/conf/GENERIC b/sys/ia64/conf/GENERIC
index 2d8d316..eaf3ffd 100644
--- a/sys/ia64/conf/GENERIC
+++ b/sys/ia64/conf/GENERIC
@@ -48,7 +48,7 @@ options NFSLOCKD # Network Lock Manager
options NFSD # New Network Filesystem Server
options NFS_ROOT # NFS usable as root device
options P1003_1B_SEMAPHORES # POSIX-style semaphores
-options PREEMPTION # Enable kernel thread preemption
+#options PREEMPTION # Enable kernel thread preemption
options PRINTF_BUFR_SIZE=128 # Printf buffering to limit interspersion
options PROCFS # Process filesystem (/proc)
options PSEUDOFS # Pseudo-filesystem framework
diff --git a/sys/ia64/conf/NOTES b/sys/ia64/conf/NOTES
index 3f38218..0f8a4e7 100644
--- a/sys/ia64/conf/NOTES
+++ b/sys/ia64/conf/NOTES
@@ -25,7 +25,7 @@ options LOG2_ID_PAGE_SIZE=27 # 128M
# option: LOG2_PAGE_SIZE
# Specify the log2 size of the page to be used for virtual memory management.
# The page size being equal to 1<<LOG2_PAGE_SIZE.
-options LOG2_PAGE_SIZE=15 # 32K
+options LOG2_PAGE_SIZE=14 # 16K
# option: SKI
# Build support for running under the ski simulator.
diff --git a/sys/ia64/ia64/busdma_machdep.c b/sys/ia64/ia64/busdma_machdep.c
index 7554d31..d7152df 100644
--- a/sys/ia64/ia64/busdma_machdep.c
+++ b/sys/ia64/ia64/busdma_machdep.c
@@ -51,21 +51,21 @@ __FBSDID("$FreeBSD$");
#define MAX_BPAGES 1024
struct bus_dma_tag {
- bus_dma_tag_t parent;
- bus_size_t alignment;
- bus_size_t boundary;
- bus_addr_t lowaddr;
- bus_addr_t highaddr;
+ bus_dma_tag_t parent;
+ bus_size_t alignment;
+ bus_size_t boundary;
+ bus_addr_t lowaddr;
+ bus_addr_t highaddr;
bus_dma_filter_t *filter;
- void *filterarg;
- bus_size_t maxsize;
- u_int nsegments;
- bus_size_t maxsegsz;
- int flags;
- int ref_count;
- int map_count;
- bus_dma_lock_t *lockfunc;
- void *lockfuncarg;
+ void *filterarg;
+ bus_size_t maxsize;
+ u_int nsegments;
+ bus_size_t maxsegsz;
+ int flags;
+ int ref_count;
+ int map_count;
+ bus_dma_lock_t *lockfunc;
+ void *lockfuncarg;
bus_dma_segment_t *segments;
};
@@ -90,27 +90,27 @@ static int total_deferred;
SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, free_bpages, CTLFLAG_RD, &free_bpages, 0,
- "Free bounce pages");
+ "Free bounce pages");
SYSCTL_INT(_hw_busdma, OID_AUTO, reserved_bpages, CTLFLAG_RD, &reserved_bpages,
- 0, "Reserved bounce pages");
+ 0, "Reserved bounce pages");
SYSCTL_INT(_hw_busdma, OID_AUTO, active_bpages, CTLFLAG_RD, &active_bpages, 0,
- "Active bounce pages");
+ "Active bounce pages");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
- "Total bounce pages");
+ "Total bounce pages");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bounced, CTLFLAG_RD, &total_bounced, 0,
- "Total bounce requests");
-SYSCTL_INT(_hw_busdma, OID_AUTO, total_deferred, CTLFLAG_RD, &total_deferred, 0,
- "Total bounce requests that were deferred");
+ "Total bounce requests");
+SYSCTL_INT(_hw_busdma, OID_AUTO, total_deferred, CTLFLAG_RD, &total_deferred,
+ 0, "Total bounce requests that were deferred");
struct bus_dmamap {
- struct bp_list bpages;
- int pagesneeded;
- int pagesreserved;
- bus_dma_tag_t dmat;
- void *buf; /* unmapped buffer pointer */
- bus_size_t buflen; /* unmapped buffer length */
+ struct bp_list bpages;
+ int pagesneeded;
+ int pagesreserved;
+ bus_dma_tag_t dmat;
+ void *buf; /* unmapped buffer pointer */
+ bus_size_t buflen; /* unmapped buffer length */
bus_dmamap_callback_t *callback;
- void *callback_arg;
+ void *callback_arg;
STAILQ_ENTRY(bus_dmamap) links;
};
@@ -121,12 +121,12 @@ static struct bus_dmamap nobounce_dmamap;
static void init_bounce_pages(void *dummy);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
- int commit);
+ int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
- vm_offset_t vaddr, bus_size_t size);
+ vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr,
- bus_size_t len);
+ bus_size_t len);
/*
* Return true if a match is made.
@@ -144,16 +144,14 @@ run_filter(bus_dma_tag_t dmat, bus_addr_t paddr, bus_size_t len)
retval = 0;
bndy = dmat->boundary;
-
do {
- if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
- || ((paddr & (dmat->alignment - 1)) != 0)
- || ((paddr & bndy) != ((paddr + len) & bndy)))
- && (dmat->filter == NULL
- || (*dmat->filter)(dmat->filterarg, paddr) != 0))
+ if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr) ||
+ (paddr & (dmat->alignment - 1)) != 0 ||
+ (paddr & bndy) != ((paddr + len) & bndy)) &&
+ (dmat->filter == NULL ||
+ (*dmat->filter)(dmat->filterarg, paddr) != 0))
retval = 1;
-
- dmat = dmat->parent;
+ dmat = dmat->parent;
} while (retval == 0 && dmat != NULL);
return (retval);
}
@@ -195,16 +193,16 @@ dflt_lock(void *arg, bus_dma_lock_op_t op)
}
#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4
+
/*
* Allocate a device specific dma_tag.
*/
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
- bus_size_t boundary, bus_addr_t lowaddr,
- bus_addr_t highaddr, bus_dma_filter_t *filter,
- void *filterarg, bus_size_t maxsize, int nsegments,
- bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
- void *lockfuncarg, bus_dma_tag_t *dmat)
+ bus_size_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
+ bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize,
+ int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
+ void *lockfuncarg, bus_dma_tag_t *dmat)
{
bus_dma_tag_t newtag;
int error = 0;
@@ -250,7 +248,7 @@ bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
newtag->boundary = parent->boundary;
else if (parent->boundary != 0)
newtag->boundary = MIN(parent->boundary,
- newtag->boundary);
+ newtag->boundary);
if (newtag->filter == NULL) {
/*
* Short circuit looking at our parent directly
@@ -279,7 +277,7 @@ bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
/* Performed initial allocation */
newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
}
-
+
if (error != 0) {
free(newtag, M_DEVBUF);
} else {
@@ -347,7 +345,7 @@ bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
int maxpages;
*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
- M_NOWAIT | M_ZERO);
+ M_NOWAIT | M_ZERO);
if (*mapp == NULL)
return (ENOMEM);
@@ -408,7 +406,7 @@ bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
*/
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
- bus_dmamap_t *mapp)
+ bus_dmamap_t *mapp)
{
int mflags;
@@ -430,7 +428,7 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
if (flags & BUS_DMA_ZERO)
mflags |= M_ZERO;
- /*
+ /*
* XXX:
* (dmat->alignment < dmat->maxsize) is just a quick hack; the exact
* alignment guarantees of malloc need to be nailed down, and the
@@ -489,15 +487,9 @@ bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
* first indicates if this is the first invocation of this function.
*/
static int
-_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
- bus_dmamap_t map,
- void *buf, bus_size_t buflen,
- struct thread *td,
- int flags,
- bus_addr_t *lastaddrp,
- bus_dma_segment_t *segs,
- int *segp,
- int first)
+_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
+ bus_size_t buflen, struct thread *td, int flags, bus_addr_t *lastaddrp,
+ bus_dma_segment_t *segs, int *segp, int first)
{
bus_size_t sgsize;
bus_addr_t curaddr, lastaddr, baddr, bmask;
@@ -607,7 +599,7 @@ _bus_dmamap_load_buffer(bus_dma_tag_t dmat,
if (curaddr == lastaddr &&
(segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
(dmat->boundary == 0 ||
- (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
+ (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
segs[seg].ds_len += sgsize;
else {
if (++seg >= dmat->nsegments)
@@ -636,11 +628,11 @@ _bus_dmamap_load_buffer(bus_dma_tag_t dmat,
*/
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
- bus_size_t buflen, bus_dmamap_callback_t *callback,
- void *callback_arg, int flags)
+ bus_size_t buflen, bus_dmamap_callback_t *callback, void *callback_arg,
+ int flags)
{
- bus_addr_t lastaddr = 0;
- int error, nsegs = 0;
+ bus_addr_t lastaddr = 0;
+ int error, nsegs = 0;
if (map != NULL) {
flags |= BUS_DMA_WAITOK;
@@ -666,10 +658,8 @@ bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
* Like _bus_dmamap_load(), but for mbufs.
*/
int
-bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
- struct mbuf *m0,
- bus_dmamap_callback2_t *callback, void *callback_arg,
- int flags)
+bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
+ bus_dmamap_callback2_t *callback, void *callback_arg, int flags)
{
int nsegs, error;
@@ -686,9 +676,8 @@ bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
for (m = m0; m != NULL && error == 0; m = m->m_next) {
if (m->m_len > 0) {
error = _bus_dmamap_load_buffer(dmat, map,
- m->m_data, m->m_len,
- NULL, flags, &lastaddr,
- dmat->segments, &nsegs, first);
+ m->m_data, m->m_len, NULL, flags,
+ &lastaddr, dmat->segments, &nsegs, first);
first = 0;
}
}
@@ -707,9 +696,8 @@ bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
}
int
-bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
- struct mbuf *m0, bus_dma_segment_t *segs,
- int *nsegs, int flags)
+bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
+ bus_dma_segment_t *segs, int *nsegs, int flags)
{
int error;
@@ -726,9 +714,8 @@ bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
for (m = m0; m != NULL && error == 0; m = m->m_next) {
if (m->m_len > 0) {
error = _bus_dmamap_load_buffer(dmat, map,
- m->m_data, m->m_len,
- NULL, flags, &lastaddr,
- segs, nsegs, first);
+ m->m_data, m->m_len, NULL, flags,
+ &lastaddr, segs, nsegs, first);
first = 0;
}
}
@@ -744,10 +731,8 @@ bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
* Like _bus_dmamap_load(), but for uios.
*/
int
-bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
- struct uio *uio,
- bus_dmamap_callback2_t *callback, void *callback_arg,
- int flags)
+bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
+ bus_dmamap_callback2_t *callback, void *callback_arg, int flags)
{
bus_addr_t lastaddr;
int nsegs, error, first, i;
@@ -826,8 +811,7 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
if (op & BUS_DMASYNC_PREWRITE) {
while (bpage != NULL) {
bcopy((void *)bpage->datavaddr,
- (void *)bpage->vaddr,
- bpage->datacount);
+ (void *)bpage->vaddr, bpage->datacount);
bpage = STAILQ_NEXT(bpage, links);
}
total_bounced++;
@@ -836,8 +820,7 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
if (op & BUS_DMASYNC_POSTREAD) {
while (bpage != NULL) {
bcopy((void *)bpage->vaddr,
- (void *)bpage->datavaddr,
- bpage->datacount);
+ (void *)bpage->datavaddr, bpage->datacount);
bpage = STAILQ_NEXT(bpage, links);
}
total_bounced++;
@@ -870,15 +853,11 @@ alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
struct bounce_page *bpage;
bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
- M_NOWAIT | M_ZERO);
-
+ M_NOWAIT | M_ZERO);
if (bpage == NULL)
break;
bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
- M_NOWAIT, 0ul,
- dmat->lowaddr,
- PAGE_SIZE,
- dmat->boundary);
+ M_NOWAIT, 0ul, dmat->lowaddr, PAGE_SIZE, dmat->boundary);
if (bpage->vaddr == 0) {
free(bpage, M_DEVBUF);
break;
@@ -914,7 +893,7 @@ reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
- bus_size_t size)
+ bus_size_t size)
{
struct bounce_page *bpage;
@@ -974,8 +953,8 @@ free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
- STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
- map, links);
+ STAILQ_INSERT_TAIL(&bounce_map_callbacklist, map,
+ links);
busdma_swi_pending = 1;
total_deferred++;
swi_sched(vm_ih, 0);
@@ -997,7 +976,7 @@ busdma_swi(void)
dmat = map->dmat;
(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
- map->callback, map->callback_arg, /*flags*/0);
+ map->callback, map->callback_arg, /*flags*/0);
(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
mtx_lock(&bounce_lock);
}
diff --git a/sys/ia64/ia64/clock.c b/sys/ia64/ia64/clock.c
index 33dbb2e..24623c5 100644
--- a/sys/ia64/ia64/clock.c
+++ b/sys/ia64/ia64/clock.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2005 Marcel Moolenaar
+ * Copyright (c) 2005, 2009-2011 Marcel Moolenaar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -32,9 +32,11 @@ __FBSDID("$FreeBSD$");
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/priority.h>
+#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
+#include <sys/timeet.h>
#include <sys/timetc.h>
#include <sys/pcpu.h>
@@ -45,26 +47,12 @@ __FBSDID("$FreeBSD$");
#include <machine/md_var.h>
#include <machine/smp.h>
-SYSCTL_NODE(_debug, OID_AUTO, clock, CTLFLAG_RW, 0, "clock statistics");
-
-static int adjust_edges = 0;
-SYSCTL_INT(_debug_clock, OID_AUTO, adjust_edges, CTLFLAG_RD,
- &adjust_edges, 0, "Number of times ITC got more than 12.5% behind");
-
-static int adjust_excess = 0;
-SYSCTL_INT(_debug_clock, OID_AUTO, adjust_excess, CTLFLAG_RD,
- &adjust_excess, 0, "Total number of ignored ITC interrupts");
-
-static int adjust_lost = 0;
-SYSCTL_INT(_debug_clock, OID_AUTO, adjust_lost, CTLFLAG_RD,
- &adjust_lost, 0, "Total number of lost ITC interrupts");
-
-static int adjust_ticks = 0;
-SYSCTL_INT(_debug_clock, OID_AUTO, adjust_ticks, CTLFLAG_RD,
- &adjust_ticks, 0, "Total number of ITC interrupts with adjustment");
+#define CLOCK_ET_OFF 0
+#define CLOCK_ET_PERIODIC 1
+#define CLOCK_ET_ONESHOT 2
+static struct eventtimer ia64_clock_et;
static u_int ia64_clock_xiv;
-static uint64_t ia64_clock_reload;
#ifndef SMP
static timecounter_get_t ia64_get_timecount;
@@ -87,75 +75,102 @@ ia64_get_timecount(struct timecounter* tc)
static u_int
ia64_ih_clock(struct thread *td, u_int xiv, struct trapframe *tf)
{
- uint64_t adj, clk, itc;
- int64_t delta;
- int count;
+ struct eventtimer *et;
+ uint64_t itc, load;
+ uint32_t mode;
PCPU_INC(md.stats.pcs_nclks);
+ intrcnt[INTRCNT_CLOCK]++;
- if (PCPU_GET(cpuid) == 0) {
- /*
- * Clock processing on the BSP.
- */
- intrcnt[INTRCNT_CLOCK]++;
+ itc = ia64_get_itc();
+ PCPU_SET(md.clock, itc);
- itc = ia64_get_itc();
+ mode = PCPU_GET(md.clock_mode);
+ if (mode == CLOCK_ET_PERIODIC) {
+ load = PCPU_GET(md.clock_load);
+ ia64_set_itm(itc + load);
+ } else
+ ia64_set_itv((1 << 16) | xiv);
- adj = PCPU_GET(md.clockadj);
- clk = PCPU_GET(md.clock);
+ ia64_set_eoi(0);
+ ia64_srlz_d();
- delta = itc - clk;
- count = 0;
- while (delta >= ia64_clock_reload) {
-#ifdef SMP
- ipi_all_but_self(ia64_clock_xiv);
-#endif
- hardclock(TRAPF_USERMODE(tf), TRAPF_PC(tf));
- if (profprocs != 0)
- profclock(TRAPF_USERMODE(tf), TRAPF_PC(tf));
- statclock(TRAPF_USERMODE(tf));
- delta -= ia64_clock_reload;
- clk += ia64_clock_reload;
- if (adj != 0)
- adjust_ticks++;
- count++;
- }
- ia64_set_itm(ia64_get_itc() + ia64_clock_reload - adj);
- ia64_srlz_d();
- if (count > 0) {
- adjust_lost += count - 1;
- if (delta > (ia64_clock_reload >> 3)) {
- if (adj == 0)
- adjust_edges++;
- adj = ia64_clock_reload >> 4;
- } else
- adj = 0;
- } else {
- adj = 0;
- adjust_excess++;
- }
- PCPU_SET(md.clock, clk);
- PCPU_SET(md.clockadj, adj);
+ et = &ia64_clock_et;
+ if (et->et_active)
+ et->et_event_cb(et, et->et_arg);
+ return (1);
+}
+
+/*
+ * Event timer start method.
+ */
+static int
+ia64_clock_start(struct eventtimer *et, struct bintime *first,
+ struct bintime *period)
+{
+ u_long itc, load;
+ register_t is;
+
+ if (period != NULL) {
+ PCPU_SET(md.clock_mode, CLOCK_ET_PERIODIC);
+ load = (et->et_frequency * (period->frac >> 32)) >> 32;
+ if (period->sec > 0)
+ load += et->et_frequency * period->sec;
} else {
- /*
- * Clock processing on the BSP.
- */
- hardclock_cpu(TRAPF_USERMODE(tf));
- if (profprocs != 0)
- profclock(TRAPF_USERMODE(tf), TRAPF_PC(tf));
- statclock(TRAPF_USERMODE(tf));
+ PCPU_SET(md.clock_mode, CLOCK_ET_ONESHOT);
+ load = 0;
}
+ PCPU_SET(md.clock_load, load);
+
+ if (first != NULL) {
+ load = (et->et_frequency * (first->frac >> 32)) >> 32;
+ if (first->sec > 0)
+ load += et->et_frequency * first->sec;
+ }
+
+ is = intr_disable();
+ itc = ia64_get_itc();
+ ia64_set_itm(itc + load);
+ ia64_set_itv(ia64_clock_xiv);
+ ia64_srlz_d();
+ intr_restore(is);
return (0);
}
/*
- * Start the real-time and statistics clocks. We use ar.itc and cr.itm
- * to implement a 1000hz clock.
+ * Event timer stop method.
+ */
+static int
+ia64_clock_stop(struct eventtimer *et)
+{
+
+ ia64_set_itv((1 << 16) | ia64_clock_xiv);
+ ia64_srlz_d();
+ PCPU_SET(md.clock_mode, CLOCK_ET_OFF);
+ PCPU_SET(md.clock_load, 0);
+ return (0);
+}
+
+/*
+ * We call cpu_initclocks() on the APs as well. It allows us to
+ * group common initialization in the same function.
*/
void
cpu_initclocks()
{
+
+ ia64_clock_stop(NULL);
+ if (PCPU_GET(cpuid) == 0)
+ cpu_initclocks_bsp();
+ else
+ cpu_initclocks_ap();
+}
+
+static void
+clock_configure(void *dummy)
+{
+ struct eventtimer *et;
u_long itc_freq;
ia64_clock_xiv = ia64_xiv_alloc(PI_REALTIME, IA64_XIV_IPI,
@@ -165,31 +180,23 @@ cpu_initclocks()
itc_freq = (u_long)ia64_itc_freq() * 1000000ul;
- stathz = hz;
- ia64_clock_reload = (itc_freq + hz/2) / hz;
+ et = &ia64_clock_et;
+ et->et_name = "ITC";
+ et->et_flags = ET_FLAGS_PERIODIC | ET_FLAGS_ONESHOT | ET_FLAGS_PERCPU;
+ et->et_quality = 1000;
+ et->et_frequency = itc_freq;
+ et->et_min_period.sec = 0;
+ et->et_min_period.frac = (0x8000000000000000ul / (u_long)(10*hz)) << 1;
+ et->et_max_period.sec = 0xffffffff;
+ et->et_max_period.frac = ((0xfffffffeul << 32) / itc_freq) << 32;
+ et->et_start = ia64_clock_start;
+ et->et_stop = ia64_clock_stop;
+ et->et_priv = NULL;
+ et_register(et);
#ifndef SMP
ia64_timecounter.tc_frequency = itc_freq;
tc_init(&ia64_timecounter);
#endif
-
- PCPU_SET(md.clockadj, 0);
- PCPU_SET(md.clock, ia64_get_itc());
- ia64_set_itm(PCPU_GET(md.clock) + ia64_clock_reload);
- ia64_set_itv(ia64_clock_xiv);
- ia64_srlz_d();
-}
-
-void
-cpu_startprofclock(void)
-{
-
- /* nothing to do */
-}
-
-void
-cpu_stopprofclock(void)
-{
-
- /* nothing to do */
}
+SYSINIT(clkcfg, SI_SUB_CONFIGURE, SI_ORDER_SECOND, clock_configure, NULL);
diff --git a/sys/ia64/ia64/db_machdep.c b/sys/ia64/ia64/db_machdep.c
index 9d583a1..b689a3c 100644
--- a/sys/ia64/ia64/db_machdep.c
+++ b/sys/ia64/ia64/db_machdep.c
@@ -578,11 +578,13 @@ db_show_mdpcpu(struct pcpu *pc)
{
struct pcpu_md *md = &pc->pc_md;
- db_printf("MD: vhpt = %#lx\n", md->vhpt);
- db_printf("MD: lid = %#lx\n", md->lid);
- db_printf("MD: clock = %#lx/%#lx\n", md->clock, md->clockadj);
- db_printf("MD: stats = %p\n", &md->stats);
- db_printf("MD: pmap = %p\n", md->current_pmap);
+ db_printf("MD: vhpt = %#lx\n", md->vhpt);
+ db_printf("MD: lid = %#lx\n", md->lid);
+ db_printf("MD: clock = %#lx\n", md->clock);
+ db_printf("MD: clock_mode = %u\n", md->clock_mode);
+ db_printf("MD: clock_load = %#lx\n", md->clock_load);
+ db_printf("MD: stats = %p\n", &md->stats);
+ db_printf("MD: pmap = %p\n", md->current_pmap);
}
void
diff --git a/sys/ia64/ia64/efi.c b/sys/ia64/ia64/efi.c
index 5cd4518..32868a0 100644
--- a/sys/ia64/ia64/efi.c
+++ b/sys/ia64/ia64/efi.c
@@ -161,20 +161,67 @@ efi_get_time(struct efi_tm *tm)
struct efi_md *
efi_md_first(void)
{
+ struct efi_md *md;
+
+ if (bootinfo->bi_memmap == 0)
+ return (NULL);
+ md = (struct efi_md *)bootinfo->bi_memmap;
+ return (md);
+}
+
+struct efi_md *
+efi_md_last(void)
+{
+ struct efi_md *md;
if (bootinfo->bi_memmap == 0)
return (NULL);
- return ((struct efi_md *)bootinfo->bi_memmap);
+ md = (struct efi_md *)(bootinfo->bi_memmap + bootinfo->bi_memmap_size -
+ bootinfo->bi_memdesc_size);
+ return (md);
}
struct efi_md *
efi_md_next(struct efi_md *md)
{
- uint64_t plim;
+ struct efi_md *lim;
- plim = bootinfo->bi_memmap + bootinfo->bi_memmap_size;
+ lim = efi_md_last();
md = (struct efi_md *)((uintptr_t)md + bootinfo->bi_memdesc_size);
- return ((md >= (struct efi_md *)plim) ? NULL : md);
+ return ((md > lim) ? NULL : md);
+}
+
+struct efi_md *
+efi_md_prev(struct efi_md *md)
+{
+ struct efi_md *lim;
+
+ lim = efi_md_first();
+ md = (struct efi_md *)((uintptr_t)md - bootinfo->bi_memdesc_size);
+ return ((md < lim) ? NULL : md);
+}
+
+struct efi_md *
+efi_md_find(vm_paddr_t pa)
+{
+ static struct efi_md *last = NULL;
+ struct efi_md *md, *p0, *p1;
+
+ md = (last != NULL) ? last : efi_md_first();
+ p1 = p0 = NULL;
+ while (md != NULL && md != p1) {
+ if (pa >= md->md_phys &&
+ pa < md->md_phys + md->md_pages * EFI_PAGE_SIZE) {
+ last = md;
+ return (md);
+ }
+
+ p1 = p0;
+ p0 = md;
+ md = (pa < md->md_phys) ? efi_md_prev(md) : efi_md_next(md);
+ }
+
+ return (NULL);
}
void
diff --git a/sys/ia64/ia64/exception.S b/sys/ia64/ia64/exception.S
index 729a96c..a38df1a 100644
--- a/sys/ia64/ia64/exception.S
+++ b/sys/ia64/ia64/exception.S
@@ -50,9 +50,6 @@ __FBSDID("$FreeBSD$");
.section .ivt.data, "aw"
- .global pmap_ptc_g_sem
-pmap_ptc_g_sem: data8 0
-
.global ia64_kptdir
ia64_kptdir: data8 0
@@ -151,58 +148,51 @@ ENTRY_NOPROFILE(exception_save, 0)
}
{ .mmi
mov ar.rsc=0
- sub r19=r23,r30
- add r31=8,r30
- ;;
-}
-{ .mmi
mov r22=cr.iip
- nop 0
addl r29=NTLBRT_SAVE,r0 // 22-bit restart token.
;;
}
/*
- * We have a 1KB aligned trapframe, pointed to by sp. If we write
- * to the trapframe, we may trigger a data nested TLB fault. By
- * aligning the trapframe on a 1KB boundary, we guarantee that if
- * we get a data nested TLB fault, it will be on the very first
- * write. Since the data nested TLB fault does not preserve any
- * state, we have to be careful what we clobber. Consequently, we
- * have to be careful what we use here. Below a list of registers
- * that are currently alive:
+ * We have a 1KB aligned trapframe, pointed to by r30. We can't
+ * reliably write to the trapframe using virtual addressing, due
+ * to the fact that TC entries we depend on can be removed by:
+ * 1. ptc.g instructions issued by other threads/cores/CPUs, or
+ * 2. TC modifications in another thread on the same core.
+ * When our TC entry gets removed, we get nested TLB faults and
+ * since no state is saved, we can only deal with those when
+ * explicitly coded and expected.
+ * As such, we switch to physical addressing and account for the
+ * fact that the tpa instruction can cause a nested TLB fault.
+ * Since the data nested TLB fault does not preserve any state,
+ * we have to be careful what we clobber. Consequently, we have
+ * to be careful what we use here. Below a list of registers that
+ * are considered alive:
* r16,r17=arguments
* r18=pr, r19=length, r20=unat, r21=rsc, r22=iip, r23=TOS
- * r29=restart point
- * r30,r31=trapframe pointers
+ * r29=restart token
+ * r30=trapframe pointers
* p14,p15=memory stack switch
*/
-
- /* PTC.G enter non-exclusive */
- mov r24 = ar.ccv
- movl r25 = pmap_ptc_g_sem
- ;;
-.ptc_g_0:
- ld8.acq r26 = [r25]
- ;;
- tbit.nz p12, p0 = r26, 63
-(p12) br.cond.spnt.few .ptc_g_0
- ;;
- mov ar.ccv = r26
- adds r27 = 1, r26
+exception_save_restart:
+ tpa r24=r30 // Nested TLB fault possible
+ sub r19=r23,r30
+ nop 0
;;
- cmpxchg8.rel r27 = [r25], r27, ar.ccv
+
+ rsm psr.dt
+ add r29=16,r19 // Clobber restart token
+ mov r30=r24
;;
- cmp.ne p12, p0 = r26, r27
-(p12) br.cond.spnt.few .ptc_g_0
+ srlz.d
+ add r31=8,r24
;;
- mov ar.ccv = r24
-exception_save_restart:
+ // r18=pr, r19=length, r20=unat, r21=rsc, r22=iip, r23=TOS
+ // r29=delta
{ .mmi
st8 [r30]=r19,16 // length
st8 [r31]=r0,16 // flags
- add r29=16,r19 // Clobber restart token
;;
}
{ .mmi
@@ -218,6 +208,7 @@ exception_save_restart:
;;
}
// r18=pr, r19=rnat, r20=bspstore, r21=rsc, r22=iip, r23=rp
+ // r24=pfs
{ .mmi
st8 [r30]=r23,16 // rp
st8 [r31]=r18,16 // pr
@@ -275,7 +266,7 @@ exception_save_restart:
sub r18=r18,r20
;;
}
- // r19=ifs, r22=iip
+ // r18=ndirty, r19=ifs, r22=iip
{ .mmi
st8 [r31]=r18,16 // ndirty
st8 [r30]=r19,16 // cfm
@@ -431,27 +422,10 @@ exception_save_restart:
;;
}
{ .mlx
- ssm psr.ic|psr.dfh
+ ssm psr.dt|psr.ic|psr.dfh
movl gp=__gp
;;
}
-
- /* PTC.G leave non-exclusive */
- srlz.d
- movl r25 = pmap_ptc_g_sem
- ;;
-.ptc_g_1:
- ld8.acq r26 = [r25]
- ;;
- mov ar.ccv = r26
- adds r27 = -1, r26
- ;;
- cmpxchg8.rel r27 = [r25], r27, ar.ccv
- ;;
- cmp.ne p12, p0 = r26, r27
-(p12) br.cond.spnt.few .ptc_g_1
- ;;
-
{ .mib
srlz.d
nop 0
@@ -469,34 +443,52 @@ END(exception_save)
ENTRY_NOPROFILE(exception_restore, 0)
{ .mmi
rsm psr.i
- add r3=SIZEOF_TRAPFRAME-16,sp
- add r2=SIZEOF_TRAPFRAME,sp
+ add sp=16,sp
+ nop 0
;;
}
-{ .mmi
+
+ // The next instruction can fault. Let it be...
+ tpa r9=sp
+ ;;
+ rsm psr.dt|psr.ic
+ add r8=SIZEOF_SPECIAL+16,r9
+ ;;
srlz.d
- add r8=SIZEOF_SPECIAL+32,sp
- nop 0
+ add r2=SIZEOF_TRAPFRAME-16,r9
+ add r3=SIZEOF_TRAPFRAME-32,r9
;;
-}
- // The next load can trap. Let it be...
+
+{ .mmi
ldf.fill f15=[r2],-32 // f15
ldf.fill f14=[r3],-32 // f14
- add sp=16,sp
+ nop 0
;;
+}
+{ .mmi
ldf.fill f13=[r2],-32 // f13
ldf.fill f12=[r3],-32 // f12
+ nop 0
;;
+}
+{ .mmi
ldf.fill f11=[r2],-32 // f11
ldf.fill f10=[r3],-32 // f10
+ nop 0
;;
+}
+{ .mmi
ldf.fill f9=[r2],-32 // f9
ldf.fill f8=[r3],-32 // f8
+ nop 0
;;
+}
+{ .mmi
ldf.fill f7=[r2],-24 // f7
ldf.fill f6=[r3],-16 // f6
+ nop 0
;;
-
+}
{ .mmi
ld8 r8=[r8] // unat (after)
;;
@@ -553,53 +545,53 @@ ENTRY_NOPROFILE(exception_restore, 0)
bsw.0
;;
}
+{ .mii
+ ld8 r16=[r9] // tf_length
+ add r31=16,r9
+ add r30=24,r9
+}
{ .mmi
ld8.fill r15=[r3],-16 // r15
ld8.fill r14=[r2],-16 // r14
- add r31=16,sp
+ nop 0
;;
}
{ .mmi
- ld8 r16=[sp] // tf_length
ld8.fill r11=[r3],-16 // r11
- add r30=24,sp
- ;;
-}
-{ .mmi
ld8.fill r10=[r2],-16 // r10
- ld8.fill r9=[r3],-16 // r9
add r16=r16,sp // ar.k7
;;
}
{ .mmi
+ ld8.fill r9=[r3],-16 // r9
ld8.fill r8=[r2],-16 // r8
- ld8.fill r3=[r3] // r3
+ nop 0
;;
}
- // We want nested TLB faults from here on...
- rsm psr.ic|psr.i
+{ .mmi
+ ld8.fill r3=[r3] // r3
ld8.fill r2=[r2] // r2
nop 0
;;
- srlz.d
- ld8.fill sp=[r31],16 // sp
- nop 0
- ;;
+}
+ ld8.fill sp=[r31],16 // sp
ld8 r17=[r30],16 // unat
- ld8 r29=[r31],16 // rp
;;
+ ld8 r29=[r31],16 // rp
ld8 r18=[r30],16 // pr
+ ;;
ld8 r28=[r31],16 // pfs
+ ld8 r20=[r30],24 // bspstore
mov rp=r29
;;
- ld8 r20=[r30],24 // bspstore
ld8 r21=[r31],24 // rnat
mov ar.pfs=r28
;;
ld8.fill r26=[r30],16 // tp
ld8 r22=[r31],16 // rsc
;;
+
{ .mmi
ld8 r23=[r30],16 // fpsr
ld8 r24=[r31],16 // psr
@@ -636,6 +628,11 @@ ENTRY_NOPROFILE(exception_restore, 0)
addl r29=NTLBRT_RESTORE,r0 // 22-bit restart token
;;
}
+
+ ssm psr.dt
+ ;;
+ srlz.d
+
exception_restore_restart:
{ .mmi
mov r30=ar.bspstore
@@ -1015,15 +1012,33 @@ IVT_ENTRY(Data_Nested_TLB, 0x1400)
// here are direct mapped region 7 addresses, we have no problem
// constructing physical addresses.
-{ .mlx
+{ .mmi
+ mov cr.ifa=r30
+ mov r26=rr[r30]
+ extr.u r27=r30,61,3
+ ;;
+}
+{ .mii
nop 0
- movl r27=ia64_kptdir
+ dep r26=0,r26,0,2
+ cmp.eq p12,p13=7,r27
;;
}
{ .mii
- ld8 r27=[r27]
- extr.u r28=r30,3*PAGE_SHIFT-8, PAGE_SHIFT-3 // dir L0 index
- extr.u r26=r30,2*PAGE_SHIFT-5, PAGE_SHIFT-3 // dir L1 index
+ mov cr.itir=r26
+(p12) dep r28=0,r30,61,3
+(p13) extr.u r28=r30,3*PAGE_SHIFT-8, PAGE_SHIFT-3 // dir L0 index
+ ;;
+}
+{ .mlx
+(p12) add r28=PTE_PRESENT+PTE_ACCESSED+PTE_DIRTY+PTE_PL_KERN+PTE_AR_RWX+PTE_MA_WB,r28
+(p13) movl r27=ia64_kptdir
+ ;;
+}
+{ .mib
+(p13) ld8 r27=[r27]
+(p13) extr.u r26=r30,2*PAGE_SHIFT-5, PAGE_SHIFT-3 // dir L1 index
+(p12) br.cond.spnt.few 1f
;;
}
{ .mmi
@@ -1040,58 +1055,48 @@ IVT_ENTRY(Data_Nested_TLB, 0x1400)
extr.u r28=r30,PAGE_SHIFT,PAGE_SHIFT-5 // pte index
;;
}
-{ .mmi
+{ .mii
shladd r27=r26,3,r27
+ shl r28=r28,5
;;
- mov r26=rr[r30]
dep r27=0,r27,61,3
;;
}
-{ .mii
ld8 r27=[r27] // pte page
- shl r28=r28,5
- dep r26=0,r26,0,2
;;
-}
-{ .mmi
add r27=r28,r27
;;
- mov cr.ifa=r30
dep r27=0,r27,61,3
;;
-}
-{ .mmi
- ld8 r28=[r27] // pte
+ ld8 r28=[r27] // pte
;;
- mov cr.itir=r26
or r28=PTE_DIRTY+PTE_ACCESSED,r28
;;
-}
-{ .mmi
st8 [r27]=r28
;;
- addl r26=NTLBRT_SAVE,r0
- addl r27=NTLBRT_RESTORE,r0
-}
+ ssm psr.dt
+ ;;
+1:
{ .mmi
itc.d r28
;;
- ssm psr.dt
- cmp.eq p12,p0=r29,r26
+ addl r26=NTLBRT_SAVE,r0
+ addl r27=NTLBRT_RESTORE,r0
;;
}
-{ .mib
+{ .mmi
srlz.d
+ cmp.eq p12,p0=r29,r26
cmp.eq p13,p0=r29,r27
-(p12) br.cond.sptk.few exception_save_restart
;;
}
-{ .mib
- nop 0
+{ .mbb
nop 0
+(p12) br.cond.sptk.few exception_save_restart
(p13) br.cond.sptk.few exception_restore_restart
;;
}
+
{ .mlx
mov r26=ar.bsp
movl r29=kstack
diff --git a/sys/ia64/ia64/interrupt.c b/sys/ia64/ia64/interrupt.c
index adb16ec..c2372b2 100644
--- a/sys/ia64/ia64/interrupt.c
+++ b/sys/ia64/ia64/interrupt.c
@@ -1,41 +1,33 @@
-/* $FreeBSD$ */
-/* $NetBSD: interrupt.c,v 1.23 1998/02/24 07:38:01 thorpej Exp $ */
-
/*-
- * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
+ * Copyright (c) 2010-2011 Marcel Moolenaar
* All rights reserved.
*
- * Authors: Keith Bostic, Chris G. Demetriou
- *
- * Permission to use, copy, modify and distribute this software and
- * its documentation is hereby granted, provided that both the copyright
- * notice and this permission notice appear in all copies of the
- * software, derivative works or modified versions, and any portions
- * thereof, and that both notices appear in supporting documentation.
- *
- * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
- * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
- * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- *
- * Carnegie Mellon requests users of this software to return to
- *
- * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
- * School of Computer Science
- * Carnegie Mellon University
- * Pittsburgh PA 15213-3890
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
*
- * any improvements or extensions that they make and grant Carnegie the
- * rights to redistribute these changes.
- */
-/*-
- * Additional Copyright (c) 1997 by Matthew Jacob for NASA/Ames Research Center.
- * Redistribute and modify at will, leaving only this additional copyright
- * notice.
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
*/
#include "opt_ddb.h"
-#include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
@@ -309,6 +301,7 @@ void
ia64_handle_intr(struct trapframe *tf)
{
struct thread *td;
+ struct trapframe *stf;
u_int xiv;
td = curthread;
@@ -323,17 +316,21 @@ ia64_handle_intr(struct trapframe *tf)
}
critical_enter();
+ stf = td->td_intr_frame;
+ td->td_intr_frame = tf;
do {
CTR2(KTR_INTR, "INTR: ITC=%u, XIV=%u",
(u_int)tf->tf_special.ifa, xiv);
- (ia64_handler[xiv])(td, xiv, tf);
- ia64_set_eoi(0);
- ia64_srlz_d();
+ if (!(ia64_handler[xiv])(td, xiv, tf)) {
+ ia64_set_eoi(0);
+ ia64_srlz_d();
+ }
xiv = ia64_get_ivr();
ia64_srlz_d();
} while (xiv != 15);
+ td->td_intr_frame = stf;
critical_exit();
out:
diff --git a/sys/ia64/ia64/machdep.c b/sys/ia64/ia64/machdep.c
index f3105fc..2bfd62e 100644
--- a/sys/ia64/ia64/machdep.c
+++ b/sys/ia64/ia64/machdep.c
@@ -347,6 +347,11 @@ cpu_startup(void *dummy)
SYSCTL_ADD_ULONG(&pc->pc_md.sysctl_ctx,
SYSCTL_CHILDREN(pc->pc_md.sysctl_tree), OID_AUTO,
+ "nhardclocks", CTLFLAG_RD, &pcs->pcs_nhardclocks,
+ "Number of IPI_HARDCLOCK interrupts");
+
+ SYSCTL_ADD_ULONG(&pc->pc_md.sysctl_ctx,
+ SYSCTL_CHILDREN(pc->pc_md.sysctl_tree), OID_AUTO,
"nhighfps", CTLFLAG_RD, &pcs->pcs_nhighfps,
"Number of IPI_HIGH_FP interrupts");
@@ -416,12 +421,10 @@ cpu_idle(int busy)
{
register_t ie;
-#if 0
if (!busy) {
critical_enter();
cpu_idleclock();
}
-#endif
ie = intr_disable();
KASSERT(ie != 0, ("%s called with interrupts disabled\n", __func__));
@@ -436,12 +439,10 @@ cpu_idle(int busy)
ia64_enable_intr();
}
-#if 0
if (!busy) {
cpu_activeclock();
critical_exit();
}
-#endif
}
int
@@ -470,12 +471,12 @@ cpu_switch(struct thread *old, struct thread *new, struct mtx *mtx)
if (PCPU_GET(fpcurthread) == old)
old->td_frame->tf_special.psr |= IA64_PSR_DFH;
if (!savectx(oldpcb)) {
- atomic_store_rel_ptr(&old->td_lock, mtx);
-
newpcb = new->td_pcb;
oldpcb->pcb_current_pmap =
pmap_switch(newpcb->pcb_current_pmap);
+ atomic_store_rel_ptr(&old->td_lock, mtx);
+
#if defined(SCHED_ULE) && defined(SMP)
while (atomic_load_acq_ptr(&new->td_lock) == &blocked_lock)
cpu_spinwait();
diff --git a/sys/ia64/ia64/mp_machdep.c b/sys/ia64/ia64/mp_machdep.c
index 15afea0..0d8f241 100644
--- a/sys/ia64/ia64/mp_machdep.c
+++ b/sys/ia64/ia64/mp_machdep.c
@@ -77,6 +77,7 @@ void ia64_ap_startup(void);
struct ia64_ap_state ia64_ap_state;
int ia64_ipi_ast;
+int ia64_ipi_hardclock;
int ia64_ipi_highfp;
int ia64_ipi_nmi;
int ia64_ipi_preempt;
@@ -108,6 +109,16 @@ ia64_ih_ast(struct thread *td, u_int xiv, struct trapframe *tf)
}
static u_int
+ia64_ih_hardclock(struct thread *td, u_int xiv, struct trapframe *tf)
+{
+
+ PCPU_INC(md.stats.pcs_nhardclocks);
+ CTR1(KTR_SMP, "IPI_HARDCLOCK, cpuid=%d", PCPU_GET(cpuid));
+ hardclockintr();
+ return (0);
+}
+
+static u_int
ia64_ih_highfp(struct thread *td, u_int xiv, struct trapframe *tf)
{
@@ -139,18 +150,18 @@ ia64_ih_rndzvs(struct thread *td, u_int xiv, struct trapframe *tf)
static u_int
ia64_ih_stop(struct thread *td, u_int xiv, struct trapframe *tf)
{
- cpuset_t mybit;
+ u_int cpuid;
PCPU_INC(md.stats.pcs_nstops);
- mybit = PCPU_GET(cpumask);
+ cpuid = PCPU_GET(cpuid);
savectx(PCPU_PTR(md.pcb));
- CPU_OR_ATOMIC(&stopped_cpus, &mybit);
- while (!CPU_OVERLAP(&started_cpus, &mybit))
+ CPU_SET_ATOMIC(cpuid, &stopped_cpus);
+ while (!CPU_ISSET(cpuid, &started_cpus))
cpu_spinwait();
- CPU_NAND_ATOMIC(&started_cpus, &mybit);
- CPU_NAND_ATOMIC(&stopped_cpus, &mybit);
+ CPU_CLR_ATOMIC(cpuid, &started_cpus);
+ CPU_CLR_ATOMIC(cpuid, &stopped_cpus);
return (0);
}
@@ -233,10 +244,11 @@ ia64_ap_startup(void)
CTR1(KTR_SMP, "SMP: cpu%d launched", PCPU_GET(cpuid));
- /* Mask interval timer interrupts on APs. */
- ia64_set_itv(0x10000);
+ cpu_initclocks();
+
ia64_set_tpr(0);
ia64_srlz_d();
+
ia64_enable_intr();
sched_throw(NULL);
@@ -359,8 +371,6 @@ cpu_mp_start()
STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
pc->pc_md.current_pmap = kernel_pmap;
- pc->pc_other_cpus = all_cpus;
- CPU_NAND(&pc->pc_other_cpus, &pc->pc_cpumask);
/* The BSP is obviously running already. */
if (pc->pc_cpuid == 0) {
pc->pc_md.awake = 1;
@@ -413,6 +423,8 @@ cpu_mp_unleash(void *dummy)
/* Allocate XIVs for IPIs */
ia64_ipi_ast = ia64_xiv_alloc(PI_DULL, IA64_XIV_IPI, ia64_ih_ast);
+ ia64_ipi_hardclock = ia64_xiv_alloc(PI_REALTIME, IA64_XIV_IPI,
+ ia64_ih_hardclock);
ia64_ipi_highfp = ia64_xiv_alloc(PI_AV, IA64_XIV_IPI, ia64_ih_highfp);
ia64_ipi_preempt = ia64_xiv_alloc(PI_SOFT, IA64_XIV_IPI,
ia64_ih_preempt);
@@ -464,7 +476,7 @@ ipi_selected(cpuset_t cpus, int ipi)
struct pcpu *pc;
STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
- if (CPU_OVERLAP(&cpus, &pc->pc_cpumask))
+ if (CPU_ISSET(pc->pc_cpuid, &cpus))
ipi_send(pc, ipi);
}
}
diff --git a/sys/ia64/ia64/pmap.c b/sys/ia64/ia64/pmap.c
index 411d53a..0e34f36 100644
--- a/sys/ia64/ia64/pmap.c
+++ b/sys/ia64/ia64/pmap.c
@@ -66,6 +66,7 @@ __FBSDID("$FreeBSD$");
#include <vm/uma.h>
#include <machine/bootinfo.h>
+#include <machine/efi.h>
#include <machine/md_var.h>
#include <machine/pal.h>
@@ -179,7 +180,7 @@ static uint64_t pmap_ptc_e_count2 = 2;
static uint64_t pmap_ptc_e_stride1 = 0x2000;
static uint64_t pmap_ptc_e_stride2 = 0x100000000;
-extern volatile u_long pmap_ptc_g_sem;
+struct mtx pmap_ptc_mutex;
/*
* Data for the RID allocator
@@ -338,6 +339,8 @@ pmap_bootstrap()
pmap_ptc_e_stride1,
pmap_ptc_e_stride2);
+ mtx_init(&pmap_ptc_mutex, "PTC.G mutex", NULL, MTX_SPIN);
+
/*
* Setup RIDs. RIDs 0..7 are reserved for the kernel.
*
@@ -481,6 +484,18 @@ pmap_vhpt_population(SYSCTL_HANDLER_ARGS)
return (error);
}
+vm_offset_t
+pmap_page_to_va(vm_page_t m)
+{
+ vm_paddr_t pa;
+ vm_offset_t va;
+
+ pa = VM_PAGE_TO_PHYS(m);
+ va = (m->md.memattr == VM_MEMATTR_UNCACHEABLE) ? IA64_PHYS_TO_RR6(pa) :
+ IA64_PHYS_TO_RR7(pa);
+ return (va);
+}
+
/*
* Initialize a vm_page's machine-dependent fields.
*/
@@ -490,6 +505,7 @@ pmap_page_init(vm_page_t m)
TAILQ_INIT(&m->md.pv_list);
m->md.pv_list_count = 0;
+ m->md.memattr = VM_MEMATTR_DEFAULT;
}
/*
@@ -528,11 +544,11 @@ pmap_invalidate_page(vm_offset_t va)
{
struct ia64_lpte *pte;
struct pcpu *pc;
- uint64_t tag, sem;
- register_t is;
+ uint64_t tag;
u_int vhpt_ofs;
critical_enter();
+
vhpt_ofs = ia64_thash(va) - PCPU_GET(md.vhpt);
tag = ia64_ttag(va);
STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
@@ -540,30 +556,16 @@ pmap_invalidate_page(vm_offset_t va)
atomic_cmpset_64(&pte->tag, tag, 1UL << 63);
}
- /* PTC.G enter exclusive */
- is = intr_disable();
-
- /* Atomically assert writer after all writers have gone. */
- do {
- /* Wait until there's no more writer. */
- do {
- sem = atomic_load_acq_long(&pmap_ptc_g_sem);
- tag = sem | (1ul << 63);
- } while (sem == tag);
- } while (!atomic_cmpset_rel_long(&pmap_ptc_g_sem, sem, tag));
-
- /* Wait until all readers are gone. */
- tag = (1ul << 63);
- do {
- sem = atomic_load_acq_long(&pmap_ptc_g_sem);
- } while (sem != tag);
+ mtx_lock_spin(&pmap_ptc_mutex);
ia64_ptc_ga(va, PAGE_SHIFT << 2);
+ ia64_mf();
+ ia64_srlz_i();
- /* PTC.G leave exclusive */
- atomic_store_rel_long(&pmap_ptc_g_sem, 0);
+ mtx_unlock_spin(&pmap_ptc_mutex);
+
+ ia64_invala();
- intr_restore(is);
critical_exit();
}
@@ -714,8 +716,7 @@ pmap_growkernel(vm_offset_t addr)
if (!nkpg)
panic("%s: cannot add dir. page", __func__);
- dir1 = (struct ia64_lpte **)
- IA64_PHYS_TO_RR7(VM_PAGE_TO_PHYS(nkpg));
+ dir1 = (struct ia64_lpte **)pmap_page_to_va(nkpg);
bzero(dir1, PAGE_SIZE);
ia64_kptdir[KPTE_DIR0_INDEX(kernel_vm_end)] = dir1;
}
@@ -725,8 +726,7 @@ pmap_growkernel(vm_offset_t addr)
if (!nkpg)
panic("%s: cannot add PTE page", __func__);
- leaf = (struct ia64_lpte *)
- IA64_PHYS_TO_RR7(VM_PAGE_TO_PHYS(nkpg));
+ leaf = (struct ia64_lpte *)pmap_page_to_va(nkpg);
bzero(leaf, PAGE_SIZE);
dir1[KPTE_DIR1_INDEX(kernel_vm_end)] = leaf;
@@ -786,7 +786,7 @@ get_pv_entry(pmap_t locked_pmap)
vpq = &vm_page_queues[PQ_INACTIVE];
retry:
TAILQ_FOREACH(m, &vpq->pl, pageq) {
- if (m->hold_count || m->busy)
+ if ((m->flags & PG_MARKER) != 0 || m->hold_count || m->busy)
continue;
TAILQ_FOREACH_SAFE(pv, &m->md.pv_list, pv_list, next_pv) {
va = pv->pv_va;
@@ -1137,6 +1137,14 @@ pmap_pte_prot(pmap_t pm, struct ia64_lpte *pte, vm_prot_t prot)
pte->pte |= prot2ar[(prot & VM_PROT_ALL) >> 1];
}
+static PMAP_INLINE void
+pmap_pte_attr(struct ia64_lpte *pte, vm_memattr_t ma)
+{
+
+ pte->pte &= ~PTE_MA_MASK;
+ pte->pte |= (ma & PTE_MA_MASK);
+}
+
/*
* Set a pte to contain a valid mapping and enter it in the VHPT. If
* the pte was orginally valid, then its assumed to already be in the
@@ -1149,8 +1157,9 @@ pmap_set_pte(struct ia64_lpte *pte, vm_offset_t va, vm_offset_t pa,
boolean_t wired, boolean_t managed)
{
- pte->pte &= PTE_PROT_MASK | PTE_PL_MASK | PTE_AR_MASK | PTE_ED;
- pte->pte |= PTE_PRESENT | PTE_MA_WB;
+ pte->pte &= PTE_PROT_MASK | PTE_MA_MASK | PTE_PL_MASK |
+ PTE_AR_MASK | PTE_ED;
+ pte->pte |= PTE_PRESENT;
pte->pte |= (managed) ? PTE_MANAGED : (PTE_DIRTY | PTE_ACCESSED);
pte->pte |= (wired) ? PTE_WIRED : 0;
pte->pte |= pa & PTE_PPN_MASK;
@@ -1267,6 +1276,7 @@ pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
else
pmap_enter_vhpt(pte, va);
pmap_pte_prot(kernel_pmap, pte, VM_PROT_ALL);
+ pmap_pte_attr(pte, m[i]->md.memattr);
pmap_set_pte(pte, va, VM_PAGE_TO_PHYS(m[i]), FALSE, FALSE);
va += PAGE_SIZE;
}
@@ -1308,6 +1318,7 @@ pmap_kenter(vm_offset_t va, vm_offset_t pa)
else
pmap_enter_vhpt(pte, va);
pmap_pte_prot(kernel_pmap, pte, VM_PROT_ALL);
+ pmap_pte_attr(pte, VM_MEMATTR_DEFAULT);
pmap_set_pte(pte, va, pa, FALSE, FALSE);
}
@@ -1420,8 +1431,8 @@ pmap_remove_all(vm_page_t m)
pmap_t oldpmap;
pv_entry_t pv;
- KASSERT((m->flags & PG_FICTITIOUS) == 0,
- ("pmap_remove_all: page %p is fictitious", m));
+ KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+ ("pmap_remove_all: page %p is not managed", m));
vm_page_lock_queues();
while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
struct ia64_lpte *pte;
@@ -1618,6 +1629,7 @@ validate:
* adds the pte to the VHPT if necessary.
*/
pmap_pte_prot(pmap, pte, prot);
+ pmap_pte_attr(pte, m->md.memattr);
pmap_set_pte(pte, va, pa, wired, managed);
/* Invalidate the I-cache when needed. */
@@ -1723,6 +1735,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
pmap_enter_vhpt(pte, va);
pmap_pte_prot(pmap, pte,
prot & (VM_PROT_READ | VM_PROT_EXECUTE));
+ pmap_pte_attr(pte, m->md.memattr);
pmap_set_pte(pte, va, VM_PAGE_TO_PHYS(m), FALSE, managed);
if (prot & VM_PROT_EXECUTE)
@@ -1805,8 +1818,10 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
void
pmap_zero_page(vm_page_t m)
{
- vm_offset_t va = IA64_PHYS_TO_RR7(VM_PAGE_TO_PHYS(m));
- bzero((caddr_t) va, PAGE_SIZE);
+ void *p;
+
+ p = (void *)pmap_page_to_va(m);
+ bzero(p, PAGE_SIZE);
}
@@ -1821,8 +1836,10 @@ pmap_zero_page(vm_page_t m)
void
pmap_zero_page_area(vm_page_t m, int off, int size)
{
- vm_offset_t va = IA64_PHYS_TO_RR7(VM_PAGE_TO_PHYS(m));
- bzero((char *)(caddr_t)va + off, size);
+ char *p;
+
+ p = (void *)pmap_page_to_va(m);
+ bzero(p + off, size);
}
@@ -1835,8 +1852,10 @@ pmap_zero_page_area(vm_page_t m, int off, int size)
void
pmap_zero_page_idle(vm_page_t m)
{
- vm_offset_t va = IA64_PHYS_TO_RR7(VM_PAGE_TO_PHYS(m));
- bzero((caddr_t) va, PAGE_SIZE);
+ void *p;
+
+ p = (void *)pmap_page_to_va(m);
+ bzero(p, PAGE_SIZE);
}
@@ -1849,9 +1868,11 @@ pmap_zero_page_idle(vm_page_t m)
void
pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
{
- vm_offset_t src = IA64_PHYS_TO_RR7(VM_PAGE_TO_PHYS(msrc));
- vm_offset_t dst = IA64_PHYS_TO_RR7(VM_PAGE_TO_PHYS(mdst));
- bcopy((caddr_t) src, (caddr_t) dst, PAGE_SIZE);
+ void *dst, *src;
+
+ src = (void *)pmap_page_to_va(msrc);
+ dst = (void *)pmap_page_to_va(mdst);
+ bcopy(src, dst, PAGE_SIZE);
}
/*
@@ -2198,6 +2219,7 @@ pmap_remove_write(vm_page_t m)
}
prot &= ~VM_PROT_WRITE;
pmap_pte_prot(pmap, pte, prot);
+ pmap_pte_attr(pte, m->md.memattr);
pmap_invalidate_page(pv->pv_va);
}
pmap_switch(oldpmap);
@@ -2214,12 +2236,37 @@ pmap_remove_write(vm_page_t m)
* NOT real memory.
*/
void *
-pmap_mapdev(vm_paddr_t pa, vm_size_t size)
+pmap_mapdev(vm_paddr_t pa, vm_size_t sz)
{
+ static void *last_va = NULL;
+ static vm_paddr_t last_pa = 0;
+ static vm_size_t last_sz = 0;
+ struct efi_md *md;
vm_offset_t va;
- va = pa | IA64_RR_BASE(6);
- return ((void *)va);
+ if (pa == last_pa && sz == last_sz)
+ return (last_va);
+
+ md = efi_md_find(pa);
+ if (md == NULL) {
+ printf("%s: [%#lx..%#lx] not covered by memory descriptor\n",
+ __func__, pa, pa + sz - 1);
+ return (NULL);
+ }
+
+ if (md->md_type == EFI_MD_TYPE_FREE) {
+ printf("%s: [%#lx..%#lx] is in DRAM\n", __func__, pa,
+ pa + sz - 1);
+ return (NULL);
+ }
+
+ va = (md->md_attr & EFI_MD_ATTR_WB) ? IA64_PHYS_TO_RR7(pa) :
+ IA64_PHYS_TO_RR6(pa);
+
+ last_va = (void *)va;
+ last_pa = pa;
+ last_sz = sz;
+ return (last_va);
}
/*
@@ -2231,6 +2278,63 @@ pmap_unmapdev(vm_offset_t va, vm_size_t size)
}
/*
+ * Sets the memory attribute for the specified page.
+ */
+static void
+pmap_page_set_memattr_1(void *arg)
+{
+ struct ia64_pal_result res;
+ register_t is;
+ uintptr_t pp = (uintptr_t)arg;
+
+ is = intr_disable();
+ res = ia64_call_pal_static(pp, 0, 0, 0);
+ intr_restore(is);
+}
+
+void
+pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
+{
+ struct ia64_lpte *pte;
+ pmap_t oldpmap;
+ pv_entry_t pv;
+ void *va;
+
+ vm_page_lock_queues();
+ m->md.memattr = ma;
+ TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
+ PMAP_LOCK(pv->pv_pmap);
+ oldpmap = pmap_switch(pv->pv_pmap);
+ pte = pmap_find_vhpt(pv->pv_va);
+ KASSERT(pte != NULL, ("pte"));
+ pmap_pte_attr(pte, ma);
+ pmap_invalidate_page(pv->pv_va);
+ pmap_switch(oldpmap);
+ PMAP_UNLOCK(pv->pv_pmap);
+ }
+ vm_page_unlock_queues();
+
+ if (ma == VM_MEMATTR_UNCACHEABLE) {
+#ifdef SMP
+ smp_rendezvous(NULL, pmap_page_set_memattr_1, NULL,
+ (void *)PAL_PREFETCH_VISIBILITY);
+#else
+ pmap_page_set_memattr_1((void *)PAL_PREFETCH_VISIBILITY);
+#endif
+ va = (void *)pmap_page_to_va(m);
+ critical_enter();
+ cpu_flush_dcache(va, PAGE_SIZE);
+ critical_exit();
+#ifdef SMP
+ smp_rendezvous(NULL, pmap_page_set_memattr_1, NULL,
+ (void *)PAL_MC_DRAIN);
+#else
+ pmap_page_set_memattr_1((void *)PAL_MC_DRAIN);
+#endif
+ }
+}
+
+/*
* perform the pmap work for mincore
*/
int
@@ -2240,7 +2344,7 @@ pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
struct ia64_lpte *pte, tpte;
vm_paddr_t pa;
int val;
-
+
PMAP_LOCK(pmap);
retry:
oldpmap = pmap_switch(pmap);
diff --git a/sys/ia64/include/efi.h b/sys/ia64/include/efi.h
index f32f3fa..02bef10 100644
--- a/sys/ia64/include/efi.h
+++ b/sys/ia64/include/efi.h
@@ -161,8 +161,11 @@ void efi_boot_finish(void);
int efi_boot_minimal(uint64_t);
void *efi_get_table(struct uuid *);
void efi_get_time(struct efi_tm *);
+struct efi_md *efi_md_find(vm_paddr_t);
struct efi_md *efi_md_first(void);
+struct efi_md *efi_md_last(void);
struct efi_md *efi_md_next(struct efi_md *);
+struct efi_md *efi_md_prev(struct efi_md *);
void efi_reset_system(void);
int efi_set_time(struct efi_tm *);
int efi_var_get(efi_char *, struct uuid *, uint32_t *, size_t *, void *);
diff --git a/sys/ia64/include/ia64_cpu.h b/sys/ia64/include/ia64_cpu.h
index bb8284d..74e649b 100644
--- a/sys/ia64/include/ia64_cpu.h
+++ b/sys/ia64/include/ia64_cpu.h
@@ -266,7 +266,7 @@ ia64_ptc_e(uint64_t v)
static __inline void
ia64_ptc_g(uint64_t va, uint64_t log2size)
{
- __asm __volatile("ptc.g %0,%1;; srlz.i;;" :: "r"(va), "r"(log2size));
+ __asm __volatile("ptc.g %0,%1;;" :: "r"(va), "r"(log2size));
}
/*
@@ -275,7 +275,7 @@ ia64_ptc_g(uint64_t va, uint64_t log2size)
static __inline void
ia64_ptc_ga(uint64_t va, uint64_t log2size)
{
- __asm __volatile("ptc.ga %0,%1;; srlz.i;;" :: "r"(va), "r"(log2size));
+ __asm __volatile("ptc.ga %0,%1;;" :: "r"(va), "r"(log2size));
}
/*
@@ -288,6 +288,15 @@ ia64_ptc_l(uint64_t va, uint64_t log2size)
}
/*
+ * Invalidate the ALAT on the local processor.
+ */
+static __inline void
+ia64_invala(void)
+{
+ __asm __volatile("invala;;");
+}
+
+/*
* Unordered memory load.
*/
diff --git a/sys/ia64/include/pcpu.h b/sys/ia64/include/pcpu.h
index 05e2cc1..5ad61ba 100644
--- a/sys/ia64/include/pcpu.h
+++ b/sys/ia64/include/pcpu.h
@@ -37,6 +37,7 @@ struct pcpu_stats {
u_long pcs_nasts; /* IPI_AST counter. */
u_long pcs_nclks; /* Clock interrupt counter. */
u_long pcs_nextints; /* ExtINT counter. */
+ u_long pcs_nhardclocks; /* IPI_HARDCLOCK counter. */
u_long pcs_nhighfps; /* IPI_HIGH_FP counter. */
u_long pcs_nhwints; /* Hardware int. counter. */
u_long pcs_npreempts; /* IPI_PREEMPT counter. */
@@ -51,7 +52,8 @@ struct pcpu_md {
vm_offset_t vhpt; /* Address of VHPT */
uint64_t lid; /* local CPU ID */
uint64_t clock; /* Clock counter. */
- uint64_t clockadj; /* Clock adjust. */
+ uint64_t clock_load; /* Clock reload value. */
+ uint32_t clock_mode; /* Clock ET mode */
uint32_t awake:1; /* CPU is awake? */
struct pcpu_stats stats; /* Interrupt stats. */
#ifdef _KERNEL
diff --git a/sys/ia64/include/pmap.h b/sys/ia64/include/pmap.h
index 6f3b320..2b89df0 100644
--- a/sys/ia64/include/pmap.h
+++ b/sys/ia64/include/pmap.h
@@ -68,6 +68,7 @@ struct pv_entry;
struct md_page {
int pv_list_count;
TAILQ_HEAD(,pv_entry) pv_list;
+ vm_memattr_t memattr;
};
struct pmap {
@@ -115,21 +116,22 @@ extern vm_offset_t virtual_end;
extern uint64_t pmap_vhpt_base[];
extern int pmap_vhpt_log2size;
-#define pmap_page_get_memattr(m) VM_MEMATTR_DEFAULT
+#define pmap_page_get_memattr(m) ((m)->md.memattr)
#define pmap_page_is_mapped(m) (!TAILQ_EMPTY(&(m)->md.pv_list))
-#define pmap_page_set_memattr(m, ma) (void)0
#define pmap_mapbios(pa, sz) pmap_mapdev(pa, sz)
#define pmap_unmapbios(va, sz) pmap_unmapdev(va, sz)
-vm_offset_t pmap_steal_memory(vm_size_t);
vm_offset_t pmap_alloc_vhpt(void);
void pmap_bootstrap(void);
void pmap_kenter(vm_offset_t va, vm_offset_t pa);
vm_paddr_t pmap_kextract(vm_offset_t va);
void pmap_kremove(vm_offset_t);
void *pmap_mapdev(vm_paddr_t, vm_size_t);
-void pmap_unmapdev(vm_offset_t, vm_size_t);
+void pmap_page_set_memattr(vm_page_t, vm_memattr_t);
+vm_offset_t pmap_page_to_va(vm_page_t);
+vm_offset_t pmap_steal_memory(vm_size_t);
struct pmap *pmap_switch(struct pmap *pmap);
+void pmap_unmapdev(vm_offset_t, vm_size_t);
#endif /* _KERNEL */
diff --git a/sys/ia64/include/sf_buf.h b/sys/ia64/include/sf_buf.h
index 8d67542..75bcdfa 100644
--- a/sys/ia64/include/sf_buf.h
+++ b/sys/ia64/include/sf_buf.h
@@ -41,18 +41,20 @@
*/
struct sf_buf;
-static __inline vm_offset_t
-sf_buf_kva(struct sf_buf *sf)
+static __inline vm_page_t
+sf_buf_page(struct sf_buf *sf)
{
-
- return (IA64_PHYS_TO_RR7(VM_PAGE_TO_PHYS((vm_page_t)sf)));
+
+ return ((vm_page_t)sf);
}
-static __inline vm_page_t
-sf_buf_page(struct sf_buf *sf)
+static __inline vm_offset_t
+sf_buf_kva(struct sf_buf *sf)
{
+ vm_page_t m;
- return ((vm_page_t)sf);
+ m = sf_buf_page(sf);
+ return (pmap_page_to_va(m));
}
#endif /* !_MACHINE_SF_BUF_H_ */
diff --git a/sys/ia64/include/smp.h b/sys/ia64/include/smp.h
index d2aff76..b80d6a0 100644
--- a/sys/ia64/include/smp.h
+++ b/sys/ia64/include/smp.h
@@ -7,6 +7,7 @@
#ifdef _KERNEL
#define IPI_AST ia64_ipi_ast
+#define IPI_HARDCLOCK ia64_ipi_hardclock
#define IPI_PREEMPT ia64_ipi_preempt
#define IPI_RENDEZVOUS ia64_ipi_rndzvs
#define IPI_STOP ia64_ipi_stop
@@ -37,6 +38,7 @@ struct ia64_ap_state {
};
extern int ia64_ipi_ast;
+extern int ia64_ipi_hardclock;
extern int ia64_ipi_highfp;
extern int ia64_ipi_nmi;
extern int ia64_ipi_preempt;