author     attilio <attilio@FreeBSD.org>  2013-03-17 23:39:52 +0000
committer  attilio <attilio@FreeBSD.org>  2013-03-17 23:39:52 +0000
commit     d500d6361abe652aef27a2c034835bc2a60155ad (patch)
tree       660c72c3a1b0cbf41943fa591b5ed5d1f31c50f6 /sys
parent     a69d85af8b2642ba2d10b8aa37bc58a3123ec1c6 (diff)
parent     960294c20051941b358bdc3d71e05fa682614d6f (diff)
MFC
Diffstat (limited to 'sys')
-rw-r--r--  sys/amd64/amd64/pmap.c | 24
-rw-r--r--  sys/amd64/vmm/intel/vmx.c | 7
-rw-r--r--  sys/amd64/vmm/vmm.c | 8
-rw-r--r--  sys/amd64/vmm/vmm_stat.c | 13
-rw-r--r--  sys/amd64/vmm/vmm_stat.h | 27
-rw-r--r--  sys/arm/arm/bcopy_page.S | 4
-rw-r--r--  sys/arm/arm/bcopyinout.S | 5
-rw-r--r--  sys/arm/arm/bcopyinout_xscale.S | 4
-rw-r--r--  sys/arm/arm/blockio.S | 11
-rw-r--r--  sys/arm/arm/bus_space_asm_generic.S | 23
-rw-r--r--  sys/arm/arm/copystr.S | 3
-rw-r--r--  sys/arm/arm/cpufunc_asm.S | 15
-rw-r--r--  sys/arm/arm/cpufunc_asm_arm10.S | 13
-rw-r--r--  sys/arm/arm/cpufunc_asm_arm11.S | 12
-rw-r--r--  sys/arm/arm/cpufunc_asm_arm11x6.S | 9
-rw-r--r--  sys/arm/arm/cpufunc_asm_arm7tdmi.S | 6
-rw-r--r--  sys/arm/arm/cpufunc_asm_arm8.S | 12
-rw-r--r--  sys/arm/arm/cpufunc_asm_arm9.S | 11
-rw-r--r--  sys/arm/arm/cpufunc_asm_armv4.S | 6
-rw-r--r--  sys/arm/arm/cpufunc_asm_armv5.S | 9
-rw-r--r--  sys/arm/arm/cpufunc_asm_armv5_ec.S | 9
-rw-r--r--  sys/arm/arm/cpufunc_asm_armv6.S | 10
-rw-r--r--  sys/arm/arm/cpufunc_asm_armv7.S | 16
-rw-r--r--  sys/arm/arm/cpufunc_asm_fa526.S | 15
-rw-r--r--  sys/arm/arm/cpufunc_asm_ixp12x0.S | 2
-rw-r--r--  sys/arm/arm/cpufunc_asm_pj4b.S | 15
-rw-r--r--  sys/arm/arm/cpufunc_asm_sa1.S | 21
-rw-r--r--  sys/arm/arm/cpufunc_asm_sa11x0.S | 5
-rw-r--r--  sys/arm/arm/cpufunc_asm_sheeva.S | 11
-rw-r--r--  sys/arm/arm/cpufunc_asm_xscale.S | 27
-rw-r--r--  sys/arm/arm/cpufunc_asm_xscale_c3.S | 20
-rw-r--r--  sys/arm/arm/db_trace.c | 23
-rw-r--r--  sys/arm/arm/elf_trampoline.c | 15
-rw-r--r--  sys/arm/arm/exception.S | 11
-rw-r--r--  sys/arm/arm/fiq_subr.S | 2
-rw-r--r--  sys/arm/arm/fusu.S | 14
-rw-r--r--  sys/arm/arm/in_cksum_arm.S | 6
-rw-r--r--  sys/arm/arm/irq_dispatch.S | 1
-rw-r--r--  sys/arm/arm/locore.S | 10
-rw-r--r--  sys/arm/arm/pmap-v6.c | 39
-rw-r--r--  sys/arm/arm/pmap.c | 93
-rw-r--r--  sys/arm/arm/setcpsr.S | 2
-rw-r--r--  sys/arm/arm/support.S | 8
-rw-r--r--  sys/arm/arm/swtch.S | 6
-rw-r--r--  sys/arm/broadcom/bcm2835/bcm2835_sdhci.c | 116
-rw-r--r--  sys/arm/include/asm.h | 14
-rw-r--r--  sys/arm/include/bus.h | 8
-rw-r--r--  sys/arm/include/pmap.h | 2
-rw-r--r--  sys/arm/ti/cpsw/if_cpsw.c | 2
-rw-r--r--  sys/arm/ti/ti_mmchs.c | 5
-rw-r--r--  sys/cddl/contrib/opensolaris/uts/common/fs/zfs/spa_misc.c | 2
-rw-r--r--  sys/compat/ndis/kern_ndis.c | 8
-rw-r--r--  sys/conf/Makefile.arm | 41
-rw-r--r--  sys/conf/NOTES | 2
-rw-r--r--  sys/conf/files | 4
-rw-r--r--  sys/conf/files.arm | 2
-rw-r--r--  sys/dev/acpica/acpi_powerres.c | 2
-rw-r--r--  sys/dev/ath/if_ath.c | 111
-rw-r--r--  sys/dev/ath/if_ath_sysctl.c | 6
-rw-r--r--  sys/dev/ath/if_ath_tx.c | 11
-rw-r--r--  sys/dev/ath/if_athvar.h | 10
-rw-r--r--  sys/dev/puc/pucdata.c | 20
-rw-r--r--  sys/dev/sound/pcm/sndstat.c | 8
-rw-r--r--  sys/fs/cd9660/cd9660_vnops.c | 2
-rw-r--r--  sys/fs/ext2fs/ext2_balloc.c | 2
-rw-r--r--  sys/fs/ext2fs/ext2_vnops.c | 9
-rw-r--r--  sys/fs/msdosfs/msdosfs_vnops.c | 4
-rw-r--r--  sys/fs/tmpfs/tmpfs_vnops.c | 1
-rw-r--r--  sys/fs/udf/udf_vnops.c | 5
-rw-r--r--  sys/geom/gate/g_gate.c | 2
-rw-r--r--  sys/i386/i386/pmap.c | 43
-rw-r--r--  sys/i386/xen/pmap.c | 40
-rw-r--r--  sys/ia64/ia64/pmap.c | 24
-rw-r--r--  sys/kern/capabilities.conf | 2
-rw-r--r--  sys/kern/kern_racct.c | 6
-rw-r--r--  sys/kern/subr_trap.c | 11
-rw-r--r--  sys/kern/uipc_mbuf.c | 109
-rw-r--r--  sys/kern/uipc_sockbuf.c | 4
-rw-r--r--  sys/kern/uipc_syscalls.c | 10
-rw-r--r--  sys/kern/vfs_bio.c | 45
-rw-r--r--  sys/kern/vfs_cluster.c | 26
-rw-r--r--  sys/kern/vfs_syscalls.c | 9
-rw-r--r--  sys/kern/vfs_vnops.c | 45
-rw-r--r--  sys/libkern/arm/aeabi_unwind.c | 58
-rw-r--r--  sys/libkern/arm/divsi3.S | 13
-rw-r--r--  sys/libkern/arm/ffs.S | 2
-rw-r--r--  sys/libkern/arm/ldivmod.S | 2
-rw-r--r--  sys/libkern/arm/memcpy.S | 3
-rw-r--r--  sys/libkern/arm/memset.S | 49
-rw-r--r--  sys/mips/mips/pmap.c | 45
-rw-r--r--  sys/modules/uart/Makefile | 3
-rw-r--r--  sys/net/bridgestp.c | 4
-rw-r--r--  sys/net/if_gre.c | 3
-rw-r--r--  sys/net/rtsock.c | 19
-rw-r--r--  sys/netinet/igmp.c | 4
-rw-r--r--  sys/netinet/ip_carp.c | 4
-rw-r--r--  sys/netinet/ip_input.c | 2
-rw-r--r--  sys/netinet/ip_mroute.c | 7
-rw-r--r--  sys/netinet/ip_options.c | 4
-rw-r--r--  sys/netinet/ip_output.c | 4
-rw-r--r--  sys/netinet/libalias/alias.c | 16
-rw-r--r--  sys/netinet/tcp_output.c | 22
-rw-r--r--  sys/netinet/tcp_subr.c | 2
-rw-r--r--  sys/netinet6/icmp6.c | 100
-rw-r--r--  sys/netinet6/ip6_input.c | 35
-rw-r--r--  sys/netinet6/ip6_mroute.c | 5
-rw-r--r--  sys/netinet6/ip6_output.c | 47
-rw-r--r--  sys/netinet6/mld6.c | 6
-rw-r--r--  sys/netinet6/nd6_nbr.c | 26
-rw-r--r--  sys/netipsec/key.c | 154
-rw-r--r--  sys/netpfil/pf/pf.c | 2
-rw-r--r--  sys/nfs/nfs_common.c | 4
-rw-r--r--  sys/powerpc/aim/mmu_oea.c | 27
-rw-r--r--  sys/powerpc/aim/mmu_oea64.c | 69
-rw-r--r--  sys/powerpc/booke/pmap.c | 33
-rw-r--r--  sys/powerpc/powerpc/mmu_if.m | 8
-rw-r--r--  sys/powerpc/powerpc/pmap_dispatch.c | 10
-rw-r--r--  sys/sparc64/sparc64/pmap.c | 8
-rw-r--r--  sys/sys/buf.h | 6
-rw-r--r--  sys/sys/mbuf.h | 14
-rw-r--r--  sys/sys/vnode.h | 2
-rw-r--r--  sys/ufs/ffs/ffs_balloc.c | 12
-rw-r--r--  sys/ufs/ffs/ffs_vnops.c | 6
-rw-r--r--  sys/ufs/ufs/ufs_vnops.c | 1
-rw-r--r--  sys/vm/pmap.h | 2
-rw-r--r--  sys/vm/vm_kern.c | 8
-rw-r--r--  sys/vm/vnode_pager.c | 2
-rw-r--r--  sys/xdr/xdr_mbuf.c | 7
128 files changed, 1556 insertions(+), 680 deletions(-)
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 324fc69..55d2cff 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -4235,6 +4235,30 @@ pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
pagecopy((void *)src, (void *)dst);
}
+void
+pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
+ vm_offset_t b_offset, int xfersize)
+{
+ void *a_cp, *b_cp;
+ vm_offset_t a_pg_offset, b_pg_offset;
+ int cnt;
+
+ while (xfersize > 0) {
+ a_pg_offset = a_offset & PAGE_MASK;
+ cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
+ a_cp = (char *)PHYS_TO_DMAP(ma[a_offset >> PAGE_SHIFT]->
+ phys_addr) + a_pg_offset;
+ b_pg_offset = b_offset & PAGE_MASK;
+ cnt = min(cnt, PAGE_SIZE - b_pg_offset);
+ b_cp = (char *)PHYS_TO_DMAP(mb[b_offset >> PAGE_SHIFT]->
+ phys_addr) + b_pg_offset;
+ bcopy(a_cp, b_cp, cnt);
+ a_offset += cnt;
+ b_offset += cnt;
+ xfersize -= cnt;
+ }
+}
+
/*
* Returns true if the pmap's pv is one of the first
* 16 pvs linked to from this page. This count may
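[Note: the pmap_copy_pages() added above walks two page arrays and copies xfersize bytes that may be split differently across page boundaries on each side. Every iteration clamps the chunk first to the bytes left in the current source page, then to the bytes left in the current destination page, so each bcopy() stays within a single page on both sides. A minimal userland sketch of that clamping; the 4 KB PAGE_SIZE and chunk_len() are illustrative assumptions, not part of the commit:

#include <stdio.h>

#define PAGE_SIZE 4096
#define PAGE_MASK (PAGE_SIZE - 1)

/* Mirrors the per-iteration size computation in pmap_copy_pages(). */
static int
chunk_len(unsigned int a_offset, unsigned int b_offset, int xfersize)
{
	int cnt;

	cnt = xfersize;
	if (cnt > (int)(PAGE_SIZE - (a_offset & PAGE_MASK)))
		cnt = PAGE_SIZE - (a_offset & PAGE_MASK); /* source page */
	if (cnt > (int)(PAGE_SIZE - (b_offset & PAGE_MASK)))
		cnt = PAGE_SIZE - (b_offset & PAGE_MASK); /* destination page */
	return (cnt);
}

int
main(void)
{
	/* 100 bytes into page a, 4000 bytes into page b: the destination
	 * page has only 96 bytes left, so the first chunk is 96 bytes. */
	printf("%d\n", chunk_len(100, 4000, 10000));
	return (0);
}
]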
diff --git a/sys/amd64/vmm/intel/vmx.c b/sys/amd64/vmm/intel/vmx.c
index 287ac8c..8db79ce 100644
--- a/sys/amd64/vmm/intel/vmx.c
+++ b/sys/amd64/vmm/intel/vmx.c
@@ -153,10 +153,7 @@ static int cap_unrestricted_guest;
static int cap_monitor_trap;
/* statistics */
-static VMM_STAT_DEFINE(VCPU_MIGRATIONS, "vcpu migration across host cpus");
-static VMM_STAT_DEFINE(VMEXIT_EXTINT, "vm exits due to external interrupt");
-static VMM_STAT_DEFINE(VMEXIT_HLT_IGNORED, "number of times hlt was ignored");
-static VMM_STAT_DEFINE(VMEXIT_HLT, "number of times hlt was intercepted");
+static VMM_STAT_INTEL(VMEXIT_HLT_IGNORED, "number of times hlt was ignored");
#ifdef KTR
static const char *
@@ -1216,6 +1213,8 @@ vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
qual = vmexit->u.vmx.exit_qualification;
vmexit->exitcode = VM_EXITCODE_BOGUS;
+ vmm_stat_incr(vmx->vm, vcpu, VMEXIT_COUNT, 1);
+
switch (vmexit->u.vmx.exit_reason) {
case EXIT_REASON_CR_ACCESS:
handled = vmx_emulate_cr_access(vmx, vcpu, qual);
diff --git a/sys/amd64/vmm/vmm.c b/sys/amd64/vmm/vmm.c
index 85d277e..1de4470 100644
--- a/sys/amd64/vmm/vmm.c
+++ b/sys/amd64/vmm/vmm.c
@@ -139,7 +139,7 @@ static MALLOC_DEFINE(M_VM, "vm", "vm");
CTASSERT(VMM_MSR_NUM <= 64); /* msr_mask can keep track of up to 64 msrs */
/* statistics */
-static VMM_STAT_DEFINE(VCPU_TOTAL_RUNTIME, "vcpu total runtime");
+static VMM_STAT(VCPU_TOTAL_RUNTIME, "vcpu total runtime");
static void
vcpu_cleanup(struct vcpu *vcpu)
@@ -612,7 +612,7 @@ save_guest_fpustate(struct vcpu *vcpu)
fpu_start_emulating();
}
-static VMM_STAT_DEFINE(VCPU_IDLE_TICKS, "number of ticks vcpu was idle");
+static VMM_STAT(VCPU_IDLE_TICKS, "number of ticks vcpu was idle");
int
vm_run(struct vm *vm, struct vm_run *vmrun)
@@ -717,7 +717,7 @@ vm_inject_event(struct vm *vm, int vcpuid, int type,
return (VMINJECT(vm->cookie, vcpuid, type, vector, code, code_valid));
}
-static VMM_STAT_DEFINE(VCPU_NMI_COUNT, "number of NMIs delivered to vcpu");
+static VMM_STAT(VCPU_NMI_COUNT, "number of NMIs delivered to vcpu");
int
vm_inject_nmi(struct vm *vm, int vcpuid)
@@ -937,7 +937,7 @@ vm_set_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state state)
if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
return (EINVAL);
- if (state < 0 || state >= X2APIC_STATE_LAST)
+ if (state >= X2APIC_STATE_LAST)
return (EINVAL);
vm->vcpu[vcpuid].x2apic_state = state;
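[Note: dropping the "state < 0" half of the bounds check above is the usual fix for an enum whose underlying type the compiler chooses to be unsigned: the comparison is then always false and draws a tautological-comparison warning. That motivation is inferred, not stated in the commit. A self-contained sketch with illustrative names:

#include <stdio.h>

enum demo_state { DEMO_DISABLED, DEMO_ENABLED, DEMO_STATE_LAST };

/* Only the upper bound needs testing; an unsigned enum value can
 * never be negative. */
static int
demo_validate(enum demo_state state)
{
	return (state >= DEMO_STATE_LAST ? -1 : 0);
}

int
main(void)
{
	printf("%d %d\n", demo_validate(DEMO_ENABLED),
	    demo_validate(DEMO_STATE_LAST));
	return (0);
}
]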
diff --git a/sys/amd64/vmm/vmm_stat.c b/sys/amd64/vmm/vmm_stat.c
index ae60979..ae156ee 100644
--- a/sys/amd64/vmm/vmm_stat.c
+++ b/sys/amd64/vmm/vmm_stat.c
@@ -36,6 +36,7 @@ __FBSDID("$FreeBSD$");
#include <sys/smp.h>
#include <machine/vmm.h>
+#include "vmm_util.h"
#include "vmm_stat.h"
static int vstnum;
@@ -52,6 +53,12 @@ vmm_stat_init(void *arg)
if (vst->desc == NULL)
return;
+ if (vst->scope == VMM_STAT_SCOPE_INTEL && !vmm_is_intel())
+ return;
+
+ if (vst->scope == VMM_STAT_SCOPE_AMD && !vmm_is_amd())
+ return;
+
if (vstnum >= MAX_VMM_STAT_TYPES) {
printf("Cannot accomodate vmm stat type \"%s\"!\n", vst->desc);
return;
@@ -102,3 +109,9 @@ vmm_stat_desc(int index)
else
return (NULL);
}
+
+/* global statistics */
+VMM_STAT(VCPU_MIGRATIONS, "vcpu migration across host cpus");
+VMM_STAT(VMEXIT_COUNT, "total number of vm exits");
+VMM_STAT(VMEXIT_EXTINT, "vm exits due to external interrupt");
+VMM_STAT(VMEXIT_HLT, "number of times hlt was intercepted");
diff --git a/sys/amd64/vmm/vmm_stat.h b/sys/amd64/vmm/vmm_stat.h
index 7c075a6..a1c0967 100644
--- a/sys/amd64/vmm/vmm_stat.h
+++ b/sys/amd64/vmm/vmm_stat.h
@@ -36,19 +36,36 @@ struct vm;
#define MAX_VMM_STAT_TYPES 64 /* arbitrary */
+enum vmm_stat_scope {
+ VMM_STAT_SCOPE_ANY,
+ VMM_STAT_SCOPE_INTEL, /* Intel VMX specific statistic */
+ VMM_STAT_SCOPE_AMD, /* AMD SVM specific statistic */
+};
+
struct vmm_stat_type {
- const char *desc; /* description of statistic */
int index; /* position in the stats buffer */
+ const char *desc; /* description of statistic */
+ enum vmm_stat_scope scope;
};
void vmm_stat_init(void *arg);
-#define VMM_STAT_DEFINE(type, desc) \
+#define VMM_STAT_DEFINE(type, desc, scope) \
struct vmm_stat_type type[1] = { \
- { desc, -1 } \
+ { -1, desc, scope } \
}; \
SYSINIT(type##_stat, SI_SUB_KLD, SI_ORDER_ANY, vmm_stat_init, type)
+#define VMM_STAT_DECLARE(type) \
+ extern struct vmm_stat_type type[1]
+
+#define VMM_STAT(type, desc) \
+ VMM_STAT_DEFINE(type, desc, VMM_STAT_SCOPE_ANY)
+#define VMM_STAT_INTEL(type, desc) \
+ VMM_STAT_DEFINE(type, desc, VMM_STAT_SCOPE_INTEL)
+#define VMM_STAT_AMD(type, desc) \
+ VMM_STAT_DEFINE(type, desc, VMM_STAT_SCOPE_AMD)
+
void *vmm_stat_alloc(void);
void vmm_stat_free(void *vp);
@@ -68,4 +85,8 @@ vmm_stat_incr(struct vm *vm, int vcpu, struct vmm_stat_type *vst, uint64_t x)
#endif
}
+VMM_STAT_DECLARE(VCPU_MIGRATIONS);
+VMM_STAT_DECLARE(VMEXIT_COUNT);
+VMM_STAT_DECLARE(VMEXIT_EXTINT);
+VMM_STAT_DECLARE(VMEXIT_HLT);
#endif
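[Note: taken together, the reworked header separates definition from declaration: VMM_STAT(), VMM_STAT_INTEL() and VMM_STAT_AMD() define a counter and register it via SYSINIT, while VMM_STAT_DECLARE() lets other files reference a counter defined elsewhere. A sketch of a consumer built on those macros; VMEXIT_SPURIOUS and record_spurious_exit() are hypothetical, not part of the commit:

#include "vmm_stat.h"

/* Hypothetical Intel-only counter: vmm_stat_init() will skip it on an
 * AMD host because of the VMM_STAT_SCOPE_INTEL tag. */
static VMM_STAT_INTEL(VMEXIT_SPURIOUS, "spurious vm exits");

static void
record_spurious_exit(struct vm *vm, int vcpu)
{
	/* Bump this vcpu's copy of the counter by one. */
	vmm_stat_incr(vm, vcpu, VMEXIT_SPURIOUS, 1);
}
]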
diff --git a/sys/arm/arm/bcopy_page.S b/sys/arm/arm/bcopy_page.S
index 27921d4..92e38cc 100644
--- a/sys/arm/arm/bcopy_page.S
+++ b/sys/arm/arm/bcopy_page.S
@@ -117,6 +117,7 @@ ENTRY(bcopy_page)
bne 1b
RESTORE_REGS /* ...and return. */
+END(bcopy_page)
/*
* bzero_page(dest)
@@ -178,6 +179,7 @@ ENTRY(bzero_page)
bne 1b
ldmfd sp!, {r4-r8, pc}
+END(bzero_page)
#else /* _ARM_ARCH_5E */
@@ -246,6 +248,7 @@ ENTRY(bcopy_page)
bgt 1b
ldmfd sp!, {r4, r5}
RET
+END(bcopy_page)
/*
* armv5e version of bzero_page
@@ -273,4 +276,5 @@ ENTRY(bzero_page)
subs r1, r1, #128
bne 1b
RET
+END(bzero_page)
#endif /* _ARM_ARCH_5E */
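[Note: the END() markers added throughout these ARM assembly files (the macro itself lands in sys/arm/include/asm.h, per the diffstat) close each ENTRY() so the assembler records a symbol size, which the EABI unwinding support brought in by this merge relies on. A sketch of the assumed macro shape; the exact in-tree definitions may differ:

/* Assumed shape of the ENTRY()/END() pair from <machine/asm.h>. */
#define _C_LABEL(x)	x
#define ENTRY(sym)						\
	.text; .align 2;					\
	.globl _C_LABEL(sym);					\
	.type _C_LABEL(sym), %function;				\
	_C_LABEL(sym):
#define END(sym)						\
	.size _C_LABEL(sym), . - _C_LABEL(sym)
]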
diff --git a/sys/arm/arm/bcopyinout.S b/sys/arm/arm/bcopyinout.S
index 992d0d7..68fdf20 100644
--- a/sys/arm/arm/bcopyinout.S
+++ b/sys/arm/arm/bcopyinout.S
@@ -312,6 +312,7 @@ ENTRY(copyin)
RESTORE_REGS
RET
+END(copyin)
/*
* r0 = kernel space address
@@ -538,6 +539,7 @@ ENTRY(copyout)
RESTORE_REGS
RET
+END(copyout)
#endif
/*
@@ -564,6 +566,7 @@ ENTRY(badaddr_read_1)
mov r0, #0 /* No fault */
1: str ip, [r2, #PCB_ONFAULT]
RET
+END(badaddr_read_1)
/*
* int badaddr_read_2(const uint16_t *src, uint16_t *dest)
@@ -589,6 +592,7 @@ ENTRY(badaddr_read_2)
mov r0, #0 /* No fault */
1: str ip, [r2, #PCB_ONFAULT]
RET
+END(badaddr_read_2)
/*
* int badaddr_read_4(const uint32_t *src, uint32_t *dest)
@@ -614,4 +618,5 @@ ENTRY(badaddr_read_4)
mov r0, #0 /* No fault */
1: str ip, [r2, #PCB_ONFAULT]
RET
+END(badaddr_read_4)
diff --git a/sys/arm/arm/bcopyinout_xscale.S b/sys/arm/arm/bcopyinout_xscale.S
index a2853cc..2cb98d9 100644
--- a/sys/arm/arm/bcopyinout_xscale.S
+++ b/sys/arm/arm/bcopyinout_xscale.S
@@ -492,7 +492,7 @@ ENTRY(copyin)
ldrbt ip, [r0]
strb ip, [r1]
RET
-
+END(copyin)
/*
* r0 = kernel space address
@@ -935,3 +935,5 @@ ENTRY(copyout)
ldrb ip, [r0]
strbt ip, [r1]
RET
+END(copyout)
+
diff --git a/sys/arm/arm/blockio.S b/sys/arm/arm/blockio.S
index 7e750b4..d121f2c 100644
--- a/sys/arm/arm/blockio.S
+++ b/sys/arm/arm/blockio.S
@@ -101,6 +101,7 @@ ENTRY(read_multi_1)
ldrgtb r3, [r0]
strgtb r3, [r1], #1
ldmdb fp, {fp, sp, pc}
+END(read_multi_1)
/*
* Write bytes to an I/O address from a block of memory
@@ -152,6 +153,7 @@ ENTRY(write_multi_1)
ldrgtb r3, [r1], #1
strgtb r3, [r0]
ldmdb fp, {fp, sp, pc}
+END(write_multi_1)
/*
* Reads short ints (16 bits) from an I/O address into a block of memory
@@ -199,7 +201,7 @@ ENTRY(insw)
bgt .Lfastinswloop
RET
-
+END(insw)
/*
* Writes short ints (16 bits) from a block of memory to an I/O address
@@ -260,6 +262,7 @@ ENTRY(outsw)
bgt .Lfastoutswloop
RET
+END(outsw)
/*
* reads short ints (16 bits) from an I/O address into a block of memory
@@ -318,7 +321,7 @@ ENTRY(insw16)
bgt .Linsw16loop
ldmfd sp!, {r4,r5,pc} /* Restore regs and go home */
-
+END(insw16)
/*
* Writes short ints (16 bits) from a block of memory to an I/O address
@@ -385,6 +388,7 @@ ENTRY(outsw16)
bgt .Loutsw16loop
ldmfd sp!, {r4,r5,pc} /* and go home */
+END(outsw16)
/*
* reads short ints (16 bits) from an I/O address into a block of memory
@@ -481,6 +485,7 @@ ENTRY(inswm8)
.Linswm8_l1:
ldmfd sp!, {r4-r9,pc} /* And go home */
+END(inswm8)
/*
* write short ints (16 bits) to an I/O address from a block of memory
@@ -585,3 +590,5 @@ ENTRY(outswm8)
.Loutswm8_l1:
ldmfd sp!, {r4-r8,pc} /* And go home */
+END(outswm8)
+
diff --git a/sys/arm/arm/bus_space_asm_generic.S b/sys/arm/arm/bus_space_asm_generic.S
index 2492474..4aa7197 100644
--- a/sys/arm/arm/bus_space_asm_generic.S
+++ b/sys/arm/arm/bus_space_asm_generic.S
@@ -50,14 +50,17 @@ __FBSDID("$FreeBSD$");
ENTRY(generic_bs_r_1)
ldrb r0, [r1, r2]
RET
+END(generic_bs_r_1)
ENTRY(generic_armv4_bs_r_2)
ldrh r0, [r1, r2]
RET
+END(generic_armv4_bs_r_2)
ENTRY(generic_bs_r_4)
ldr r0, [r1, r2]
RET
+END(generic_bs_r_4)
/*
* write single
@@ -66,14 +69,17 @@ ENTRY(generic_bs_r_4)
ENTRY(generic_bs_w_1)
strb r3, [r1, r2]
RET
+END(generic_bs_w_1)
ENTRY(generic_armv4_bs_w_2)
strh r3, [r1, r2]
RET
+END(generic_armv4_bs_w_2)
ENTRY(generic_bs_w_4)
str r3, [r1, r2]
RET
+END(generic_bs_w_4)
/*
* read multiple
@@ -92,6 +98,7 @@ ENTRY(generic_bs_rm_1)
bne 1b
RET
+END(generic_bs_rm_1)
ENTRY(generic_armv4_bs_rm_2)
add r0, r1, r2
@@ -106,6 +113,7 @@ ENTRY(generic_armv4_bs_rm_2)
bne 1b
RET
+END(generic_armv4_bs_rm_2)
ENTRY(generic_bs_rm_4)
add r0, r1, r2
@@ -120,6 +128,7 @@ ENTRY(generic_bs_rm_4)
bne 1b
RET
+END(generic_bs_rm_4)
/*
* write multiple
@@ -138,6 +147,7 @@ ENTRY(generic_bs_wm_1)
bne 1b
RET
+END(generic_bs_wm_1)
ENTRY(generic_armv4_bs_wm_2)
add r0, r1, r2
@@ -152,6 +162,7 @@ ENTRY(generic_armv4_bs_wm_2)
bne 1b
RET
+END(generic_armv4_bs_wm_2)
ENTRY(generic_bs_wm_4)
add r0, r1, r2
@@ -166,6 +177,7 @@ ENTRY(generic_bs_wm_4)
bne 1b
RET
+END(generic_bs_wm_4)
/*
* read region
@@ -184,6 +196,7 @@ ENTRY(generic_bs_rr_1)
bne 1b
RET
+END(generic_bs_rr_1)
ENTRY(generic_armv4_bs_rr_2)
add r0, r1, r2
@@ -198,6 +211,7 @@ ENTRY(generic_armv4_bs_rr_2)
bne 1b
RET
+END(generic_armv4_bs_rr_2)
ENTRY(generic_bs_rr_4)
add r0, r1, r2
@@ -212,6 +226,7 @@ ENTRY(generic_bs_rr_4)
bne 1b
RET
+END(generic_bs_rr_4)
/*
* write region.
@@ -230,6 +245,7 @@ ENTRY(generic_bs_wr_1)
bne 1b
RET
+END(generic_bs_wr_1)
ENTRY(generic_armv4_bs_wr_2)
add r0, r1, r2
@@ -244,6 +260,7 @@ ENTRY(generic_armv4_bs_wr_2)
bne 1b
RET
+END(generic_armv4_bs_wr_2)
ENTRY(generic_bs_wr_4)
add r0, r1, r2
@@ -258,6 +275,7 @@ ENTRY(generic_bs_wr_4)
bne 1b
RET
+END(generic_bs_wr_4)
/*
* set region
@@ -275,6 +293,7 @@ ENTRY(generic_bs_sr_1)
bne 1b
RET
+END(generic_bs_sr_1)
ENTRY(generic_armv4_bs_sr_2)
add r0, r1, r2
@@ -288,6 +307,7 @@ ENTRY(generic_armv4_bs_sr_2)
bne 1b
RET
+END(generic_armv4_bs_sr_2)
ENTRY(generic_bs_sr_4)
add r0, r1, r2
@@ -301,6 +321,7 @@ ENTRY(generic_bs_sr_4)
bne 1b
RET
+END(generic_bs_sr_4)
/*
* copy region
@@ -335,3 +356,5 @@ ENTRY(generic_armv4_bs_c_2)
bne 3b
RET
+END(generic_armv4_bs_c_2)
+
diff --git a/sys/arm/arm/copystr.S b/sys/arm/arm/copystr.S
index 9eb8682..83b7ec7 100644
--- a/sys/arm/arm/copystr.S
+++ b/sys/arm/arm/copystr.S
@@ -93,6 +93,7 @@ ENTRY(copystr)
ldmfd sp!, {r4-r5} /* stack is 8 byte aligned */
RET
+END(copystr)
#define SAVE_REGS stmfd sp!, {r4-r6}
#define RESTORE_REGS ldmfd sp!, {r4-r6}
@@ -143,6 +144,7 @@ ENTRY(copyinstr)
RESTORE_REGS
RET
+END(copyinstr)
/*
* r0 - kernel space address
@@ -190,6 +192,7 @@ ENTRY(copyoutstr)
RESTORE_REGS
RET
+END(copyoutstr)
/* A fault occurred during the copy */
.Lcopystrfault:
diff --git a/sys/arm/arm/cpufunc_asm.S b/sys/arm/arm/cpufunc_asm.S
index 1709796..eeff722 100644
--- a/sys/arm/arm/cpufunc_asm.S
+++ b/sys/arm/arm/cpufunc_asm.S
@@ -50,6 +50,7 @@ __FBSDID("$FreeBSD$");
ENTRY(cpufunc_nullop)
RET
+END(cpufunc_nullop)
/*
* Generic functions to read the internal coprocessor registers
@@ -64,27 +65,32 @@ ENTRY(cpufunc_nullop)
ENTRY(cpufunc_id)
mrc p15, 0, r0, c0, c0, 0
RET
+END(cpufunc_id)
ENTRY(cpufunc_cpuid)
mrc p15, 0, r0, c0, c0, 0
RET
+END(cpufunc_cpuid)
ENTRY(cpu_get_control)
mrc p15, 0, r0, c1, c0, 0
RET
+END(cpu_get_control)
ENTRY(cpu_read_cache_config)
mrc p15, 0, r0, c0, c0, 1
RET
+END(cpu_read_cache_config)
ENTRY(cpufunc_faultstatus)
mrc p15, 0, r0, c5, c0, 0
RET
+END(cpufunc_faultstatus)
ENTRY(cpufunc_faultaddress)
mrc p15, 0, r0, c6, c0, 0
RET
-
+END(cpufunc_faultaddress)
/*
* Generic functions to write the internal coprocessor registers
@@ -101,11 +107,13 @@ ENTRY(cpufunc_faultaddress)
ENTRY(cpufunc_control)
mcr p15, 0, r0, c1, c0, 0
RET
+END(cpufunc_control)
#endif
ENTRY(cpufunc_domains)
mcr p15, 0, r0, c3, c0, 0
RET
+END(cpufunc_domains)
/*
* Generic functions to read/modify/write the internal coprocessor registers
@@ -131,6 +139,8 @@ ENTRY(cpufunc_control)
.Lglou:
.asciz "plop %p\n"
.align 0
+END(cpufunc_control)
+
/*
* other potentially useful software functions are:
* clean D cache entry and flush I cache entry
@@ -157,6 +167,7 @@ ENTRY(get_pc_str_offset)
ldr r0, [sp]
sub r0, r0, r1
ldmdb fp, {fp, sp, pc}
+END(get_pc_str_offset)
/* Allocate and lock a cacheline for the specified address. */
@@ -180,3 +191,5 @@ ENTRY(arm_lock_cache_line)
mcr p15, 0, r1, c9, c2, 0 /* Disable data cache lock mode */
CPWAIT()
RET
+END(arm_lock_cache_line)
+
diff --git a/sys/arm/arm/cpufunc_asm_arm10.S b/sys/arm/arm/cpufunc_asm_arm10.S
index 2ef999c..654219b 100644
--- a/sys/arm/arm/cpufunc_asm_arm10.S
+++ b/sys/arm/arm/cpufunc_asm_arm10.S
@@ -50,6 +50,7 @@ ENTRY(arm10_setttb)
mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLBs */
bx lr
+END(arm10_setttb)
/*
* TLB functions
@@ -58,11 +59,12 @@ ENTRY(arm10_tlb_flushID_SE)
mcr p15, 0, r0, c8, c6, 1 /* flush D tlb single entry */
mcr p15, 0, r0, c8, c5, 1 /* flush I tlb single entry */
bx lr
+END(arm10_tlb_flushID_SE)
ENTRY(arm10_tlb_flushI_SE)
mcr p15, 0, r0, c8, c5, 1 /* flush I tlb single entry */
bx lr
-
+END(arm10_tlb_flushI_SE)
/*
* Cache operations. For the entire cache we use the set/index
@@ -90,6 +92,7 @@ ENTRY_NP(arm10_icache_sync_range)
bhi .Larm10_sync_next
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
bx lr
+END(arm10_icache_sync_range)
ENTRY_NP(arm10_icache_sync_all)
.Larm10_icache_sync_all:
@@ -114,6 +117,7 @@ ENTRY_NP(arm10_icache_sync_all)
bhs .Lnext_set /* Next set */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
bx lr
+END(arm10_icache_sync_all)
.Larm10_line_size:
.word _C_LABEL(arm_pdcache_line_size)
@@ -134,6 +138,7 @@ ENTRY(arm10_dcache_wb_range)
bhi .Larm10_wb_next
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
bx lr
+END(arm10_dcache_wb_range)
ENTRY(arm10_dcache_wbinv_range)
ldr ip, .Larm10_line_size
@@ -151,6 +156,7 @@ ENTRY(arm10_dcache_wbinv_range)
bhi .Larm10_wbinv_next
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
bx lr
+END(arm10_dcache_wbinv_range)
/*
* Note, we must not invalidate everything. If the range is too big we
@@ -172,6 +178,7 @@ ENTRY(arm10_dcache_inv_range)
bhi .Larm10_inv_next
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
bx lr
+END(arm10_dcache_inv_range)
ENTRY(arm10_idcache_wbinv_range)
ldr ip, .Larm10_line_size
@@ -190,6 +197,7 @@ ENTRY(arm10_idcache_wbinv_range)
bhi .Larm10_id_wbinv_next
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
bx lr
+END(arm10_idcache_wbinv_range)
ENTRY_NP(arm10_idcache_wbinv_all)
.Larm10_idcache_wbinv_all:
@@ -215,6 +223,8 @@ ENTRY(arm10_dcache_wbinv_all)
bhs .Lnext_set_inv /* Next set */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
bx lr
+END(arm10_idcache_wbinv_all)
+END(arm10_dcache_wbinv_all)
.Larm10_cache_data:
.word _C_LABEL(arm10_dcache_sets_max)
@@ -242,6 +252,7 @@ ENTRY(arm10_context_switch)
nop
nop
bx lr
+END(arm10_context_switch)
.bss
diff --git a/sys/arm/arm/cpufunc_asm_arm11.S b/sys/arm/arm/cpufunc_asm_arm11.S
index b8d8f19..723afc6 100644
--- a/sys/arm/arm/cpufunc_asm_arm11.S
+++ b/sys/arm/arm/cpufunc_asm_arm11.S
@@ -55,6 +55,7 @@ ENTRY(arm11_setttb)
mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLBs */
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
RET
+END(arm11_setttb)
/*
* TLB functions
@@ -64,12 +65,13 @@ ENTRY(arm11_tlb_flushID_SE)
mcr p15, 0, r0, c8, c5, 1 /* flush I tlb single entry */
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
RET
+END(arm11_tlb_flushID_SE)
ENTRY(arm11_tlb_flushI_SE)
mcr p15, 0, r0, c8, c5, 1 /* flush I tlb single entry */
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
RET
-
+END(arm11_tlb_flushI_SE)
/*
* Context switch.
@@ -94,6 +96,7 @@ ENTRY(arm11_context_switch)
nop
nop
RET
+END(arm11_context_switch)
/*
* TLB functions
@@ -102,21 +105,25 @@ ENTRY(arm11_tlb_flushID)
mcr p15, 0, r0, c8, c7, 0 /* flush I+D tlb */
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mov pc, lr
+END(arm11_tlb_flushID)
ENTRY(arm11_tlb_flushI)
mcr p15, 0, r0, c8, c5, 0 /* flush I tlb */
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mov pc, lr
+END(arm11_tlb_flushI)
ENTRY(arm11_tlb_flushD)
mcr p15, 0, r0, c8, c6, 0 /* flush D tlb */
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mov pc, lr
+END(arm11_tlb_flushD)
ENTRY(arm11_tlb_flushD_SE)
mcr p15, 0, r0, c8, c6, 1 /* flush D tlb single entry */
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mov pc, lr
+END(arm11_tlb_flushD_SE)
/*
* Other functions
@@ -124,8 +131,11 @@ ENTRY(arm11_tlb_flushD_SE)
ENTRY(arm11_drain_writebuf)
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mov pc, lr
+END(arm11_drain_writebuf)
ENTRY_NP(arm11_sleep)
mov r0, #0
mcr p15, 0, r0, c7, c0, 4 /* wait for interrupt */
RET
+END(arm11_sleep)
+
diff --git a/sys/arm/arm/cpufunc_asm_arm11x6.S b/sys/arm/arm/cpufunc_asm_arm11x6.S
index e223208..6c7eb56 100644
--- a/sys/arm/arm/cpufunc_asm_arm11x6.S
+++ b/sys/arm/arm/cpufunc_asm_arm11x6.S
@@ -124,24 +124,29 @@ ENTRY(arm11x6_setttb)
mcr p15, 0, r1, c8, c7, 0 /* invalidate I+D TLBs */
mcr p15, 0, r1, c7, c10, 4 /* drain write buffer */
RET
+END(arm11x6_setttb)
ENTRY_NP(arm11x6_idcache_wbinv_all)
Flush_D_cache(r0)
Invalidate_I_cache(r0, r1)
RET
+END(arm11x6_idcache_wbinv_all)
ENTRY_NP(arm11x6_dcache_wbinv_all)
Flush_D_cache(r0)
RET
+END(arm11x6_dcache_wbinv_all)
ENTRY_NP(arm11x6_icache_sync_all)
Flush_D_cache(r0)
Invalidate_I_cache(r0, r1)
RET
+END(arm11x6_icache_sync_all)
ENTRY_NP(arm11x6_flush_prefetchbuf)
mcr p15, 0, r0, c7, c5, 4 /* Flush Prefetch Buffer */
RET
+END(arm11x6_flush_prefetchbuf)
ENTRY_NP(arm11x6_icache_sync_range)
add r1, r1, r0
@@ -168,6 +173,7 @@ ENTRY_NP(arm11x6_icache_sync_range)
mcrr p15, 0, r1, r0, c12 /* clean and invalidate D cache range */ /* XXXNH */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(arm11x6_icache_sync_range)
ENTRY_NP(arm11x6_idcache_wbinv_range)
add r1, r1, r0
@@ -194,6 +200,7 @@ ENTRY_NP(arm11x6_idcache_wbinv_range)
mcrr p15, 0, r1, r0, c14 /* clean and invalidate D cache range */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(arm11x6_idcache_wbinv_range)
/*
* Preload the cache before issuing the WFI by conditionally disabling the
@@ -216,3 +223,5 @@ ENTRY_NP(arm11x6_sleep)
nop
bne 1b
RET
+END(arm11x6_sleep)
+
diff --git a/sys/arm/arm/cpufunc_asm_arm7tdmi.S b/sys/arm/arm/cpufunc_asm_arm7tdmi.S
index fed6f16..2ac2502 100644
--- a/sys/arm/arm/cpufunc_asm_arm7tdmi.S
+++ b/sys/arm/arm/cpufunc_asm_arm7tdmi.S
@@ -60,6 +60,7 @@ ENTRY(arm7tdmi_setttb)
bl _C_LABEL(arm7tdmi_cache_flushID)
mov pc, r2
+END(arm7tdmi_setttb)
/*
* TLB functions
@@ -68,10 +69,12 @@ ENTRY(arm7tdmi_tlb_flushID)
mov r0, #0
mcr p15, 0, r0, c8, c7, 0
RET
+END(arm7tdmi_tlb_flushID)
ENTRY(arm7tdmi_tlb_flushID_SE)
mcr p15, 0, r0, c8, c7, 1
RET
+END(arm7tdmi_tlb_flushID_SE)
/*
* Cache functions
@@ -86,6 +89,7 @@ ENTRY(arm7tdmi_cache_flushID)
mov r0, r0
RET
+END(arm7tdmi_cache_flushID)
/*
* Context switch.
@@ -98,3 +102,5 @@ ENTRY(arm7tdmi_cache_flushID)
*/
ENTRY(arm7tdmi_context_switch)
b _C_LABEL(arm7tdmi_setttb)
+END(arm7tdmi_context_switch)
+
diff --git a/sys/arm/arm/cpufunc_asm_arm8.S b/sys/arm/arm/cpufunc_asm_arm8.S
index 9f23548..2cb8b11 100644
--- a/sys/arm/arm/cpufunc_asm_arm8.S
+++ b/sys/arm/arm/cpufunc_asm_arm8.S
@@ -58,6 +58,7 @@ ENTRY(arm8_clock_config)
mcr p15, 0, r2, c15, c0, 0 /* Write clock register */
mov r0, r3 /* Return old value */
RET
+END(arm8_clock_config)
/*
* Functions to set the MMU Translation Table Base register
@@ -90,6 +91,7 @@ ENTRY(arm8_setttb)
msr cpsr_all, r3
RET
+END(arm8_setttb)
/*
* TLB functions
@@ -97,10 +99,12 @@ ENTRY(arm8_setttb)
ENTRY(arm8_tlb_flushID)
mcr p15, 0, r0, c8, c7, 0 /* flush I+D tlb */
RET
+END(arm8_tlb_flushID)
ENTRY(arm8_tlb_flushID_SE)
mcr p15, 0, r0, c8, c7, 1 /* flush I+D tlb single entry */
RET
+END(arm8_tlb_flushID_SE)
/*
* Cache functions
@@ -108,10 +112,12 @@ ENTRY(arm8_tlb_flushID_SE)
ENTRY(arm8_cache_flushID)
mcr p15, 0, r0, c7, c7, 0 /* flush I+D cache */
RET
+END(arm8_cache_flushID)
ENTRY(arm8_cache_flushID_E)
mcr p15, 0, r0, c7, c7, 1 /* flush I+D single entry */
RET
+END(arm8_cache_flushID_E)
ENTRY(arm8_cache_cleanID)
mov r0, #0x00000000
@@ -153,10 +159,12 @@ ENTRY(arm8_cache_cleanID)
bne 1b
RET
+END(arm8_cache_cleanID)
ENTRY(arm8_cache_cleanID_E)
mcr p15, 0, r0, c7, c11, 1 /* clean I+D single entry */
RET
+END(arm8_cache_cleanID_E)
ENTRY(arm8_cache_purgeID)
/*
@@ -232,6 +240,7 @@ ENTRY(arm8_cache_purgeID)
msr cpsr_all, r3
RET
+END(arm8_cache_purgeID)
ENTRY(arm8_cache_purgeID_E)
/*
@@ -253,6 +262,7 @@ ENTRY(arm8_cache_purgeID_E)
mcr p15, 0, r0, c7, c7, 1 /* flush I+D single entry */
msr cpsr_all, r3
RET
+END(arm8_cache_purgeID_E)
/*
* Context switch.
@@ -282,3 +292,5 @@ ENTRY(arm8_context_switch)
mov r0, r0
mov r0, r0
RET
+END(arm8_context_switch)
+
diff --git a/sys/arm/arm/cpufunc_asm_arm9.S b/sys/arm/arm/cpufunc_asm_arm9.S
index ae9fe00..dd29479 100644
--- a/sys/arm/arm/cpufunc_asm_arm9.S
+++ b/sys/arm/arm/cpufunc_asm_arm9.S
@@ -49,6 +49,7 @@ ENTRY(arm9_setttb)
mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLBs */
mov pc, lr
+END(arm9_setttb)
/*
* TLB functions
@@ -57,6 +58,7 @@ ENTRY(arm9_tlb_flushID_SE)
mcr p15, 0, r0, c8, c6, 1 /* flush D tlb single entry */
mcr p15, 0, r0, c8, c5, 1 /* flush I tlb single entry */
mov pc, lr
+END(arm9_tlb_flushID_SE)
/*
* Cache operations. For the entire cache we use the set/index
@@ -83,6 +85,7 @@ ENTRY_NP(arm9_icache_sync_range)
subs r1, r1, ip
bhi .Larm9_sync_next
mov pc, lr
+END(arm9_icache_sync_range)
ENTRY_NP(arm9_icache_sync_all)
.Larm9_icache_sync_all:
@@ -106,6 +109,7 @@ ENTRY_NP(arm9_icache_sync_all)
subs s_max, s_max, s_inc
bhs .Lnext_set /* Next set */
mov pc, lr
+END(arm9_icache_sync_all)
.Larm9_line_size:
.word _C_LABEL(arm_pdcache_line_size)
@@ -125,6 +129,7 @@ ENTRY(arm9_dcache_wb_range)
subs r1, r1, ip
bhi .Larm9_wb_next
mov pc, lr
+END(arm9_dcache_wb_range)
ENTRY(arm9_dcache_wbinv_range)
ldr ip, .Larm9_line_size
@@ -141,6 +146,7 @@ ENTRY(arm9_dcache_wbinv_range)
subs r1, r1, ip
bhi .Larm9_wbinv_next
mov pc, lr
+END(arm9_dcache_wbinv_range)
/*
* Note, we must not invalidate everything. If the range is too big we
@@ -161,6 +167,7 @@ ENTRY(arm9_dcache_inv_range)
subs r1, r1, ip
bhi .Larm9_inv_next
mov pc, lr
+END(arm9_dcache_inv_range)
ENTRY(arm9_idcache_wbinv_range)
ldr ip, .Larm9_line_size
@@ -178,6 +185,7 @@ ENTRY(arm9_idcache_wbinv_range)
subs r1, r1, ip
bhi .Larm9_id_wbinv_next
mov pc, lr
+END(arm9_idcache_wbinv_range)
ENTRY_NP(arm9_idcache_wbinv_all)
.Larm9_idcache_wbinv_all:
@@ -202,6 +210,8 @@ ENTRY(arm9_dcache_wbinv_all)
subs s_max, s_max, s_inc
bhs .Lnext_set_inv /* Next set */
mov pc, lr
+END(arm9_idcache_wbinv_all)
+END(arm9_dcache_wbinv_all)
.Larm9_cache_data:
.word _C_LABEL(arm9_dcache_sets_max)
@@ -229,6 +239,7 @@ ENTRY(arm9_context_switch)
nop
nop
mov pc, lr
+END(arm9_context_switch)
.bss
diff --git a/sys/arm/arm/cpufunc_asm_armv4.S b/sys/arm/arm/cpufunc_asm_armv4.S
index 1b8797d..1123e4a 100644
--- a/sys/arm/arm/cpufunc_asm_armv4.S
+++ b/sys/arm/arm/cpufunc_asm_armv4.S
@@ -46,18 +46,22 @@ __FBSDID("$FreeBSD$");
ENTRY(armv4_tlb_flushID)
mcr p15, 0, r0, c8, c7, 0 /* flush I+D tlb */
RET
+END(armv4_tlb_flushID)
ENTRY(armv4_tlb_flushI)
mcr p15, 0, r0, c8, c5, 0 /* flush I tlb */
RET
+END(armv4_tlb_flushI)
ENTRY(armv4_tlb_flushD)
mcr p15, 0, r0, c8, c6, 0 /* flush D tlb */
RET
+END(armv4_tlb_flushD)
ENTRY(armv4_tlb_flushD_SE)
mcr p15, 0, r0, c8, c6, 1 /* flush D tlb single entry */
RET
+END(armv4_tlb_flushD_SE)
/*
* Other functions
@@ -65,3 +69,5 @@ ENTRY(armv4_tlb_flushD_SE)
ENTRY(armv4_drain_writebuf)
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
RET
+END(armv4_drain_writebuf)
+
diff --git a/sys/arm/arm/cpufunc_asm_armv5.S b/sys/arm/arm/cpufunc_asm_armv5.S
index 2faa5f4..94e6b43 100644
--- a/sys/arm/arm/cpufunc_asm_armv5.S
+++ b/sys/arm/arm/cpufunc_asm_armv5.S
@@ -51,6 +51,7 @@ ENTRY(armv5_setttb)
mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLBs */
RET
+END(armv5_setttb)
/*
* Cache operations. For the entire cache we use the set/index
@@ -79,6 +80,7 @@ ENTRY_NP(armv5_icache_sync_range)
bpl 1b
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(armv5_icache_sync_range)
ENTRY_NP(armv5_icache_sync_all)
.Larmv5_icache_sync_all:
@@ -105,6 +107,7 @@ ENTRY_NP(armv5_icache_sync_all)
bpl 1b /* Next set */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(armv5_icache_sync_all)
.Larmv5_line_size:
.word _C_LABEL(arm_pdcache_line_size)
@@ -126,6 +129,7 @@ ENTRY(armv5_dcache_wb_range)
bpl 1b
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(armv5_dcache_wb_range)
ENTRY(armv5_dcache_wbinv_range)
ldr ip, .Larmv5_line_size
@@ -144,6 +148,7 @@ ENTRY(armv5_dcache_wbinv_range)
bpl 1b
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(armv5_dcache_wbinv_range)
/*
* Note, we must not invalidate everything. If the range is too big we
@@ -166,6 +171,7 @@ ENTRY(armv5_dcache_inv_range)
bpl 1b
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(armv5_dcache_inv_range)
ENTRY(armv5_idcache_wbinv_range)
ldr ip, .Larmv5_line_size
@@ -185,6 +191,7 @@ ENTRY(armv5_idcache_wbinv_range)
bpl 1b
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(armv5_idcache_wbinv_range)
ENTRY_NP(armv5_idcache_wbinv_all)
.Larmv5_idcache_wbinv_all:
@@ -212,6 +219,8 @@ ENTRY(armv5_dcache_wbinv_all)
bpl 1b /* Next set */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(armv5_idcache_wbinv_all)
+END(armv5_dcache_wbinv_all)
.Larmv5_cache_data:
.word _C_LABEL(armv5_dcache_sets_max)
diff --git a/sys/arm/arm/cpufunc_asm_armv5_ec.S b/sys/arm/arm/cpufunc_asm_armv5_ec.S
index 4012563..a86ac80 100644
--- a/sys/arm/arm/cpufunc_asm_armv5_ec.S
+++ b/sys/arm/arm/cpufunc_asm_armv5_ec.S
@@ -66,6 +66,7 @@ ENTRY(armv5_ec_setttb)
mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLBs */
RET
+END(armv5_ec_setttb)
/*
* Cache operations. For the entire cache we use the enhanced cache
@@ -90,6 +91,7 @@ ENTRY_NP(armv5_ec_icache_sync_range)
bpl 1b
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(armv5_ec_icache_sync_range)
ENTRY_NP(armv5_ec_icache_sync_all)
.Larmv5_ec_icache_sync_all:
@@ -107,6 +109,7 @@ ENTRY_NP(armv5_ec_icache_sync_all)
bne 1b /* More to do? */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(armv5_ec_icache_sync_all)
.Larmv5_ec_line_size:
.word _C_LABEL(arm_pdcache_line_size)
@@ -128,6 +131,7 @@ ENTRY(armv5_ec_dcache_wb_range)
bpl 1b
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(armv5_ec_dcache_wb_range)
ENTRY(armv5_ec_dcache_wbinv_range)
ldr ip, .Larmv5_ec_line_size
@@ -146,6 +150,7 @@ ENTRY(armv5_ec_dcache_wbinv_range)
bpl 1b
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(armv5_ec_dcache_wbinv_range)
/*
* Note, we must not invalidate everything. If the range is too big we
@@ -168,6 +173,7 @@ ENTRY(armv5_ec_dcache_inv_range)
bpl 1b
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(armv5_ec_dcache_inv_range)
ENTRY(armv5_ec_idcache_wbinv_range)
ldr ip, .Larmv5_ec_line_size
@@ -187,6 +193,7 @@ ENTRY(armv5_ec_idcache_wbinv_range)
bpl 1b
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(armv5_ec_idcache_wbinv_range)
ENTRY_NP(armv5_ec_idcache_wbinv_all)
.Larmv5_ec_idcache_wbinv_all:
@@ -197,6 +204,7 @@ ENTRY_NP(armv5_ec_idcache_wbinv_all)
*/
mcr p15, 0, r0, c7, c5, 0 /* Invalidate ICache */
/* Fall through to purge Dcache. */
+END(armv5_ec_idcache_wbinv_all)
ENTRY(armv5_ec_dcache_wbinv_all)
.Larmv5_ec_dcache_wbinv_all:
@@ -204,4 +212,5 @@ ENTRY(armv5_ec_dcache_wbinv_all)
bne 1b /* More to do? */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(armv5_ec_dcache_wbinv_all)
diff --git a/sys/arm/arm/cpufunc_asm_armv6.S b/sys/arm/arm/cpufunc_asm_armv6.S
index f735754..b8a2d9c 100644
--- a/sys/arm/arm/cpufunc_asm_armv6.S
+++ b/sys/arm/arm/cpufunc_asm_armv6.S
@@ -59,6 +59,7 @@ ENTRY(armv6_setttb)
mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLBs */
RET
+END(armv6_setttb)
/*
* Cache operations.
@@ -72,6 +73,7 @@ ENTRY_NP(armv6_icache_sync_range)
mcrr p15, 0, r1, r0, c12 /* clean D cache range */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(armv6_icache_sync_range)
/* LINTSTUB: void armv6_icache_sync_all(void); */
ENTRY_NP(armv6_icache_sync_all)
@@ -84,6 +86,7 @@ ENTRY_NP(armv6_icache_sync_all)
mcr p15, 0, r0, c7, c10, 0 /* Clean D cache */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(armv6_icache_sync_all)
/* LINTSTUB: void armv6_dcache_wb_range(vaddr_t, vsize_t); */
ENTRY(armv6_dcache_wb_range)
@@ -92,6 +95,7 @@ ENTRY(armv6_dcache_wb_range)
mcrr p15, 0, r1, r0, c12 /* clean D cache range */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(armv6_dcache_wb_range)
/* LINTSTUB: void armv6_dcache_wbinv_range(vaddr_t, vsize_t); */
ENTRY(armv6_dcache_wbinv_range)
@@ -100,6 +104,7 @@ ENTRY(armv6_dcache_wbinv_range)
mcrr p15, 0, r1, r0, c14 /* clean and invaliate D cache range */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(armv6_dcache_wbinv_range)
/*
* Note, we must not invalidate everything. If the range is too big we
@@ -113,6 +118,7 @@ ENTRY(armv6_dcache_inv_range)
mcrr p15, 0, r1, r0, c6 /* invaliate D cache range */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(armv6_dcache_inv_range)
/* LINTSTUB: void armv6_idcache_wbinv_range(vaddr_t, vsize_t); */
ENTRY(armv6_idcache_wbinv_range)
@@ -122,6 +128,7 @@ ENTRY(armv6_idcache_wbinv_range)
mcrr p15, 0, r1, r0, c14 /* clean & invaliate D cache range */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(armv6_idcache_wbinv_range)
/* LINTSTUB: void armv6_idcache_wbinv_all(void); */
ENTRY_NP(armv6_idcache_wbinv_all)
@@ -138,3 +145,6 @@ ENTRY(armv6_dcache_wbinv_all)
mcr p15, 0, r0, c7, c14, 0 /* clean & invalidate D cache */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(armv6_idcache_wbinv_all)
+END(armv6_dcache_wbinv_all)
+
diff --git a/sys/arm/arm/cpufunc_asm_armv7.S b/sys/arm/arm/cpufunc_asm_armv7.S
index 58f295c..2b4be85 100644
--- a/sys/arm/arm/cpufunc_asm_armv7.S
+++ b/sys/arm/arm/cpufunc_asm_armv7.S
@@ -78,6 +78,7 @@ ENTRY(armv7_setttb)
dsb
isb
RET
+END(armv7_setttb)
ENTRY(armv7_tlb_flushID)
dsb
@@ -91,6 +92,7 @@ ENTRY(armv7_tlb_flushID)
dsb
isb
mov pc, lr
+END(armv7_tlb_flushID)
ENTRY(armv7_tlb_flushID_SE)
ldr r1, .Lpage_mask
@@ -105,6 +107,7 @@ ENTRY(armv7_tlb_flushID_SE)
dsb
isb
mov pc, lr
+END(armv7_tlb_flushID_SE)
/* Based on algorithm from ARM Architecture Reference Manual */
ENTRY(armv7_dcache_wbinv_all)
@@ -157,6 +160,7 @@ Finished:
dsb
ldmia sp!, {r4, r5, r6, r7, r8, r9}
RET
+END(armv7_dcache_wbinv_all)
ENTRY(armv7_idcache_wbinv_all)
stmdb sp!, {lr}
@@ -170,6 +174,7 @@ ENTRY(armv7_idcache_wbinv_all)
isb
ldmia sp!, {lr}
RET
+END(armv7_idcache_wbinv_all)
/* XXX Temporary set it to 32 for MV cores, however this value should be
* get from Cache Type register
@@ -190,6 +195,7 @@ ENTRY(armv7_dcache_wb_range)
bhi .Larmv7_wb_next
dsb /* data synchronization barrier */
RET
+END(armv7_dcache_wb_range)
ENTRY(armv7_dcache_wbinv_range)
ldr ip, .Larmv7_line_size
@@ -204,6 +210,7 @@ ENTRY(armv7_dcache_wbinv_range)
bhi .Larmv7_wbinv_next
dsb /* data synchronization barrier */
RET
+END(armv7_dcache_wbinv_range)
/*
* Note, we must not invalidate everything. If the range is too big we
@@ -222,6 +229,7 @@ ENTRY(armv7_dcache_inv_range)
bhi .Larmv7_inv_next
dsb /* data synchronization barrier */
RET
+END(armv7_dcache_inv_range)
ENTRY(armv7_idcache_wbinv_range)
ldr ip, .Larmv7_line_size
@@ -238,6 +246,7 @@ ENTRY(armv7_idcache_wbinv_range)
isb /* instruction synchronization barrier */
dsb /* data synchronization barrier */
RET
+END(armv7_idcache_wbinv_range)
ENTRY_NP(armv7_icache_sync_range)
ldr ip, .Larmv7_line_size
@@ -250,11 +259,13 @@ ENTRY_NP(armv7_icache_sync_range)
isb /* instruction synchronization barrier */
dsb /* data synchronization barrier */
RET
+END(armv7_icache_sync_range)
ENTRY(armv7_cpu_sleep)
dsb /* data synchronization barrier */
wfi /* wait for interrupt */
RET
+END(armv7_cpu_sleep)
ENTRY(armv7_context_switch)
dsb
@@ -269,16 +280,19 @@ ENTRY(armv7_context_switch)
dsb
isb
RET
+END(armv7_context_switch)
ENTRY(armv7_drain_writebuf)
dsb
RET
+END(armv7_drain_writebuf)
ENTRY(armv7_sev)
dsb
sev
nop
RET
+END(armv7_sev)
ENTRY(armv7_auxctrl)
mrc p15, 0, r2, c1, c0, 1
@@ -289,3 +303,5 @@ ENTRY(armv7_auxctrl)
mcrne p15, 0, r3, c1, c0, 1
mov r0, r2
RET
+END(armv7_auxctrl)
+
diff --git a/sys/arm/arm/cpufunc_asm_fa526.S b/sys/arm/arm/cpufunc_asm_fa526.S
index d53d29a..55c2f37 100644
--- a/sys/arm/arm/cpufunc_asm_fa526.S
+++ b/sys/arm/arm/cpufunc_asm_fa526.S
@@ -54,6 +54,7 @@ ENTRY(fa526_setttb)
mov r0, r0
mov r0, r0
mov pc, lr
+END(fa526_setttb)
/*
* TLB functions
@@ -61,6 +62,7 @@ ENTRY(fa526_setttb)
ENTRY(fa526_tlb_flushID_SE)
mcr p15, 0, r0, c8, c7, 1 /* flush Utlb single entry */
mov pc, lr
+END(fa526_tlb_flushID_SE)
/*
* TLB functions
@@ -68,6 +70,7 @@ ENTRY(fa526_tlb_flushID_SE)
ENTRY(fa526_tlb_flushI_SE)
mcr p15, 0, r0, c8, c5, 1 /* flush Itlb single entry */
mov pc, lr
+END(fa526_tlb_flushI_SE)
ENTRY(fa526_cpu_sleep)
mov r0, #0
@@ -75,11 +78,13 @@ ENTRY(fa526_cpu_sleep)
nop*/
mcr p15, 0, r0, c7, c0, 4 /* Wait for interrupt*/
mov pc, lr
+END(fa526_cpu_sleep)
ENTRY(fa526_flush_prefetchbuf)
mov r0, #0
mcr p15, 0, r0, c7, c5, 4 /* Pre-fetch flush */
mov pc, lr
+END(fa526_flush_prefetchbuf)
/*
* Cache functions
@@ -90,17 +95,20 @@ ENTRY(fa526_idcache_wbinv_all)
mcr p15, 0, r0, c7, c5, 0 /* invalidate I$ */
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mov pc, lr
+END(fa526_idcache_wbinv_all)
ENTRY(fa526_icache_sync_all)
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 /* invalidate I$ */
mov pc, lr
+END(fa526_icache_sync_all)
ENTRY(fa526_dcache_wbinv_all)
mov r0, #0
mcr p15, 0, r0, c7, c14, 0 /* clean and invalidate D$ */
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mov pc, lr
+END(fa526_dcache_wbinv_all)
/*
* Soft functions
@@ -120,6 +128,7 @@ ENTRY(fa526_dcache_wbinv_range)
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mov pc, lr
+END(fa526_dcache_wbinv_range)
ENTRY(fa526_dcache_wb_range)
cmp r1, #0x4000
@@ -140,6 +149,7 @@ ENTRY(fa526_dcache_wb_range)
3: mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mov pc, lr
+END(fa526_dcache_wb_range)
ENTRY(fa526_dcache_inv_range)
and r2, r0, #(CACHELINE_SIZE - 1)
@@ -152,6 +162,7 @@ ENTRY(fa526_dcache_inv_range)
bhi 1b
mov pc, lr
+END(fa526_dcache_inv_range)
ENTRY(fa526_idcache_wbinv_range)
cmp r1, #0x4000
@@ -169,6 +180,7 @@ ENTRY(fa526_idcache_wbinv_range)
2: mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mov pc, lr
+END(fa526_idcache_wbinv_range)
ENTRY(fa526_icache_sync_range)
cmp r1, #0x4000
@@ -186,11 +198,13 @@ ENTRY(fa526_icache_sync_range)
2: mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mov pc, lr
+END(fa526_icache_sync_range)
ENTRY(fa526_flush_brnchtgt_E)
mov r0, #0
mcr p15, 0, r0, c7, c5, 6 /* invalidate BTB cache */
mov pc, lr
+END(fa526_flush_brnchtgt_E)
ENTRY(fa526_context_switch)
/*
@@ -210,4 +224,5 @@ ENTRY(fa526_context_switch)
mov r0, r0
mov r0, r0
mov pc, lr
+END(fa526_context_switch)
diff --git a/sys/arm/arm/cpufunc_asm_ixp12x0.S b/sys/arm/arm/cpufunc_asm_ixp12x0.S
index efc5950..481cf0d 100644
--- a/sys/arm/arm/cpufunc_asm_ixp12x0.S
+++ b/sys/arm/arm/cpufunc_asm_ixp12x0.S
@@ -61,10 +61,12 @@ ENTRY(ixp12x0_context_switch)
mov r0, r0
mov r0, r0
RET
+END(ixp12x0_context_switch)
ENTRY(ixp12x0_drain_readbuf)
mcr p15, 0, r0, c9, c0, 0 /* drain read buffer */
RET
+END(ixp12x0_drain_readbuf)
/*
* Information for the IXP12X0 cache clean/purge functions:
diff --git a/sys/arm/arm/cpufunc_asm_pj4b.S b/sys/arm/arm/cpufunc_asm_pj4b.S
index f6890d9..2e325f3 100644
--- a/sys/arm/arm/cpufunc_asm_pj4b.S
+++ b/sys/arm/arm/cpufunc_asm_pj4b.S
@@ -46,6 +46,7 @@ ENTRY(pj4b_setttb)
mcr p15, 0, r0, c2, c0, 0 /* load new TTB */
mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLBs */
RET
+END(pj4b_setttb)
ENTRY_NP(armv6_icache_sync_all)
/*
@@ -58,6 +59,7 @@ ENTRY_NP(armv6_icache_sync_all)
mcr p15, 0, r0, c7, c10, 0 /* Clean (don't invalidate) DCache */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(armv6_icache_sync_all)
ENTRY(pj4b_icache_sync_range)
sub r1, r1, #1
@@ -66,6 +68,7 @@ ENTRY(pj4b_icache_sync_range)
mcrr p15, 0, r1, r0, c12 /* clean DC range */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(pj4b_icache_sync_range)
ENTRY(pj4b_dcache_inv_range)
ldr ip, .Lpj4b_cache_line_size
@@ -84,6 +87,7 @@ ENTRY(pj4b_dcache_inv_range)
bpl 1b
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(pj4b_dcache_inv_range)
ENTRY(armv6_idcache_wbinv_all)
mov r0, #0
@@ -91,12 +95,14 @@ ENTRY(armv6_idcache_wbinv_all)
mcr p15, 0, r0, c7, c14, 0 /* clean and invalidate DCache */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(armv6_idcache_wbinv_all)
ENTRY(armv6_dcache_wbinv_all)
mov r0, #0
mcr p15, 0, r0, c7, c14, 0 /* clean and invalidate DCache */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(armv6_dcache_wbinv_all)
ENTRY(pj4b_idcache_wbinv_range)
ldr ip, .Lpj4b_cache_line_size
@@ -121,6 +127,7 @@ ENTRY(pj4b_idcache_wbinv_range)
bpl 1b
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(pj4b_idcache_wbinv_range)
ENTRY(pj4b_dcache_wbinv_range)
ldr ip, .Lpj4b_cache_line_size
@@ -144,6 +151,7 @@ ENTRY(pj4b_dcache_wbinv_range)
bpl 1b
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(pj4b_dcache_wbinv_range)
ENTRY(pj4b_dcache_wb_range)
ldr ip, .Lpj4b_cache_line_size
@@ -167,22 +175,27 @@ ENTRY(pj4b_dcache_wb_range)
bpl 1b
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(pj4b_dcache_wb_range)
ENTRY(pj4b_drain_readbuf)
mcr p15, 0, r0, c7, c5, 4 /* flush prefetch buffers */
RET
+END(pj4b_drain_readbuf)
ENTRY(pj4b_flush_brnchtgt_all)
mcr p15, 0, r0, c7, c5, 6 /* flush entrie branch target cache */
RET
+END(pj4b_flush_brnchtgt_all)
ENTRY(pj4b_flush_brnchtgt_va)
mcr p15, 0, r0, c7, c5, 7 /* flush branch target cache by VA */
RET
+END(pj4b_flush_brnchtgt_va)
ENTRY(get_core_id)
mrc p15, 0, r0, c0, c0, 5
RET
+END(get_core_id)
ENTRY(pj4b_config)
/* Set Auxiliary Debug Modes Control 2 register */
@@ -200,3 +213,5 @@ ENTRY(pj4b_config)
mcr p15, 0, r0, c1, c0, 1
#endif
RET
+END(pj4b_config)
+
diff --git a/sys/arm/arm/cpufunc_asm_sa1.S b/sys/arm/arm/cpufunc_asm_sa1.S
index 0bdd6e7..99cd4f1 100644
--- a/sys/arm/arm/cpufunc_asm_sa1.S
+++ b/sys/arm/arm/cpufunc_asm_sa1.S
@@ -85,6 +85,8 @@ ENTRY(sa1_setttb)
str r2, [r3]
#endif
RET
+END(getttb)
+END(sa1_setttb)
/*
* TLB functions
@@ -93,6 +95,7 @@ ENTRY(sa1_tlb_flushID_SE)
mcr p15, 0, r0, c8, c6, 1 /* flush D tlb single entry */
mcr p15, 0, r0, c8, c5, 0 /* flush I tlb */
RET
+END(sa1_tlb_flushID_SE)
/*
* Cache functions
@@ -100,22 +103,27 @@ ENTRY(sa1_tlb_flushID_SE)
ENTRY(sa1_cache_flushID)
mcr p15, 0, r0, c7, c7, 0 /* flush I+D cache */
RET
+END(sa1_cache_flushID)
ENTRY(sa1_cache_flushI)
mcr p15, 0, r0, c7, c5, 0 /* flush I cache */
RET
+END(sa1_cache_flushI)
ENTRY(sa1_cache_flushD)
mcr p15, 0, r0, c7, c6, 0 /* flush D cache */
RET
+END(sa1_cache_flushD)
ENTRY(sa1_cache_flushD_SE)
mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */
RET
+END(sa1_cache_flushD_SE)
ENTRY(sa1_cache_cleanD_E)
mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */
RET
+END(sa1_cache_cleanD_E)
/*
* Information for the SA-1 cache clean/purge functions:
@@ -196,6 +204,11 @@ ENTRY(sa1_cache_cleanD)
SA1_CACHE_CLEAN_EPILOGUE
RET
+END(sa1_cache_syncI)
+END(sa1_cache_purgeID)
+END(sa1_cache_cleanID)
+END(sa1_cache_purgeD)
+END(sa1_cache_cleanD)
ENTRY(sa1_cache_purgeID_E)
mcr p15, 0, r0, c7, c10, 1 /* clean dcache entry */
@@ -203,12 +216,14 @@ ENTRY(sa1_cache_purgeID_E)
mcr p15, 0, r0, c7, c5, 0 /* flush I cache */
mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */
RET
+END(sa1_cache_purgeID_E)
ENTRY(sa1_cache_purgeD_E)
mcr p15, 0, r0, c7, c10, 1 /* clean dcache entry */
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */
RET
+END(sa1_cache_purgeD_E)
/*
* Soft functions
@@ -231,6 +246,8 @@ ENTRY(sa1_cache_cleanD_rng)
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
RET
+END(sa1_cache_cleanID_rng)
+END(sa1_cache_cleanD_rng)
ENTRY(sa1_cache_purgeID_rng)
cmp r1, #0x4000
@@ -249,6 +266,7 @@ ENTRY(sa1_cache_purgeID_rng)
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mcr p15, 0, r0, c7, c5, 0 /* flush I cache */
RET
+END(sa1_cache_purgeID_rng)
ENTRY(sa1_cache_purgeD_rng)
cmp r1, #0x4000
@@ -266,6 +284,7 @@ ENTRY(sa1_cache_purgeD_rng)
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
RET
+END(sa1_cache_purgeD_rng)
ENTRY(sa1_cache_syncI_rng)
cmp r1, #0x4000
@@ -284,6 +303,7 @@ ENTRY(sa1_cache_syncI_rng)
mcr p15, 0, r0, c7, c5, 0 /* flush I cache */
RET
+END(sa1_cache_syncI_rng)
/*
* Context switch.
@@ -313,4 +333,5 @@ ENTRY(sa110_context_switch)
mov r0, r0
mov r0, r0
RET
+END(sa110_context_switch)
#endif
diff --git a/sys/arm/arm/cpufunc_asm_sa11x0.S b/sys/arm/arm/cpufunc_asm_sa11x0.S
index ca167c8..17efc8f 100644
--- a/sys/arm/arm/cpufunc_asm_sa11x0.S
+++ b/sys/arm/arm/cpufunc_asm_sa11x0.S
@@ -95,7 +95,7 @@ ENTRY(sa11x0_cpu_sleep)
/* Restore interrupts (which will cause them to be serviced). */
msr cpsr_all, r3
RET
-
+END(sa11x0_cpu_sleep)
/*
* This function is the same as sa110_context_switch for now, the plan
@@ -119,7 +119,10 @@ ENTRY(sa11x0_context_switch)
mov r0, r0
mov r0, r0
RET
+END(sa11x0_context_switch)
ENTRY(sa11x0_drain_readbuf)
mcr p15, 0, r0, c9, c0, 0 /* drain read buffer */
RET
+END(sa11x0_drain_readbuf)
+
diff --git a/sys/arm/arm/cpufunc_asm_sheeva.S b/sys/arm/arm/cpufunc_asm_sheeva.S
index d185547..796f63e 100644
--- a/sys/arm/arm/cpufunc_asm_sheeva.S
+++ b/sys/arm/arm/cpufunc_asm_sheeva.S
@@ -62,6 +62,7 @@ ENTRY(sheeva_setttb)
mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLBs */
RET
+END(sheeva_setttb)
ENTRY(sheeva_dcache_wbinv_range)
str lr, [sp, #-4]!
@@ -104,6 +105,7 @@ ENTRY(sheeva_dcache_wbinv_range)
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
ldr lr, [sp], #4
RET
+END(sheeva_dcache_wbinv_range)
ENTRY(sheeva_idcache_wbinv_range)
str lr, [sp, #-4]!
@@ -155,6 +157,7 @@ ENTRY(sheeva_idcache_wbinv_range)
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
ldr lr, [sp], #4
RET
+END(sheeva_idcache_wbinv_range)
ENTRY(sheeva_dcache_inv_range)
str lr, [sp, #-4]!
@@ -197,6 +200,7 @@ ENTRY(sheeva_dcache_inv_range)
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
ldr lr, [sp], #4
RET
+END(sheeva_dcache_inv_range)
ENTRY(sheeva_dcache_wb_range)
str lr, [sp, #-4]!
@@ -239,6 +243,7 @@ ENTRY(sheeva_dcache_wb_range)
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
ldr lr, [sp], #4
RET
+END(sheeva_dcache_wb_range)
ENTRY(sheeva_l2cache_wbinv_range)
str lr, [sp, #-4]!
@@ -283,6 +288,7 @@ ENTRY(sheeva_l2cache_wbinv_range)
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
ldr lr, [sp], #4
RET
+END(sheeva_l2cache_wbinv_range)
ENTRY(sheeva_l2cache_inv_range)
str lr, [sp, #-4]!
@@ -325,6 +331,7 @@ ENTRY(sheeva_l2cache_inv_range)
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
ldr lr, [sp], #4
RET
+END(sheeva_l2cache_inv_range)
ENTRY(sheeva_l2cache_wb_range)
str lr, [sp, #-4]!
@@ -367,6 +374,7 @@ ENTRY(sheeva_l2cache_wb_range)
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
ldr lr, [sp], #4
RET
+END(sheeva_l2cache_wb_range)
ENTRY(sheeva_l2cache_wbinv_all)
mov r0, #0
@@ -374,6 +382,7 @@ ENTRY(sheeva_l2cache_wbinv_all)
mcr p15, 1, r0, c15, c11, 0 /* Invalidate L2 */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(sheeva_l2cache_wbinv_all)
/* This function modifies register value as follows:
*
@@ -392,10 +401,12 @@ ENTRY(sheeva_control_ext)
mcrne p15, 1, r2, c15, c1, 0 /* Write new control register */
mov r0, r3 /* Return old value */
RET
+END(sheeva_control_ext)
ENTRY(sheeva_cpu_sleep)
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 /* Drain write buffer */
mcr p15, 0, r0, c7, c0, 4 /* Wait for interrupt */
mov pc, lr
+END(sheeva_cpu_sleep)
diff --git a/sys/arm/arm/cpufunc_asm_xscale.S b/sys/arm/arm/cpufunc_asm_xscale.S
index 3601b9a..56008dc 100644
--- a/sys/arm/arm/cpufunc_asm_xscale.S
+++ b/sys/arm/arm/cpufunc_asm_xscale.S
@@ -106,6 +106,7 @@ __FBSDID("$FreeBSD$");
ENTRY(xscale_cpwait)
CPWAIT_AND_RETURN(r0)
+END(xscale_cpwait)
/*
* We need a separate cpu_control() entry point, since we have to
@@ -123,6 +124,7 @@ ENTRY(xscale_control)
mov r0, r3 /* Return old value */
CPWAIT_AND_RETURN(r1)
+END(xscale_control)
/*
* Functions to set the MMU Translation Table Base register
@@ -167,6 +169,7 @@ ENTRY(xscale_setttb)
str r2, [r3]
#endif
RET
+END(xscale_setttb)
/*
* TLB functions
@@ -176,6 +179,7 @@ ENTRY(xscale_tlb_flushID_SE)
mcr p15, 0, r0, c8, c6, 1 /* flush D tlb single entry */
mcr p15, 0, r0, c8, c5, 1 /* flush I tlb single entry */
CPWAIT_AND_RETURN(r0)
+END(xscale_tlb_flushID_SE)
/*
* Cache functions
@@ -183,18 +187,22 @@ ENTRY(xscale_tlb_flushID_SE)
ENTRY(xscale_cache_flushID)
mcr p15, 0, r0, c7, c7, 0 /* flush I+D cache */
CPWAIT_AND_RETURN(r0)
+END(xscale_cache_flushID)
ENTRY(xscale_cache_flushI)
mcr p15, 0, r0, c7, c5, 0 /* flush I cache */
CPWAIT_AND_RETURN(r0)
+END(xscale_cache_flushI)
ENTRY(xscale_cache_flushD)
mcr p15, 0, r0, c7, c6, 0 /* flush D cache */
CPWAIT_AND_RETURN(r0)
+END(xscale_cache_flushD)
ENTRY(xscale_cache_flushI_SE)
mcr p15, 0, r0, c7, c5, 1 /* flush I cache single entry */
CPWAIT_AND_RETURN(r0)
+END(xscale_cache_flushI_SE)
ENTRY(xscale_cache_flushD_SE)
/*
@@ -205,10 +213,12 @@ ENTRY(xscale_cache_flushD_SE)
mcr p15, 0, r0, c7, c10, 1
mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */
CPWAIT_AND_RETURN(r0)
+END(xscale_cache_flushD_SE)
ENTRY(xscale_cache_cleanD_E)
mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */
CPWAIT_AND_RETURN(r0)
+END(xscale_cache_cleanD_E)
/*
* Information for the XScale cache clean/purge functions:
@@ -316,6 +326,11 @@ ENTRY(xscale_cache_cleanD)
XSCALE_CACHE_CLEAN_EPILOGUE
RET
+END(xscale_cache_syncI)
+END(xscale_cache_purgeID)
+END(xscale_cache_cleanID)
+END(xscale_cache_purgeD)
+END(xscale_cache_cleanD)
/*
* Clean the mini-data cache.
@@ -335,6 +350,7 @@ ENTRY(xscale_cache_clean_minidata)
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
CPWAIT_AND_RETURN(r1)
+END(xscale_cache_clean_minidata)
ENTRY(xscale_cache_purgeID_E)
mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */
@@ -343,6 +359,7 @@ ENTRY(xscale_cache_purgeID_E)
mcr p15, 0, r0, c7, c5, 1 /* flush I cache single entry */
mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */
CPWAIT_AND_RETURN(r1)
+END(xscale_cache_purgeID_E)
ENTRY(xscale_cache_purgeD_E)
mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */
@@ -350,6 +367,7 @@ ENTRY(xscale_cache_purgeD_E)
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */
CPWAIT_AND_RETURN(r1)
+END(xscale_cache_purgeD_E)
/*
* Soft functions
@@ -375,6 +393,8 @@ ENTRY(xscale_cache_cleanD_rng)
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
CPWAIT_AND_RETURN(r0)
+END(xscale_cache_cleanID_rng)
+END(xscale_cache_cleanD_rng)
ENTRY(xscale_cache_purgeID_rng)
cmp r1, #0x4000
@@ -396,6 +416,7 @@ ENTRY(xscale_cache_purgeID_rng)
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
CPWAIT_AND_RETURN(r0)
+END(xscale_cache_purgeID_rng)
ENTRY(xscale_cache_purgeD_rng)
cmp r1, #0x4000
@@ -416,6 +437,7 @@ ENTRY(xscale_cache_purgeD_rng)
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
CPWAIT_AND_RETURN(r0)
+END(xscale_cache_purgeD_rng)
ENTRY(xscale_cache_syncI_rng)
cmp r1, #0x4000
@@ -436,6 +458,7 @@ ENTRY(xscale_cache_syncI_rng)
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
CPWAIT_AND_RETURN(r0)
+END(xscale_cache_syncI_rng)
ENTRY(xscale_cache_flushD_rng)
and r2, r0, #0x1f
@@ -450,6 +473,7 @@ ENTRY(xscale_cache_flushD_rng)
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
CPWAIT_AND_RETURN(r0)
+END(xscale_cache_flushD_rng)
/*
* Context switch.
@@ -475,6 +499,7 @@ ENTRY(xscale_context_switch)
mcr p15, 0, r0, c8, c7, 0 /* flush the I+D tlb */
CPWAIT_AND_RETURN(r0)
+END(xscale_context_switch)
/*
* xscale_cpu_sleep
@@ -493,3 +518,5 @@ ENTRY(xscale_cpu_sleep)
1:
RET
+END(xscale_cpu_sleep)
+
diff --git a/sys/arm/arm/cpufunc_asm_xscale_c3.S b/sys/arm/arm/cpufunc_asm_xscale_c3.S
index 9a003d0..a0494d5 100644
--- a/sys/arm/arm/cpufunc_asm_xscale_c3.S
+++ b/sys/arm/arm/cpufunc_asm_xscale_c3.S
@@ -168,6 +168,11 @@ ENTRY(xscalec3_cache_cleanD)
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
RET
+END(xscalec3_cache_syncI)
+END(xscalec3_cache_purgeID)
+END(xscalec3_cache_cleanID)
+END(xscalec3_cache_purgeD)
+END(xscalec3_cache_cleanD)
ENTRY(xscalec3_cache_purgeID_rng)
@@ -189,6 +194,7 @@ ENTRY(xscalec3_cache_purgeID_rng)
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
CPWAIT_AND_RETURN(r0)
+END(xscalec3_cache_purgeID_rng)
ENTRY(xscalec3_cache_syncI_rng)
cmp r1, #0x4000
@@ -209,6 +215,7 @@ ENTRY(xscalec3_cache_syncI_rng)
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
CPWAIT_AND_RETURN(r0)
+END(xscalec3_cache_syncI_rng)
ENTRY(xscalec3_cache_purgeD_rng)
@@ -228,6 +235,8 @@ ENTRY(xscalec3_cache_purgeD_rng)
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
CPWAIT_AND_RETURN(r0)
+END(xscalec3_cache_purgeD_rng)
+
ENTRY(xscalec3_cache_cleanID_rng)
ENTRY(xscalec3_cache_cleanD_rng)
@@ -248,7 +257,8 @@ ENTRY(xscalec3_cache_cleanD_rng)
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
CPWAIT_AND_RETURN(r0)
-
+END(xscalec3_cache_cleanID_rng)
+END(xscalec3_cache_cleanD_rng)
ENTRY(xscalec3_l2cache_purge)
/* Clean-up the L2 cache */
@@ -271,6 +281,7 @@ ENTRY(xscalec3_l2cache_purge)
CPWAIT(r0)
mcr p15, 0, r0, c7, c10, 5 /* Data memory barrier */
RET
+END(xscalec3_l2cache_purge)
ENTRY(xscalec3_l2cache_clean_rng)
mcr p15, 0, r0, c7, c10, 5 /* Data memory barrier */
@@ -291,6 +302,7 @@ ENTRY(xscalec3_l2cache_clean_rng)
mcr p15, 0, r0, c7, c10, 5
CPWAIT_AND_RETURN(r0)
+END(xscalec3_l2cache_clean_rng)
ENTRY(xscalec3_l2cache_purge_rng)
@@ -310,6 +322,7 @@ ENTRY(xscalec3_l2cache_purge_rng)
mcr p15, 0, r0, c7, c10, 5
CPWAIT_AND_RETURN(r0)
+END(xscalec3_l2cache_purge_rng)
ENTRY(xscalec3_l2cache_flush_rng)
mcr p15, 0, r0, c7, c10, 5 /* Data memory barrier */
@@ -325,6 +338,8 @@ ENTRY(xscalec3_l2cache_flush_rng)
mcr p15, 0, r0, c7, c10, 4 @ data write barrier
mcr p15, 0, r0, c7, c10, 5
CPWAIT_AND_RETURN(r0)
+END(xscalec3_l2cache_flush_rng)
+
/*
* Functions to set the MMU Translation Table Base register
*
@@ -368,6 +383,7 @@ ENTRY(xscalec3_setttb)
str r2, [r3]
#endif
RET
+END(xscalec3_setttb)
/*
* Context switch.
@@ -395,3 +411,5 @@ ENTRY(xscalec3_context_switch)
mcr p15, 0, r0, c8, c7, 0 /* flush the I+D tlb */
CPWAIT_AND_RETURN(r0)
+END(xscalec3_context_switch)
+
diff --git a/sys/arm/arm/db_trace.c b/sys/arm/arm/db_trace.c
index f921cfd..90ac0b1 100644
--- a/sys/arm/arm/db_trace.c
+++ b/sys/arm/arm/db_trace.c
@@ -126,29 +126,6 @@ struct unwind_state {
uint16_t update_mask;
};
-/* We need to provide these but never use them */
-void __aeabi_unwind_cpp_pr0(void);
-void __aeabi_unwind_cpp_pr1(void);
-void __aeabi_unwind_cpp_pr2(void);
-
-void
-__aeabi_unwind_cpp_pr0(void)
-{
- panic("__aeabi_unwind_cpp_pr0");
-}
-
-void
-__aeabi_unwind_cpp_pr1(void)
-{
- panic("__aeabi_unwind_cpp_pr1");
-}
-
-void
-__aeabi_unwind_cpp_pr2(void)
-{
- panic("__aeabi_unwind_cpp_pr2");
-}
-
/* Expand a 31-bit signed value to a 32-bit signed value */
static __inline int32_t
db_expand_prel31(uint32_t prel31)
diff --git a/sys/arm/arm/elf_trampoline.c b/sys/arm/arm/elf_trampoline.c
index 31e8bc5..fe03adf 100644
--- a/sys/arm/arm/elf_trampoline.c
+++ b/sys/arm/arm/elf_trampoline.c
@@ -701,3 +701,18 @@ __start(void)
do_call(dst, kernel, dst + (unsigned int)(&func_end) -
(unsigned int)(&load_kernel) + 800, sp);
}
+
+#ifdef __ARM_EABI__
+/* We need to provide these functions but never call them */
+void __aeabi_unwind_cpp_pr0(void);
+void __aeabi_unwind_cpp_pr1(void);
+void __aeabi_unwind_cpp_pr2(void);
+
+__strong_reference(__aeabi_unwind_cpp_pr0, __aeabi_unwind_cpp_pr1);
+__strong_reference(__aeabi_unwind_cpp_pr0, __aeabi_unwind_cpp_pr2);
+void
+__aeabi_unwind_cpp_pr0(void)
+{
+}
+#endif
+
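For readers unfamiliar with the aliasing trick above: a minimal sketch, assuming the sys/cdefs.h definition of __strong_reference() (quoted from memory, not part of this diff), of why a single empty body can satisfy all three EABI personality-routine symbols:

    /* Illustration only -- not part of the commit. */
    #define __strong_reference(sym, aliassym) \
            extern __typeof(sym) aliassym __attribute__((__alias__(#sym)))
    /*
     * After expansion, __aeabi_unwind_cpp_pr1 and __aeabi_unwind_cpp_pr2
     * become linker aliases of __aeabi_unwind_cpp_pr0, so all three
     * references resolve while only one (empty) function body is emitted.
     */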
diff --git a/sys/arm/arm/exception.S b/sys/arm/arm/exception.S
index b2ce47b..2ff0840 100644
--- a/sys/arm/arm/exception.S
+++ b/sys/arm/arm/exception.S
@@ -70,6 +70,7 @@ ASENTRY_NP(reset_entry)
Lreset_panicmsg:
.asciz "Reset vector called, LR = 0x%08x"
.balign 4
+END(reset_entry)
/*
* swi_entry
@@ -77,8 +78,7 @@ Lreset_panicmsg:
* Handler for the Software Interrupt exception.
*/
ASENTRY_NP(swi_entry)
- .fnstart
- .cantunwind /* Don't unwind past here */
+ STOP_UNWINDING /* Don't unwind past here */
PUSHFRAME
@@ -91,7 +91,7 @@ ASENTRY_NP(swi_entry)
DO_AST
PULLFRAME
movs pc, lr /* Exit */
- .fnend
+END(swi_entry)
/*
* prefetch_abort_entry:
@@ -128,6 +128,7 @@ abortprefetch:
abortprefetchmsg:
.asciz "abortprefetch"
.align 0
+END(prefetch_abort_entry)
/*
* data_abort_entry:
@@ -163,6 +164,7 @@ abortdata:
abortdatamsg:
.asciz "abortdata"
.align 0
+END(data_abort_entry)
/*
* address_exception_entry:
@@ -183,6 +185,7 @@ ASENTRY_NP(address_exception_entry)
Laddress_exception_msg:
.asciz "Address Exception CPSR=0x%08x SPSR=0x%08x LR=0x%08x\n"
.balign 4
+END(address_exception_entry)
/*
* General exception exit handler
@@ -224,6 +227,7 @@ ASENTRY_NP(undefined_entry)
Lundefined_handler_indirection:
.word Lundefined_handler_indirection_data
+END(undefined_entry)
/*
* assembly bounce code for calling the kernel
@@ -254,3 +258,4 @@ Lundefined_handler_indirection_data:
.global _C_LABEL(undefined_handler_address)
_C_LABEL(undefined_handler_address):
.word _C_LABEL(undefinedinstruction_bounce)
+END(undefinedinstruction_bounce)
diff --git a/sys/arm/arm/fiq_subr.S b/sys/arm/arm/fiq_subr.S
index 4cde665..7f510b2 100644
--- a/sys/arm/arm/fiq_subr.S
+++ b/sys/arm/arm/fiq_subr.S
@@ -74,6 +74,7 @@ ENTRY(fiq_getregs)
BACK_TO_SVC_MODE
RET
+END(fiq_getregs)
/*
* fiq_setregs:
@@ -88,6 +89,7 @@ ENTRY(fiq_setregs)
BACK_TO_SVC_MODE
RET
+END(fiq_setregs)
/*
* fiq_nullhandler:
diff --git a/sys/arm/arm/fusu.S b/sys/arm/arm/fusu.S
index edf1a63..443ca21 100644
--- a/sys/arm/arm/fusu.S
+++ b/sys/arm/arm/fusu.S
@@ -76,6 +76,8 @@ ENTRY(casuword)
mov r1, #0x00000000
str r1, [r3, #PCB_ONFAULT]
RET
+END(casuword32)
+END(casuword)
/*
* Handle faults from casuword. Clean up and return -1.
@@ -87,6 +89,7 @@ ENTRY(casuword)
mvn r0, #0x00000000
ldmfd sp!, {r4, r5}
RET
+
/*
* fuword(caddr_t uaddr);
* Fetch an int from the user's address space.
@@ -111,6 +114,8 @@ ENTRY(fuword)
str r1, [r2, #PCB_ONFAULT]
mov r0, r3
RET
+END(fuword32)
+END(fuword)
/*
* fusword(caddr_t uaddr);
@@ -139,6 +144,7 @@ ENTRY(fusword)
mov r1, #0x00000000
str r1, [r2, #PCB_ONFAULT]
RET
+END(fusword)
/*
* fuswintr(caddr_t uaddr);
@@ -175,6 +181,7 @@ ENTRY(fuswintr)
mov r1, #0x00000000
str r1, [r2, #PCB_ONFAULT]
RET
+END(fuswintr)
Lblock_userspace_access:
.word _C_LABEL(block_userspace_access)
@@ -209,6 +216,7 @@ ENTRY(fubyte)
str r1, [r2, #PCB_ONFAULT]
mov r0, r3
RET
+END(fubyte)
/*
* Handle faults from [fs]u*(). Clean up and return -1.
@@ -272,6 +280,8 @@ ENTRY(suword)
mov r0, #0x00000000
str r0, [r2, #PCB_ONFAULT]
RET
+END(suword32)
+END(suword)
/*
* suswintr(caddr_t uaddr, short x);
@@ -309,6 +319,7 @@ ENTRY(suswintr)
mov r0, #0x00000000
str r0, [r2, #PCB_ONFAULT]
RET
+END(suswintr)
/*
* susword(caddr_t uaddr, short x);
@@ -339,6 +350,7 @@ ENTRY(susword)
mov r0, #0x00000000
str r0, [r2, #PCB_ONFAULT]
RET
+END(susword)
/*
* subyte(caddr_t uaddr, char x);
@@ -362,3 +374,5 @@ ENTRY(subyte)
mov r0, #0x00000000
str r0, [r2, #PCB_ONFAULT]
RET
+END(subyte)
+
diff --git a/sys/arm/arm/in_cksum_arm.S b/sys/arm/arm/in_cksum_arm.S
index 3646c64..6305caf 100644
--- a/sys/arm/arm/in_cksum_arm.S
+++ b/sys/arm/arm/in_cksum_arm.S
@@ -90,13 +90,15 @@ ENTRY(in_cksum)
and r0, r0, r1
eor r0, r0, r1
ldmfd sp!, {r4-r11,pc}
-
+END(in_cksum)
ENTRY(do_cksum)
stmfd sp!, {r4-r7, lr}
bl L_cksumdata
mov r0, r2
ldmfd sp!, {r4-r7, pc}
+END(do_cksum)
+
/*
* The main in*_cksum() workhorse...
*
@@ -337,3 +339,5 @@ ASENTRY_NP(L_cksumdata)
adds r2, r2, r3
adc r2, r2, #0x00
RET
+END(L_cksumdata)
+
diff --git a/sys/arm/arm/irq_dispatch.S b/sys/arm/arm/irq_dispatch.S
index 6e510dd..823091d 100644
--- a/sys/arm/arm/irq_dispatch.S
+++ b/sys/arm/arm/irq_dispatch.S
@@ -97,6 +97,7 @@ ASENTRY_NP(irq_entry)
DO_AST
PULLFRAMEFROMSVCANDEXIT
movs pc, lr /* Exit */
+END(irq_entry)
.data
.align 0
diff --git a/sys/arm/arm/locore.S b/sys/arm/arm/locore.S
index 37e88fe..51fd5c1 100644
--- a/sys/arm/arm/locore.S
+++ b/sys/arm/arm/locore.S
@@ -242,6 +242,9 @@ Lstartup_pagetable:
Lstartup_pagetable_secondary:
.word temp_pagetable
#endif
+END(btext)
+END(_start)
+
mmu_init_table:
/* fill all table VA==PA */
/* map SDRAM VA==PA, WT cacheable */
@@ -324,6 +327,7 @@ ASENTRY_NP(mptramp)
Lpmureg:
.word 0xd0022124
+END(mptramp)
ASENTRY_NP(mpentry)
@@ -408,6 +412,7 @@ mpvirt_done:
.Lmpreturned:
.asciz "main() returned"
.align 0
+END(mpentry)
#endif
ENTRY_NP(cpu_halt)
@@ -461,6 +466,7 @@ ENTRY_NP(cpu_halt)
*/
.Lcpu_reset_needs_v4_MMU_disable:
.word _C_LABEL(cpu_reset_needs_v4_MMU_disable)
+END(cpu_halt)
/*
@@ -470,11 +476,13 @@ ENTRY(setjmp)
stmia r0, {r4-r14}
mov r0, #0x00000000
RET
+END(setjmp)
ENTRY(longjmp)
ldmia r0, {r4-r14}
mov r0, #0x00000001
RET
+END(longjmp)
.data
.global _C_LABEL(esym)
@@ -482,6 +490,7 @@ _C_LABEL(esym): .word _C_LABEL(end)
ENTRY_NP(abort)
b _C_LABEL(abort)
+END(abort)
ENTRY_NP(sigcode)
mov r0, sp
@@ -517,4 +526,5 @@ ENTRY_NP(sigcode)
.global szsigcode
szsigcode:
.long esigcode-sigcode
+END(sigcode)
/* End of locore.S */
diff --git a/sys/arm/arm/pmap-v6.c b/sys/arm/arm/pmap-v6.c
index 2affa3e..0083f29 100644
--- a/sys/arm/arm/pmap-v6.c
+++ b/sys/arm/arm/pmap-v6.c
@@ -3313,6 +3313,45 @@ pmap_copy_page_generic(vm_paddr_t src, vm_paddr_t dst)
}
void
+pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
+ vm_offset_t b_offset, int xfersize)
+{
+ vm_page_t a_pg, b_pg;
+ vm_offset_t a_pg_offset, b_pg_offset;
+ int cnt;
+
+ mtx_lock(&cmtx);
+ while (xfersize > 0) {
+ a_pg = ma[a_offset >> PAGE_SHIFT];
+ a_pg_offset = a_offset & PAGE_MASK;
+ cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
+ b_pg = mb[b_offset >> PAGE_SHIFT];
+ b_pg_offset = b_offset & PAGE_MASK;
+ cnt = min(cnt, PAGE_SIZE - b_pg_offset);
+ *csrc_pte = L2_S_PROTO | VM_PAGE_TO_PHYS(a_pg) |
+ pte_l2_s_cache_mode;
+ pmap_set_prot(csrc_pte, VM_PROT_READ, 0);
+ PTE_SYNC(csrc_pte);
+ *cdst_pte = L2_S_PROTO | VM_PAGE_TO_PHYS(b_pg) |
+ pte_l2_s_cache_mode;
+ pmap_set_prot(cdst_pte, VM_PROT_READ | VM_PROT_WRITE, 0);
+ PTE_SYNC(cdst_pte);
+ cpu_tlb_flushD_SE(csrcp);
+ cpu_tlb_flushD_SE(cdstp);
+ cpu_cpwait();
+ bcopy((char *)csrcp + a_pg_offset, (char *)cdstp + b_pg_offset,
+ cnt);
+ cpu_idcache_wbinv_range(cdstp + b_pg_offset, cnt);
+ pmap_l2cache_wbinv_range(cdstp + b_pg_offset,
+ VM_PAGE_TO_PHYS(b_pg) + b_pg_offset, cnt);
+ xfersize -= cnt;
+ a_offset += cnt;
+ b_offset += cnt;
+ }
+ mtx_unlock(&cmtx);
+}
+
+void
pmap_copy_page(vm_page_t src, vm_page_t dst)
{
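A worked sketch of the cnt clamping used in the loop above (values invented for illustration): with a 4096-byte page, a source offset of 4000 and a destination offset of 100, the copy length is limited by whichever side reaches a page boundary first:

    /* Illustration only, mirroring the arithmetic in pmap_copy_pages(). */
    int cnt, xfersize = 1000;
    vm_offset_t a_pg_offset = 4000 & PAGE_MASK;     /* 4000 */
    vm_offset_t b_pg_offset = 100 & PAGE_MASK;      /* 100 */

    cnt = min(xfersize, PAGE_SIZE - a_pg_offset);   /* min(1000, 96) -> 96 */
    cnt = min(cnt, PAGE_SIZE - b_pg_offset);        /* min(96, 3996) -> 96 */
    /* Only 96 bytes move this pass; the next iteration maps the
     * following source page and continues from its start. */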
diff --git a/sys/arm/arm/pmap.c b/sys/arm/arm/pmap.c
index 7070cb2..c18783b 100644
--- a/sys/arm/arm/pmap.c
+++ b/sys/arm/arm/pmap.c
@@ -258,6 +258,9 @@ pt_entry_t pte_l1_c_proto;
pt_entry_t pte_l2_s_proto;
void (*pmap_copy_page_func)(vm_paddr_t, vm_paddr_t);
+void (*pmap_copy_page_offs_func)(vm_paddr_t a_phys,
+ vm_offset_t a_offs, vm_paddr_t b_phys, vm_offset_t b_offs,
+ int cnt);
void (*pmap_zero_page_func)(vm_paddr_t, int, int);
struct msgbuf *msgbufp = 0;
@@ -400,6 +403,13 @@ static vm_paddr_t pmap_kernel_l2ptp_phys;
static int pv_entry_count=0, pv_entry_max=0, pv_entry_high_water=0;
static struct rwlock pvh_global_lock;
+void pmap_copy_page_offs_generic(vm_paddr_t a_phys, vm_offset_t a_offs,
+ vm_paddr_t b_phys, vm_offset_t b_offs, int cnt);
+#if ARM_MMU_XSCALE == 1
+void pmap_copy_page_offs_xscale(vm_paddr_t a_phys, vm_offset_t a_offs,
+ vm_paddr_t b_phys, vm_offset_t b_offs, int cnt);
+#endif
+
/*
* This list exists for the benefit of pmap_map_chunk(). It keeps track
* of the kernel L2 tables during bootstrap, so that pmap_map_chunk() can
@@ -484,6 +494,7 @@ pmap_pte_init_generic(void)
pte_l2_s_proto = L2_S_PROTO_generic;
pmap_copy_page_func = pmap_copy_page_generic;
+ pmap_copy_page_offs_func = pmap_copy_page_offs_generic;
pmap_zero_page_func = pmap_zero_page_generic;
}
@@ -660,6 +671,7 @@ pmap_pte_init_xscale(void)
#ifdef CPU_XSCALE_CORE3
pmap_copy_page_func = pmap_copy_page_generic;
+ pmap_copy_page_offs_func = pmap_copy_page_offs_generic;
pmap_zero_page_func = pmap_zero_page_generic;
xscale_use_minidata = 0;
/* Make sure it is L2-cachable */
@@ -672,6 +684,7 @@ pmap_pte_init_xscale(void)
#else
pmap_copy_page_func = pmap_copy_page_xscale;
+ pmap_copy_page_offs_func = pmap_copy_page_offs_xscale;
pmap_zero_page_func = pmap_zero_page_xscale;
#endif
@@ -4300,6 +4313,29 @@ pmap_copy_page_generic(vm_paddr_t src, vm_paddr_t dst)
cpu_l2cache_inv_range(csrcp, PAGE_SIZE);
cpu_l2cache_wbinv_range(cdstp, PAGE_SIZE);
}
+
+void
+pmap_copy_page_offs_generic(vm_paddr_t a_phys, vm_offset_t a_offs,
+ vm_paddr_t b_phys, vm_offset_t b_offs, int cnt)
+{
+
+ mtx_lock(&cmtx);
+ *csrc_pte = L2_S_PROTO | a_phys |
+ L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | pte_l2_s_cache_mode;
+ PTE_SYNC(csrc_pte);
+ *cdst_pte = L2_S_PROTO | b_phys |
+ L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
+ PTE_SYNC(cdst_pte);
+ cpu_tlb_flushD_SE(csrcp);
+ cpu_tlb_flushD_SE(cdstp);
+ cpu_cpwait();
+ bcopy((char *)csrcp + a_offs, (char *)cdstp + b_offs, cnt);
+ mtx_unlock(&cmtx);
+ cpu_dcache_inv_range(csrcp + a_offs, cnt);
+ cpu_dcache_wbinv_range(cdstp + b_offs, cnt);
+ cpu_l2cache_inv_range(csrcp + a_offs, cnt);
+ cpu_l2cache_wbinv_range(cdstp + b_offs, cnt);
+}
#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 */
#if ARM_MMU_XSCALE == 1
@@ -4344,6 +4380,28 @@ pmap_copy_page_xscale(vm_paddr_t src, vm_paddr_t dst)
mtx_unlock(&cmtx);
xscale_cache_clean_minidata();
}
+
+void
+pmap_copy_page_offs_xscale(vm_paddr_t a_phys, vm_offset_t a_offs,
+ vm_paddr_t b_phys, vm_offset_t b_offs, int cnt)
+{
+
+ mtx_lock(&cmtx);
+ *csrc_pte = L2_S_PROTO | a_phys |
+ L2_S_PROT(PTE_KERNEL, VM_PROT_READ) |
+ L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);
+ PTE_SYNC(csrc_pte);
+ *cdst_pte = L2_S_PROTO | b_phys |
+ L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) |
+ L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);
+ PTE_SYNC(cdst_pte);
+ cpu_tlb_flushD_SE(csrcp);
+ cpu_tlb_flushD_SE(cdstp);
+ cpu_cpwait();
+ bcopy((char *)csrcp + a_offs, (char *)cdstp + b_offs, cnt);
+ mtx_unlock(&cmtx);
+ xscale_cache_clean_minidata();
+}
#endif /* ARM_MMU_XSCALE == 1 */
void
@@ -4370,8 +4428,41 @@ pmap_copy_page(vm_page_t src, vm_page_t dst)
#endif
}
+void
+pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
+ vm_offset_t b_offset, int xfersize)
+{
+ vm_page_t a_pg, b_pg;
+ vm_offset_t a_pg_offset, b_pg_offset;
+ int cnt;
+#ifdef ARM_USE_SMALL_ALLOC
+ vm_offset_t a_va, b_va;
+#endif
-
+ cpu_dcache_wbinv_all();
+ cpu_l2cache_wbinv_all();
+ while (xfersize > 0) {
+ a_pg = ma[a_offset >> PAGE_SHIFT];
+ a_pg_offset = a_offset & PAGE_MASK;
+ cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
+ b_pg = mb[b_offset >> PAGE_SHIFT];
+ b_pg_offset = b_offset & PAGE_MASK;
+ cnt = min(cnt, PAGE_SIZE - b_pg_offset);
+#ifdef ARM_USE_SMALL_ALLOC
+ a_va = arm_ptovirt(VM_PAGE_TO_PHYS(a_pg)) + a_pg_offset;
+ b_va = arm_ptovirt(VM_PAGE_TO_PHYS(b_pg)) + b_pg_offset;
+ bcopy((char *)a_va, (char *)b_va, cnt);
+ cpu_dcache_wbinv_range(b_va, cnt);
+ cpu_l2cache_wbinv_range(b_va, cnt);
+#else
+ pmap_copy_page_offs_func(VM_PAGE_TO_PHYS(a_pg), a_pg_offset,
+ VM_PAGE_TO_PHYS(b_pg), b_pg_offset, cnt);
+#endif
+ xfersize -= cnt;
+ a_offset += cnt;
+ b_offset += cnt;
+ }
+}
/*
* this routine returns true if a physical page resides
diff --git a/sys/arm/arm/setcpsr.S b/sys/arm/arm/setcpsr.S
index 4597d53..ac86ba3 100644
--- a/sys/arm/arm/setcpsr.S
+++ b/sys/arm/arm/setcpsr.S
@@ -66,6 +66,7 @@ ENTRY_NP(SetCPSR)
mov r0, r3 /* Return the old CPSR */
RET
+END(SetCPSR)
/* Gets the CPSR register
@@ -77,4 +78,5 @@ ENTRY_NP(GetCPSR)
mrs r0, cpsr /* Get the CPSR */
RET
+END(GetCPSR)
diff --git a/sys/arm/arm/support.S b/sys/arm/arm/support.S
index d4c6fb4..0c117a9 100644
--- a/sys/arm/arm/support.S
+++ b/sys/arm/arm/support.S
@@ -277,6 +277,8 @@ do_memset:
strgeb r3, [ip], #0x01 /* Set another byte */
strgtb r3, [ip] /* and a third */
RET /* Exit */
+END(bzero)
+END(memset)
ENTRY(bcmp)
mov ip, r0
@@ -386,6 +388,7 @@ ENTRY(bcmp)
RETne /* Return if mismatch on #4 */
sub r0, r3, r2 /* r0 = b1#5 - b2#5 */
RET
+END(bcmp)
ENTRY(bcopy)
/* switch the source and destination registers */
@@ -929,6 +932,8 @@ ENTRY(memmove)
.Lmemmove_bsrcul1l4:
add r1, r1, #1
b .Lmemmove_bl4
+END(bcopy)
+END(memmove)
#if !defined(_ARM_ARCH_5E)
ENTRY(memcpy)
@@ -1164,6 +1169,8 @@ ENTRY(memcpy)
.Lmemcpy_srcul3l4:
sub r1, r1, #1
b .Lmemcpy_l4
+END(memcpy)
+
#else
/* LINTSTUB: Func: void *memcpy(void *dst, const void *src, size_t len) */
ENTRY(memcpy)
@@ -2932,6 +2939,7 @@ ENTRY(memcpy)
strh r2, [r0, #0x09]
strb r1, [r0, #0x0b]
RET
+END(memcpy)
#endif /* _ARM_ARCH_5E */
#ifdef GPROF
diff --git a/sys/arm/arm/swtch.S b/sys/arm/arm/swtch.S
index 4257557..f10b8f9 100644
--- a/sys/arm/arm/swtch.S
+++ b/sys/arm/arm/swtch.S
@@ -213,6 +213,7 @@ ENTRY(cpu_throw)
add sp, sp, #4;
ldmfd sp!, {r4-r7, pc}
+END(cpu_throw)
ENTRY(cpu_switch)
stmfd sp!, {r4-r7, lr}
@@ -502,6 +503,8 @@ ENTRY(cpu_switch)
.Lswitch_panic_str:
.asciz "cpu_switch: sched_qs empty with non-zero sched_whichqs!\n"
#endif
+END(cpu_switch)
+
ENTRY(savectx)
stmfd sp!, {r4-r7, lr}
sub sp, sp, #4
@@ -534,6 +537,7 @@ ENTRY(savectx)
#endif /* ARM_VFP_SUPPORT */
add sp, sp, #4;
ldmfd sp!, {r4-r7, pc}
+END(savectx)
ENTRY(fork_trampoline)
mov r1, r5
@@ -551,3 +555,5 @@ ENTRY(fork_trampoline)
movs pc, lr /* Exit */
AST_LOCALS
+END(fork_trampoline)
+
diff --git a/sys/arm/broadcom/bcm2835/bcm2835_sdhci.c b/sys/arm/broadcom/bcm2835/bcm2835_sdhci.c
index 3512954..f4a2de6 100644
--- a/sys/arm/broadcom/bcm2835/bcm2835_sdhci.c
+++ b/sys/arm/broadcom/bcm2835/bcm2835_sdhci.c
@@ -122,9 +122,7 @@ struct bcm_sdhci_softc {
int sc_dma_ch;
bus_dma_tag_t sc_dma_tag;
bus_dmamap_t sc_dma_map;
- void *sc_dma_buffer;
- vm_paddr_t sc_dma_buffer_phys;
- vm_paddr_t sc_sdhci_buffer_phys;;
+ vm_paddr_t sc_sdhci_buffer_phys;
};
static int bcm_sdhci_probe(device_t);
@@ -171,9 +169,6 @@ bcm_sdhci_attach(device_t dev)
phandle_t node;
pcell_t cell;
int default_freq;
- void *buffer;
- vm_paddr_t buffer_phys;
- void *va;
sc->sc_dev = dev;
sc->sc_req = NULL;
@@ -210,7 +205,7 @@ bcm_sdhci_attach(device_t dev)
goto fail;
}
- if (bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_MISC | INTR_MPSAFE,
+ if (bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_BIO | INTR_MPSAFE,
NULL, bcm_sdhci_intr, sc, &sc->sc_intrhand))
{
bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem_res);
@@ -243,7 +238,7 @@ bcm_sdhci_attach(device_t dev)
bcm_dma_setup_intr(sc->sc_dma_ch, bcm_sdhci_dma_intr, sc);
- /* Allocate DMA buffers */
+ /* Allocate bus_dma resources. */
err = bus_dma_tag_create(bus_get_dma_tag(dev),
1, 0, BUS_SPACE_MAXADDR_32BIT,
BUS_SPACE_MAXADDR, NULL, NULL,
@@ -256,37 +251,14 @@ bcm_sdhci_attach(device_t dev)
goto fail;
}
- err = bus_dmamem_alloc(sc->sc_dma_tag, &buffer,
- BUS_DMA_WAITOK | BUS_DMA_COHERENT| BUS_DMA_ZERO,
- &sc->sc_dma_map);
-
- if (err) {
- device_printf(dev, "cannot allocate DMA memory\n");
- goto fail;
- }
-
- err = bus_dmamap_load(sc->sc_dma_tag, sc->sc_dma_map, buffer,
- BCM_SDHCI_BUFFER_SIZE, bcm_dmamap_cb, &buffer_phys,
- BUS_DMA_WAITOK);
+ err = bus_dmamap_create(sc->sc_dma_tag, 0, &sc->sc_dma_map);
if (err) {
- device_printf(dev, "cannot load DMA memory\n");
- goto fail;
- }
-
- /*
- * Sanity check: two least bits of address should be zero
- */
- if ((uintptr_t)buffer & 3) {
- device_printf(dev,
- "DMA address is not word-aligned\n");
+ device_printf(dev, "bus_dmamap_create failed\n");
goto fail;
}
- sc->sc_dma_buffer = buffer;
- sc->sc_dma_buffer_phys = buffer_phys;
- va = (void*)rman_get_start(sc->sc_mem_res);
- sc->sc_sdhci_buffer_phys =
- pmap_kextract((vm_offset_t)va) + SDHCI_BUFFER;
+ sc->sc_sdhci_buffer_phys = BUS_SPACE_PHYSADDR(sc->sc_mem_res,
+ SDHCI_BUFFER);
bus_generic_probe(dev);
bus_generic_attach(dev);
@@ -447,27 +419,23 @@ bcm_sdhci_dma_intr(int ch, void *arg)
struct bcm_sdhci_softc *sc = (struct bcm_sdhci_softc *)arg;
struct sdhci_slot *slot = &sc->sc_slot;
uint32_t reg, mask;
- void *buffer;
+ bus_addr_t pmem;
+ vm_paddr_t pdst, psrc;
size_t len;
- int left;
+ int left, sync_op;
mtx_lock(&slot->mtx);
- /* copy DMA buffer to VA if READ */
len = bcm_dma_length(sc->sc_dma_ch);
if (slot->curcmd->data->flags & MMC_DATA_READ) {
- bus_dmamap_sync(sc->sc_dma_tag, sc->sc_dma_map,
- BUS_DMASYNC_POSTREAD);
-
+ sync_op = BUS_DMASYNC_POSTREAD;
mask = SDHCI_INT_DATA_AVAIL;
- /* all dma data in single or contiguous page */
- buffer = (uint8_t*)(slot->curcmd->data->data) + slot->offset;
- memcpy(buffer, sc->sc_dma_buffer, len);
} else {
- bus_dmamap_sync(sc->sc_dma_tag, sc->sc_dma_map,
- BUS_DMASYNC_POSTWRITE);
+ sync_op = BUS_DMASYNC_POSTWRITE;
mask = SDHCI_INT_SPACE_AVAIL;
}
+ bus_dmamap_sync(sc->sc_dma_tag, sc->sc_dma_map, sync_op);
+ bus_dmamap_unload(sc->sc_dma_tag, sc->sc_dma_map);
slot->offset += len;
sc->sc_dma_inuse = 0;
@@ -501,27 +469,22 @@ bcm_sdhci_dma_intr(int ch, void *arg)
SDHCI_INT_STATUS, mask);
/* continue next DMA transfer */
+ bus_dmamap_load(sc->sc_dma_tag, sc->sc_dma_map,
+ (uint8_t *)slot->curcmd->data->data +
+ slot->offset, left, bcm_dmamap_cb, &pmem, 0);
if (slot->curcmd->data->flags & MMC_DATA_READ) {
- bus_dmamap_sync(sc->sc_dma_tag, sc->sc_dma_map,
- BUS_DMASYNC_PREREAD);
-
- /* DMA start */
- if (bcm_dma_start(sc->sc_dma_ch,
- sc->sc_sdhci_buffer_phys,
- sc->sc_dma_buffer_phys, left) != 0)
- device_printf(sc->sc_dev, "failed DMA start\n");
+ psrc = sc->sc_sdhci_buffer_phys;
+ pdst = pmem;
+ sync_op = BUS_DMASYNC_PREREAD;
} else {
- buffer = (char*)slot->curcmd->data->data + slot->offset;
- memcpy(sc->sc_dma_buffer, buffer, left);
-
- bus_dmamap_sync(sc->sc_dma_tag,
- sc->sc_dma_map, BUS_DMASYNC_PREWRITE);
-
- /* DMA start */
- if (bcm_dma_start(sc->sc_dma_ch,
- sc->sc_dma_buffer_phys,
- sc->sc_sdhci_buffer_phys, left) != 0)
- device_printf(sc->sc_dev, "failed DMA start\n");
+ psrc = pmem;
+ pdst = sc->sc_sdhci_buffer_phys;
+ sync_op = BUS_DMASYNC_PREWRITE;
+ }
+ bus_dmamap_sync(sc->sc_dma_tag, sc->sc_dma_map, sync_op);
+ if (bcm_dma_start(sc->sc_dma_ch, psrc, pdst, left)) {
+ /* XXX stop xfer, other error recovery? */
+ device_printf(sc->sc_dev, "failed DMA start\n");
}
} else {
/* wait for next data by INT */
@@ -542,6 +505,7 @@ bcm_sdhci_read_dma(struct sdhci_slot *slot)
{
struct bcm_sdhci_softc *sc = device_get_softc(slot->bus);
size_t left;
+ bus_addr_t paddr;
if (sc->sc_dma_inuse) {
device_printf(sc->sc_dev, "DMA in use\n");
@@ -562,12 +526,16 @@ bcm_sdhci_read_dma(struct sdhci_slot *slot)
BCM_DMA_INC_ADDR,
(left & 0xf) ? BCM_DMA_32BIT : BCM_DMA_128BIT);
+ bus_dmamap_load(sc->sc_dma_tag, sc->sc_dma_map,
+ (uint8_t *)slot->curcmd->data->data + slot->offset, left,
+ bcm_dmamap_cb, &paddr, 0);
+
bus_dmamap_sync(sc->sc_dma_tag, sc->sc_dma_map,
BUS_DMASYNC_PREREAD);
/* DMA start */
if (bcm_dma_start(sc->sc_dma_ch, sc->sc_sdhci_buffer_phys,
- sc->sc_dma_buffer_phys, left) != 0)
+ paddr, left) != 0)
device_printf(sc->sc_dev, "failed DMA start\n");
}
@@ -575,8 +543,8 @@ static void
bcm_sdhci_write_dma(struct sdhci_slot *slot)
{
struct bcm_sdhci_softc *sc = device_get_softc(slot->bus);
- char *buffer;
size_t left;
+ bus_addr_t paddr;
if (sc->sc_dma_inuse) {
device_printf(sc->sc_dev, "DMA in use\n");
@@ -591,8 +559,9 @@ bcm_sdhci_write_dma(struct sdhci_slot *slot)
KASSERT((left & 3) == 0,
("%s: len = %d, not word-aligned", __func__, left));
- buffer = (char*)slot->curcmd->data->data + slot->offset;
- memcpy(sc->sc_dma_buffer, buffer, left);
+ bus_dmamap_load(sc->sc_dma_tag, sc->sc_dma_map,
+ (uint8_t *)slot->curcmd->data->data + slot->offset, left,
+ bcm_dmamap_cb, &paddr, 0);
bcm_dma_setup_src(sc->sc_dma_ch, BCM_DMA_DREQ_NONE,
BCM_DMA_INC_ADDR,
@@ -604,7 +573,7 @@ bcm_sdhci_write_dma(struct sdhci_slot *slot)
BUS_DMASYNC_PREWRITE);
/* DMA start */
- if (bcm_dma_start(sc->sc_dma_ch, sc->sc_dma_buffer_phys,
+ if (bcm_dma_start(sc->sc_dma_ch, paddr,
sc->sc_sdhci_buffer_phys, left) != 0)
device_printf(sc->sc_dev, "failed DMA start\n");
}
@@ -614,11 +583,16 @@ bcm_sdhci_will_handle_transfer(device_t dev, struct sdhci_slot *slot)
{
size_t left;
- /* Do not use DMA for transfers less then block size */
+ /*
+ * Do not use DMA for transfers less than block size or with a length
+ * that is not a multiple of four.
+ */
left = min(BCM_DMA_BLOCK_SIZE,
slot->curcmd->data->len - slot->offset);
if (left < BCM_DMA_BLOCK_SIZE)
return (0);
+ if (left & 0x03)
+ return (0);
return (1);
}
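The new length test works because the controller's DMA engine moves 32-bit words; a tiny illustration of the rejected case (the value is made up):

    /* Illustration only: (left & 0x03) != 0 catches lengths that are not
     * a multiple of 4, e.g. left = 510 -> 510 & 3 == 2, so the driver
     * declines the transfer and the sdhci layer falls back to PIO. */
    size_t left = 510;
    if (left & 0x03)
            return (0);     /* "we will not handle this transfer" */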
diff --git a/sys/arm/include/asm.h b/sys/arm/include/asm.h
index 3ae25b8..81f67a3 100644
--- a/sys/arm/include/asm.h
+++ b/sys/arm/include/asm.h
@@ -66,6 +66,16 @@
# define _ALIGN_TEXT .align 0
#endif
+#ifdef __ARM_EABI__
+#define STOP_UNWINDING .cantunwind
+#define _FNSTART .fnstart
+#define _FNEND .fnend
+#else
+#define STOP_UNWINDING
+#define _FNSTART
+#define _FNEND
+#endif
+
/*
* gas/arm uses @ as a single comment character and thus cannot be used here
 * Instead it recognises the # rather than the @ symbol in .type directives
@@ -76,7 +86,9 @@
#define _ASM_TYPE_OBJECT #object
#define GLOBAL(X) .globl x
#define _ENTRY(x) \
- .text; _ALIGN_TEXT; .globl x; .type x,_ASM_TYPE_FUNCTION; x:
+ .text; _ALIGN_TEXT; .globl x; .type x,_ASM_TYPE_FUNCTION; x: _FNSTART
+
+#define END(x) .size x, . - x; _FNEND
#ifdef GPROF
# define _PROF_PROLOGUE \
diff --git a/sys/arm/include/bus.h b/sys/arm/include/bus.h
index cabf1f7..ce8f5ad 100644
--- a/sys/arm/include/bus.h
+++ b/sys/arm/include/bus.h
@@ -725,4 +725,12 @@ bs_c_8_proto(f);
#include <machine/bus_dma.h>
+/*
+ * Get the physical address of a bus space memory-mapped resource.
+ * Doing this as a macro is a temporary solution until a more robust fix is
+ * designed. It also serves to mark the locations needing that fix.
+ */
+#define BUS_SPACE_PHYSADDR(res, offs) \
+ (vtophys(rman_get_start(res)+(offs)))
+
#endif /* _MACHINE_BUS_H_ */
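A minimal usage sketch for BUS_SPACE_PHYSADDR() (hypothetical driver fragment; the two real call sites appear in the if_cpsw.c and ti_mmchs.c hunks below):

    /* Sketch: derive a DMA-capable physical address for a device FIFO,
     * assuming sc->sc_mem_res is an activated SYS_RES_MEMORY resource
     * and FIFO_OFFSET is a made-up register offset. */
    bus_addr_t fifo_pa;

    fifo_pa = BUS_SPACE_PHYSADDR(sc->sc_mem_res, FIFO_OFFSET);
    /* fifo_pa may now be handed to a DMA engine as a transfer address. */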
diff --git a/sys/arm/include/pmap.h b/sys/arm/include/pmap.h
index 523499f..7c8d073 100644
--- a/sys/arm/include/pmap.h
+++ b/sys/arm/include/pmap.h
@@ -533,6 +533,8 @@ extern pt_entry_t pte_l1_c_proto;
extern pt_entry_t pte_l2_s_proto;
extern void (*pmap_copy_page_func)(vm_paddr_t, vm_paddr_t);
+extern void (*pmap_copy_page_offs_func)(vm_paddr_t a_phys,
+ vm_offset_t a_offs, vm_paddr_t b_phys, vm_offset_t b_offs, int cnt);
extern void (*pmap_zero_page_func)(vm_paddr_t, int, int);
#if (ARM_MMU_GENERIC + ARM_MMU_V6 + ARM_MMU_V7 + ARM_MMU_SA1) != 0 || defined(CPU_XSCALE_81342)
diff --git a/sys/arm/ti/cpsw/if_cpsw.c b/sys/arm/ti/cpsw/if_cpsw.c
index 93702df..4b01a74 100644
--- a/sys/arm/ti/cpsw/if_cpsw.c
+++ b/sys/arm/ti/cpsw/if_cpsw.c
@@ -327,7 +327,7 @@ cpsw_debugf(const char *fmt, ...)
#define cpsw_cpdma_bd_offset(i) (CPSW_CPPI_RAM_OFFSET + ((i)*16))
#define cpsw_cpdma_bd_paddr(sc, slot) \
- (slot->bd_offset + vtophys(rman_get_start(sc->res[0])))
+ BUS_SPACE_PHYSADDR(sc->res[0], slot->bd_offset)
#define cpsw_cpdma_read_bd(sc, slot, val) \
bus_read_region_4(sc->res[0], slot->bd_offset, (uint32_t *) val, 4)
#define cpsw_cpdma_write_bd(sc, slot, val) \
diff --git a/sys/arm/ti/ti_mmchs.c b/sys/arm/ti/ti_mmchs.c
index cf9dc21..76ecc58 100644
--- a/sys/arm/ti/ti_mmchs.c
+++ b/sys/arm/ti/ti_mmchs.c
@@ -1584,7 +1584,6 @@ static int
ti_mmchs_activate(device_t dev)
{
struct ti_mmchs_softc *sc = device_get_softc(dev);
- unsigned long addr;
int rid;
int err;
@@ -1630,8 +1629,8 @@ ti_mmchs_activate(device_t dev)
panic("Unknown OMAP device\n");
/* Get the physical address of the MMC data register, needed for DMA */
- addr = vtophys(rman_get_start(sc->sc_mem_res));
- sc->sc_data_reg_paddr = addr + sc->sc_reg_off + MMCHS_DATA;
+ sc->sc_data_reg_paddr = BUS_SPACE_PHYSADDR(sc->sc_mem_res,
+ sc->sc_reg_off + MMCHS_DATA);
/* Set the initial power state to off */
sc->sc_cur_power_mode = power_off;
diff --git a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/spa_misc.c b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/spa_misc.c
index dc64682..a8916d2 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/spa_misc.c
+++ b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/spa_misc.c
@@ -271,7 +271,7 @@ uint64_t zfs_deadman_synctime = 1000ULL;
TUNABLE_QUAD("vfs.zfs.deadman_synctime", &zfs_deadman_synctime);
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, deadman_synctime, CTLFLAG_RDTUN,
&zfs_deadman_synctime, 0,
- "Stalled ZFS I/O expiration time in units of vfs.zfs.txg_synctime_ms");
+ "Stalled ZFS I/O expiration time in units of vfs.zfs.txg.synctime_ms");
/*
* Default value of -1 for zfs_deadman_enabled is resolved in
diff --git a/sys/compat/ndis/kern_ndis.c b/sys/compat/ndis/kern_ndis.c
index e094997..71b4b80 100644
--- a/sys/compat/ndis/kern_ndis.c
+++ b/sys/compat/ndis/kern_ndis.c
@@ -656,13 +656,9 @@ ndis_ptom(m0, p)
for (buf = priv->npp_head; buf != NULL; buf = buf->mdl_next) {
if (buf == priv->npp_head)
-#ifdef MT_HEADER
- MGETHDR(m, M_NOWAIT, MT_HEADER);
-#else
- MGETHDR(m, M_NOWAIT, MT_DATA);
-#endif
+ m = m_gethdr(M_NOWAIT, MT_DATA);
else
- MGET(m, M_NOWAIT, MT_DATA);
+ m = m_get(M_NOWAIT, MT_DATA);
if (m == NULL) {
m_freem(*m0);
*m0 = NULL;
diff --git a/sys/conf/Makefile.arm b/sys/conf/Makefile.arm
index 8e3b82e..8348e2a 100644
--- a/sys/conf/Makefile.arm
+++ b/sys/conf/Makefile.arm
@@ -44,7 +44,9 @@ CFLAGS += -mno-thumb-interwork
.endif
.if empty(DDB_ENABLED)
+.if !defined(WITH_ARM_EABI) && ${COMPILER_TYPE} != "clang"
CFLAGS += -mno-apcs-frame
+.endif
.elif defined(WITH_ARM_EABI)
CFLAGS += -funwind-tables
.if ${COMPILER_TYPE} == "clang"
@@ -82,44 +84,43 @@ ${KERNEL_KO}.tramp: ${KERNEL_KO} $S/$M/$M/inckern.S $S/$M/$M/elf_trampoline.c
echo "#define KERNNAME \"${KERNEL_KO}.tmp\"" >opt_kernname.h
sed s/${KERNVIRTADDR}/${KERNPHYSADDR}/ ldscript.$M > ldscript.$M.tramp
sed s/" + SIZEOF_HEADERS"// ldscript.$M.tramp > \
- ldscript.$M.tramp.noheader
+ ldscript.$M.tramp.noheader
echo "#include <machine/asm.h>" >tmphack.S
echo "ENTRY(_start)" >>tmphack.S
echo "bl _startC" >>tmphack.S
${OBJCOPY} --strip-symbol '$$d' --strip-symbol '$$a' \
- -g --strip-symbol '$$t' ${FULLKERNEL} ${KERNEL_KO}.tmp
+ -g --strip-symbol '$$t' ${FULLKERNEL} ${KERNEL_KO}.tmp
eval $$(stat -s ${KERNEL_KO}.tmp) && \
- echo "#define KERNSIZE $$st_size" >>opt_kernname.h
+ echo "#define KERNSIZE $$st_size" >>opt_kernname.h
${CC} -O -nostdlib -I. -I$S -Xlinker -T -Xlinker ldscript.$M.tramp \
- tmphack.S $S/$M/$M/elf_trampoline.c $S/$M/$M/inckern.S \
- ${FILES_CPU_FUNC} -o ${KERNEL_KO}.tramp
+ tmphack.S $S/$M/$M/elf_trampoline.c $S/$M/$M/inckern.S \
+ ${FILES_CPU_FUNC} -o ${KERNEL_KO}.tramp
${CC} -O -nostdlib -I. -I$S -Xlinker -T -Xlinker \
- ldscript.$M.tramp.noheader \
- tmphack.S $S/$M/$M/elf_trampoline.c $S/$M/$M/inckern.S \
- ${FILES_CPU_FUNC} -o ${KERNEL_KO}.tramp.noheader
+ ldscript.$M.tramp.noheader \
+ tmphack.S $S/$M/$M/elf_trampoline.c $S/$M/$M/inckern.S \
+ ${FILES_CPU_FUNC} -o ${KERNEL_KO}.tramp.noheader
${OBJCOPY} -S -O binary ${KERNEL_KO}.tramp.noheader \
- ${KERNEL_KO}.tramp.bin
+ ${KERNEL_KO}.tramp.bin
${OBJCOPY} ${STRIP_FLAGS} ${KERNEL_KO}.tmp
- echo "#define KERNNAME \"${KERNEL_KO}.tmp.gz\"" \
- >opt_kernname.h
+ echo "#define KERNNAME \"${KERNEL_KO}.tmp.gz\"" >opt_kernname.h
eval $$(stat -s ${KERNEL_KO}.tmp) && \
- echo "#define KERNSIZE $$st_size" >>opt_kernname.h
+ echo "#define KERNSIZE $$st_size" >>opt_kernname.h
gzip -f9 ${KERNEL_KO}.tmp
eval $$(stat -s ${KERNEL_KO}.tmp.gz) && \
- echo "#define KERNCOMPSIZE $$st_size" >>opt_kernname.h
+ echo "#define KERNCOMPSIZE $$st_size" >>opt_kernname.h
${CC} -O2 -ffreestanding -DKZIP -I. -I$S -c $S/kern/inflate.c -o \
inflate-tramp.o
${CC} -O -nostdlib -I. -I$S -Xlinker -T -Xlinker ldscript.$M.tramp \
- -DKZIP tmphack.S $S/$M/$M/elf_trampoline.c inflate-tramp.o \
- $S/$M/$M/inckern.S ${FILES_CPU_FUNC} -o ${KERNEL_KO}.gz.tramp
+ -DKZIP tmphack.S $S/$M/$M/elf_trampoline.c inflate-tramp.o \
+ $S/$M/$M/inckern.S ${FILES_CPU_FUNC} -o ${KERNEL_KO}.gz.tramp
${CC} -O -nostdlib -I. -I$S -Xlinker -T -Xlinker \
- ldscript.$M.tramp.noheader \
- -DKZIP tmphack.S $S/$M/$M/elf_trampoline.c inflate-tramp.o \
- $S/$M/$M/inckern.S ${FILES_CPU_FUNC} -o ${KERNEL_KO}.tramp.noheader
+ ldscript.$M.tramp.noheader \
+ -DKZIP tmphack.S $S/$M/$M/elf_trampoline.c inflate-tramp.o \
+ $S/$M/$M/inckern.S ${FILES_CPU_FUNC} -o ${KERNEL_KO}.tramp.noheader
${OBJCOPY} -S -O binary ${KERNEL_KO}.tramp.noheader \
- ${KERNEL_KO}.gz.tramp.bin
+ ${KERNEL_KO}.gz.tramp.bin
rm ${KERNEL_KO}.tmp.gz ${KERNEL_KO}.tramp.noheader opt_kernname.h \
- inflate-tramp.o tmphack.S
+ inflate-tramp.o tmphack.S
MKMODULESENV+= MACHINE=${MACHINE}
diff --git a/sys/conf/NOTES b/sys/conf/NOTES
index 27c3380..5a5000c 100644
--- a/sys/conf/NOTES
+++ b/sys/conf/NOTES
@@ -986,7 +986,7 @@ options DUMMYNET
# See zero_copy(9) for more details.
# XXX: The COW based send mechanism is not safe and may result in
# kernel crashes.
-# XXX: None of the current NIC drivers support disposeable pages.
+# XXX: None of the current NIC drivers support disposable pages.
options SOCKET_SEND_COW
options SOCKET_RECV_PFLIP
diff --git a/sys/conf/files b/sys/conf/files
index b0d772e..04e66a1 100644
--- a/sys/conf/files
+++ b/sys/conf/files
@@ -55,9 +55,9 @@ aic79xx_reg_print.o optional ahd pci ahd_reg_pretty_print \
# from the specified source (DTS) file: <platform>.dts -> <platform>.dtb
#
fdt_dtb_file optional fdt \
- compile-with "if [ -f $S/boot/fdt/dts/${FDT_DTS_FILE} ]; then dtc -O dtb -o `echo ${FDT_DTS_FILE} | cut -d. -f1`.dtb -b 0 -p 1024 $S/boot/fdt/dts/${FDT_DTS_FILE}; fi" \
+ compile-with "if [ -f $S/boot/fdt/dts/${FDT_DTS_FILE} ]; then dtc -O dtb -o ${FDT_DTS_FILE:R}.dtb -b 0 -p 1024 $S/boot/fdt/dts/${FDT_DTS_FILE}; fi" \
no-obj no-implicit-rule before-depend \
- clean "`echo ${FDT_DTS_FILE} | cut -d. -f1`.dtb"
+ clean "${FDT_DTS_FILE:R}.dtb"
fdt_static_dtb.h optional fdt fdt_dtb_static \
compile-with "sh $S/tools/fdt/make_dtbh.sh ${FDT_DTS_FILE} ." \
no-obj no-implicit-rule before-depend \
diff --git a/sys/conf/files.arm b/sys/conf/files.arm
index 33ec0fb..f9e0be8 100644
--- a/sys/conf/files.arm
+++ b/sys/conf/files.arm
@@ -72,11 +72,13 @@ font.h optional sc \
clean "font.h ${SC_DFLT_FONT}-8x14 ${SC_DFLT_FONT}-8x16 ${SC_DFLT_FONT}-8x8"
kern/subr_busdma_bufalloc.c standard
kern/subr_dummy_vdso_tc.c standard
+libkern/arm/aeabi_unwind.c standard
libkern/arm/divsi3.S standard
libkern/arm/ffs.S standard
libkern/arm/ldivmod.S standard
libkern/arm/ldivmod_helper.c standard
libkern/arm/memcpy.S standard
+libkern/arm/memset.S standard
libkern/arm/muldi3.c standard
libkern/ashldi3.c standard
libkern/ashrdi3.c standard
diff --git a/sys/dev/acpica/acpi_powerres.c b/sys/dev/acpica/acpi_powerres.c
index ba08286..aa343a6 100644
--- a/sys/dev/acpica/acpi_powerres.c
+++ b/sys/dev/acpica/acpi_powerres.c
@@ -47,7 +47,7 @@ __FBSDID("$FreeBSD$");
* resource, and only deactivate it when there are no powered devices.
*
* Note that this only manages resources for known devices. There is an
- * ugly case where we may turn of power to a device which is in use because
+ * ugly case where we may turn off power to a device which is in use because
* we don't know that it depends on a given resource. We should perhaps
* try to be smarter about this, but a more complete solution would involve
* scanning all of the ACPI namespace to find devices we're not currently
diff --git a/sys/dev/ath/if_ath.c b/sys/dev/ath/if_ath.c
index 11ecd1a..7601f2f 100644
--- a/sys/dev/ath/if_ath.c
+++ b/sys/dev/ath/if_ath.c
@@ -3750,39 +3750,6 @@ ath_tx_update_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni,
}
/*
- * Update the busy status of the last frame on the free list.
- * When doing TDMA, the busy flag tracks whether the hardware
- * currently points to this buffer or not, and thus gated DMA
- * may restart by re-reading the last descriptor in this
- * buffer.
- *
- * This should be called in the completion function once one
- * of the buffers has been used.
- */
-static void
-ath_tx_update_busy(struct ath_softc *sc)
-{
- struct ath_buf *last;
-
- /*
- * Since the last frame may still be marked
- * as ATH_BUF_BUSY, unmark it here before
- * finishing the frame processing.
- * Since we've completed a frame (aggregate
- * or otherwise), the hardware has moved on
- * and is no longer referencing the previous
- * descriptor.
- */
- ATH_TXBUF_LOCK_ASSERT(sc);
- last = TAILQ_LAST(&sc->sc_txbuf_mgmt, ath_bufhead_s);
- if (last != NULL)
- last->bf_flags &= ~ATH_BUF_BUSY;
- last = TAILQ_LAST(&sc->sc_txbuf, ath_bufhead_s);
- if (last != NULL)
- last->bf_flags &= ~ATH_BUF_BUSY;
-}
-
-/*
* Process the completion of the given buffer.
*
* This calls the rate control update and then the buffer completion.
@@ -3901,7 +3868,6 @@ ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq, int dosched)
break;
}
ATH_TXQ_REMOVE(txq, bf, bf_list);
-#ifdef IEEE80211_SUPPORT_TDMA
if (txq->axq_depth > 0) {
/*
* More frames follow. Mark the buffer busy
@@ -3914,9 +3880,6 @@ ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq, int dosched)
*/
bf->bf_last->bf_flags |= ATH_BUF_BUSY;
} else
-#else
- if (txq->axq_depth == 0)
-#endif
txq->axq_link = NULL;
if (bf->bf_state.bfs_aggr)
txq->axq_aggr_depth--;
@@ -4188,6 +4151,50 @@ ath_returnbuf_head(struct ath_softc *sc, struct ath_buf *bf)
}
/*
+ * Free the holding buffer if it exists
+ */
+static void
+ath_txq_freeholdingbuf(struct ath_softc *sc, struct ath_txq *txq)
+{
+ ATH_TXBUF_LOCK_ASSERT(sc);
+
+ if (txq->axq_holdingbf == NULL)
+ return;
+
+ txq->axq_holdingbf->bf_flags &= ~ATH_BUF_BUSY;
+ ath_returnbuf_tail(sc, txq->axq_holdingbf);
+ txq->axq_holdingbf = NULL;
+}
+
+/*
+ * Add this buffer to the holding queue, freeing the previous
+ * one if it exists.
+ */
+static void
+ath_txq_addholdingbuf(struct ath_softc *sc, struct ath_buf *bf)
+{
+ struct ath_txq *txq;
+
+ ATH_TXBUF_LOCK_ASSERT(sc);
+
+ /* XXX assert ATH_BUF_BUSY is set */
+
+ /* XXX assert the tx queue is under the max number */
+	if (bf->bf_state.bfs_tx_queue >= HAL_NUM_TX_QUEUES) {
+ device_printf(sc->sc_dev, "%s: bf=%p: invalid tx queue (%d)\n",
+ __func__,
+ bf,
+ bf->bf_state.bfs_tx_queue);
+ bf->bf_flags &= ~ATH_BUF_BUSY;
+ ath_returnbuf_tail(sc, bf);
+ return;
+ }
+ txq = &sc->sc_txq[bf->bf_state.bfs_tx_queue];
+ ath_txq_freeholdingbuf(sc, txq);
+ txq->axq_holdingbf = bf;
+}
+
+/*
* Return a buffer to the pool and update the 'busy' flag on the
* previous 'tail' entry.
*
@@ -4207,8 +4214,20 @@ ath_freebuf(struct ath_softc *sc, struct ath_buf *bf)
KASSERT((bf->bf_node == NULL), ("%s: bf->bf_node != NULL\n", __func__));
KASSERT((bf->bf_m == NULL), ("%s: bf->bf_m != NULL\n", __func__));
+ /*
+ * If this buffer is busy, push it onto the holding queue
+ */
+ if (bf->bf_flags & ATH_BUF_BUSY) {
+ ATH_TXBUF_LOCK(sc);
+ ath_txq_addholdingbuf(sc, bf);
+ ATH_TXBUF_UNLOCK(sc);
+ return;
+ }
+
+ /*
+ * Not a busy buffer, so free normally
+ */
ATH_TXBUF_LOCK(sc);
- ath_tx_update_busy(sc);
ath_returnbuf_tail(sc, bf);
ATH_TXBUF_UNLOCK(sc);
}
@@ -4261,15 +4280,6 @@ ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq)
* NB: this assumes output has been stopped and
* we do not need to block ath_tx_proc
*/
- ATH_TXBUF_LOCK(sc);
- bf = TAILQ_LAST(&sc->sc_txbuf, ath_bufhead_s);
- if (bf != NULL)
- bf->bf_flags &= ~ATH_BUF_BUSY;
- bf = TAILQ_LAST(&sc->sc_txbuf_mgmt, ath_bufhead_s);
- if (bf != NULL)
- bf->bf_flags &= ~ATH_BUF_BUSY;
- ATH_TXBUF_UNLOCK(sc);
-
for (ix = 0;; ix++) {
ATH_TX_LOCK(sc);
bf = TAILQ_FIRST(&txq->axq_q);
@@ -4331,6 +4341,13 @@ ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq)
}
/*
+ * Free the holding buffer if it exists
+ */
+ ATH_TXBUF_LOCK(sc);
+ ath_txq_freeholdingbuf(sc, txq);
+ ATH_TXBUF_UNLOCK(sc);
+
+ /*
* Drain software queued frames which are on
* active TIDs.
*/
diff --git a/sys/dev/ath/if_ath_sysctl.c b/sys/dev/ath/if_ath_sysctl.c
index bbd1fe8..f1c794e 100644
--- a/sys/dev/ath/if_ath_sysctl.c
+++ b/sys/dev/ath/if_ath_sysctl.c
@@ -361,11 +361,13 @@ ath_sysctl_txagg(SYSCTL_HANDLER_ARGS)
for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
if (ATH_TXQ_SETUP(sc, i)) {
- printf("HW TXQ %d: axq_depth=%d, axq_aggr_depth=%d, axq_fifo_depth=%d\n",
+ printf("HW TXQ %d: axq_depth=%d, axq_aggr_depth=%d, "
+ "axq_fifo_depth=%d, holdingbf=%p\n",
i,
sc->sc_txq[i].axq_depth,
sc->sc_txq[i].axq_aggr_depth,
- sc->sc_txq[i].axq_fifo_depth);
+ sc->sc_txq[i].axq_fifo_depth,
+ sc->sc_txq[i].axq_holdingbf);
}
}
diff --git a/sys/dev/ath/if_ath_tx.c b/sys/dev/ath/if_ath_tx.c
index 33318ea..c7b0fc1 100644
--- a/sys/dev/ath/if_ath_tx.c
+++ b/sys/dev/ath/if_ath_tx.c
@@ -3757,7 +3757,6 @@ ath_tx_tid_cleanup(struct ath_softc *sc, struct ath_node *an, int tid)
if (bf->bf_state.bfs_isretried) {
bf_next = TAILQ_NEXT(bf, bf_list);
ATH_TID_REMOVE(atid, bf, bf_list);
- atid->axq_depth--;
if (bf->bf_state.bfs_dobaw) {
ath_tx_update_baw(sc, an, atid, bf);
if (! bf->bf_state.bfs_addedbaw)
@@ -4140,11 +4139,10 @@ ath_tx_comp_cleanup_aggr(struct ath_softc *sc, struct ath_buf *bf_first)
int tid = bf_first->bf_state.bfs_tid;
struct ath_tid *atid = &an->an_tid[tid];
- bf = bf_first;
-
ATH_TX_LOCK(sc);
/* update incomp */
+ bf = bf_first;
while (bf) {
atid->incomp--;
bf = bf->bf_next;
@@ -4160,12 +4158,17 @@ ath_tx_comp_cleanup_aggr(struct ath_softc *sc, struct ath_buf *bf_first)
/* Send BAR if required */
/* XXX why would we send a BAR when transitioning to non-aggregation? */
+ /*
+ * XXX TODO: we should likely just tear down the BAR state here,
+ * rather than sending a BAR.
+ */
if (ath_tx_tid_bar_tx_ready(sc, atid))
ath_tx_tid_bar_tx(sc, atid);
ATH_TX_UNLOCK(sc);
/* Handle frame completion */
+ bf = bf_first;
while (bf) {
bf_next = bf->bf_next;
ath_tx_default_comp(sc, bf, 1);
@@ -4176,8 +4179,6 @@ ath_tx_comp_cleanup_aggr(struct ath_softc *sc, struct ath_buf *bf_first)
/*
 * Handle completion of a set of aggregate frames.
*
- * XXX for now, simply complete each sub-frame.
- *
* Note: the completion handler is the last descriptor in the aggregate,
* not the last descriptor in the first frame.
*/
diff --git a/sys/dev/ath/if_athvar.h b/sys/dev/ath/if_athvar.h
index 06cffb6..d3e4aa30 100644
--- a/sys/dev/ath/if_athvar.h
+++ b/sys/dev/ath/if_athvar.h
@@ -329,6 +329,16 @@ struct ath_txq {
u_int axq_intrcnt; /* interrupt count */
u_int32_t *axq_link; /* link ptr in last TX desc */
TAILQ_HEAD(axq_q_s, ath_buf) axq_q; /* transmit queue */
+ /*
+ * XXX the holdingbf field is protected by the TXBUF lock
+ * for now, NOT the TX lock.
+ *
+ * Architecturally, it would likely be better to move
+ * the holdingbf field to a separate array in ath_softc
+ * just to highlight that it's not protected by the normal
+ * TX path lock.
+ */
+ struct ath_buf *axq_holdingbf; /* holding TX buffer */
char axq_name[12]; /* e.g. "ath0_txq4" */
/* Per-TID traffic queue for software -> hardware TX */
diff --git a/sys/dev/puc/pucdata.c b/sys/dev/puc/pucdata.c
index 6d933e8..d82f031 100644
--- a/sys/dev/puc/pucdata.c
+++ b/sys/dev/puc/pucdata.c
@@ -50,6 +50,7 @@ __FBSDID("$FreeBSD$");
static puc_config_f puc_config_amc;
static puc_config_f puc_config_diva;
static puc_config_f puc_config_exar;
+static puc_config_f puc_config_exar_pcie;
static puc_config_f puc_config_icbook;
static puc_config_f puc_config_moxa;
static puc_config_f puc_config_oxford_pcie;
@@ -630,6 +631,14 @@ const struct puc_cfg puc_pci_devices[] = {
PUC_PORT_8S, 0x10, 0, -1,
},
+ /* The XR17V358 uses the 125MHz PCIe clock as its reference clock. */
+ { 0x13a8, 0x0358, 0xffff, 0,
+ "Exar XR17V358",
+ 125000000,
+ PUC_PORT_8S, 0x10, 0, -1,
+ .config_function = puc_config_exar_pcie
+ },
+
{ 0x13fe, 0x1600, 0x1602, 0x0002,
"Advantech PCI-1602",
DEFAULT_RCLK * 8,
@@ -1186,6 +1195,17 @@ puc_config_exar(struct puc_softc *sc, enum puc_cfg_cmd cmd, int port,
}
static int
+puc_config_exar_pcie(struct puc_softc *sc, enum puc_cfg_cmd cmd, int port,
+ intptr_t *res)
+{
+ if (cmd == PUC_CFG_GET_OFS) {
+ *res = port * 0x400;
+ return (0);
+ }
+ return (ENXIO);
+}
+
+static int
puc_config_icbook(struct puc_softc *sc, enum puc_cfg_cmd cmd, int port,
intptr_t *res)
{
diff --git a/sys/dev/sound/pcm/sndstat.c b/sys/dev/sound/pcm/sndstat.c
index 7171065..c11eaa3 100644
--- a/sys/dev/sound/pcm/sndstat.c
+++ b/sys/dev/sound/pcm/sndstat.c
@@ -345,8 +345,12 @@ sndstat_prepare(struct sbuf *s)
struct snddev_info *d;
int i, j;
- sbuf_printf(s, "FreeBSD Audio Driver (newpcm: %ubit %d/%s)\n",
- (u_int)sizeof(intpcm32_t) << 3, SND_DRV_VERSION, MACHINE_ARCH);
+ if (snd_verbose > 0) {
+ sbuf_printf(s, "FreeBSD Audio Driver (%ubit %d/%s)\n",
+ (u_int)sizeof(intpcm32_t) << 3, SND_DRV_VERSION,
+ MACHINE_ARCH);
+ }
+
if (SLIST_EMPTY(&sndstat_devlist)) {
sbuf_printf(s, "No devices installed.\n");
sbuf_finish(s);
diff --git a/sys/fs/cd9660/cd9660_vnops.c b/sys/fs/cd9660/cd9660_vnops.c
index 21ee0fc..47d4f75 100644
--- a/sys/fs/cd9660/cd9660_vnops.c
+++ b/sys/fs/cd9660/cd9660_vnops.c
@@ -329,7 +329,7 @@ cd9660_read(ap)
if (lblktosize(imp, rablock) < ip->i_size)
error = cluster_read(vp, (off_t)ip->i_size,
lbn, size, NOCRED, uio->uio_resid,
- (ap->a_ioflag >> 16), &bp);
+ (ap->a_ioflag >> 16), 0, &bp);
else
error = bread(vp, lbn, size, NOCRED, &bp);
} else {
diff --git a/sys/fs/ext2fs/ext2_balloc.c b/sys/fs/ext2fs/ext2_balloc.c
index 1c0cc0e..88ad710 100644
--- a/sys/fs/ext2fs/ext2_balloc.c
+++ b/sys/fs/ext2fs/ext2_balloc.c
@@ -276,7 +276,7 @@ ext2_balloc(struct inode *ip, int32_t lbn, int size, struct ucred *cred,
if (seqcount && (vp->v_mount->mnt_flag & MNT_NOCLUSTERR) == 0) {
error = cluster_read(vp, ip->i_size, lbn,
(int)fs->e2fs_bsize, NOCRED,
- MAXBSIZE, seqcount, &nbp);
+ MAXBSIZE, seqcount, 0, &nbp);
} else {
error = bread(vp, lbn, (int)fs->e2fs_bsize, NOCRED, &nbp);
}
diff --git a/sys/fs/ext2fs/ext2_vnops.c b/sys/fs/ext2fs/ext2_vnops.c
index 1c0b7a1..77eb74b 100644
--- a/sys/fs/ext2fs/ext2_vnops.c
+++ b/sys/fs/ext2fs/ext2_vnops.c
@@ -1618,10 +1618,11 @@ ext2_read(struct vop_read_args *ap)
if (lblktosize(fs, nextlbn) >= ip->i_size)
error = bread(vp, lbn, size, NOCRED, &bp);
- else if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERR) == 0)
+ else if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERR) == 0) {
error = cluster_read(vp, ip->i_size, lbn, size,
- NOCRED, blkoffset + uio->uio_resid, seqcount, &bp);
- else if (seqcount > 1) {
+ NOCRED, blkoffset + uio->uio_resid, seqcount,
+ 0, &bp);
+ } else if (seqcount > 1) {
int nextsize = blksize(fs, ip, nextlbn);
error = breadn(vp, lbn,
size, &nextlbn, &nextsize, 1, NOCRED, &bp);
@@ -1831,7 +1832,7 @@ ext2_write(struct vop_write_args *ap)
} else if (xfersize + blkoffset == fs->e2fs_fsize) {
if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERW) == 0) {
bp->b_flags |= B_CLUSTEROK;
- cluster_write(vp, bp, ip->i_size, seqcount);
+ cluster_write(vp, bp, ip->i_size, seqcount, 0);
} else {
bawrite(bp);
}
diff --git a/sys/fs/msdosfs/msdosfs_vnops.c b/sys/fs/msdosfs/msdosfs_vnops.c
index 8e045cb..213ae81 100644
--- a/sys/fs/msdosfs/msdosfs_vnops.c
+++ b/sys/fs/msdosfs/msdosfs_vnops.c
@@ -600,7 +600,7 @@ msdosfs_read(ap)
error = bread(vp, lbn, blsize, NOCRED, &bp);
} else if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERR) == 0) {
error = cluster_read(vp, dep->de_FileSize, lbn, blsize,
- NOCRED, on + uio->uio_resid, seqcount, &bp);
+ NOCRED, on + uio->uio_resid, seqcount, 0, &bp);
} else if (seqcount > 1) {
rasize = blsize;
error = breadn(vp, lbn,
@@ -820,7 +820,7 @@ msdosfs_write(ap)
else if (n + croffset == pmp->pm_bpcluster) {
if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERW) == 0)
cluster_write(vp, bp, dep->de_FileSize,
- seqcount);
+ seqcount, 0);
else
bawrite(bp);
} else
diff --git a/sys/fs/tmpfs/tmpfs_vnops.c b/sys/fs/tmpfs/tmpfs_vnops.c
index 54c95ff..156fd43 100644
--- a/sys/fs/tmpfs/tmpfs_vnops.c
+++ b/sys/fs/tmpfs/tmpfs_vnops.c
@@ -1297,6 +1297,7 @@ tmpfs_rename(struct vop_rename_args *v)
cache_purge(fvp);
if (tvp != NULL)
cache_purge(tvp);
+ cache_purge_negative(tdvp);
error = 0;
diff --git a/sys/fs/udf/udf_vnops.c b/sys/fs/udf/udf_vnops.c
index b1a3b1d..abe073e 100644
--- a/sys/fs/udf/udf_vnops.c
+++ b/sys/fs/udf/udf_vnops.c
@@ -478,8 +478,9 @@ udf_read(struct vop_read_args *ap)
rablock = lbn + 1;
if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERR) == 0) {
if (lblktosize(udfmp, rablock) < fsize) {
- error = cluster_read(vp, fsize, lbn, size, NOCRED,
- uio->uio_resid, (ap->a_ioflag >> 16), &bp);
+ error = cluster_read(vp, fsize, lbn, size,
+ NOCRED, uio->uio_resid,
+ (ap->a_ioflag >> 16), 0, &bp);
} else {
error = bread(vp, lbn, size, NOCRED, &bp);
}
diff --git a/sys/geom/gate/g_gate.c b/sys/geom/gate/g_gate.c
index d1ddd40..c44993a 100644
--- a/sys/geom/gate/g_gate.c
+++ b/sys/geom/gate/g_gate.c
@@ -813,7 +813,7 @@ g_gate_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct threa
}
}
ggio->gctl_cmd = bp->bio_cmd;
- if ((bp->bio_cmd == BIO_DELETE || bp->bio_cmd == BIO_WRITE) &&
+ if (bp->bio_cmd == BIO_WRITE &&
bp->bio_length > ggio->gctl_length) {
mtx_unlock(&sc->sc_queue_mtx);
ggio->gctl_length = bp->bio_length;
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index ba63e21..6499986 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -4205,6 +4205,49 @@ pmap_copy_page(vm_page_t src, vm_page_t dst)
mtx_unlock(&sysmaps->lock);
}
+void
+pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
+ vm_offset_t b_offset, int xfersize)
+{
+ struct sysmaps *sysmaps;
+ vm_page_t a_pg, b_pg;
+ char *a_cp, *b_cp;
+ vm_offset_t a_pg_offset, b_pg_offset;
+ int cnt;
+
+ sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
+ mtx_lock(&sysmaps->lock);
+ if (*sysmaps->CMAP1 != 0)
+ panic("pmap_copy_pages: CMAP1 busy");
+ if (*sysmaps->CMAP2 != 0)
+ panic("pmap_copy_pages: CMAP2 busy");
+ sched_pin();
+ while (xfersize > 0) {
+ invlpg((u_int)sysmaps->CADDR1);
+ invlpg((u_int)sysmaps->CADDR2);
+ a_pg = ma[a_offset >> PAGE_SHIFT];
+ a_pg_offset = a_offset & PAGE_MASK;
+ cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
+ b_pg = mb[b_offset >> PAGE_SHIFT];
+ b_pg_offset = b_offset & PAGE_MASK;
+ cnt = min(cnt, PAGE_SIZE - b_pg_offset);
+ *sysmaps->CMAP1 = PG_V | VM_PAGE_TO_PHYS(a_pg) | PG_A |
+		    pmap_cache_bits(a_pg->md.pat_mode, 0);
+ *sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(b_pg) | PG_A |
+ PG_M | pmap_cache_bits(b_pg->md.pat_mode, 0);
+ a_cp = sysmaps->CADDR1 + a_pg_offset;
+ b_cp = sysmaps->CADDR2 + b_pg_offset;
+ bcopy(a_cp, b_cp, cnt);
+ a_offset += cnt;
+ b_offset += cnt;
+ xfersize -= cnt;
+ }
+ *sysmaps->CMAP1 = 0;
+ *sysmaps->CMAP2 = 0;
+ sched_unpin();
+ mtx_unlock(&sysmaps->lock);
+}
+
/*
* Returns true if the pmap's pv is one of the first
* 16 pvs linked to from this page. This count may
diff --git a/sys/i386/xen/pmap.c b/sys/i386/xen/pmap.c
index 3c7c3f5..0f7a80f 100644
--- a/sys/i386/xen/pmap.c
+++ b/sys/i386/xen/pmap.c
@@ -3448,6 +3448,46 @@ pmap_copy_page(vm_page_t src, vm_page_t dst)
mtx_unlock(&sysmaps->lock);
}
+void
+pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
+ vm_offset_t b_offset, int xfersize)
+{
+ struct sysmaps *sysmaps;
+ vm_page_t a_pg, b_pg;
+ char *a_cp, *b_cp;
+ vm_offset_t a_pg_offset, b_pg_offset;
+ int cnt;
+
+ sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
+ mtx_lock(&sysmaps->lock);
+ if (*sysmaps->CMAP1 != 0)
+ panic("pmap_copy_pages: CMAP1 busy");
+ if (*sysmaps->CMAP2 != 0)
+ panic("pmap_copy_pages: CMAP2 busy");
+ sched_pin();
+ while (xfersize > 0) {
+ a_pg = ma[a_offset >> PAGE_SHIFT];
+ a_pg_offset = a_offset & PAGE_MASK;
+ cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
+ b_pg = mb[b_offset >> PAGE_SHIFT];
+ b_pg_offset = b_offset & PAGE_MASK;
+ cnt = min(cnt, PAGE_SIZE - b_pg_offset);
+ PT_SET_MA(sysmaps->CADDR1, PG_V | VM_PAGE_TO_MACH(a_pg) | PG_A);
+ PT_SET_MA(sysmaps->CADDR2, PG_V | PG_RW |
+ VM_PAGE_TO_MACH(b_pg) | PG_A | PG_M);
+ a_cp = sysmaps->CADDR1 + a_pg_offset;
+ b_cp = sysmaps->CADDR2 + b_pg_offset;
+ bcopy(a_cp, b_cp, cnt);
+ a_offset += cnt;
+ b_offset += cnt;
+ xfersize -= cnt;
+ }
+ PT_SET_MA(sysmaps->CADDR1, 0);
+ PT_SET_MA(sysmaps->CADDR2, 0);
+ sched_unpin();
+ mtx_unlock(&sysmaps->lock);
+}
+
/*
* Returns true if the pmap's pv is one of the first
* 16 pvs linked to from this page. This count may
diff --git a/sys/ia64/ia64/pmap.c b/sys/ia64/ia64/pmap.c
index 1dff1f9..3256600 100644
--- a/sys/ia64/ia64/pmap.c
+++ b/sys/ia64/ia64/pmap.c
@@ -2014,6 +2014,30 @@ pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
bcopy(src, dst, PAGE_SIZE);
}
+void
+pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
+ vm_offset_t b_offset, int xfersize)
+{
+ void *a_cp, *b_cp;
+ vm_offset_t a_pg_offset, b_pg_offset;
+ int cnt;
+
+ while (xfersize > 0) {
+ a_pg_offset = a_offset & PAGE_MASK;
+ cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
+ a_cp = (char *)pmap_page_to_va(ma[a_offset >> PAGE_SHIFT]) +
+ a_pg_offset;
+ b_pg_offset = b_offset & PAGE_MASK;
+ cnt = min(cnt, PAGE_SIZE - b_pg_offset);
+ b_cp = (char *)pmap_page_to_va(mb[b_offset >> PAGE_SHIFT]) +
+ b_pg_offset;
+ bcopy(a_cp, b_cp, cnt);
+ a_offset += cnt;
+ b_offset += cnt;
+ xfersize -= cnt;
+ }
+}
+
/*
* Returns true if the pmap's pv is one of the first
* 16 pvs linked to from this page. This count may
diff --git a/sys/kern/capabilities.conf b/sys/kern/capabilities.conf
index 71c22bf..be01c9d 100644
--- a/sys/kern/capabilities.conf
+++ b/sys/kern/capabilities.conf
@@ -446,9 +446,9 @@ olio_listio
## Operations relative to directory capabilities.
##
faccessat
-fstatat
fchmodat
fchownat
+fstatat
futimesat
linkat
mkdirat
diff --git a/sys/kern/kern_racct.c b/sys/kern/kern_racct.c
index bf8141c..d31c832 100644
--- a/sys/kern/kern_racct.c
+++ b/sys/kern/kern_racct.c
@@ -1033,6 +1033,7 @@ racct_proc_throttle(struct proc *p)
p->p_throttled = 1;
FOREACH_THREAD_IN_PROC(p, td) {
+ thread_lock(td);
switch (td->td_state) {
case TDS_RUNQ:
/*
@@ -1041,27 +1042,24 @@ racct_proc_throttle(struct proc *p)
* TDF_NEEDRESCHED for the thread, so that once it is
* running, it is taken off the cpu as soon as possible.
*/
- thread_lock(td);
td->td_flags |= TDF_NEEDRESCHED;
- thread_unlock(td);
break;
case TDS_RUNNING:
/*
* If the thread is running, we request a context
* switch for it by setting the TDF_NEEDRESCHED flag.
*/
- thread_lock(td);
td->td_flags |= TDF_NEEDRESCHED;
#ifdef SMP
cpuid = td->td_oncpu;
if ((cpuid != NOCPU) && (td != curthread))
ipi_cpu(cpuid, IPI_AST);
#endif
- thread_unlock(td);
break;
default:
break;
}
+ thread_unlock(td);
}
}
diff --git a/sys/kern/subr_trap.c b/sys/kern/subr_trap.c
index 1f24e88..11a4bc4 100644
--- a/sys/kern/subr_trap.c
+++ b/sys/kern/subr_trap.c
@@ -100,9 +100,6 @@ void
userret(struct thread *td, struct trapframe *frame)
{
struct proc *p = td->td_proc;
-#ifdef RACCT
- int sig;
-#endif
CTR3(KTR_SYSC, "userret: thread %p (pid %d, %s)", td, p->p_pid,
td->td_name);
@@ -175,12 +172,8 @@ userret(struct thread *td, struct trapframe *frame)
#endif
#ifdef RACCT
PROC_LOCK(p);
- while (p->p_throttled == 1) {
- sig = msleep(p->p_racct, &p->p_mtx, PCATCH | PBDRY, "racct",
- hz);
- if ((sig == EINTR) || (sig == ERESTART))
- break;
- }
+ while (p->p_throttled == 1)
+ msleep(p->p_racct, &p->p_mtx, 0, "racct", 0);
PROC_UNLOCK(p);
#endif
}
diff --git a/sys/kern/uipc_mbuf.c b/sys/kern/uipc_mbuf.c
index e962670..ea4fd80 100644
--- a/sys/kern/uipc_mbuf.c
+++ b/sys/kern/uipc_mbuf.c
@@ -92,7 +92,6 @@ m_get2(int size, int how, short type, int flags)
{
struct mb_args args;
struct mbuf *m, *n;
- uma_zone_t zone;
args.flags = flags;
args.type = type;
@@ -101,24 +100,15 @@ m_get2(int size, int how, short type, int flags)
return (uma_zalloc_arg(zone_mbuf, &args, how));
if (size <= MCLBYTES)
return (uma_zalloc_arg(zone_pack, &args, how));
- if (size > MJUM16BYTES)
+
+ if (size > MJUMPAGESIZE)
return (NULL);
m = uma_zalloc_arg(zone_mbuf, &args, how);
if (m == NULL)
return (NULL);
-#if MJUMPAGESIZE != MCLBYTES
- if (size <= MJUMPAGESIZE)
- zone = zone_jumbop;
- else
-#endif
- if (size <= MJUM9BYTES)
- zone = zone_jumbo9;
- else
- zone = zone_jumbo16;
-
- n = uma_zalloc_arg(zone, m, how);
+ n = uma_zalloc_arg(zone_jumbop, m, how);
if (n == NULL) {
uma_zfree(zone_mbuf, m);
return (NULL);
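With this change m_get2() tops out at a page-sized jumbo cluster; a caller that genuinely needs a 9k or 16k cluster must now ask for one explicitly. A minimal sketch, assuming the existing m_getjcl(9) interface:

    /* Sketch only: explicit jumbo allocation for sizes m_get2() now
     * refuses (it returns NULL for size > MJUMPAGESIZE). */
    struct mbuf *m;

    m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
    if (m == NULL)
            return (ENOBUFS);       /* hypothetical caller error path */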
@@ -405,7 +395,7 @@ m_demote(struct mbuf *m0, int all)
m_freem(m->m_nextpkt);
m->m_nextpkt = NULL;
}
- m->m_flags = m->m_flags & (M_EXT|M_RDONLY|M_FREELIST|M_NOFREE);
+ m->m_flags = m->m_flags & (M_EXT|M_RDONLY|M_NOFREE);
}
}
@@ -540,8 +530,8 @@ m_dup_pkthdr(struct mbuf *to, struct mbuf *from, int how)
#if 0
/*
* The mbuf allocator only initializes the pkthdr
- * when the mbuf is allocated with MGETHDR. Many users
- * (e.g. m_copy*, m_prepend) use MGET and then
+ * when the mbuf is allocated with m_gethdr(). Many users
+ * (e.g. m_copy*, m_prepend) use m_get() and then
* smash the pkthdr as needed causing these
* assertions to trip. For now just disable them.
*/
@@ -573,15 +563,15 @@ m_prepend(struct mbuf *m, int len, int how)
struct mbuf *mn;
if (m->m_flags & M_PKTHDR)
- MGETHDR(mn, how, m->m_type);
+ mn = m_gethdr(how, m->m_type);
else
- MGET(mn, how, m->m_type);
+ mn = m_get(how, m->m_type);
if (mn == NULL) {
m_freem(m);
return (NULL);
}
if (m->m_flags & M_PKTHDR)
- M_MOVE_PKTHDR(mn, m);
+ m_move_pkthdr(mn, m);
mn->m_next = m;
m = mn;
if(m->m_flags & M_PKTHDR) {
@@ -631,9 +621,9 @@ m_copym(struct mbuf *m, int off0, int len, int wait)
break;
}
if (copyhdr)
- MGETHDR(n, wait, m->m_type);
+ n = m_gethdr(wait, m->m_type);
else
- MGET(n, wait, m->m_type);
+ n = m_get(wait, m->m_type);
*np = n;
if (n == NULL)
goto nospace;
@@ -832,7 +822,7 @@ m_copypacket(struct mbuf *m, int how)
struct mbuf *top, *n, *o;
MBUF_CHECKSLEEP(how);
- MGET(n, how, m->m_type);
+ n = m_get(how, m->m_type);
top = n;
if (n == NULL)
goto nospace;
@@ -850,7 +840,7 @@ m_copypacket(struct mbuf *m, int how)
m = m->m_next;
while (m) {
- MGET(o, how, m->m_type);
+ o = m_get(how, m->m_type);
if (o == NULL)
goto nospace;
@@ -1106,12 +1096,11 @@ m_pullup(struct mbuf *n, int len)
} else {
if (len > MHLEN)
goto bad;
- MGET(m, M_NOWAIT, n->m_type);
+ m = m_get(M_NOWAIT, n->m_type);
if (m == NULL)
goto bad;
- m->m_len = 0;
if (n->m_flags & M_PKTHDR)
- M_MOVE_PKTHDR(m, n);
+ m_move_pkthdr(m, n);
}
space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
do {
@@ -1154,12 +1143,11 @@ m_copyup(struct mbuf *n, int len, int dstoff)
if (len > (MHLEN - dstoff))
goto bad;
- MGET(m, M_NOWAIT, n->m_type);
+ m = m_get(M_NOWAIT, n->m_type);
if (m == NULL)
goto bad;
- m->m_len = 0;
if (n->m_flags & M_PKTHDR)
- M_MOVE_PKTHDR(m, n);
+ m_move_pkthdr(m, n);
m->m_data += dstoff;
space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
do {
@@ -1210,7 +1198,7 @@ m_split(struct mbuf *m0, int len0, int wait)
return (NULL);
remain = m->m_len - len;
if (m0->m_flags & M_PKTHDR) {
- MGETHDR(n, wait, m0->m_type);
+ n = m_gethdr(wait, m0->m_type);
if (n == NULL)
return (NULL);
n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
@@ -1236,7 +1224,7 @@ m_split(struct mbuf *m0, int len0, int wait)
m->m_next = NULL;
return (n);
} else {
- MGET(n, wait, m->m_type);
+ n = m_get(wait, m->m_type);
if (n == NULL)
return (NULL);
M_ALIGN(n, remain);
@@ -1889,14 +1877,22 @@ m_mbuftouio(struct uio *uio, struct mbuf *m, int len)
void
m_align(struct mbuf *m, int len)
{
+#ifdef INVARIANTS
+ const char *msg = "%s: not a virgin mbuf";
+#endif
int adjust;
- if (m->m_flags & M_EXT)
+ if (m->m_flags & M_EXT) {
+ KASSERT(m->m_data == m->m_ext.ext_buf, (msg, __func__));
adjust = m->m_ext.ext_size - len;
- else if (m->m_flags & M_PKTHDR)
+ } else if (m->m_flags & M_PKTHDR) {
+ KASSERT(m->m_data == m->m_pktdat, (msg, __func__));
adjust = MHLEN - len;
- else
+ } else {
+ KASSERT(m->m_data == m->m_dat, (msg, __func__));
adjust = MLEN - len;
+ }
+
m->m_data += adjust &~ (sizeof(long)-1);
}
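
The consolidated m_align() above positions m_data so that len bytes remain at
the tail of the buffer, rounded down to a long boundary. A minimal userland
sketch of the same arithmetic (the buffer size is illustrative, not a kernel
constant):

    #include <stdio.h>

    int
    main(void)
    {
            char buf[256];          /* stand-in for m_dat / ext_buf */
            int len = 53;
            int adjust = (int)sizeof(buf) - len;
            char *data = buf + (adjust & ~(sizeof(long) - 1));

            /* data..data+len fits in buf; the lead space is prepend room */
            printf("payload at offset %td of %zu\n", data - buf, sizeof(buf));
            return (0);
    }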
@@ -1978,43 +1974,18 @@ m_unshare(struct mbuf *m0, int how)
}
/*
- * Allocate new space to hold the copy...
- */
- /* XXX why can M_PKTHDR be set past the first mbuf? */
- if (mprev == NULL && (m->m_flags & M_PKTHDR)) {
- /*
- * NB: if a packet header is present we must
- * allocate the mbuf separately from any cluster
- * because M_MOVE_PKTHDR will smash the data
- * pointer and drop the M_EXT marker.
- */
- MGETHDR(n, how, m->m_type);
- if (n == NULL) {
- m_freem(m0);
- return (NULL);
- }
- M_MOVE_PKTHDR(n, m);
- MCLGET(n, how);
- if ((n->m_flags & M_EXT) == 0) {
- m_free(n);
- m_freem(m0);
- return (NULL);
- }
- } else {
- n = m_getcl(how, m->m_type, m->m_flags);
- if (n == NULL) {
- m_freem(m0);
- return (NULL);
- }
- }
- /*
- * ... and copy the data. We deal with jumbo mbufs
- * (i.e. m_len > MCLBYTES) by splitting them into
- * clusters. We could just malloc a buffer and make
- * it external but too many device drivers don't know
- * how to break up the non-contiguous memory when
+ * Allocate new space to hold the copy and copy the data.
+ * We deal with jumbo mbufs (i.e. m_len > MCLBYTES) by
+ * splitting them into clusters. We could just malloc a
+ * buffer and make it external but too many device drivers
+ * don't know how to break up the non-contiguous memory when
* doing DMA.
*/
+ n = m_getcl(how, m->m_type, m->m_flags);
+ if (n == NULL) {
+ m_freem(m0);
+ return (NULL);
+ }
len = m->m_len;
off = 0;
mfirst = n;
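
The splitting the comment refers to caps each copy at MCLBYTES and chains the
clusters. A hedged sketch of such a loop (the helper name is hypothetical;
the real logic is inline in m_unshare()):

    static struct mbuf *
    copy_to_clusters(struct mbuf *m, int how)
    {
            struct mbuf *first, *n, **prev;
            int cc, len, off;

            first = NULL;
            prev = &first;
            for (len = m->m_len, off = 0; len > 0; len -= cc, off += cc) {
                    n = m_getcl(how, m->m_type, 0);
                    if (n == NULL) {
                            m_freem(first);
                            return (NULL);
                    }
                    cc = len > MCLBYTES ? MCLBYTES : len;
                    m_copydata(m, off, cc, mtod(n, caddr_t));
                    n->m_len = cc;
                    *prev = n;
                    prev = &n->m_next;
            }
            return (first);
    }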
diff --git a/sys/kern/uipc_sockbuf.c b/sys/kern/uipc_sockbuf.c
index 6325840..93c96bf 100644
--- a/sys/kern/uipc_sockbuf.c
+++ b/sys/kern/uipc_sockbuf.c
@@ -644,8 +644,8 @@ sbappendaddr_locked(struct sockbuf *sb, const struct sockaddr *asa,
if (asa->sa_len > MLEN)
return (0);
#endif
- MGET(m, M_NOWAIT, MT_SONAME);
- if (m == 0)
+ m = m_get(M_NOWAIT, MT_SONAME);
+ if (m == NULL)
return (0);
m->m_len = asa->sa_len;
bcopy(asa, mtod(m, caddr_t), asa->sa_len);
diff --git a/sys/kern/uipc_syscalls.c b/sys/kern/uipc_syscalls.c
index 4827cab..3a2f032 100644
--- a/sys/kern/uipc_syscalls.c
+++ b/sys/kern/uipc_syscalls.c
@@ -1701,18 +1701,16 @@ sockargs(mp, buf, buflen, type)
struct mbuf *m;
int error;
- if ((u_int)buflen > MLEN) {
+ if (buflen > MLEN) {
#ifdef COMPAT_OLDSOCK
- if (type == MT_SONAME && (u_int)buflen <= 112)
+ if (type == MT_SONAME && buflen <= 112)
buflen = MLEN; /* unix domain compat. hack */
else
#endif
- if ((u_int)buflen > MCLBYTES)
+ if (buflen > MCLBYTES)
return (EINVAL);
}
- m = m_get(M_WAITOK, type);
- if ((u_int)buflen > MLEN)
- MCLGET(m, M_WAITOK);
+ m = m_get2(buflen, M_WAITOK, type, 0);
m->m_len = buflen;
error = copyin(buf, mtod(m, caddr_t), (u_int)buflen);
if (error)
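
m_get2() folds the old MGET-then-maybe-MCLGET dance into one size-driven
allocation. An illustrative sketch of its shape only; the authoritative
selection lives in sys/kern/uipc_mbuf.c:

    static struct mbuf *
    m_get2_sketch(int size, int how, short type, int flags)
    {
            if (size <= MHLEN || (size <= MLEN && (flags & M_PKTHDR) == 0))
                    return (flags & M_PKTHDR ?
                        m_gethdr(how, type) : m_get(how, type));
            if (size <= MCLBYTES)
                    return (m_getcl(how, type, flags));
            if (size <= MJUMPAGESIZE)
                    return (m_getjcl(how, type, flags, MJUMPAGESIZE));
            return (NULL);  /* larger requests are the allocator's call */
    }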
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index 6d110ab..d20c829 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -830,9 +830,8 @@ breada(struct vnode * vp, daddr_t * rablkno, int * rabsize,
* getblk(). Also starts asynchronous I/O on read-ahead blocks.
*/
int
-breadn_flags(struct vnode * vp, daddr_t blkno, int size,
- daddr_t * rablkno, int *rabsize, int cnt,
- struct ucred * cred, int flags, struct buf **bpp)
+breadn_flags(struct vnode *vp, daddr_t blkno, int size, daddr_t *rablkno,
+ int *rabsize, int cnt, struct ucred *cred, int flags, struct buf **bpp)
{
struct buf *bp;
int rv = 0, readwait = 0;
@@ -1794,8 +1793,9 @@ vfs_bio_awrite(struct buf *bp)
*/
if (ncl != 1) {
BUF_UNLOCK(bp);
- nwritten = cluster_wbuild(vp, size, lblkno - j, ncl);
- return nwritten;
+ nwritten = cluster_wbuild(vp, size, lblkno - j, ncl,
+ 0);
+ return (nwritten);
}
}
bremfree(bp);
@@ -1808,7 +1808,7 @@ vfs_bio_awrite(struct buf *bp)
nwritten = bp->b_bufsize;
(void) bwrite(bp);
- return nwritten;
+ return (nwritten);
}
/*
@@ -2630,7 +2630,7 @@ vfs_setdirty_locked_object(struct buf *bp)
* prior to issuing the READ. biodone() will *not* clear B_INVAL.
*/
struct buf *
-getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo,
+getblk(struct vnode *vp, daddr_t blkno, int size, int slpflag, int slptimeo,
int flags)
{
struct buf *bp;
@@ -2708,9 +2708,8 @@ loop:
}
/*
- * check for size inconsistancies for non-VMIO case.
+ * check for size inconsistencies for non-VMIO case.
*/
-
if (bp->b_bcount != size) {
if ((bp->b_flags & B_VMIO) == 0 ||
(size > bp->b_kvasize)) {
@@ -3712,8 +3711,7 @@ vfs_bio_set_valid(struct buf *bp, int base, int size)
void
vfs_bio_clrbuf(struct buf *bp)
{
- int i, j, mask;
- caddr_t sa, ea;
+ int i, j, mask, sa, ea, slide;
if ((bp->b_flags & (B_VMIO | B_MALLOC)) != B_VMIO) {
clrbuf(bp);
@@ -3731,30 +3729,33 @@ vfs_bio_clrbuf(struct buf *bp)
if ((bp->b_pages[0]->valid & mask) == mask)
goto unlock;
if ((bp->b_pages[0]->valid & mask) == 0) {
- bzero(bp->b_data, bp->b_bufsize);
+ pmap_zero_page_area(bp->b_pages[0], 0, bp->b_bufsize);
bp->b_pages[0]->valid |= mask;
goto unlock;
}
}
- ea = sa = bp->b_data;
- for(i = 0; i < bp->b_npages; i++, sa = ea) {
- ea = (caddr_t)trunc_page((vm_offset_t)sa + PAGE_SIZE);
- ea = (caddr_t)(vm_offset_t)ulmin(
- (u_long)(vm_offset_t)ea,
- (u_long)(vm_offset_t)bp->b_data + bp->b_bufsize);
+ sa = bp->b_offset & PAGE_MASK;
+ slide = 0;
+ for (i = 0; i < bp->b_npages; i++, sa = 0) {
+ slide = imin(slide + PAGE_SIZE, bp->b_offset + bp->b_bufsize);
+ ea = slide & PAGE_MASK;
+ if (ea == 0)
+ ea = PAGE_SIZE;
if (bp->b_pages[i] == bogus_page)
continue;
- j = ((vm_offset_t)sa & PAGE_MASK) / DEV_BSIZE;
+ j = sa / DEV_BSIZE;
mask = ((1 << ((ea - sa) / DEV_BSIZE)) - 1) << j;
VM_OBJECT_ASSERT_WLOCKED(bp->b_pages[i]->object);
if ((bp->b_pages[i]->valid & mask) == mask)
continue;
if ((bp->b_pages[i]->valid & mask) == 0)
- bzero(sa, ea - sa);
+ pmap_zero_page_area(bp->b_pages[i], sa, ea - sa);
else {
for (; sa < ea; sa += DEV_BSIZE, j++) {
- if ((bp->b_pages[i]->valid & (1 << j)) == 0)
- bzero(sa, DEV_BSIZE);
+ if ((bp->b_pages[i]->valid & (1 << j)) == 0) {
+ pmap_zero_page_area(bp->b_pages[i],
+ sa, DEV_BSIZE);
+ }
}
}
bp->b_pages[i]->valid |= mask;
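
The rewritten loop tracks the zeroing window per page: sa is the start offset
within the current page, slide the running end clipped to the buffer, and ea
its page-relative end. A standalone trace of the same arithmetic (offsets are
illustrative):

    #include <stdio.h>
    #define PAGE_SIZE 4096
    #define PAGE_MASK (PAGE_SIZE - 1)
    static int imin(int a, int b) { return (a < b ? a : b); }

    int
    main(void)
    {
            int off = 512, size = 9000, npages = 3; /* illustrative */
            int ea, i, sa, slide;

            sa = off & PAGE_MASK;
            slide = 0;
            for (i = 0; i < npages; i++, sa = 0) {
                    slide = imin(slide + PAGE_SIZE, off + size);
                    ea = slide & PAGE_MASK;
                    if (ea == 0)
                            ea = PAGE_SIZE;
                    printf("page %d: zero [%d, %d)\n", i, sa, ea);
            }
            return (0);
    }

This prints [512, 4096), [0, 4096), [0, 1320): 9000 bytes in all, now zeroed
through pmap_zero_page_area() rather than through a mapped b_data address.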
diff --git a/sys/kern/vfs_cluster.c b/sys/kern/vfs_cluster.c
index 70937a2..28aa4ff 100644
--- a/sys/kern/vfs_cluster.c
+++ b/sys/kern/vfs_cluster.c
@@ -84,15 +84,9 @@ extern vm_page_t bogus_page;
* cluster_read replaces bread.
*/
int
-cluster_read(vp, filesize, lblkno, size, cred, totread, seqcount, bpp)
- struct vnode *vp;
- u_quad_t filesize;
- daddr_t lblkno;
- long size;
- struct ucred *cred;
- long totread;
- int seqcount;
- struct buf **bpp;
+cluster_read(struct vnode *vp, u_quad_t filesize, daddr_t lblkno, long size,
+ struct ucred *cred, long totread, int seqcount, int gbflags,
+ struct buf **bpp)
{
struct buf *bp, *rbp, *reqbp;
struct bufobj *bo;
@@ -569,14 +563,14 @@ cluster_wbuild_wb(struct vnode *vp, long size, daddr_t start_lbn, int len)
{
int r = 0;
- switch(write_behind) {
+ switch (write_behind) {
case 2:
if (start_lbn < len)
break;
start_lbn -= len;
/* FALLTHROUGH */
case 1:
- r = cluster_wbuild(vp, size, start_lbn, len);
+ r = cluster_wbuild(vp, size, start_lbn, len, 0);
/* FALLTHROUGH */
default:
/* FALLTHROUGH */
@@ -596,7 +590,8 @@ cluster_wbuild_wb(struct vnode *vp, long size, daddr_t start_lbn, int len)
* 4. end of a cluster - asynchronously write cluster
*/
void
-cluster_write(struct vnode *vp, struct buf *bp, u_quad_t filesize, int seqcount)
+cluster_write(struct vnode *vp, struct buf *bp, u_quad_t filesize, int seqcount,
+ int gbflags)
{
daddr_t lbn;
int maxclen, cursize;
@@ -742,11 +737,8 @@ cluster_write(struct vnode *vp, struct buf *bp, u_quad_t filesize, int seqcount)
* the current block (if last_bp == NULL).
*/
int
-cluster_wbuild(vp, size, start_lbn, len)
- struct vnode *vp;
- long size;
- daddr_t start_lbn;
- int len;
+cluster_wbuild(struct vnode *vp, long size, daddr_t start_lbn, int len,
+ int gbflags)
{
struct buf *bp, *tbp;
struct bufobj *bo;
diff --git a/sys/kern/vfs_syscalls.c b/sys/kern/vfs_syscalls.c
index fc78235..1c361bd 100644
--- a/sys/kern/vfs_syscalls.c
+++ b/sys/kern/vfs_syscalls.c
@@ -445,7 +445,7 @@ sys_getfsstat(td, uap)
/*
* If (bufsize > 0 && bufseg == UIO_SYSSPACE)
- * The caller is responsible for freeing memory which will be allocated
+ * The caller is responsible for freeing memory which will be allocated
* in '*buf'.
*/
int
@@ -971,7 +971,7 @@ flags_to_rights(int flags)
/* FALLTHROUGH */
case O_WRONLY:
rights |= CAP_WRITE;
- if (!(flags & O_APPEND))
+ if (!(flags & (O_APPEND | O_TRUNC)))
rights |= CAP_SEEK;
break;
}
@@ -2843,7 +2843,6 @@ sys_lchmod(td, uap)
uap->mode, AT_SYMLINK_NOFOLLOW));
}
-
int
kern_fchmodat(struct thread *td, int fd, char *path, enum uio_seg pathseg,
mode_t mode, int flag)
@@ -2854,7 +2853,7 @@ kern_fchmodat(struct thread *td, int fd, char *path, enum uio_seg pathseg,
AUDIT_ARG_MODE(mode);
follow = (flag & AT_SYMLINK_NOFOLLOW) ? NOFOLLOW : FOLLOW;
- NDINIT_ATRIGHTS(&nd, LOOKUP, follow | AUDITVNODE1, pathseg, path, fd,
+ NDINIT_ATRIGHTS(&nd, LOOKUP, follow | AUDITVNODE1, pathseg, path, fd,
CAP_FCHMOD, td);
if ((error = namei(&nd)) != 0)
return (error);
@@ -4679,7 +4678,7 @@ kern_posix_fadvise(struct thread *td, int fd, off_t offset, off_t len,
new = fa;
fp->f_advice = NULL;
} else if (offset <= fa->fa_start &&
- end >= fa->fa_start)
+ end >= fa->fa_start)
fa->fa_start = end + 1;
else if (offset <= fa->fa_end && end >= fa->fa_end)
fa->fa_end = offset - 1;
diff --git a/sys/kern/vfs_vnops.c b/sys/kern/vfs_vnops.c
index d367340..c8f832f 100644
--- a/sys/kern/vfs_vnops.c
+++ b/sys/kern/vfs_vnops.c
@@ -7,6 +7,12 @@
* Co. or Unix System Laboratories, Inc. and are reproduced herein with
* the permission of UNIX System Laboratories, Inc.
*
+ * Copyright (c) 2012 Konstantin Belousov <kib@FreeBSD.org>
+ * Copyright (c) 2013 The FreeBSD Foundation
+ *
+ * Portions of this software were developed by Konstantin Belousov
+ * under sponsorship from the FreeBSD Foundation.
+ *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -1122,6 +1128,45 @@ vn_io_fault_uiomove(char *data, int xfersize, struct uio *uio)
return (error);
}
+int
+vn_io_fault_pgmove(vm_page_t ma[], vm_offset_t offset, int xfersize,
+ struct uio *uio)
+{
+ struct thread *td;
+ vm_offset_t iov_base;
+ int cnt, pgadv;
+
+ td = curthread;
+ if ((td->td_pflags & TDP_UIOHELD) == 0 ||
+ uio->uio_segflg != UIO_USERSPACE)
+ return (uiomove_fromphys(ma, offset, xfersize, uio));
+
+ KASSERT(uio->uio_iovcnt == 1, ("uio_iovcnt %d", uio->uio_iovcnt));
+ cnt = xfersize > uio->uio_resid ? uio->uio_resid : xfersize;
+ iov_base = (vm_offset_t)uio->uio_iov->iov_base;
+ switch (uio->uio_rw) {
+ case UIO_WRITE:
+ pmap_copy_pages(td->td_ma, iov_base & PAGE_MASK, ma,
+ offset, cnt);
+ break;
+ case UIO_READ:
+ pmap_copy_pages(ma, offset, td->td_ma, iov_base & PAGE_MASK,
+ cnt);
+ break;
+ }
+ pgadv = ((iov_base + cnt) >> PAGE_SHIFT) - (iov_base >> PAGE_SHIFT);
+ td->td_ma += pgadv;
+ KASSERT(td->td_ma_cnt >= pgadv, ("consumed pages %d %d", td->td_ma_cnt,
+ pgadv));
+ td->td_ma_cnt -= pgadv;
+ uio->uio_iov->iov_base = (char *)(iov_base + cnt);
+ uio->uio_iov->iov_len -= cnt;
+ uio->uio_resid -= cnt;
+ uio->uio_offset += cnt;
+ return (0);
+}
+
+
/*
* File table truncate routine.
*/
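
vn_io_fault_pgmove() advances the iovec by whole pages once the copy is done;
the pgadv expression counts the page boundaries the advance crosses. A tiny
check of that arithmetic (values illustrative):

    #include <stdio.h>
    #define PAGE_SHIFT 12

    int
    main(void)
    {
            unsigned long iov_base = 0x7fff0ff0UL; /* illustrative */
            int cnt = 0x30;
            int pgadv;

            pgadv = (int)(((iov_base + cnt) >> PAGE_SHIFT) -
                (iov_base >> PAGE_SHIFT));
            printf("%d byte advance from %#lx consumes %d held page(s)\n",
                cnt, iov_base, pgadv);
            return (0);
    }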
diff --git a/sys/libkern/arm/aeabi_unwind.c b/sys/libkern/arm/aeabi_unwind.c
new file mode 100644
index 0000000..098e6e6
--- /dev/null
+++ b/sys/libkern/arm/aeabi_unwind.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2013 Andrew Turner
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/types.h>
+#include <sys/systm.h>
+
+#ifdef __ARM_EABI__
+/* We need to provide these functions but must never call them. */
+void __aeabi_unwind_cpp_pr0(void);
+void __aeabi_unwind_cpp_pr1(void);
+void __aeabi_unwind_cpp_pr2(void);
+
+void
+__aeabi_unwind_cpp_pr0(void)
+{
+ panic("__aeabi_unwind_cpp_pr0");
+}
+
+void
+__aeabi_unwind_cpp_pr1(void)
+{
+ panic("__aeabi_unwind_cpp_pr1");
+}
+
+void
+__aeabi_unwind_cpp_pr2(void)
+{
+ panic("__aeabi_unwind_cpp_pr2");
+}
+#endif
+
diff --git a/sys/libkern/arm/divsi3.S b/sys/libkern/arm/divsi3.S
index 700ae37..302f179 100644
--- a/sys/libkern/arm/divsi3.S
+++ b/sys/libkern/arm/divsi3.S
@@ -29,6 +29,7 @@ ENTRY_NP(__umodsi3)
add sp, sp, #4 /* unalign stack */
mov r0, r1
ldmfd sp!, {pc}
+END(__umodsi3)
ENTRY_NP(__modsi3)
stmfd sp!, {lr}
@@ -48,6 +49,7 @@ ENTRY_NP(__modsi3)
mvn r0, #0
#endif
RET
+END(__modsi3)
#ifdef __ARM_EABI__
ENTRY_NP(__aeabi_uidiv)
@@ -74,6 +76,11 @@ ENTRY_NP(__udivsi3)
mov r0, r1
mov r1, #0
RET
+#ifdef __ARM_EABI__
+END(__aeabi_uidiv)
+END(__aeabi_uidivmod)
+#endif
+END(__udivsi3)
#ifdef __ARM_EABI__
ENTRY_NP(__aeabi_idiv)
@@ -393,3 +400,9 @@ ENTRY_NP(__divsi3)
addhs r3, r3, r2
mov r0, r3
RET
+#ifdef __ARM_EABI__
+END(__aeabi_idiv)
+END(__aeabi_idivmod)
+#endif
+END(__divsi3)
+
diff --git a/sys/libkern/arm/ffs.S b/sys/libkern/arm/ffs.S
index ba0af49..a43f2b6 100644
--- a/sys/libkern/arm/ffs.S
+++ b/sys/libkern/arm/ffs.S
@@ -82,3 +82,5 @@ ENTRY(ffs)
rsbne r0, r0, #32
RET
#endif
+END(ffs)
+
diff --git a/sys/libkern/arm/ldivmod.S b/sys/libkern/arm/ldivmod.S
index a88db54..26a3944 100644
--- a/sys/libkern/arm/ldivmod.S
+++ b/sys/libkern/arm/ldivmod.S
@@ -53,6 +53,7 @@ ENTRY_NP(__aeabi_ldivmod)
add sp, sp, #8 /* Move sp to the remainder value */
ldmfd sp!, {r2, r3} /* Load the remainder */
RET
+END(__aeabi_ldivmod)
ENTRY_NP(__aeabi_uldivmod)
sub sp, sp, #8 /* Space for the remainder */
@@ -62,6 +63,7 @@ ENTRY_NP(__aeabi_uldivmod)
add sp, sp, #8 /* Move sp to the remainder value */
ldmfd sp!, {r2, r3} /* Load the remainder */
RET
+END(__aeabi_uldivmod)
#endif
diff --git a/sys/libkern/arm/memcpy.S b/sys/libkern/arm/memcpy.S
index b2997db..9fca8f6 100644
--- a/sys/libkern/arm/memcpy.S
+++ b/sys/libkern/arm/memcpy.S
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Andrew Turner
+ * Copyright (C) 2013 Andrew Turner
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -32,6 +32,7 @@ __FBSDID("$FreeBSD$");
ENTRY_NP(__aeabi_memcpy)
b memcpy
+END(__aeabi_memcpy)
#endif
diff --git a/sys/libkern/arm/memset.S b/sys/libkern/arm/memset.S
new file mode 100644
index 0000000..57d2507
--- /dev/null
+++ b/sys/libkern/arm/memset.S
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2013 Andrew Turner
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <machine/asm.h>
+__FBSDID("$FreeBSD$");
+
+#ifdef __ARM_EABI__
+
+/*
+ * This implements
+ * void __aeabi_memset(void *dest, size_t len, int c)
+ * by calling:
+ * void *memset(dest, c, len)
+ *
+ * The arguments are in r0-r2; r3 can be used as a scratch register.
+ */
+ENTRY_NP(__aeabi_memset)
+ mov r3, r2
+ mov r2, r1
+ mov r1, r3
+ b memset
+END(__aeabi_memset)
+
+#endif
+
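
The register shuffle exists because the AEABI helper and the C library
disagree on argument order. In C terms (a hypothetical rendering; the real
routine stays in assembly so the swap costs only three moves and a branch):

    #include <stddef.h>
    #include <string.h>

    /* AEABI passes (dest, n, c); memset(3) wants (dest, c, n). */
    void
    aeabi_memset_sketch(void *dest, size_t n, int c)
    {

            (void)memset(dest, c, n);
    }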
diff --git a/sys/mips/mips/pmap.c b/sys/mips/mips/pmap.c
index 9e1b812..4fe6ebe 100644
--- a/sys/mips/mips/pmap.c
+++ b/sys/mips/mips/pmap.c
@@ -2576,6 +2576,51 @@ pmap_copy_page(vm_page_t src, vm_page_t dst)
}
}
+void
+pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
+ vm_offset_t b_offset, int xfersize)
+{
+ char *a_cp, *b_cp;
+ vm_page_t a_m, b_m;
+ vm_offset_t a_pg_offset, b_pg_offset;
+ vm_paddr_t a_phys, b_phys;
+ int cnt;
+
+ while (xfersize > 0) {
+ a_pg_offset = a_offset & PAGE_MASK;
+ cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
+ a_m = ma[a_offset >> PAGE_SHIFT];
+ a_phys = VM_PAGE_TO_PHYS(a_m);
+ b_pg_offset = b_offset & PAGE_MASK;
+ cnt = min(cnt, PAGE_SIZE - b_pg_offset);
+ b_m = mb[b_offset >> PAGE_SHIFT];
+ b_phys = VM_PAGE_TO_PHYS(b_m);
+ if (MIPS_DIRECT_MAPPABLE(a_phys) &&
+ MIPS_DIRECT_MAPPABLE(b_phys)) {
+ pmap_flush_pvcache(a_m);
+ mips_dcache_wbinv_range_index(
+ MIPS_PHYS_TO_DIRECT(b_phys), PAGE_SIZE);
+ a_cp = (char *)MIPS_PHYS_TO_DIRECT(a_phys) +
+ a_pg_offset;
+ b_cp = (char *)MIPS_PHYS_TO_DIRECT(b_phys) +
+ b_pg_offset;
+ bcopy(a_cp, b_cp, cnt);
+ mips_dcache_wbinv_range((vm_offset_t)b_cp, cnt);
+ } else {
+ a_cp = (char *)pmap_lmem_map2(a_phys, b_phys);
+ b_cp = (char *)a_cp + PAGE_SIZE;
+ a_cp += a_pg_offset;
+ b_cp += b_pg_offset;
+ bcopy(a_cp, b_cp, cnt);
+ mips_dcache_wbinv_range((vm_offset_t)b_cp, cnt);
+ pmap_lmem_unmap();
+ }
+ a_offset += cnt;
+ b_offset += cnt;
+ xfersize -= cnt;
+ }
+}
+
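
Each iteration of the loop above copies the largest run that stays inside one
source page and one destination page, so cnt is the minimum of the transfer
remainder and both page tails. A standalone trace (offsets illustrative):

    #include <stdio.h>
    #define PAGE_SIZE 4096
    #define PAGE_MASK (PAGE_SIZE - 1)
    static int imin(int a, int b) { return (a < b ? a : b); }

    int
    main(void)
    {
            int a_off = 4000, b_off = 100, xfer = 5000, cnt;

            while (xfer > 0) {
                    cnt = imin(xfer, PAGE_SIZE - (a_off & PAGE_MASK));
                    cnt = imin(cnt, PAGE_SIZE - (b_off & PAGE_MASK));
                    printf("copy %4d: src page %d+%d -> dst page %d+%d\n",
                        cnt, a_off / PAGE_SIZE, a_off & PAGE_MASK,
                        b_off / PAGE_SIZE, b_off & PAGE_MASK);
                    a_off += cnt;
                    b_off += cnt;
                    xfer -= cnt;
            }
            return (0);
    }

The powerpc moea, moea64, and booke implementations later in this diff follow
the same chunking; only the way each page is mapped differs.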
/*
* Returns true if the pmap's pv is one of the first
* 16 pvs linked to from this page. This count may
diff --git a/sys/modules/uart/Makefile b/sys/modules/uart/Makefile
index d8a4869..06b005b 100644
--- a/sys/modules/uart/Makefile
+++ b/sys/modules/uart/Makefile
@@ -4,6 +4,9 @@
.if ${MACHINE_CPUARCH} == "sparc64"
uart_bus_ebus= uart_bus_ebus.c
+.endif
+
+.if ${MACHINE_CPUARCH} == "arm" || ${MACHINE_CPUARCH} == "sparc64"
ofw_bus_if= ofw_bus_if.h
.endif
diff --git a/sys/net/bridgestp.c b/sys/net/bridgestp.c
index 43dd005..07cf219 100644
--- a/sys/net/bridgestp.c
+++ b/sys/net/bridgestp.c
@@ -234,7 +234,7 @@ bstp_transmit_tcn(struct bstp_state *bs, struct bstp_port *bp)
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
return;
- MGETHDR(m, M_NOWAIT, MT_DATA);
+ m = m_gethdr(M_NOWAIT, MT_DATA);
if (m == NULL)
return;
@@ -348,7 +348,7 @@ bstp_send_bpdu(struct bstp_state *bs, struct bstp_port *bp,
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
return;
- MGETHDR(m, M_NOWAIT, MT_DATA);
+ m = m_gethdr(M_NOWAIT, MT_DATA);
if (m == NULL)
return;
diff --git a/sys/net/if_gre.c b/sys/net/if_gre.c
index 27e58d8..48255b0 100644
--- a/sys/net/if_gre.c
+++ b/sys/net/if_gre.c
@@ -384,8 +384,7 @@ gre_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst,
mob_h.hcrc = gre_in_cksum((u_int16_t *)&mob_h, msiz);
if ((m->m_data - msiz) < m->m_pktdat) {
- /* need new mbuf */
- MGETHDR(m0, M_NOWAIT, MT_DATA);
+ m0 = m_gethdr(M_NOWAIT, MT_DATA);
if (m0 == NULL) {
_IF_DROP(&ifp->if_snd);
m_freem(m);
diff --git a/sys/net/rtsock.c b/sys/net/rtsock.c
index 9a08a76..c3781d5 100644
--- a/sys/net/rtsock.c
+++ b/sys/net/rtsock.c
@@ -1118,20 +1118,17 @@ rt_msg1(int type, struct rt_addrinfo *rtinfo)
default:
len = sizeof(struct rt_msghdr);
}
- if (len > MCLBYTES)
- panic("rt_msg1");
- m = m_gethdr(M_NOWAIT, MT_DATA);
- if (m && len > MHLEN) {
- MCLGET(m, M_NOWAIT);
- if ((m->m_flags & M_EXT) == 0) {
- m_free(m);
- m = NULL;
- }
- }
+
+ /* XXXGL: can we use MJUMPAGESIZE cluster here? */
+ KASSERT(len <= MCLBYTES, ("%s: message too big", __func__));
+ if (len > MHLEN)
+ m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
+ else
+ m = m_gethdr(M_NOWAIT, MT_DATA);
if (m == NULL)
return (m);
+
m->m_pkthdr.len = m->m_len = len;
- m->m_pkthdr.rcvif = NULL;
rtm = mtod(m, struct rt_msghdr *);
bzero((caddr_t)rtm, len);
for (i = 0; i < RTAX_MAX; i++) {
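
This is the recurring shape of the conversion here and in the network stack
hunks below: decide the allocation size up front instead of grafting a
cluster on afterwards, leaving a single failure path. Sketch:

    struct mbuf *m;

    /* old: MGETHDR(); if (len > MHLEN) { MCLGET(); check M_EXT; undo } */
    if (len > MHLEN)
            m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
    else
            m = m_gethdr(M_NOWAIT, MT_DATA);
    if (m == NULL)
            return (NULL);  /* no partially built mbuf to unwind */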
diff --git a/sys/netinet/igmp.c b/sys/netinet/igmp.c
index e87da5e..fd7eb3c 100644
--- a/sys/netinet/igmp.c
+++ b/sys/netinet/igmp.c
@@ -523,7 +523,7 @@ igmp_ra_alloc(void)
struct mbuf *m;
struct ipoption *p;
- MGET(m, M_NOWAIT, MT_DATA);
+ m = m_get(M_WAITOK, MT_DATA);
p = mtod(m, struct ipoption *);
p->ipopt_dst.s_addr = INADDR_ANY;
p->ipopt_list[0] = IPOPT_RA; /* Router Alert Option */
@@ -2203,7 +2203,7 @@ igmp_v1v2_queue_report(struct in_multi *inm, const int type)
ifp = inm->inm_ifp;
- MGETHDR(m, M_NOWAIT, MT_DATA);
+ m = m_gethdr(M_NOWAIT, MT_DATA);
if (m == NULL)
return (ENOMEM);
MH_ALIGN(m, sizeof(struct ip) + sizeof(struct igmp));
diff --git a/sys/netinet/ip_carp.c b/sys/netinet/ip_carp.c
index ae3da96..eaff4f6 100644
--- a/sys/netinet/ip_carp.c
+++ b/sys/netinet/ip_carp.c
@@ -764,7 +764,7 @@ carp_send_ad_locked(struct carp_softc *sc)
if (sc->sc_naddrs) {
struct ip *ip;
- MGETHDR(m, M_NOWAIT, MT_HEADER);
+ m = m_gethdr(M_NOWAIT, MT_DATA);
if (m == NULL) {
CARPSTATS_INC(carps_onomem);
goto resched;
@@ -832,7 +832,7 @@ carp_send_ad_locked(struct carp_softc *sc)
if (sc->sc_naddrs6) {
struct ip6_hdr *ip6;
- MGETHDR(m, M_NOWAIT, MT_HEADER);
+ m = m_gethdr(M_NOWAIT, MT_DATA);
if (m == NULL) {
CARPSTATS_INC(carps_onomem);
goto resched;
diff --git a/sys/netinet/ip_input.c b/sys/netinet/ip_input.c
index 555b5d0..1f1122c 100644
--- a/sys/netinet/ip_input.c
+++ b/sys/netinet/ip_input.c
@@ -1406,7 +1406,7 @@ ip_forward(struct mbuf *m, int srcrt)
* assume exclusive access to the IP header in `m', so any
* data in a cluster may change before we reach icmp_error().
*/
- MGETHDR(mcopy, M_NOWAIT, m->m_type);
+ mcopy = m_gethdr(M_NOWAIT, m->m_type);
if (mcopy != NULL && !m_dup_pkthdr(mcopy, m, M_NOWAIT)) {
/*
* It's probably ok if the pkthdr dup fails (because
diff --git a/sys/netinet/ip_mroute.c b/sys/netinet/ip_mroute.c
index 6228933..db8238f 100644
--- a/sys/netinet/ip_mroute.c
+++ b/sys/netinet/ip_mroute.c
@@ -2083,13 +2083,12 @@ bw_upcalls_send(void)
* Allocate a new mbuf, initialize it with the header and
* the payload for the pending calls.
*/
- MGETHDR(m, M_NOWAIT, MT_DATA);
+ m = m_gethdr(M_NOWAIT, MT_DATA);
if (m == NULL) {
log(LOG_WARNING, "bw_upcalls_send: cannot allocate mbuf\n");
return;
}
- m->m_len = m->m_pkthdr.len = 0;
m_copyback(m, 0, sizeof(struct igmpmsg), (caddr_t)&igmpmsg);
m_copyback(m, sizeof(struct igmpmsg), len, (caddr_t)&V_bw_upcalls[0]);
@@ -2430,7 +2429,7 @@ pim_register_send_upcall(struct ip *ip, struct vif *vifp,
/*
* Add a new mbuf with an upcall header
*/
- MGETHDR(mb_first, M_NOWAIT, MT_DATA);
+ mb_first = m_gethdr(M_NOWAIT, MT_DATA);
if (mb_first == NULL) {
m_freem(mb_copy);
return ENOBUFS;
@@ -2488,7 +2487,7 @@ pim_register_send_rp(struct ip *ip, struct vif *vifp, struct mbuf *mb_copy,
/*
* Add a new mbuf with the encapsulating header
*/
- MGETHDR(mb_first, M_NOWAIT, MT_DATA);
+ mb_first = m_gethdr(M_NOWAIT, MT_DATA);
if (mb_first == NULL) {
m_freem(mb_copy);
return ENOBUFS;
diff --git a/sys/netinet/ip_options.c b/sys/netinet/ip_options.c
index 239c699..8911992 100644
--- a/sys/netinet/ip_options.c
+++ b/sys/netinet/ip_options.c
@@ -495,12 +495,12 @@ ip_insertoptions(struct mbuf *m, struct mbuf *opt, int *phlen)
if (p->ipopt_dst.s_addr)
ip->ip_dst = p->ipopt_dst;
if (m->m_flags & M_EXT || m->m_data - optlen < m->m_pktdat) {
- MGETHDR(n, M_NOWAIT, MT_DATA);
+ n = m_gethdr(M_NOWAIT, MT_DATA);
if (n == NULL) {
*phlen = 0;
return (m);
}
- M_MOVE_PKTHDR(n, m);
+ m_move_pkthdr(n, m);
n->m_pkthdr.rcvif = NULL;
n->m_pkthdr.len += optlen;
m->m_len -= sizeof(struct ip);
diff --git a/sys/netinet/ip_output.c b/sys/netinet/ip_output.c
index 166bef5..fd418e9 100644
--- a/sys/netinet/ip_output.c
+++ b/sys/netinet/ip_output.c
@@ -784,7 +784,7 @@ smart_frag_failure:
struct mbuf *m;
int mhlen = sizeof (struct ip);
- MGETHDR(m, M_NOWAIT, MT_DATA);
+ m = m_gethdr(M_NOWAIT, MT_DATA);
if (m == NULL) {
error = ENOBUFS;
IPSTAT_INC(ips_odropped);
@@ -951,7 +951,7 @@ ip_ctloutput(struct socket *so, struct sockopt *sopt)
error = EMSGSIZE;
break;
}
- MGET(m, sopt->sopt_td ? M_WAITOK : M_NOWAIT, MT_DATA);
+ m = m_get(sopt->sopt_td ? M_WAITOK : M_NOWAIT, MT_DATA);
if (m == NULL) {
error = ENOBUFS;
break;
diff --git a/sys/netinet/libalias/alias.c b/sys/netinet/libalias/alias.c
index 33876c4..8f23699 100644
--- a/sys/netinet/libalias/alias.c
+++ b/sys/netinet/libalias/alias.c
@@ -1749,26 +1749,22 @@ LibAliasUnLoadAllModule(void)
struct mbuf *
m_megapullup(struct mbuf *m, int len) {
struct mbuf *mcl;
-
+
if (len > m->m_pkthdr.len)
goto bad;
-
- /* Do not reallocate packet if it is sequentional,
- * writable and has some extra space for expansion.
- * XXX: Constant 100bytes is completely empirical. */
-#define RESERVE 100
- if (m->m_next == NULL && M_WRITABLE(m) && M_TRAILINGSPACE(m) >= RESERVE)
+
+ if (m->m_next == NULL && M_WRITABLE(m))
return (m);
- mcl = m_get2(len + RESERVE, M_NOWAIT, MT_DATA, M_PKTHDR);
+ mcl = m_get2(len, M_NOWAIT, MT_DATA, M_PKTHDR);
if (mcl == NULL)
goto bad;
-
+ m_align(mcl, len);
m_move_pkthdr(mcl, m);
m_copydata(m, 0, len, mtod(mcl, caddr_t));
mcl->m_len = mcl->m_pkthdr.len = len;
m_freem(m);
-
+
return (mcl);
bad:
m_freem(m);
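
With the empirical 100-byte reserve gone, m_megapullup() now allocates
exactly len bytes and relies on m_align() for tail placement. A hypothetical
caller, linearizing a packet before inspecting its header:

    struct ip *ip;
    struct mbuf *m;

    m = m_megapullup(m, m->m_pkthdr.len);
    if (m == NULL)
            return;                 /* chain already freed on failure */
    ip = mtod(m, struct ip *);      /* packet data is now contiguous */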
diff --git a/sys/netinet/tcp_output.c b/sys/netinet/tcp_output.c
index ac7ac31..bcfed8a 100644
--- a/sys/netinet/tcp_output.c
+++ b/sys/netinet/tcp_output.c
@@ -842,23 +842,19 @@ send:
TCPSTAT_INC(tcps_sndpack);
TCPSTAT_ADD(tcps_sndbyte, len);
}
- MGETHDR(m, M_NOWAIT, MT_DATA);
+#ifdef INET6
+ if (MHLEN < hdrlen + max_linkhdr)
+ m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
+ else
+#endif
+ m = m_gethdr(M_NOWAIT, MT_DATA);
+
if (m == NULL) {
SOCKBUF_UNLOCK(&so->so_snd);
error = ENOBUFS;
goto out;
}
-#ifdef INET6
- if (MHLEN < hdrlen + max_linkhdr) {
- MCLGET(m, M_NOWAIT);
- if ((m->m_flags & M_EXT) == 0) {
- SOCKBUF_UNLOCK(&so->so_snd);
- m_freem(m);
- error = ENOBUFS;
- goto out;
- }
- }
-#endif
+
m->m_data += max_linkhdr;
m->m_len = hdrlen;
@@ -902,7 +898,7 @@ send:
else
TCPSTAT_INC(tcps_sndwinup);
- MGETHDR(m, M_NOWAIT, MT_DATA);
+ m = m_gethdr(M_NOWAIT, MT_DATA);
if (m == NULL) {
error = ENOBUFS;
goto out;
diff --git a/sys/netinet/tcp_subr.c b/sys/netinet/tcp_subr.c
index 3c9dd34..24e4db2 100644
--- a/sys/netinet/tcp_subr.c
+++ b/sys/netinet/tcp_subr.c
@@ -1860,7 +1860,7 @@ ipsec_hdrsiz_tcp(struct tcpcb *tp)
if ((tp == NULL) || ((inp = tp->t_inpcb) == NULL))
return (0);
- MGETHDR(m, M_NOWAIT, MT_DATA);
+ m = m_gethdr(M_NOWAIT, MT_DATA);
if (!m)
return (0);
diff --git a/sys/netinet6/icmp6.c b/sys/netinet6/icmp6.c
index 0cd6e5a..8772779 100644
--- a/sys/netinet6/icmp6.c
+++ b/sys/netinet6/icmp6.c
@@ -578,25 +578,18 @@ icmp6_input(struct mbuf **mp, int *offp, int proto)
if ((n->m_flags & M_EXT) != 0
|| n->m_len < off + sizeof(struct icmp6_hdr)) {
struct mbuf *n0 = n;
- const int maxlen = sizeof(*nip6) + sizeof(*nicmp6);
int n0len;
- MGETHDR(n, M_NOWAIT, n0->m_type);
- n0len = n0->m_pkthdr.len; /* save for use below */
- if (n)
- M_MOVE_PKTHDR(n, n0); /* FIB copied. */
- if (n && maxlen >= MHLEN) {
- MCLGET(n, M_NOWAIT);
- if ((n->m_flags & M_EXT) == 0) {
- m_free(n);
- n = NULL;
- }
- }
+ CTASSERT(sizeof(*nip6) + sizeof(*nicmp6) <= MHLEN);
+ n = m_gethdr(M_NOWAIT, n0->m_type);
if (n == NULL) {
/* Give up remote */
m_freem(n0);
break;
}
+
+ m_move_pkthdr(n, n0); /* FIB copied. */
+ n0len = n0->m_pkthdr.len; /* save for use below */
/*
* Copy IPv6 and ICMPv6 only.
*/
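
The CTASSERT replaces a runtime maxlen computation: since an IPv6 header plus
an ICMPv6 header provably fits in MHLEN at compile time, the MCLGET fallback
and its unwind path disappear. The pattern, in general form:

    struct mbuf *n;

    /* Prove the fit at build time, then allocate unconditionally. */
    CTASSERT(sizeof(struct ip6_hdr) + sizeof(struct icmp6_hdr) <= MHLEN);
    n = m_gethdr(M_NOWAIT, MT_DATA);
    if (n == NULL)
            return;                 /* the only failure left is ENOMEM */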
@@ -683,7 +676,7 @@ icmp6_input(struct mbuf **mp, int *offp, int proto)
} else {
struct prison *pr;
u_char *p;
- int maxlen, maxhlen, hlen;
+ int maxhlen, hlen;
/*
* XXX: this combination of flags is pointless,
@@ -694,20 +687,14 @@ icmp6_input(struct mbuf **mp, int *offp, int proto)
if (code != 0)
goto badcode;
- maxlen = sizeof(*nip6) + sizeof(*nicmp6) + 4;
- if (maxlen >= MCLBYTES) {
+
+ CTASSERT(sizeof(*nip6) + sizeof(*nicmp6) + 4 <= MHLEN);
+ n = m_gethdr(M_NOWAIT, m->m_type);
+ if (n == NULL) {
/* Give up remote */
break;
}
- MGETHDR(n, M_NOWAIT, m->m_type);
- if (n && maxlen > MHLEN) {
- MCLGET(n, M_NOWAIT);
- if ((n->m_flags & M_EXT) == 0) {
- m_free(n);
- n = NULL;
- }
- }
- if (n && !m_dup_pkthdr(n, m, M_NOWAIT)) {
+ if (!m_dup_pkthdr(n, m, M_NOWAIT)) {
/*
* Previous code did a blind M_COPY_PKTHDR
* and said "just for rcvif". If true, then
@@ -718,13 +705,8 @@ icmp6_input(struct mbuf **mp, int *offp, int proto)
m_free(n);
n = NULL;
}
- if (n == NULL) {
- /* Give up remote */
- break;
- }
- n->m_pkthdr.rcvif = NULL;
- n->m_len = 0;
- maxhlen = M_TRAILINGSPACE(n) - maxlen;
+ maxhlen = M_TRAILINGSPACE(n) -
+ (sizeof(*nip6) + sizeof(*nicmp6) + 4);
pr = curthread->td_ucred->cr_prison;
mtx_lock(&pr->pr_mtx);
hlen = strlen(pr->pr_hostname);
@@ -1494,26 +1476,23 @@ ni6_input(struct mbuf *m, int off)
break;
}
- /* allocate an mbuf to reply. */
- MGETHDR(n, M_NOWAIT, m->m_type);
+ /* Allocate an mbuf to reply. */
+ if (replylen > MCLBYTES) {
+ /*
+ * XXX: should we try to allocate more? But MCLBYTES
+ * is probably much larger than IPV6_MMTU...
+ */
+ goto bad;
+ }
+ if (replylen > MHLEN)
+ n = m_getcl(M_NOWAIT, m->m_type, M_PKTHDR);
+ else
+ n = m_gethdr(M_NOWAIT, m->m_type);
if (n == NULL) {
m_freem(m);
return (NULL);
}
- M_MOVE_PKTHDR(n, m); /* just for recvif and FIB */
- if (replylen > MHLEN) {
- if (replylen > MCLBYTES) {
- /*
- * XXX: should we try to allocate more? But MCLBYTES
- * is probably much larger than IPV6_MMTU...
- */
- goto bad;
- }
- MCLGET(n, M_NOWAIT);
- if ((n->m_flags & M_EXT) == 0) {
- goto bad;
- }
- }
+ m_move_pkthdr(n, m); /* just for recvif and FIB */
n->m_pkthdr.len = n->m_len = replylen;
/* copy mbuf header and IPv6 + Node Information base headers */
@@ -1608,16 +1587,13 @@ ni6_nametodns(const char *name, int namelen, int old)
else
len = MCLBYTES;
- /* because MAXHOSTNAMELEN is usually 256, we use cluster mbuf */
- MGET(m, M_NOWAIT, MT_DATA);
- if (m && len > MLEN) {
- MCLGET(m, M_NOWAIT);
- if ((m->m_flags & M_EXT) == 0)
- goto fail;
- }
- if (!m)
+	/* Because MAXHOSTNAMELEN is usually 256, we use a cluster mbuf. */
+ if (len > MLEN)
+ m = m_getcl(M_NOWAIT, MT_DATA, 0);
+ else
+ m = m_get(M_NOWAIT, MT_DATA);
+ if (m == NULL)
goto fail;
- m->m_next = NULL;
if (old) {
m->m_len = len;
@@ -2063,7 +2039,7 @@ icmp6_rip6_input(struct mbuf **mp, int off)
*/
if ((m->m_flags & M_EXT) && m->m_next == NULL &&
m->m_len <= MHLEN) {
- MGET(n, M_NOWAIT, m->m_type);
+ n = m_get(M_NOWAIT, m->m_type);
if (n != NULL) {
if (m_dup_pkthdr(n, m, M_NOWAIT)) {
bcopy(m->m_data, n->m_data,
@@ -2113,7 +2089,7 @@ icmp6_rip6_input(struct mbuf **mp, int off)
m->m_len <= MHLEN) {
struct mbuf *n;
- MGET(n, M_NOWAIT, m->m_type);
+ n = m_get(M_NOWAIT, m->m_type);
if (n != NULL) {
if (m_dup_pkthdr(n, m, M_NOWAIT)) {
bcopy(m->m_data, n->m_data, m->m_len);
@@ -2592,14 +2568,10 @@ icmp6_redirect_output(struct mbuf *m0, struct rtentry *rt)
#if IPV6_MMTU >= MCLBYTES
# error assumption failed about IPV6_MMTU and MCLBYTES
#endif
- MGETHDR(m, M_NOWAIT, MT_HEADER);
- if (m && IPV6_MMTU >= MHLEN)
- MCLGET(m, M_NOWAIT);
- if (!m)
+ m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
+ if (m == NULL)
goto fail;
M_SETFIB(m, rt->rt_fibnum);
- m->m_pkthdr.rcvif = NULL;
- m->m_len = 0;
maxlen = M_TRAILINGSPACE(m);
maxlen = min(IPV6_MMTU, maxlen);
/* just for safety */
diff --git a/sys/netinet6/ip6_input.c b/sys/netinet6/ip6_input.c
index 45c4ff6..e037737 100644
--- a/sys/netinet6/ip6_input.c
+++ b/sys/netinet6/ip6_input.c
@@ -497,21 +497,16 @@ ip6_input(struct mbuf *m)
if (m && m->m_next != NULL && m->m_pkthdr.len < MCLBYTES) {
struct mbuf *n;
- MGETHDR(n, M_NOWAIT, MT_HEADER);
- if (n)
- M_MOVE_PKTHDR(n, m);
- if (n && n->m_pkthdr.len > MHLEN) {
- MCLGET(n, M_NOWAIT);
- if ((n->m_flags & M_EXT) == 0) {
- m_freem(n);
- n = NULL;
- }
- }
+ if (m->m_pkthdr.len > MHLEN)
+ n = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
+ else
+ n = m_gethdr(M_NOWAIT, MT_DATA);
if (n == NULL) {
m_freem(m);
return; /* ENOBUFS */
}
+ m_move_pkthdr(n, m);
m_copydata(m, 0, n->m_pkthdr.len, mtod(n, caddr_t));
n->m_len = n->m_pkthdr.len;
m_freem(m);
@@ -1667,22 +1662,12 @@ ip6_pullexthdr(struct mbuf *m, size_t off, int nxt)
else
elen = (ip6e.ip6e_len + 1) << 3;
- MGET(n, M_NOWAIT, MT_DATA);
- if (n && elen >= MLEN) {
- MCLGET(n, M_NOWAIT);
- if ((n->m_flags & M_EXT) == 0) {
- m_free(n);
- n = NULL;
- }
- }
- if (!n)
- return NULL;
-
- n->m_len = 0;
- if (elen >= M_TRAILINGSPACE(n)) {
- m_free(n);
+ if (elen > MLEN)
+ n = m_getcl(M_NOWAIT, MT_DATA, 0);
+ else
+ n = m_get(M_NOWAIT, MT_DATA);
+ if (n == NULL)
return NULL;
- }
m_copydata(m, off, elen, mtod(n, caddr_t));
n->m_len = elen;
diff --git a/sys/netinet6/ip6_mroute.c b/sys/netinet6/ip6_mroute.c
index a221110..aeeb644 100644
--- a/sys/netinet6/ip6_mroute.c
+++ b/sys/netinet6/ip6_mroute.c
@@ -1698,11 +1698,10 @@ register_send(struct ip6_hdr *ip6, struct mif6 *mif, struct mbuf *m)
#endif
++pim6stat.pim6s_snd_registers;
- /* Make a copy of the packet to send to the user level process */
- MGETHDR(mm, M_NOWAIT, MT_HEADER);
+ /* Make a copy of the packet to send to the user level process. */
+ mm = m_gethdr(M_NOWAIT, MT_DATA);
if (mm == NULL)
return (ENOBUFS);
- mm->m_pkthdr.rcvif = NULL;
mm->m_data += max_linkhdr;
mm->m_len = sizeof(struct ip6_hdr);
diff --git a/sys/netinet6/ip6_output.c b/sys/netinet6/ip6_output.c
index 5e8c11f..e5042d2 100644
--- a/sys/netinet6/ip6_output.c
+++ b/sys/netinet6/ip6_output.c
@@ -774,9 +774,7 @@ again:
/*
* XXX: ip6_mforward expects that rcvif is NULL
* when it is called from the originating path.
- * However, it is not always the case, since
- * some versions of MGETHDR() does not
- * initialize the field.
+ * However, it may not always be the case.
*/
m->m_pkthdr.rcvif = NULL;
if (ip6_mforward(ip6, ifp, m) != 0) {
@@ -1122,13 +1120,12 @@ passout:
*/
m0 = m;
for (off = hlen; off < tlen; off += len) {
- MGETHDR(m, M_NOWAIT, MT_HEADER);
+ m = m_gethdr(M_NOWAIT, MT_DATA);
if (!m) {
error = ENOBUFS;
V_ip6stat.ip6s_odropped++;
goto sendorfree;
}
- m->m_pkthdr.rcvif = NULL;
m->m_flags = m0->m_flags & M_COPYFLAGS; /* incl. FIB */
*mnext = m;
mnext = &m->m_nextpkt;
@@ -1222,17 +1219,12 @@ ip6_copyexthdr(struct mbuf **mp, caddr_t hdr, int hlen)
if (hlen > MCLBYTES)
return (ENOBUFS); /* XXX */
- MGET(m, M_NOWAIT, MT_DATA);
- if (!m)
+ if (hlen > MLEN)
+ m = m_getcl(M_NOWAIT, MT_DATA, 0);
+ else
+ m = m_get(M_NOWAIT, MT_DATA);
+ if (m == NULL)
return (ENOBUFS);
-
- if (hlen > MLEN) {
- MCLGET(m, M_NOWAIT);
- if ((m->m_flags & M_EXT) == 0) {
- m_free(m);
- return (ENOBUFS);
- }
- }
m->m_len = hlen;
if (hdr)
bcopy(hdr, mtod(m, caddr_t), hlen);
@@ -1260,8 +1252,8 @@ ip6_insert_jumboopt(struct ip6_exthdrs *exthdrs, u_int32_t plen)
* Otherwise, use it to store the options.
*/
if (exthdrs->ip6e_hbh == 0) {
- MGET(mopt, M_NOWAIT, MT_DATA);
- if (mopt == 0)
+ mopt = m_get(M_NOWAIT, MT_DATA);
+ if (mopt == NULL)
return (ENOBUFS);
mopt->m_len = JUMBOOPTLEN;
optbuf = mtod(mopt, u_char *);
@@ -1292,15 +1284,8 @@ ip6_insert_jumboopt(struct ip6_exthdrs *exthdrs, u_int32_t plen)
* As a consequence, we must always prepare a cluster
* at this point.
*/
- MGET(n, M_NOWAIT, MT_DATA);
- if (n) {
- MCLGET(n, M_NOWAIT);
- if ((n->m_flags & M_EXT) == 0) {
- m_freem(n);
- n = NULL;
- }
- }
- if (!n)
+ n = m_getcl(M_NOWAIT, MT_DATA, 0);
+ if (n == NULL)
return (ENOBUFS);
n->m_len = oldoptlen + JUMBOOPTLEN;
bcopy(mtod(mopt, caddr_t), mtod(n, caddr_t),
@@ -1369,8 +1354,8 @@ ip6_insertfraghdr(struct mbuf *m0, struct mbuf *m, int hlen,
/* allocate a new mbuf for the fragment header */
struct mbuf *mfrg;
- MGET(mfrg, M_NOWAIT, MT_DATA);
- if (mfrg == 0)
+ mfrg = m_get(M_NOWAIT, MT_DATA);
+ if (mfrg == NULL)
return (ENOBUFS);
mfrg->m_len = sizeof(struct ip6_frag);
*frghdrp = mtod(mfrg, struct ip6_frag *);
@@ -3045,12 +3030,12 @@ ip6_splithdr(struct mbuf *m, struct ip6_exthdrs *exthdrs)
ip6 = mtod(m, struct ip6_hdr *);
if (m->m_len > sizeof(*ip6)) {
- MGETHDR(mh, M_NOWAIT, MT_HEADER);
- if (mh == 0) {
+ mh = m_gethdr(M_NOWAIT, MT_DATA);
+ if (mh == NULL) {
m_freem(m);
return ENOBUFS;
}
- M_MOVE_PKTHDR(mh, m);
+ m_move_pkthdr(mh, m);
MH_ALIGN(mh, sizeof(*ip6));
m->m_len -= sizeof(*ip6);
m->m_data += sizeof(*ip6);
diff --git a/sys/netinet6/mld6.c b/sys/netinet6/mld6.c
index 0d9e300..560e8d6 100644
--- a/sys/netinet6/mld6.c
+++ b/sys/netinet6/mld6.c
@@ -1799,13 +1799,13 @@ mld_v1_transmit_report(struct in6_multi *in6m, const int type)
ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY|IN6_IFF_ANYCAST);
/* ia may be NULL if link-local address is tentative. */
- MGETHDR(mh, M_NOWAIT, MT_HEADER);
+ mh = m_gethdr(M_NOWAIT, MT_DATA);
if (mh == NULL) {
if (ia != NULL)
ifa_free(&ia->ia_ifa);
return (ENOMEM);
}
- MGET(md, M_NOWAIT, MT_DATA);
+ md = m_get(M_NOWAIT, MT_DATA);
if (md == NULL) {
m_free(mh);
if (ia != NULL)
@@ -3173,7 +3173,7 @@ mld_v2_encap_report(struct ifnet *ifp, struct mbuf *m)
if (ia == NULL)
CTR1(KTR_MLD, "%s: warning: ia is NULL", __func__);
- MGETHDR(mh, M_NOWAIT, MT_HEADER);
+ mh = m_gethdr(M_NOWAIT, MT_DATA);
if (mh == NULL) {
if (ia != NULL)
ifa_free(&ia->ia_ifa);
diff --git a/sys/netinet6/nd6_nbr.c b/sys/netinet6/nd6_nbr.c
index 7fe75bf..532c9b8 100644
--- a/sys/netinet6/nd6_nbr.c
+++ b/sys/netinet6/nd6_nbr.c
@@ -419,17 +419,12 @@ nd6_ns_output(struct ifnet *ifp, const struct in6_addr *daddr6,
return;
}
- MGETHDR(m, M_NOWAIT, MT_DATA);
- if (m && max_linkhdr + maxlen >= MHLEN) {
- MCLGET(m, M_NOWAIT);
- if ((m->m_flags & M_EXT) == 0) {
- m_free(m);
- m = NULL;
- }
- }
+ if (max_linkhdr + maxlen > MHLEN)
+ m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
+ else
+ m = m_gethdr(M_NOWAIT, MT_DATA);
if (m == NULL)
return;
- m->m_pkthdr.rcvif = NULL;
bzero(&ro, sizeof(ro));
@@ -997,17 +992,12 @@ nd6_na_output_fib(struct ifnet *ifp, const struct in6_addr *daddr6_0,
return;
}
- MGETHDR(m, M_NOWAIT, MT_DATA);
- if (m && max_linkhdr + maxlen >= MHLEN) {
- MCLGET(m, M_NOWAIT);
- if ((m->m_flags & M_EXT) == 0) {
- m_free(m);
- m = NULL;
- }
- }
+ if (max_linkhdr + maxlen > MHLEN)
+ m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
+ else
+ m = m_gethdr(M_NOWAIT, MT_DATA);
if (m == NULL)
return;
- m->m_pkthdr.rcvif = NULL;
M_SETFIB(m, fibnum);
if (IN6_IS_ADDR_MULTICAST(&daddr6)) {
diff --git a/sys/netipsec/key.c b/sys/netipsec/key.c
index a4b7ecb..67afed2 100644
--- a/sys/netipsec/key.c
+++ b/sys/netipsec/key.c
@@ -547,7 +547,6 @@ static const char *key_getfqdn __P((void));
static const char *key_getuserfqdn __P((void));
#endif
static void key_sa_chgstate __P((struct secasvar *, u_int8_t));
-static struct mbuf *key_alloc_mbuf __P((int));
static __inline void
sa_initref(struct secasvar *sav)
@@ -1634,15 +1633,11 @@ key_sp2msg(sp)
tlen = key_getspreqmsglen(sp);
- m = key_alloc_mbuf(tlen);
- if (!m || m->m_next) { /*XXX*/
- if (m)
- m_freem(m);
- return NULL;
- }
-
+ m = m_get2(tlen, M_NOWAIT, MT_DATA, 0);
+ if (m == NULL)
+ return (NULL);
+ m_align(m, tlen);
m->m_len = tlen;
- m->m_next = NULL;
xpl = mtod(m, struct sadb_x_policy *);
bzero(xpl, tlen);
@@ -1732,12 +1727,11 @@ key_gather_mbuf(m, mhp, ndeep, nitem, va_alist)
mtod(n, caddr_t));
} else if (i < ndeep) {
len = mhp->extlen[idx];
- n = key_alloc_mbuf(len);
- if (!n || n->m_next) { /*XXX*/
- if (n)
- m_freem(n);
+ n = m_get2(len, M_NOWAIT, MT_DATA, 0);
+ if (n == NULL)
goto fail;
- }
+ m_align(n, len);
+ n->m_len = len;
m_copydata(m, mhp->extoff[idx], mhp->extlen[idx],
mtod(n, caddr_t));
} else {
@@ -2602,13 +2596,13 @@ key_spdexpire(sp)
/* create lifetime extension (current and hard) */
len = PFKEY_ALIGN8(sizeof(*lt)) * 2;
- m = key_alloc_mbuf(len);
- if (!m || m->m_next) { /*XXX*/
- if (m)
- m_freem(m);
+ m = m_get2(len, M_NOWAIT, MT_DATA, 0);
+ if (m == NULL) {
error = ENOBUFS;
goto fail;
}
+ m_align(m, len);
+ m->m_len = len;
bzero(mtod(m, caddr_t), len);
lt = mtod(m, struct sadb_lifetime *);
lt->sadb_lifetime_len = PFKEY_UNIT64(sizeof(struct sadb_lifetime));
@@ -3602,15 +3596,12 @@ key_setsadbsa(sav)
int len;
len = PFKEY_ALIGN8(sizeof(struct sadb_sa));
- m = key_alloc_mbuf(len);
- if (!m || m->m_next) { /*XXX*/
- if (m)
- m_freem(m);
- return NULL;
- }
-
+ m = m_get2(len, M_NOWAIT, MT_DATA, 0);
+ if (m == NULL)
+ return (NULL);
+ m_align(m, len);
+ m->m_len = len;
p = mtod(m, struct sadb_sa *);
-
bzero(p, len);
p->sadb_sa_len = PFKEY_UNIT64(len);
p->sadb_sa_exttype = SADB_EXT_SA;
@@ -3636,13 +3627,11 @@ key_setsadbaddr(u_int16_t exttype, const struct sockaddr *saddr, u_int8_t prefix
len = PFKEY_ALIGN8(sizeof(struct sadb_address)) +
PFKEY_ALIGN8(saddr->sa_len);
- m = key_alloc_mbuf(len);
- if (!m || m->m_next) { /*XXX*/
- if (m)
- m_freem(m);
- return NULL;
- }
-
+ m = m_get2(len, M_NOWAIT, MT_DATA, 0);
+ if (m == NULL)
+ return (NULL);
+ m_align(m, len);
+ m->m_len = len;
p = mtod(m, struct sadb_address *);
bzero(p, len);
@@ -3682,13 +3671,11 @@ key_setsadbxsa2(u_int8_t mode, u_int32_t seq, u_int32_t reqid)
size_t len;
len = PFKEY_ALIGN8(sizeof(struct sadb_x_sa2));
- m = key_alloc_mbuf(len);
- if (!m || m->m_next) { /*XXX*/
- if (m)
- m_freem(m);
- return NULL;
- }
-
+ m = m_get2(len, M_NOWAIT, MT_DATA, 0);
+ if (m == NULL)
+ return (NULL);
+ m_align(m, len);
+ m->m_len = len;
p = mtod(m, struct sadb_x_sa2 *);
bzero(p, len);
@@ -3716,13 +3703,11 @@ key_setsadbxtype(u_int16_t type)
len = PFKEY_ALIGN8(sizeof(struct sadb_x_nat_t_type));
- m = key_alloc_mbuf(len);
- if (!m || m->m_next) { /*XXX*/
- if (m)
- m_freem(m);
+ m = m_get2(len, M_NOWAIT, MT_DATA, 0);
+ if (m == NULL)
return (NULL);
- }
-
+ m_align(m, len);
+ m->m_len = len;
p = mtod(m, struct sadb_x_nat_t_type *);
bzero(p, len);
@@ -3745,13 +3730,11 @@ key_setsadbxport(u_int16_t port, u_int16_t type)
len = PFKEY_ALIGN8(sizeof(struct sadb_x_nat_t_port));
- m = key_alloc_mbuf(len);
- if (!m || m->m_next) { /*XXX*/
- if (m)
- m_freem(m);
+ m = m_get2(len, M_NOWAIT, MT_DATA, 0);
+ if (m == NULL)
return (NULL);
- }
-
+ m_align(m, len);
+ m->m_len = len;
p = mtod(m, struct sadb_x_nat_t_port *);
bzero(p, len);
@@ -3822,13 +3805,11 @@ key_setsadbxpolicy(u_int16_t type, u_int8_t dir, u_int32_t id)
size_t len;
len = PFKEY_ALIGN8(sizeof(struct sadb_x_policy));
- m = key_alloc_mbuf(len);
- if (!m || m->m_next) { /*XXX*/
- if (m)
- m_freem(m);
- return NULL;
- }
-
+ m = m_get2(len, M_NOWAIT, MT_DATA, 0);
+ if (m == NULL)
+ return (NULL);
+ m_align(m, len);
+ m->m_len = len;
p = mtod(m, struct sadb_x_policy *);
bzero(p, len);
@@ -6951,13 +6932,13 @@ key_expire(struct secasvar *sav)
/* create lifetime extension (current and soft) */
len = PFKEY_ALIGN8(sizeof(*lt)) * 2;
- m = key_alloc_mbuf(len);
- if (!m || m->m_next) { /*XXX*/
- if (m)
- m_freem(m);
+ m = m_get2(len, M_NOWAIT, MT_DATA, 0);
+ if (m == NULL) {
error = ENOBUFS;
goto fail;
}
+ m_align(m, len);
+ m->m_len = len;
bzero(mtod(m, caddr_t), len);
lt = mtod(m, struct sadb_lifetime *);
lt->sadb_lifetime_len = PFKEY_UNIT64(sizeof(struct sadb_lifetime));
@@ -7959,45 +7940,6 @@ key_sa_stir_iv(sav)
key_randomfill(sav->iv, sav->ivlen);
}
-/* XXX too much? */
-static struct mbuf *
-key_alloc_mbuf(l)
- int l;
-{
- struct mbuf *m = NULL, *n;
- int len, t;
-
- len = l;
- while (len > 0) {
- MGET(n, M_NOWAIT, MT_DATA);
- if (n && len > MLEN)
- MCLGET(n, M_NOWAIT);
- if (!n) {
- m_freem(m);
- return NULL;
- }
-
- n->m_next = NULL;
- n->m_len = 0;
- n->m_len = M_TRAILINGSPACE(n);
- /* use the bottom of mbuf, hoping we can prepend afterwards */
- if (n->m_len > len) {
- t = (n->m_len - len) & ~(sizeof(long) - 1);
- n->m_data += t;
- n->m_len = len;
- }
-
- len -= n->m_len;
-
- if (m)
- m_cat(m, n);
- else
- m = n;
- }
-
- return m;
-}
-
/*
* Take one of the kernel's security keys and convert it into a PF_KEY
* structure within an mbuf, suitable for sending up to a waiting
@@ -8022,9 +7964,11 @@ key_setkey(struct seckey *src, u_int16_t exttype)
return NULL;
len = PFKEY_ALIGN8(sizeof(struct sadb_key) + _KEYLEN(src));
- m = key_alloc_mbuf(len);
+ m = m_get2(len, M_NOWAIT, MT_DATA, 0);
if (m == NULL)
return NULL;
+ m_align(m, len);
+ m->m_len = len;
p = mtod(m, struct sadb_key *);
bzero(p, len);
p->sadb_key_len = PFKEY_UNIT64(len);
@@ -8059,9 +8003,11 @@ key_setlifetime(struct seclifetime *src, u_int16_t exttype)
if (src == NULL)
return NULL;
- m = key_alloc_mbuf(len);
+ m = m_get2(len, M_NOWAIT, MT_DATA, 0);
if (m == NULL)
return m;
+ m_align(m, len);
+ m->m_len = len;
p = mtod(m, struct sadb_lifetime *);
bzero(p, len);
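
The removed key_alloc_mbuf() parked data at the bottom of the buffer "hoping
we can prepend afterwards"; every m_get2()+m_align() pair above preserves
that property while guaranteeing a single, contiguous mbuf. The recurring
sequence:

    struct mbuf *m;

    m = m_get2(len, M_NOWAIT, MT_DATA, 0);
    if (m == NULL)
            return (NULL);
    m_align(m, len);        /* data at the tail, lead space for prepends */
    m->m_len = len;
    bzero(mtod(m, caddr_t), len);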
diff --git a/sys/netpfil/pf/pf.c b/sys/netpfil/pf/pf.c
index 6667d7a..168ad5c 100644
--- a/sys/netpfil/pf/pf.c
+++ b/sys/netpfil/pf/pf.c
@@ -2168,7 +2168,7 @@ pf_send_tcp(struct mbuf *replyto, const struct pf_rule *r, sa_family_t af,
pfse = malloc(sizeof(*pfse), M_PFTEMP, M_NOWAIT);
if (pfse == NULL)
return;
- m = m_gethdr(M_NOWAIT, MT_HEADER);
+ m = m_gethdr(M_NOWAIT, MT_DATA);
if (m == NULL) {
free(pfse, M_PFTEMP);
return;
diff --git a/sys/nfs/nfs_common.c b/sys/nfs/nfs_common.c
index 83e7125..ad9e7a1 100644
--- a/sys/nfs/nfs_common.c
+++ b/sys/nfs/nfs_common.c
@@ -192,7 +192,7 @@ nfsm_disct(struct mbuf **mdp, caddr_t *dposp, int siz, int left, int how)
} else if (siz > MHLEN) {
panic("nfs S too big");
} else {
- MGET(mp2, how, MT_DATA);
+ mp2 = m_get(how, MT_DATA);
if (mp2 == NULL)
return (NULL);
mp2->m_len = siz;
@@ -266,7 +266,7 @@ nfsm_build_xx(int s, struct mbuf **mb, caddr_t *bpos)
void *ret;
if (s > M_TRAILINGSPACE(*mb)) {
- MGET(mb2, M_WAITOK, MT_DATA);
+ mb2 = m_get(M_WAITOK, MT_DATA);
if (s > MLEN)
panic("build > MLEN");
(*mb)->m_next = mb2;
diff --git a/sys/powerpc/aim/mmu_oea.c b/sys/powerpc/aim/mmu_oea.c
index 6a339ca..ddd22ae 100644
--- a/sys/powerpc/aim/mmu_oea.c
+++ b/sys/powerpc/aim/mmu_oea.c
@@ -275,6 +275,8 @@ void moea_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
void moea_clear_modify(mmu_t, vm_page_t);
void moea_clear_reference(mmu_t, vm_page_t);
void moea_copy_page(mmu_t, vm_page_t, vm_page_t);
+void moea_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
+ vm_page_t *mb, vm_offset_t b_offset, int xfersize);
void moea_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t);
void moea_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
vm_prot_t);
@@ -320,6 +322,7 @@ static mmu_method_t moea_methods[] = {
MMUMETHOD(mmu_clear_modify, moea_clear_modify),
MMUMETHOD(mmu_clear_reference, moea_clear_reference),
MMUMETHOD(mmu_copy_page, moea_copy_page),
+ MMUMETHOD(mmu_copy_pages, moea_copy_pages),
MMUMETHOD(mmu_enter, moea_enter),
MMUMETHOD(mmu_enter_object, moea_enter_object),
MMUMETHOD(mmu_enter_quick, moea_enter_quick),
@@ -1043,6 +1046,30 @@ moea_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst)
bcopy((void *)src, (void *)dst, PAGE_SIZE);
}
+void
+moea_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
+ vm_page_t *mb, vm_offset_t b_offset, int xfersize)
+{
+ void *a_cp, *b_cp;
+ vm_offset_t a_pg_offset, b_pg_offset;
+ int cnt;
+
+ while (xfersize > 0) {
+ a_pg_offset = a_offset & PAGE_MASK;
+ cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
+ a_cp = (char *)VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]) +
+ a_pg_offset;
+ b_pg_offset = b_offset & PAGE_MASK;
+ cnt = min(cnt, PAGE_SIZE - b_pg_offset);
+ b_cp = (char *)VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]) +
+ b_pg_offset;
+ bcopy(a_cp, b_cp, cnt);
+ a_offset += cnt;
+ b_offset += cnt;
+ xfersize -= cnt;
+ }
+}
+
/*
* Zero a page of physical memory by temporarily mapping it into the tlb.
*/
diff --git a/sys/powerpc/aim/mmu_oea64.c b/sys/powerpc/aim/mmu_oea64.c
index 14b88f0..90466e8a 100644
--- a/sys/powerpc/aim/mmu_oea64.c
+++ b/sys/powerpc/aim/mmu_oea64.c
@@ -290,6 +290,8 @@ void moea64_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
void moea64_clear_modify(mmu_t, vm_page_t);
void moea64_clear_reference(mmu_t, vm_page_t);
void moea64_copy_page(mmu_t, vm_page_t, vm_page_t);
+void moea64_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
+ vm_page_t *mb, vm_offset_t b_offset, int xfersize);
void moea64_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t);
void moea64_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
vm_prot_t);
@@ -334,6 +336,7 @@ static mmu_method_t moea64_methods[] = {
MMUMETHOD(mmu_clear_modify, moea64_clear_modify),
MMUMETHOD(mmu_clear_reference, moea64_clear_reference),
MMUMETHOD(mmu_copy_page, moea64_copy_page),
+ MMUMETHOD(mmu_copy_pages, moea64_copy_pages),
MMUMETHOD(mmu_enter, moea64_enter),
MMUMETHOD(mmu_enter_object, moea64_enter_object),
MMUMETHOD(mmu_enter_quick, moea64_enter_quick),
@@ -1104,6 +1107,72 @@ moea64_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst)
}
}
+static inline void
+moea64_copy_pages_dmap(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
+ vm_page_t *mb, vm_offset_t b_offset, int xfersize)
+{
+ void *a_cp, *b_cp;
+ vm_offset_t a_pg_offset, b_pg_offset;
+ int cnt;
+
+ while (xfersize > 0) {
+ a_pg_offset = a_offset & PAGE_MASK;
+ cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
+ a_cp = (char *)VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]) +
+ a_pg_offset;
+ b_pg_offset = b_offset & PAGE_MASK;
+ cnt = min(cnt, PAGE_SIZE - b_pg_offset);
+ b_cp = (char *)VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]) +
+ b_pg_offset;
+ bcopy(a_cp, b_cp, cnt);
+ a_offset += cnt;
+ b_offset += cnt;
+ xfersize -= cnt;
+ }
+}
+
+static inline void
+moea64_copy_pages_nodmap(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
+ vm_page_t *mb, vm_offset_t b_offset, int xfersize)
+{
+ void *a_cp, *b_cp;
+ vm_offset_t a_pg_offset, b_pg_offset;
+ int cnt;
+
+ mtx_lock(&moea64_scratchpage_mtx);
+ while (xfersize > 0) {
+ a_pg_offset = a_offset & PAGE_MASK;
+ cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
+ moea64_set_scratchpage_pa(mmu, 0,
+ VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]));
+ a_cp = (char *)moea64_scratchpage_va[0] + a_pg_offset;
+ b_pg_offset = b_offset & PAGE_MASK;
+ cnt = min(cnt, PAGE_SIZE - b_pg_offset);
+ moea64_set_scratchpage_pa(mmu, 1,
+ VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]));
+ b_cp = (char *)moea64_scratchpage_va[1] + b_pg_offset;
+ bcopy(a_cp, b_cp, cnt);
+ a_offset += cnt;
+ b_offset += cnt;
+ xfersize -= cnt;
+ }
+ mtx_unlock(&moea64_scratchpage_mtx);
+}
+
+void
+moea64_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
+ vm_page_t *mb, vm_offset_t b_offset, int xfersize)
+{
+
+ if (hw_direct_map) {
+ moea64_copy_pages_dmap(mmu, ma, a_offset, mb, b_offset,
+ xfersize);
+ } else {
+ moea64_copy_pages_nodmap(mmu, ma, a_offset, mb, b_offset,
+ xfersize);
+ }
+}
+
void
moea64_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
{
diff --git a/sys/powerpc/booke/pmap.c b/sys/powerpc/booke/pmap.c
index 343b046..b9f4838 100644
--- a/sys/powerpc/booke/pmap.c
+++ b/sys/powerpc/booke/pmap.c
@@ -274,6 +274,8 @@ static void mmu_booke_clear_reference(mmu_t, vm_page_t);
static void mmu_booke_copy(mmu_t, pmap_t, pmap_t, vm_offset_t,
vm_size_t, vm_offset_t);
static void mmu_booke_copy_page(mmu_t, vm_page_t, vm_page_t);
+static void mmu_booke_copy_pages(mmu_t, vm_page_t *,
+ vm_offset_t, vm_page_t *, vm_offset_t, int);
static void mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t,
vm_prot_t, boolean_t);
static void mmu_booke_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
@@ -334,6 +336,7 @@ static mmu_method_t mmu_booke_methods[] = {
MMUMETHOD(mmu_clear_reference, mmu_booke_clear_reference),
MMUMETHOD(mmu_copy, mmu_booke_copy),
MMUMETHOD(mmu_copy_page, mmu_booke_copy_page),
+ MMUMETHOD(mmu_copy_pages, mmu_booke_copy_pages),
MMUMETHOD(mmu_enter, mmu_booke_enter),
MMUMETHOD(mmu_enter_object, mmu_booke_enter_object),
MMUMETHOD(mmu_enter_quick, mmu_booke_enter_quick),
@@ -2136,6 +2139,36 @@ mmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_page_t dm)
mtx_unlock(&copy_page_mutex);
}
+static inline void
+mmu_booke_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
+ vm_page_t *mb, vm_offset_t b_offset, int xfersize)
+{
+ void *a_cp, *b_cp;
+ vm_offset_t a_pg_offset, b_pg_offset;
+ int cnt;
+
+ mtx_lock(&copy_page_mutex);
+ while (xfersize > 0) {
+ a_pg_offset = a_offset & PAGE_MASK;
+ cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
+ mmu_booke_kenter(mmu, copy_page_src_va,
+ VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]));
+ a_cp = (char *)copy_page_src_va + a_pg_offset;
+ b_pg_offset = b_offset & PAGE_MASK;
+ cnt = min(cnt, PAGE_SIZE - b_pg_offset);
+ mmu_booke_kenter(mmu, copy_page_dst_va,
+ VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]));
+ b_cp = (char *)copy_page_dst_va + b_pg_offset;
+ bcopy(a_cp, b_cp, cnt);
+ mmu_booke_kremove(mmu, copy_page_dst_va);
+ mmu_booke_kremove(mmu, copy_page_src_va);
+ a_offset += cnt;
+ b_offset += cnt;
+ xfersize -= cnt;
+ }
+ mtx_unlock(&copy_page_mutex);
+}
+
/*
* mmu_booke_zero_page_idle zeros the specified hardware page by mapping it
* into virtual memory and using bzero to clear its contents. This is intended
diff --git a/sys/powerpc/powerpc/mmu_if.m b/sys/powerpc/powerpc/mmu_if.m
index 8cd6e52..0382bd8 100644
--- a/sys/powerpc/powerpc/mmu_if.m
+++ b/sys/powerpc/powerpc/mmu_if.m
@@ -215,6 +215,14 @@ METHOD void copy_page {
vm_page_t _dst;
};
+METHOD void copy_pages {
+ mmu_t _mmu;
+ vm_page_t *_ma;
+ vm_offset_t _a_offset;
+ vm_page_t *_mb;
+ vm_offset_t _b_offset;
+ int _xfersize;
+};
/**
* @brief Create a mapping between a virtual/physical address pair in the
diff --git a/sys/powerpc/powerpc/pmap_dispatch.c b/sys/powerpc/powerpc/pmap_dispatch.c
index c919196..42f1a39 100644
--- a/sys/powerpc/powerpc/pmap_dispatch.c
+++ b/sys/powerpc/powerpc/pmap_dispatch.c
@@ -133,6 +133,16 @@ pmap_copy_page(vm_page_t src, vm_page_t dst)
}
void
+pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
+ vm_offset_t b_offset, int xfersize)
+{
+
+ CTR6(KTR_PMAP, "%s(%p, %#x, %p, %#x, %#x)", __func__, ma,
+ a_offset, mb, b_offset, xfersize);
+ MMU_COPY_PAGES(mmu_obj, ma, a_offset, mb, b_offset, xfersize);
+}
+
+void
pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t p,
vm_prot_t prot, boolean_t wired)
{
diff --git a/sys/sparc64/sparc64/pmap.c b/sys/sparc64/sparc64/pmap.c
index 8bfc454..97a085a 100644
--- a/sys/sparc64/sparc64/pmap.c
+++ b/sys/sparc64/sparc64/pmap.c
@@ -1918,6 +1918,14 @@ pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
}
}
+void
+pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
+ vm_offset_t b_offset, int xfersize)
+{
+
+ panic("pmap_copy_pages: not implemented");
+}
+
/*
* Returns true if the pmap's pv is one of the first
* 16 pvs linked to from this page. This count may
diff --git a/sys/sys/buf.h b/sys/sys/buf.h
index 672ef5a..2261b56 100644
--- a/sys/sys/buf.h
+++ b/sys/sys/buf.h
@@ -508,9 +508,9 @@ void bufdone_finish(struct buf *);
void bd_speedup(void);
int cluster_read(struct vnode *, u_quad_t, daddr_t, long,
- struct ucred *, long, int, struct buf **);
-int cluster_wbuild(struct vnode *, long, daddr_t, int);
-void cluster_write(struct vnode *, struct buf *, u_quad_t, int);
+ struct ucred *, long, int, int, struct buf **);
+int cluster_wbuild(struct vnode *, long, daddr_t, int, int);
+void cluster_write(struct vnode *, struct buf *, u_quad_t, int, int);
void vfs_bio_set_valid(struct buf *, int base, int size);
void vfs_bio_clrbuf(struct buf *);
void vfs_busy_pages(struct buf *, int clear_modify);
diff --git a/sys/sys/mbuf.h b/sys/sys/mbuf.h
index a4b0f71..ec10bc6 100644
--- a/sys/sys/mbuf.h
+++ b/sys/sys/mbuf.h
@@ -195,7 +195,7 @@ struct mbuf {
#define M_FIRSTFRAG 0x00001000 /* packet is first fragment */
#define M_LASTFRAG 0x00002000 /* packet is last fragment */
#define M_SKIP_FIREWALL 0x00004000 /* skip firewall processing */
-#define M_FREELIST 0x00008000 /* mbuf is on the free list */
+ /* 0x00008000 free */
#define M_VLANTAG 0x00010000 /* ether_vtag is valid */
#define M_PROMISC 0x00020000 /* packet was not for us */
#define M_NOFREE 0x00040000 /* do not free mbuf, embedded in cluster */
@@ -708,6 +708,18 @@ m_last(struct mbuf *m)
} while (0)
/*
+ * As above, for mbuf with external storage.
+ */
+#define MEXT_ALIGN(m, len) do { \
+ KASSERT((m)->m_flags & M_EXT, \
+ ("%s: MEXT_ALIGN not an M_EXT mbuf", __func__)); \
+ KASSERT((m)->m_data == (m)->m_ext.ext_buf, \
+ ("%s: MEXT_ALIGN not a virgin mbuf", __func__)); \
+ (m)->m_data += ((m)->m_ext.ext_size - (len)) & \
+ ~(sizeof(long) - 1); \
+} while (0)
+
+/*
* Compute the amount of space available before the current start of data in
* an mbuf.
*
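MEXT_ALIGN() is the external-storage counterpart of M_ALIGN()/MH_ALIGN(): on a virgin M_EXT mbuf it advances m_data so that len bytes, rounded to a long boundary, sit at the tail of the external buffer, leaving the leading space free for later prepends. A minimal usage sketch (payload_len is a made-up placeholder):

    struct mbuf *m;

    m = m_getcl(M_WAITOK, MT_DATA, 0);  /* fresh mbuf + cluster */
    MEXT_ALIGN(m, payload_len);         /* m_data now near buffer end */
    m->m_len = payload_len;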
diff --git a/sys/sys/vnode.h b/sys/sys/vnode.h
index 0696edd..bc16acf 100644
--- a/sys/sys/vnode.h
+++ b/sys/sys/vnode.h
@@ -693,6 +693,8 @@ int vn_vget_ino(struct vnode *vp, ino_t ino, int lkflags,
struct vnode **rvp);
int vn_io_fault_uiomove(char *data, int xfersize, struct uio *uio);
+int vn_io_fault_pgmove(vm_page_t ma[], vm_offset_t offset, int xfersize,
+ struct uio *uio);
#define vn_rangelock_unlock(vp, cookie) \
rangelock_unlock(&(vp)->v_rl, (cookie), VI_MTX(vp))
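vn_io_fault_pgmove() is the page-array analog of vn_io_fault_uiomove() just above: instead of a kernel buffer it moves data between a uio and an array of vm_page_t, starting offset bytes into the first page, so callers need not hold a permanent kernel mapping of the pages. A hypothetical call (ma, poffset and cnt are placeholders):

    /* Move cnt bytes between uio and the held pages in ma[]. */
    error = vn_io_fault_pgmove(ma, poffset, cnt, uio);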
diff --git a/sys/ufs/ffs/ffs_balloc.c b/sys/ufs/ffs/ffs_balloc.c
index 0e29be87f..a5e99d9 100644
--- a/sys/ufs/ffs/ffs_balloc.c
+++ b/sys/ufs/ffs/ffs_balloc.c
@@ -418,7 +418,7 @@ retry:
if (seqcount && (vp->v_mount->mnt_flag & MNT_NOCLUSTERR) == 0) {
error = cluster_read(vp, ip->i_size, lbn,
(int)fs->fs_bsize, NOCRED,
- MAXBSIZE, seqcount, &nbp);
+ MAXBSIZE, seqcount, 0, &nbp);
} else {
error = bread(vp, lbn, (int)fs->fs_bsize, NOCRED, &nbp);
}
@@ -679,9 +679,9 @@ ffs_balloc_ufs2(struct vnode *vp, off_t startoffset, int size,
if (osize < fs->fs_bsize && osize > 0) {
UFS_LOCK(ump);
error = ffs_realloccg(ip, nb, dp->di_db[nb],
- ffs_blkpref_ufs2(ip, lastlbn, (int)nb,
- &dp->di_db[0]), osize, (int)fs->fs_bsize,
- flags, cred, &bp);
+ ffs_blkpref_ufs2(ip, lastlbn, (int)nb,
+ &dp->di_db[0]), osize, (int)fs->fs_bsize,
+ flags, cred, &bp);
if (error)
return (error);
if (DOINGSOFTDEP(vp))
@@ -733,7 +733,7 @@ ffs_balloc_ufs2(struct vnode *vp, off_t startoffset, int size,
UFS_LOCK(ump);
error = ffs_realloccg(ip, lbn, dp->di_db[lbn],
ffs_blkpref_ufs2(ip, lbn, (int)lbn,
- &dp->di_db[0]), osize, nsize, flags,
+ &dp->di_db[0]), osize, nsize, flags,
cred, &bp);
if (error)
return (error);
@@ -966,7 +966,7 @@ retry:
if (seqcount && (vp->v_mount->mnt_flag & MNT_NOCLUSTERR) == 0) {
error = cluster_read(vp, ip->i_size, lbn,
(int)fs->fs_bsize, NOCRED,
- MAXBSIZE, seqcount, &nbp);
+ MAXBSIZE, seqcount, 0, &nbp);
} else {
error = bread(vp, lbn, (int)fs->fs_bsize, NOCRED, &nbp);
}
diff --git a/sys/ufs/ffs/ffs_vnops.c b/sys/ufs/ffs/ffs_vnops.c
index c065187..c310ece 100644
--- a/sys/ufs/ffs/ffs_vnops.c
+++ b/sys/ufs/ffs/ffs_vnops.c
@@ -519,7 +519,8 @@ ffs_read(ap)
* doing sequential access.
*/
error = cluster_read(vp, ip->i_size, lbn,
- size, NOCRED, blkoffset + uio->uio_resid, seqcount, &bp);
+ size, NOCRED, blkoffset + uio->uio_resid,
+ seqcount, 0, &bp);
} else if (seqcount > 1) {
/*
* If we are NOT allowed to cluster, then
@@ -784,7 +785,8 @@ ffs_write(ap)
} else if (xfersize + blkoffset == fs->fs_bsize) {
if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERW) == 0) {
bp->b_flags |= B_CLUSTEROK;
- cluster_write(vp, bp, ip->i_size, seqcount);
+ cluster_write(vp, bp, ip->i_size, seqcount,
+ 0);
} else {
bawrite(bp);
}
diff --git a/sys/ufs/ufs/ufs_vnops.c b/sys/ufs/ufs/ufs_vnops.c
index f69b1f9..574c59c 100644
--- a/sys/ufs/ufs/ufs_vnops.c
+++ b/sys/ufs/ufs/ufs_vnops.c
@@ -1554,6 +1554,7 @@ relock:
cache_purge(fvp);
if (tvp)
cache_purge(tvp);
+ cache_purge_negative(tdvp);
unlockout:
vput(fdvp);
diff --git a/sys/vm/pmap.h b/sys/vm/pmap.h
index d06c22b..c64a549 100644
--- a/sys/vm/pmap.h
+++ b/sys/vm/pmap.h
@@ -108,6 +108,8 @@ void pmap_clear_modify(vm_page_t m);
void pmap_clear_reference(vm_page_t m);
void pmap_copy(pmap_t, pmap_t, vm_offset_t, vm_size_t, vm_offset_t);
void pmap_copy_page(vm_page_t, vm_page_t);
+void pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset,
+ vm_page_t mb[], vm_offset_t b_offset, int xfersize);
void pmap_enter(pmap_t, vm_offset_t, vm_prot_t, vm_page_t,
vm_prot_t, boolean_t);
void pmap_enter_object(pmap_t pmap, vm_offset_t start,
diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c
index 9f602b7..64a2ebb 100644
--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c
@@ -85,11 +85,11 @@ __FBSDID("$FreeBSD$");
#include <vm/vm_extern.h>
#include <vm/uma.h>
-vm_map_t kernel_map=0;
-vm_map_t kmem_map=0;
-vm_map_t exec_map=0;
+vm_map_t kernel_map;
+vm_map_t kmem_map;
+vm_map_t exec_map;
vm_map_t pipe_map;
-vm_map_t buffer_map=0;
+vm_map_t buffer_map;
const void *zero_region;
CTASSERT((ZERO_REGION_SIZE & PAGE_MASK) == 0);
diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c
index 5e331ee..a0340af 100644
--- a/sys/vm/vnode_pager.c
+++ b/sys/vm/vnode_pager.c
@@ -900,7 +900,7 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
}
bp = getpbuf(&vnode_pbuf_freecnt);
- kva = (vm_offset_t) bp->b_data;
+ kva = (vm_offset_t)bp->b_data;
/*
* and map the pages to be read into the kva
diff --git a/sys/xdr/xdr_mbuf.c b/sys/xdr/xdr_mbuf.c
index b2394bc..690c2d2 100644
--- a/sys/xdr/xdr_mbuf.c
+++ b/sys/xdr/xdr_mbuf.c
@@ -123,7 +123,7 @@ xdrmbuf_getall(XDR *xdrs)
if (m)
m_adj(m, xdrs->x_handy);
else
- MGET(m, M_WAITOK, MT_DATA);
+ m = m_get(M_WAITOK, MT_DATA);
return (m);
}
@@ -228,9 +228,10 @@ xdrmbuf_putbytes(XDR *xdrs, const char *addr, u_int len)
if (xdrs->x_handy == m->m_len && M_TRAILINGSPACE(m) == 0) {
if (!m->m_next) {
- MGET(n, M_WAITOK, m->m_type);
if (m->m_flags & M_EXT)
- MCLGET(n, M_WAITOK);
+ n = m_getcl(M_WAITOK, m->m_type, 0);
+ else
+ n = m_get(M_WAITOK, m->m_type);
m->m_next = n;
}
m = m->m_next;
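Both xdr_mbuf.c hunks retire the old MGET()/MCLGET() macro pairs in favor of the function-based allocators; m_getcl() obtains the mbuf and its cluster in a single call rather than two steps. Side by side, as a sketch:

    struct mbuf *m1, *m2;

    m1 = m_get(M_WAITOK, MT_DATA);      /* was: MGET(m1, M_WAITOK, MT_DATA) */
    m2 = m_getcl(M_WAITOK, MT_DATA, 0); /* was: MGET() followed by MCLGET() */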