summaryrefslogtreecommitdiffstats
path: root/sys
diff options
context:
space:
mode:
authorLuiz Otavio O Souza <luiz@netgate.com>2017-02-09 12:56:45 -0600
committerLuiz Otavio O Souza <luiz@netgate.com>2017-02-09 12:56:45 -0600
commit91168d55b893d0ff41403098765ffb6a890805a0 (patch)
tree429fd0d8430d5b8140c527d92b60fbb3fe2e2278 /sys
parentac4b1285e0aabc7a59f32412b4f2ca7674c5458f (diff)
parent9077a30ba0ad5458cceb3d0418b2f1ea7f70d556 (diff)
downloadFreeBSD-src-91168d55b893d0ff41403098765ffb6a890805a0.zip
FreeBSD-src-91168d55b893d0ff41403098765ffb6a890805a0.tar.gz
Merge remote-tracking branch 'origin/stable/11' into devel-11
Diffstat (limited to 'sys')
-rw-r--r--sys/amd64/amd64/mem.c7
-rw-r--r--sys/amd64/amd64/pmap.c85
-rw-r--r--sys/amd64/include/cpufunc.h7
-rw-r--r--sys/amd64/linux/linux_sysvec.c15
-rw-r--r--sys/amd64/vmm/vmm_dev.c2
-rw-r--r--sys/arm/arm/mem.c9
-rw-r--r--sys/arm/ti/cpsw/if_cpsw.c191
-rw-r--r--sys/arm/ti/cpsw/if_cpswreg.h1
-rw-r--r--sys/arm/ti/cpsw/if_cpswvar.h7
-rw-r--r--sys/arm64/arm64/mem.c9
-rw-r--r--sys/boot/arm/uboot/Makefile7
-rw-r--r--sys/boot/common/Makefile.inc9
-rw-r--r--sys/boot/common/bcache.c50
-rw-r--r--sys/boot/common/bootstrap.h6
-rw-r--r--sys/boot/common/disk.c6
-rw-r--r--sys/boot/common/interp_forth.c5
-rw-r--r--sys/boot/common/md.c4
-rwxr-xr-xsys/boot/common/newvers.sh21
-rw-r--r--sys/boot/common/reloc_elf.c1
-rw-r--r--sys/boot/efi/Makefile.inc4
-rw-r--r--sys/boot/efi/boot1/boot_module.h6
-rw-r--r--sys/boot/efi/include/efiapi.h1
-rw-r--r--sys/boot/efi/libefi/efipart.c17
-rw-r--r--sys/boot/efi/loader/Makefile5
-rw-r--r--sys/boot/efi/loader/arch/arm/ldscript.arm2
-rw-r--r--sys/boot/efi/loader/arch/arm/start.S2
-rw-r--r--sys/boot/efi/loader/main.c9
-rw-r--r--sys/boot/fdt/dts/arm/a83t.dtsi3
-rw-r--r--sys/boot/fdt/dts/arm/ufw.dts253
-rw-r--r--sys/boot/forth/loader.conf1
-rw-r--r--sys/boot/i386/btx/btxldr/btxldr.S8
-rw-r--r--sys/boot/i386/common/cons.c49
-rw-r--r--sys/boot/i386/libfirewire/firewire.c4
-rw-r--r--sys/boot/i386/libi386/bioscd.c10
-rw-r--r--sys/boot/i386/libi386/biosdisk.c14
-rw-r--r--sys/boot/i386/libi386/pxe.c6
-rw-r--r--sys/boot/i386/loader/Makefile7
-rw-r--r--sys/boot/i386/loader/main.c6
-rw-r--r--sys/boot/mips/beri/loader/Makefile6
-rw-r--r--sys/boot/mips/beri/loader/beri_disk_cfi.c6
-rw-r--r--sys/boot/mips/beri/loader/beri_disk_sdcard.c6
-rw-r--r--sys/boot/mips/beri/loader/loader.h3
-rw-r--r--sys/boot/mips/beri/loader/main.c4
-rw-r--r--sys/boot/mips/uboot/Makefile5
-rw-r--r--sys/boot/ofw/common/main.c9
-rw-r--r--sys/boot/ofw/libofw/ofw_disk.c6
-rw-r--r--sys/boot/pc98/libpc98/bioscd.c10
-rw-r--r--sys/boot/pc98/libpc98/biosdisk.c11
-rw-r--r--sys/boot/pc98/loader/Makefile6
-rw-r--r--sys/boot/pc98/loader/main.c6
-rw-r--r--sys/boot/powerpc/kboot/Makefile5
-rw-r--r--sys/boot/powerpc/kboot/hostdisk.c6
-rw-r--r--sys/boot/powerpc/kboot/main.c9
-rw-r--r--sys/boot/powerpc/ofw/Makefile5
-rw-r--r--sys/boot/powerpc/ps3/Makefile5
-rw-r--r--sys/boot/powerpc/ps3/main.c9
-rw-r--r--sys/boot/powerpc/ps3/ps3cdrom.c4
-rw-r--r--sys/boot/powerpc/ps3/ps3disk.c4
-rw-r--r--sys/boot/powerpc/uboot/Makefile5
-rw-r--r--sys/boot/sparc64/loader/Makefile7
-rw-r--r--sys/boot/sparc64/loader/main.c6
-rw-r--r--sys/boot/uboot/common/main.c9
-rw-r--r--sys/boot/uboot/lib/disk.c5
-rw-r--r--sys/boot/usb/storage/umass_loader.c7
-rw-r--r--sys/boot/userboot/userboot/Makefile5
-rw-r--r--sys/boot/userboot/userboot/host.c4
-rw-r--r--sys/boot/userboot/userboot/main.c9
-rw-r--r--sys/boot/userboot/userboot/userboot_disk.c14
-rw-r--r--sys/boot/zfs/zfs.c2
-rw-r--r--sys/cam/cam_ccb.h7
-rw-r--r--sys/cam/cam_periph.c5
-rw-r--r--sys/cam/ctl/ctl.c1598
-rw-r--r--sys/cam/ctl/ctl.h10
-rw-r--r--sys/cam/ctl/ctl_backend.c73
-rw-r--r--sys/cam/ctl/ctl_backend.h63
-rw-r--r--sys/cam/ctl/ctl_backend_block.c83
-rw-r--r--sys/cam/ctl/ctl_backend_ramdisk.c880
-rw-r--r--sys/cam/ctl/ctl_cmd_table.c4
-rw-r--r--sys/cam/ctl/ctl_error.c14
-rw-r--r--sys/cam/ctl/ctl_error.h1
-rw-r--r--sys/cam/ctl/ctl_frontend.c77
-rw-r--r--sys/cam/ctl/ctl_frontend.h17
-rw-r--r--sys/cam/ctl/ctl_frontend_cam_sim.c71
-rw-r--r--sys/cam/ctl/ctl_frontend_ioctl.c51
-rw-r--r--sys/cam/ctl/ctl_frontend_iscsi.c45
-rw-r--r--sys/cam/ctl/ctl_ha.c2
-rw-r--r--sys/cam/ctl/ctl_io.h20
-rw-r--r--sys/cam/ctl/ctl_ioctl.h97
-rw-r--r--sys/cam/ctl/ctl_private.h12
-rw-r--r--sys/cam/ctl/ctl_tpc.c159
-rw-r--r--sys/cam/ctl/ctl_tpc_local.c37
-rw-r--r--sys/cam/ctl/ctl_util.c4
-rw-r--r--sys/cam/ctl/ctl_util.h4
-rw-r--r--sys/cam/ctl/scsi_ctl.c153
-rw-r--r--sys/cam/nvme/nvme_da.c2
-rw-r--r--sys/cam/scsi/scsi_all.c66
-rw-r--r--sys/cam/scsi/scsi_all.h30
-rw-r--r--sys/cam/scsi/scsi_ch.c6
-rw-r--r--sys/cddl/contrib/opensolaris/uts/intel/dtrace/fasttrap_isa.c2
-rw-r--r--sys/compat/freebsd32/freebsd32_misc.c52
-rw-r--r--sys/compat/freebsd32/freebsd32_proto.h4
-rw-r--r--sys/compat/freebsd32/freebsd32_syscall.h2
-rw-r--r--sys/compat/freebsd32/freebsd32_syscalls.c2
-rw-r--r--sys/compat/freebsd32/freebsd32_sysent.c2
-rw-r--r--sys/compat/freebsd32/freebsd32_systrace_args.c2
-rw-r--r--sys/compat/freebsd32/syscalls.master4
-rw-r--r--sys/compat/linux/linux_stats.c50
-rw-r--r--sys/compat/linuxkpi/common/include/linux/cdev.h6
-rw-r--r--sys/compat/linuxkpi/common/include/linux/device.h50
-rw-r--r--sys/compat/linuxkpi/common/include/linux/fs.h37
-rw-r--r--sys/compat/linuxkpi/common/include/linux/gfp.h4
-rw-r--r--sys/compat/linuxkpi/common/include/linux/list.h31
-rw-r--r--sys/compat/linuxkpi/common/src/linux_compat.c76
-rw-r--r--sys/compat/svr4/svr4_misc.c44
-rw-r--r--sys/conf/NOTES16
-rw-r--r--sys/conf/files2
-rw-r--r--sys/conf/kern.opts.mk3
-rw-r--r--sys/conf/kern.post.mk5
-rw-r--r--sys/conf/newvers.sh56
-rw-r--r--sys/conf/options1
-rw-r--r--sys/contrib/ipfilter/netinet/ip_fil.h8
-rw-r--r--sys/crypto/skein/amd64/skein_block_asm.s2
-rw-r--r--sys/dev/ahci/ahci.c21
-rw-r--r--sys/dev/ahci/ahci.h4
-rw-r--r--sys/dev/ahci/ahci_pci.c11
-rw-r--r--sys/dev/alc/if_alc.c32
-rw-r--r--sys/dev/alc/if_alcreg.h3
-rw-r--r--sys/dev/alc/if_alcvar.h3
-rw-r--r--sys/dev/cxgbe/t4_iov.c12
-rw-r--r--sys/dev/cxgbe/t4_main.c1
-rw-r--r--sys/dev/cxgbe/t4_sge.c5
-rw-r--r--sys/dev/cxgbe/tom/t4_connect.c20
-rw-r--r--sys/dev/cxgbe/tom/t4_cpl_io.c31
-rw-r--r--sys/dev/cxgbe/tom/t4_ddp.c3
-rw-r--r--sys/dev/cxgbe/tom/t4_listen.c45
-rw-r--r--sys/dev/cxgbe/tom/t4_tom.c138
-rw-r--r--sys/dev/cxgbe/tom/t4_tom.h8
-rw-r--r--sys/dev/drm2/i915/i915_gem.c67
-rw-r--r--sys/dev/etherswitch/etherswitch.c19
-rw-r--r--sys/dev/gpio/gpioc.c17
-rw-r--r--sys/dev/hwpmc/hwpmc_core.c2
-rw-r--r--sys/dev/ichiic/ig4_acpi.c166
-rw-r--r--sys/dev/ichiic/ig4_iic.c10
-rw-r--r--sys/dev/ichiic/ig4_pci.c49
-rw-r--r--sys/dev/ichiic/ig4_var.h2
-rw-r--r--sys/dev/isci/isci_task_request.c5
-rw-r--r--sys/dev/ixgbe/if_ix.c1
-rw-r--r--sys/dev/kbd/kbd.c2
-rw-r--r--sys/dev/mlx5/mlx5_en/mlx5_en_main.c26
-rw-r--r--sys/dev/mmc/mmc.c22
-rw-r--r--sys/dev/mmc/mmcreg.h2
-rw-r--r--sys/dev/mmc/mmcsd.c3
-rw-r--r--sys/dev/mpr/mpr_sas.c7
-rw-r--r--sys/dev/mpr/mpr_sas_lsi.c15
-rw-r--r--sys/dev/nand/nand_geom.c2
-rw-r--r--sys/dev/netmap/netmap_freebsd.c30
-rw-r--r--sys/dev/netmap/netmap_generic.c123
-rw-r--r--sys/dev/netmap/netmap_kern.h8
-rw-r--r--sys/dev/ntb/if_ntb/if_ntb.c5
-rw-r--r--sys/dev/nvd/nvd.c6
-rw-r--r--sys/dev/pci/pci.c7
-rw-r--r--sys/dev/pci/pci_pci.c44
-rw-r--r--sys/dev/pci/pcib_private.h1
-rw-r--r--sys/dev/qlxgbe/ql_def.h15
-rw-r--r--sys/dev/qlxgbe/ql_glbl.h5
-rw-r--r--sys/dev/qlxgbe/ql_hw.c71
-rw-r--r--sys/dev/qlxgbe/ql_hw.h1
-rw-r--r--sys/dev/qlxgbe/ql_isr.c30
-rw-r--r--sys/dev/qlxgbe/ql_os.c512
-rw-r--r--sys/dev/qlxgbe/ql_os.h4
-rw-r--r--sys/dev/qlxgbe/ql_ver.h2
-rw-r--r--sys/dev/sdhci/sdhci.c147
-rw-r--r--sys/dev/sdhci/sdhci.h19
-rw-r--r--sys/dev/sdhci/sdhci_acpi.c370
-rw-r--r--sys/dev/sdhci/sdhci_if.m6
-rw-r--r--sys/dev/sdhci/sdhci_pci.c33
-rw-r--r--sys/dev/sfxge/common/efx_mcdi.c24
-rw-r--r--sys/dev/sfxge/common/efx_mcdi.h2
-rw-r--r--sys/dev/sfxge/sfxge.h3
-rw-r--r--sys/dev/sfxge/sfxge_port.c87
-rw-r--r--sys/dev/sfxge/sfxge_tx.c14
-rw-r--r--sys/dev/sound/pci/hda/hdaa_patches.c6
-rw-r--r--sys/dev/sound/pci/hda/hdac.h1
-rw-r--r--sys/dev/sound/pci/hda/hdacc.c1
-rw-r--r--sys/dev/usb/usb_hub.c25
-rw-r--r--sys/dev/usb/usb_process.c3
-rw-r--r--sys/fs/nfs/nfs_commonsubs.c42
-rw-r--r--sys/fs/nfsclient/nfs_clvnops.c4
-rw-r--r--sys/fs/nfsserver/nfs_nfsdport.c39
-rw-r--r--sys/fs/nfsserver/nfs_nfsdserv.c13
-rw-r--r--sys/fs/nullfs/null_vfsops.c30
-rw-r--r--sys/fs/pseudofs/pseudofs_vncache.c13
-rw-r--r--sys/fs/tmpfs/tmpfs.h289
-rw-r--r--sys/fs/tmpfs/tmpfs_fifoops.c28
-rw-r--r--sys/fs/tmpfs/tmpfs_subr.c220
-rw-r--r--sys/fs/tmpfs/tmpfs_vfsops.c110
-rw-r--r--sys/fs/tmpfs/tmpfs_vnops.c261
-rw-r--r--sys/fs/tmpfs/tmpfs_vnops.h1
-rw-r--r--sys/fs/unionfs/union_vfsops.c52
-rw-r--r--sys/geom/geom_disk.c12
-rw-r--r--sys/geom/geom_disk.h5
-rw-r--r--sys/geom/multipath/g_multipath.c8
-rw-r--r--sys/geom/vinum/geom_vinum_state.c2
-rw-r--r--sys/i386/i386/machdep.c3
-rw-r--r--sys/i386/i386/mem.c7
-rw-r--r--sys/i386/i386/pmap.c107
-rw-r--r--sys/i386/i386/vm_machdep.c6
-rw-r--r--sys/i386/ibcs2/ibcs2_stat.c25
-rw-r--r--sys/i386/include/cpufunc.h7
-rw-r--r--sys/i386/isa/npx.c10
-rw-r--r--sys/kern/imgact_elf.c41
-rw-r--r--sys/kern/init_sysent.c2
-rw-r--r--sys/kern/kern_acct.c13
-rw-r--r--sys/kern/kern_descrip.c12
-rw-r--r--sys/kern/kern_procctl.c2
-rw-r--r--sys/kern/kern_shutdown.c2
-rw-r--r--sys/kern/kern_switch.c15
-rw-r--r--sys/kern/sched_4bsd.c4
-rw-r--r--sys/kern/sched_ule.c4
-rw-r--r--sys/kern/subr_unit.c17
-rw-r--r--sys/kern/syscalls.c2
-rw-r--r--sys/kern/syscalls.master4
-rw-r--r--sys/kern/systrace_args.c2
-rw-r--r--sys/kern/uipc_mbuf.c2
-rw-r--r--sys/kern/uipc_mbuf2.c2
-rw-r--r--sys/kern/vfs_bio.c4
-rw-r--r--sys/kern/vfs_cache.c55
-rw-r--r--sys/kern/vfs_default.c15
-rw-r--r--sys/kern/vfs_mount.c1
-rw-r--r--sys/kern/vfs_subr.c30
-rw-r--r--sys/kern/vfs_syscalls.c133
-rw-r--r--sys/mips/mips/mem.c6
-rw-r--r--sys/modules/Makefile3
-rw-r--r--sys/modules/cam/Makefile1
-rw-r--r--sys/modules/i2c/controllers/ichiic/Makefile9
-rw-r--r--sys/modules/sdhci_acpi/Makefile9
-rw-r--r--sys/net/ieee8023ad_lacp.c7
-rw-r--r--sys/net/if_bridge.c6
-rw-r--r--sys/net/if_bridgevar.h1
-rw-r--r--sys/net/if_lagg.c2
-rw-r--r--sys/net/if_media.c1
-rw-r--r--sys/netgraph/ng_mppc.c7
-rw-r--r--sys/netinet/ip_input.c33
-rw-r--r--sys/netinet/tcp_hostcache.c5
-rw-r--r--sys/netinet/tcp_output.c4
-rw-r--r--sys/netinet6/in6_proto.c16
-rw-r--r--sys/netinet6/ip6_input.c95
-rw-r--r--sys/netipsec/ipsec.c17
-rw-r--r--sys/netpfil/ipfw/ip_fw_private.h2
-rw-r--r--sys/ofed/drivers/net/mlx4/main.c68
-rw-r--r--sys/powerpc/powerpc/mem.c2
-rw-r--r--sys/sparc64/sparc64/mem.c2
-rw-r--r--sys/sys/cdefs.h23
-rw-r--r--sys/sys/mount.h1
-rw-r--r--sys/sys/seq.h1
-rw-r--r--sys/sys/syscall.h2
-rw-r--r--sys/sys/syscall.mk2
-rw-r--r--sys/sys/syscallsubr.h2
-rw-r--r--sys/sys/sysproto.h6
-rw-r--r--sys/sys/unistd.h2
-rw-r--r--sys/tools/embed_mfs.sh6
-rw-r--r--sys/ufs/ffs/ffs_softdep.c8
-rw-r--r--sys/ufs/ufs/ufs_vnops.c4
-rw-r--r--sys/vm/device_pager.c21
-rw-r--r--sys/vm/phys_pager.c95
-rw-r--r--sys/vm/uma_core.c16
-rw-r--r--sys/vm/vm_domain.c3
-rw-r--r--sys/vm/vm_fault.c175
-rw-r--r--sys/vm/vm_map.c2
-rw-r--r--sys/vm/vm_object.h5
-rw-r--r--sys/vm/vm_pager.h19
-rw-r--r--sys/vm/vnode_pager.c2
-rw-r--r--sys/x86/acpica/acpi_wakeup.c87
-rw-r--r--sys/x86/x86/mca.c8
274 files changed, 6164 insertions, 3736 deletions
diff --git a/sys/amd64/amd64/mem.c b/sys/amd64/amd64/mem.c
index 7325664..c2e74ff 100644
--- a/sys/amd64/amd64/mem.c
+++ b/sys/amd64/amd64/mem.c
@@ -172,10 +172,9 @@ memmmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr,
if (offset > cpu_getmaxphyaddr())
return (-1);
*paddr = offset;
- } else if (dev2unit(dev) == CDEV_MINOR_KMEM)
- *paddr = vtophys(offset);
- /* else panic! */
- return (0);
+ return (0);
+ }
+ return (-1);
}
/*
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index ae525da..2d88398 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -1041,7 +1041,12 @@ pmap_bootstrap(vm_paddr_t *firstaddr)
virtual_avail = va;
- /* Initialize the PAT MSR. */
+ /*
+ * Initialize the PAT MSR.
+ * pmap_init_pat() clears and sets CR4_PGE, which, as a
+ * side-effect, invalidates stale PG_G TLB entries that might
+ * have been created in our pre-boot environment.
+ */
pmap_init_pat();
/* Initialize TLB Context Id. */
@@ -1863,16 +1868,16 @@ pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva, boolean_t force)
return;
/*
- * Otherwise, do per-cache line flush. Use the mfence
+ * Otherwise, do per-cache line flush. Use the sfence
* instruction to insure that previous stores are
* included in the write-back. The processor
* propagates flush to other processors in the cache
* coherence domain.
*/
- mfence();
+ sfence();
for (; sva < eva; sva += cpu_clflush_line_size)
clflushopt(sva);
- mfence();
+ sfence();
} else if ((cpu_feature & CPUID_CLFSH) != 0 &&
eva - sva < PMAP_CLFLUSH_THRESHOLD) {
if (pmap_kextract(sva) == lapic_paddr)
@@ -1916,7 +1921,9 @@ pmap_invalidate_cache_pages(vm_page_t *pages, int count)
((cpu_feature & CPUID_CLFSH) == 0 && !useclflushopt))
pmap_invalidate_cache();
else {
- if (useclflushopt || cpu_vendor_id != CPU_VENDOR_INTEL)
+ if (useclflushopt)
+ sfence();
+ else if (cpu_vendor_id != CPU_VENDOR_INTEL)
mfence();
for (i = 0; i < count; i++) {
daddr = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pages[i]));
@@ -1928,7 +1935,9 @@ pmap_invalidate_cache_pages(vm_page_t *pages, int count)
clflush(daddr);
}
}
- if (useclflushopt || cpu_vendor_id != CPU_VENDOR_INTEL)
+ if (useclflushopt)
+ sfence();
+ else if (cpu_vendor_id != CPU_VENDOR_INTEL)
mfence();
}
}
@@ -3437,6 +3446,7 @@ pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
vm_paddr_t mptepa;
vm_page_t mpte;
struct spglist free;
+ vm_offset_t sva;
int PG_PTE_CACHE;
PG_G = pmap_global_bit(pmap);
@@ -3475,9 +3485,9 @@ pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
DMAP_MAX_ADDRESS ? VM_ALLOC_INTERRUPT : VM_ALLOC_NORMAL) |
VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
SLIST_INIT(&free);
- pmap_remove_pde(pmap, pde, trunc_2mpage(va), &free,
- lockp);
- pmap_invalidate_page(pmap, trunc_2mpage(va));
+ sva = trunc_2mpage(va);
+ pmap_remove_pde(pmap, pde, sva, &free, lockp);
+ pmap_invalidate_range(pmap, sva, sva + NBPDR - 1);
pmap_free_zero_pages(&free);
CTR2(KTR_PMAP, "pmap_demote_pde: failure for va %#lx"
" in pmap %p", va, pmap);
@@ -3620,11 +3630,23 @@ pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
pmap->pm_stats.wired_count -= NBPDR / PAGE_SIZE;
/*
- * Machines that don't support invlpg, also don't support
- * PG_G.
+ * When workaround_erratum383 is false, a promotion to a 2M
+ * page mapping does not invalidate the 512 4K page mappings
+ * from the TLB. Consequently, at this point, the TLB may
+ * hold both 4K and 2M page mappings. Therefore, the entire
+ * range of addresses must be invalidated here. In contrast,
+ * when workaround_erratum383 is true, a promotion does
+ * invalidate the 512 4K page mappings, and so a single INVLPG
+ * suffices to invalidate the 2M page mapping.
*/
- if (oldpde & PG_G)
- pmap_invalidate_page(kernel_pmap, sva);
+ if ((oldpde & PG_G) != 0) {
+ if (workaround_erratum383)
+ pmap_invalidate_page(kernel_pmap, sva);
+ else
+ pmap_invalidate_range(kernel_pmap, sva,
+ sva + NBPDR - 1);
+ }
+
pmap_resident_count_dec(pmap, NBPDR / PAGE_SIZE);
if (oldpde & PG_MANAGED) {
CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, oldpde & PG_PS_FRAME);
@@ -4007,9 +4029,14 @@ retry:
if (newpde != oldpde) {
if (!atomic_cmpset_long(pde, oldpde, newpde))
goto retry;
- if (oldpde & PG_G)
- pmap_invalidate_page(pmap, sva);
- else
+ if (oldpde & PG_G) {
+ /* See pmap_remove_pde() for explanation. */
+ if (workaround_erratum383)
+ pmap_invalidate_page(kernel_pmap, sva);
+ else
+ pmap_invalidate_range(kernel_pmap, sva,
+ sva + NBPDR - 1);
+ } else
anychanged = TRUE;
}
return (anychanged);
@@ -6054,7 +6081,7 @@ pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
pdp_entry_t *pdpe;
pd_entry_t oldpde, *pde;
pt_entry_t *pte, PG_A, PG_G, PG_M, PG_RW, PG_V;
- vm_offset_t va_next;
+ vm_offset_t va, va_next;
vm_page_t m;
boolean_t anychanged;
@@ -6134,11 +6161,11 @@ pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
}
if (va_next > eva)
va_next = eva;
+ va = va_next;
for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
sva += PAGE_SIZE) {
- if ((*pte & (PG_MANAGED | PG_V)) != (PG_MANAGED |
- PG_V))
- continue;
+ if ((*pte & (PG_MANAGED | PG_V)) != (PG_MANAGED | PG_V))
+ goto maybe_invlrng;
else if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
if (advice == MADV_DONTNEED) {
/*
@@ -6153,12 +6180,22 @@ pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
} else if ((*pte & PG_A) != 0)
atomic_clear_long(pte, PG_A);
else
- continue;
- if ((*pte & PG_G) != 0)
- pmap_invalidate_page(pmap, sva);
- else
+ goto maybe_invlrng;
+
+ if ((*pte & PG_G) != 0) {
+ if (va == va_next)
+ va = sva;
+ } else
anychanged = TRUE;
+ continue;
+maybe_invlrng:
+ if (va != va_next) {
+ pmap_invalidate_range(pmap, va, sva);
+ va = va_next;
+ }
}
+ if (va != va_next)
+ pmap_invalidate_range(pmap, va, sva);
}
if (anychanged)
pmap_invalidate_all(pmap);
diff --git a/sys/amd64/include/cpufunc.h b/sys/amd64/include/cpufunc.h
index 4b7df46..5fa0d77 100644
--- a/sys/amd64/include/cpufunc.h
+++ b/sys/amd64/include/cpufunc.h
@@ -327,6 +327,13 @@ mfence(void)
}
static __inline void
+sfence(void)
+{
+
+ __asm __volatile("sfence" : : : "memory");
+}
+
+static __inline void
ia32_pause(void)
{
__asm __volatile("pause");
diff --git a/sys/amd64/linux/linux_sysvec.c b/sys/amd64/linux/linux_sysvec.c
index c9f8302..6e12d41 100644
--- a/sys/amd64/linux/linux_sysvec.c
+++ b/sys/amd64/linux/linux_sysvec.c
@@ -718,7 +718,7 @@ exec_linux_imgact_try(struct image_params *imgp)
{
const char *head = (const char *)imgp->image_header;
char *rpath;
- int error = -1, len;
+ int error = -1;
/*
* The interpreter for shell scripts run from a linux binary needs
@@ -736,17 +736,12 @@ exec_linux_imgact_try(struct image_params *imgp)
linux_emul_convpath(FIRST_THREAD_IN_PROC(imgp->proc),
imgp->interpreter_name, UIO_SYSSPACE,
&rpath, 0, AT_FDCWD);
- if (rpath != NULL) {
- len = strlen(rpath) + 1;
-
- if (len <= MAXSHELLCMDLEN)
- memcpy(imgp->interpreter_name,
- rpath, len);
- free(rpath, M_TEMP);
- }
+ if (rpath != NULL)
+ imgp->args->fname_buf =
+ imgp->interpreter_name = rpath;
}
}
- return(error);
+ return (error);
}
#define LINUX_VSYSCALL_START (-10UL << 20)
diff --git a/sys/amd64/vmm/vmm_dev.c b/sys/amd64/vmm/vmm_dev.c
index 5cb4150..53a8bdc 100644
--- a/sys/amd64/vmm/vmm_dev.c
+++ b/sys/amd64/vmm/vmm_dev.c
@@ -258,7 +258,7 @@ alloc_memseg(struct vmmdev_softc *sc, struct vm_memseg *mseg)
if (VM_MEMSEG_NAME(mseg)) {
sysmem = false;
name = malloc(SPECNAMELEN + 1, M_VMMDEV, M_WAITOK);
- error = copystr(VM_MEMSEG_NAME(mseg), name, SPECNAMELEN + 1, 0);
+ error = copystr(mseg->name, name, SPECNAMELEN + 1, 0);
if (error)
goto done;
}
diff --git a/sys/arm/arm/mem.c b/sys/arm/arm/mem.c
index 096988d..c2cf320 100644
--- a/sys/arm/arm/mem.c
+++ b/sys/arm/arm/mem.c
@@ -161,10 +161,9 @@ int
memmmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr,
int prot __unused, vm_memattr_t *memattr __unused)
{
- if (dev2unit(dev) == CDEV_MINOR_MEM)
+ if (dev2unit(dev) == CDEV_MINOR_MEM) {
*paddr = offset;
- else if (dev2unit(dev) == CDEV_MINOR_KMEM)
- *paddr = vtophys(offset);
- /* else panic! */
- return (0);
+ return (0);
+ }
+ return (-1);
}
diff --git a/sys/arm/ti/cpsw/if_cpsw.c b/sys/arm/ti/cpsw/if_cpsw.c
index 8a72aa1..3feeddd 100644
--- a/sys/arm/ti/cpsw/if_cpsw.c
+++ b/sys/arm/ti/cpsw/if_cpsw.c
@@ -784,8 +784,7 @@ cpsw_get_fdt_data(struct cpsw_softc *sc, int port)
static int
cpsw_attach(device_t dev)
{
- bus_dma_segment_t segs[1];
- int error, i, nsegs;
+ int error, i;
struct cpsw_softc *sc;
uint32_t reg;
@@ -860,15 +859,8 @@ cpsw_attach(device_t dev)
return (error);
}
- /* Allocate the null mbuf and pre-sync it. */
- sc->null_mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
- memset(sc->null_mbuf->m_data, 0, sc->null_mbuf->m_ext.ext_size);
- bus_dmamap_create(sc->mbuf_dtag, 0, &sc->null_mbuf_dmamap);
- bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, sc->null_mbuf_dmamap,
- sc->null_mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
- bus_dmamap_sync(sc->mbuf_dtag, sc->null_mbuf_dmamap,
- BUS_DMASYNC_PREWRITE);
- sc->null_mbuf_paddr = segs[0].ds_addr;
+ /* Allocate a NULL buffer for padding. */
+ sc->nullpad = malloc(ETHER_MIN_LEN, M_DEVBUF, M_WAITOK | M_ZERO);
cpsw_init_slots(sc);
@@ -947,13 +939,9 @@ cpsw_detach(device_t dev)
for (i = 0; i < nitems(sc->_slots); ++i)
cpsw_free_slot(sc, &sc->_slots[i]);
- /* Free null mbuf. */
- if (sc->null_mbuf_dmamap) {
- bus_dmamap_unload(sc->mbuf_dtag, sc->null_mbuf_dmamap);
- error = bus_dmamap_destroy(sc->mbuf_dtag, sc->null_mbuf_dmamap);
- KASSERT(error == 0, ("Mapping still active"));
- m_freem(sc->null_mbuf);
- }
+ /* Free null padding buffer. */
+ if (sc->nullpad)
+ free(sc->nullpad, M_DEVBUF);
/* Free DMA tag */
if (sc->mbuf_dtag) {
@@ -1396,6 +1384,16 @@ cpswp_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
ifr = (struct ifreq *)data;
switch (command) {
+ case SIOCSIFCAP:
+ changed = ifp->if_capenable ^ ifr->ifr_reqcap;
+ if (changed & IFCAP_HWCSUM) {
+ if ((ifr->ifr_reqcap & changed) & IFCAP_HWCSUM)
+ ifp->if_capenable |= IFCAP_HWCSUM;
+ else
+ ifp->if_capenable &= ~IFCAP_HWCSUM;
+ }
+ error = 0;
+ break;
case SIOCSIFFLAGS:
CPSW_PORT_LOCK(sc);
if (ifp->if_flags & IFF_UP) {
@@ -1585,14 +1583,19 @@ cpsw_intr_rx(void *arg)
static struct mbuf *
cpsw_rx_dequeue(struct cpsw_softc *sc)
{
+ int nsegs, port, removed;
struct cpsw_cpdma_bd bd;
struct cpsw_slot *last, *slot;
struct cpswp_softc *psc;
- struct mbuf *mb_head, *mb_tail;
- int port, removed = 0;
+ struct mbuf *m, *m0, *mb_head, *mb_tail;
+ uint16_t m0_flags;
+ nsegs = 0;
+ m0 = NULL;
last = NULL;
- mb_head = mb_tail = NULL;
+ mb_head = NULL;
+ mb_tail = NULL;
+ removed = 0;
/* Pull completed packets off hardware RX queue. */
while ((slot = STAILQ_FIRST(&sc->rx.active)) != NULL) {
@@ -1615,10 +1618,12 @@ cpsw_rx_dequeue(struct cpsw_softc *sc)
bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
+ m = slot->mbuf;
+ slot->mbuf = NULL;
+
if (bd.flags & CPDMA_BD_TDOWNCMPLT) {
CPSW_DEBUGF(sc, ("RX teardown is complete"));
- m_freem(slot->mbuf);
- slot->mbuf = NULL;
+ m_freem(m);
sc->rx.running = 0;
sc->rx.teardown = 0;
break;
@@ -1630,34 +1635,63 @@ cpsw_rx_dequeue(struct cpsw_softc *sc)
psc = device_get_softc(sc->port[port].dev);
/* Set up mbuf */
- /* TODO: track SOP/EOP bits to assemble a full mbuf
- out of received fragments. */
- slot->mbuf->m_data += bd.bufoff;
- slot->mbuf->m_len = bd.pktlen - 4;
- slot->mbuf->m_pkthdr.len = bd.pktlen - 4;
- slot->mbuf->m_flags |= M_PKTHDR;
- slot->mbuf->m_pkthdr.rcvif = psc->ifp;
- slot->mbuf->m_nextpkt = NULL;
+ m->m_data += bd.bufoff;
+ m->m_len = bd.buflen;
+ if (bd.flags & CPDMA_BD_SOP) {
+ m->m_pkthdr.len = bd.pktlen;
+ m->m_pkthdr.rcvif = psc->ifp;
+ m->m_flags |= M_PKTHDR;
+ m0_flags = bd.flags;
+ m0 = m;
+ }
+ nsegs++;
+ m->m_next = NULL;
+ m->m_nextpkt = NULL;
+ if (bd.flags & CPDMA_BD_EOP && m0 != NULL) {
+ if (m0_flags & CPDMA_BD_PASS_CRC)
+ m_adj(m0, -ETHER_CRC_LEN);
+ m0_flags = 0;
+ m0 = NULL;
+ if (nsegs > sc->rx.longest_chain)
+ sc->rx.longest_chain = nsegs;
+ nsegs = 0;
+ }
if ((psc->ifp->if_capenable & IFCAP_RXCSUM) != 0) {
/* check for valid CRC by looking into pkt_err[5:4] */
- if ((bd.flags & CPDMA_BD_PKT_ERR_MASK) == 0) {
- slot->mbuf->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
- slot->mbuf->m_pkthdr.csum_flags |= CSUM_IP_VALID;
- slot->mbuf->m_pkthdr.csum_data = 0xffff;
+ if ((bd.flags &
+ (CPDMA_BD_SOP | CPDMA_BD_PKT_ERR_MASK)) ==
+ CPDMA_BD_SOP) {
+ m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
+ m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
+ m->m_pkthdr.csum_data = 0xffff;
}
}
+ if (STAILQ_FIRST(&sc->rx.active) != NULL &&
+ (bd.flags & (CPDMA_BD_EOP | CPDMA_BD_EOQ)) ==
+ (CPDMA_BD_EOP | CPDMA_BD_EOQ)) {
+ cpsw_write_hdp_slot(sc, &sc->rx,
+ STAILQ_FIRST(&sc->rx.active));
+ sc->rx.queue_restart++;
+ }
+
/* Add mbuf to packet list to be returned. */
- if (mb_tail) {
- mb_tail->m_nextpkt = slot->mbuf;
+ if (mb_tail != NULL && (bd.flags & CPDMA_BD_SOP)) {
+ mb_tail->m_nextpkt = m;
+ } else if (mb_tail != NULL) {
+ mb_tail->m_next = m;
+ } else if (mb_tail == NULL && (bd.flags & CPDMA_BD_SOP) == 0) {
+ if (bootverbose)
+ printf(
"%s: %s: discarding fragment packet w/o header\n",
+ __func__, psc->ifp->if_xname);
+ m_freem(m);
+ continue;
} else {
- mb_head = slot->mbuf;
+ mb_head = m;
}
- mb_tail = slot->mbuf;
- slot->mbuf = NULL;
- if (sc->rx_batch > 0 && sc->rx_batch == removed)
- break;
+ mb_tail = m;
}
if (removed != 0) {
@@ -1680,7 +1714,6 @@ cpsw_rx_enqueue(struct cpsw_softc *sc)
struct cpsw_cpdma_bd bd;
struct cpsw_slot *first_new_slot, *last_old_slot, *next, *slot;
int error, nsegs, added = 0;
- uint32_t flags;
/* Register new mbufs with hardware. */
first_new_slot = NULL;
@@ -1746,22 +1779,13 @@ cpsw_rx_enqueue(struct cpsw_softc *sc)
} else {
/* Add buffers to end of current queue. */
cpsw_cpdma_write_bd_next(sc, last_old_slot, first_new_slot);
- /* If underrun, restart queue. */
- if ((flags = cpsw_cpdma_read_bd_flags(sc, last_old_slot)) &
- CPDMA_BD_EOQ) {
- flags &= ~CPDMA_BD_EOQ;
- cpsw_cpdma_write_bd_flags(sc, last_old_slot, flags);
- cpsw_write_hdp_slot(sc, &sc->rx, first_new_slot);
- sc->rx.queue_restart++;
- }
}
sc->rx.queue_adds += added;
sc->rx.avail_queue_len -= added;
sc->rx.active_queue_len += added;
cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), added);
- if (sc->rx.active_queue_len > sc->rx.max_active_queue_len) {
+ if (sc->rx.active_queue_len > sc->rx.max_active_queue_len)
sc->rx.max_active_queue_len = sc->rx.active_queue_len;
- }
}
static void
@@ -1801,13 +1825,8 @@ cpswp_tx_enqueue(struct cpswp_softc *sc)
struct cpsw_cpdma_bd bd;
struct cpsw_slot *first_new_slot, *last, *last_old_slot, *next, *slot;
struct mbuf *m0;
- int error, flags, nsegs, seg, added = 0, padlen;
+ int error, nsegs, seg, added = 0, padlen;
- flags = 0;
- if (sc->swsc->dualemac) {
- flags = CPDMA_BD_TO_PORT |
- ((sc->unit + 1) & CPDMA_BD_PORT_MASK);
- }
/* Pull pending packets from IF queue and prep them for DMA. */
last = NULL;
first_new_slot = NULL;
@@ -1818,21 +1837,19 @@ cpswp_tx_enqueue(struct cpswp_softc *sc)
break;
slot->mbuf = m0;
- padlen = ETHER_MIN_LEN - slot->mbuf->m_pkthdr.len;
+ padlen = ETHER_MIN_LEN - ETHER_CRC_LEN - m0->m_pkthdr.len;
if (padlen < 0)
padlen = 0;
+ else if (padlen > 0)
+ m_append(slot->mbuf, padlen, sc->swsc->nullpad);
/* Create mapping in DMA memory */
error = bus_dmamap_load_mbuf_sg(sc->swsc->mbuf_dtag,
slot->dmamap, slot->mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
/* If the packet is too fragmented, try to simplify. */
if (error == EFBIG ||
- (error == 0 &&
- nsegs + (padlen > 0 ? 1 : 0) > sc->swsc->tx.avail_queue_len)) {
+ (error == 0 && nsegs > sc->swsc->tx.avail_queue_len)) {
bus_dmamap_unload(sc->swsc->mbuf_dtag, slot->dmamap);
- if (padlen > 0) /* May as well add padding. */
- m_append(slot->mbuf, padlen,
- sc->swsc->null_mbuf->m_data);
m0 = m_defrag(slot->mbuf, M_NOWAIT);
if (m0 == NULL) {
device_printf(sc->dev,
@@ -1884,8 +1901,12 @@ cpswp_tx_enqueue(struct cpswp_softc *sc)
bd.bufptr = segs[0].ds_addr;
bd.bufoff = 0;
bd.buflen = segs[0].ds_len;
- bd.pktlen = m_length(slot->mbuf, NULL) + padlen;
- bd.flags = CPDMA_BD_SOP | CPDMA_BD_OWNER | flags;
+ bd.pktlen = m_length(slot->mbuf, NULL);
+ bd.flags = CPDMA_BD_SOP | CPDMA_BD_OWNER;
+ if (sc->swsc->dualemac) {
+ bd.flags |= CPDMA_BD_TO_PORT;
+ bd.flags |= ((sc->unit + 1) & CPDMA_BD_PORT_MASK);
+ }
for (seg = 1; seg < nsegs; ++seg) {
/* Save the previous buffer (which isn't EOP) */
cpsw_cpdma_write_bd(sc->swsc, slot, &bd);
@@ -1903,44 +1924,20 @@ cpswp_tx_enqueue(struct cpswp_softc *sc)
bd.bufoff = 0;
bd.buflen = segs[seg].ds_len;
bd.pktlen = 0;
- bd.flags = CPDMA_BD_OWNER | flags;
+ bd.flags = CPDMA_BD_OWNER;
}
+
/* Save the final buffer. */
- if (padlen <= 0)
- bd.flags |= CPDMA_BD_EOP;
- else {
- next = STAILQ_NEXT(slot, next);
- bd.next = cpsw_cpdma_bd_paddr(sc->swsc, next);
- }
+ bd.flags |= CPDMA_BD_EOP;
cpsw_cpdma_write_bd(sc->swsc, slot, &bd);
STAILQ_REMOVE_HEAD(&sc->swsc->tx.avail, next);
STAILQ_INSERT_TAIL(&sc->swsc->tx.active, slot, next);
- if (padlen > 0) {
- slot = STAILQ_FIRST(&sc->swsc->tx.avail);
-
- /* Setup buffer of null pad bytes (definitely EOP). */
- bd.next = 0;
- bd.bufptr = sc->swsc->null_mbuf_paddr;
- bd.bufoff = 0;
- bd.buflen = padlen;
- bd.pktlen = 0;
- bd.flags = CPDMA_BD_EOP | CPDMA_BD_OWNER | flags;
- cpsw_cpdma_write_bd(sc->swsc, slot, &bd);
- ++nsegs;
-
- STAILQ_REMOVE_HEAD(&sc->swsc->tx.avail, next);
- STAILQ_INSERT_TAIL(&sc->swsc->tx.active, slot, next);
- }
-
last = slot;
-
added += nsegs;
if (nsegs > sc->swsc->tx.longest_chain)
sc->swsc->tx.longest_chain = nsegs;
- // TODO: Should we defer the BPF tap until
- // after all packets are queued?
BPF_MTAP(sc->ifp, m0);
}
@@ -1985,7 +1982,8 @@ cpsw_tx_dequeue(struct cpsw_softc *sc)
sc->tx.teardown = 1;
}
- if ((flags & CPDMA_BD_OWNER) != 0 && sc->tx.teardown == 0)
+ if ((flags & (CPDMA_BD_SOP | CPDMA_BD_OWNER)) ==
+ (CPDMA_BD_SOP | CPDMA_BD_OWNER) && sc->tx.teardown == 0)
break; /* Hardware is still using this packet. */
bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_POSTWRITE);
@@ -2711,9 +2709,6 @@ cpsw_add_sysctls(struct cpsw_softc *sc)
SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "debug",
CTLFLAG_RW, &sc->debug, 0, "Enable switch debug messages");
- SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "rx_batch",
- CTLFLAG_RW, &sc->rx_batch, 0, "Set the rx batch size");
-
SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "attachedSecs",
CTLTYPE_UINT | CTLFLAG_RD, sc, 0, cpsw_stat_attached, "IU",
"Time since driver attach");
diff --git a/sys/arm/ti/cpsw/if_cpswreg.h b/sys/arm/ti/cpsw/if_cpswreg.h
index 6d6a647..c0ee358 100644
--- a/sys/arm/ti/cpsw/if_cpswreg.h
+++ b/sys/arm/ti/cpsw/if_cpswreg.h
@@ -191,6 +191,7 @@
#define CPDMA_BD_OWNER (1 << 13)
#define CPDMA_BD_EOQ (1 << 12)
#define CPDMA_BD_TDOWNCMPLT (1 << 11)
+#define CPDMA_BD_PASS_CRC (1 << 10)
#define CPDMA_BD_PKT_ERR_MASK (3 << 4)
#define CPDMA_BD_TO_PORT (1 << 4)
#define CPDMA_BD_PORT_MASK 3
diff --git a/sys/arm/ti/cpsw/if_cpswvar.h b/sys/arm/ti/cpsw/if_cpswvar.h
index f037dd5..003af22 100644
--- a/sys/arm/ti/cpsw/if_cpswvar.h
+++ b/sys/arm/ti/cpsw/if_cpswvar.h
@@ -89,7 +89,6 @@ struct cpsw_softc {
int active_slave;
int debug;
int dualemac;
- int rx_batch;
phandle_t node;
struct bintime attach_uptime; /* system uptime when attach happened. */
struct cpsw_port port[2];
@@ -104,10 +103,8 @@ struct cpsw_softc {
struct resource *irq_res[CPSW_INTR_COUNT];
void *ih_cookie[CPSW_INTR_COUNT];
- /* An mbuf full of nulls for TX padding. */
- bus_dmamap_t null_mbuf_dmamap;
- struct mbuf *null_mbuf;
- bus_addr_t null_mbuf_paddr;
+ /* A buffer full of nulls for TX padding. */
+ void *nullpad;
bus_dma_tag_t mbuf_dtag;
diff --git a/sys/arm64/arm64/mem.c b/sys/arm64/arm64/mem.c
index 9044fb3..e0ea488 100644
--- a/sys/arm64/arm64/mem.c
+++ b/sys/arm64/arm64/mem.c
@@ -123,10 +123,9 @@ int
memmmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr,
int prot __unused, vm_memattr_t *memattr __unused)
{
- if (dev2unit(dev) == CDEV_MINOR_MEM)
+ if (dev2unit(dev) == CDEV_MINOR_MEM) {
*paddr = offset;
- else if (dev2unit(dev) == CDEV_MINOR_KMEM)
- *paddr = vtophys(offset);
- /* else panic! */
- return (0);
+ return (0);
+ }
+ return (-1);
}
diff --git a/sys/boot/arm/uboot/Makefile b/sys/boot/arm/uboot/Makefile
index 8b4b8ca..4bf4068 100644
--- a/sys/boot/arm/uboot/Makefile
+++ b/sys/boot/arm/uboot/Makefile
@@ -90,7 +90,7 @@ LIBFICL= ${.OBJDIR}/../../ficl/libficl.a
CFLAGS+= -I${.CURDIR}/../../common
CFLAGS+= -I.
-CLEANFILES+= vers.c loader.help
+CLEANFILES+= loader.help
CFLAGS+= -ffreestanding -msoft-float
@@ -109,6 +109,8 @@ CFLAGS+= -I${.OBJDIR}/../../uboot/lib
# where to get libstand from
CFLAGS+= -I${.CURDIR}/../../../../lib/libstand/
+CFLAGS+= -fPIC
+
# clang doesn't understand %D as a specifier to printf
NO_WERROR.clang=
@@ -117,9 +119,6 @@ LDADD= ${LIBFICL} ${LIBUBOOT} ${LIBFDT} ${LIBUBOOT_FDT} -lstand
OBJS+= ${SRCS:N*.h:R:S/$/.o/g}
-vers.c: ${.CURDIR}/../../common/newvers.sh ${.CURDIR}/version
- sh ${.CURDIR}/../../common/newvers.sh ${.CURDIR}/version ${NEWVERSWHAT}
-
loader.help: help.common help.uboot ${.CURDIR}/../../fdt/help.fdt
cat ${.ALLSRC} | \
awk -f ${.CURDIR}/../../common/merge_help.awk > ${.TARGET}
diff --git a/sys/boot/common/Makefile.inc b/sys/boot/common/Makefile.inc
index 480279d..eb17549 100644
--- a/sys/boot/common/Makefile.inc
+++ b/sys/boot/common/Makefile.inc
@@ -70,3 +70,12 @@ CFLAGS+= -DBOOT_PROMPT_123
SRCS+= install.c
CFLAGS+=-I${.CURDIR}/../../../../lib/libstand
.endif
+
+CLEANFILES+= vers.c
+VERSION_FILE?= ${.CURDIR}/version
+.if ${MK_REPRODUCIBLE_BUILD} != no
+REPRO_FLAG= -r
+.endif
+vers.c: ${SRCTOP}/sys/boot/common/newvers.sh ${VERSION_FILE}
+ sh ${SRCTOP}/sys/boot/common/newvers.sh ${REPRO_FLAG} ${VERSION_FILE} \
+ ${NEWVERSWHAT}
diff --git a/sys/boot/common/bcache.c b/sys/boot/common/bcache.c
index 4bb9082..3023db0 100644
--- a/sys/boot/common/bcache.c
+++ b/sys/boot/common/bcache.c
@@ -182,8 +182,8 @@ bcache_free(void *cache)
* cache with the new values.
*/
static int
-write_strategy(void *devdata, int rw, daddr_t blk, size_t offset,
- size_t size, char *buf, size_t *rsize)
+write_strategy(void *devdata, int rw, daddr_t blk, size_t size,
+ char *buf, size_t *rsize)
{
struct bcache_devdata *dd = (struct bcache_devdata *)devdata;
struct bcache *bc = dd->dv_cache;
@@ -197,7 +197,7 @@ write_strategy(void *devdata, int rw, daddr_t blk, size_t offset,
}
/* Write the blocks */
- return (dd->dv_strategy(dd->dv_devdata, rw, blk, offset, size, buf, rsize));
+ return (dd->dv_strategy(dd->dv_devdata, rw, blk, size, buf, rsize));
}
/*
@@ -206,8 +206,8 @@ write_strategy(void *devdata, int rw, daddr_t blk, size_t offset,
* device I/O and then use the I/O results to populate the cache.
*/
static int
-read_strategy(void *devdata, int rw, daddr_t blk, size_t offset,
- size_t size, char *buf, size_t *rsize)
+read_strategy(void *devdata, int rw, daddr_t blk, size_t size,
+ char *buf, size_t *rsize)
{
struct bcache_devdata *dd = (struct bcache_devdata *)devdata;
struct bcache *bc = dd->dv_cache;
@@ -225,7 +225,7 @@ read_strategy(void *devdata, int rw, daddr_t blk, size_t offset,
*rsize = 0;
nblk = size / bcache_blksize;
- if ((nblk == 0 && size != 0) || offset != 0)
+ if (nblk == 0 && size != 0)
nblk++;
result = 0;
complete = 1;
@@ -246,8 +246,7 @@ read_strategy(void *devdata, int rw, daddr_t blk, size_t offset,
if (complete) { /* whole set was in cache, return it */
if (bc->ra < BCACHE_READAHEAD)
bc->ra <<= 1; /* increase read ahead */
- bcopy(bc->bcache_data + (bcache_blksize * BHASH(bc, blk)) + offset,
- buf, size);
+ bcopy(bc->bcache_data + (bcache_blksize * BHASH(bc, blk)), buf, size);
goto done;
}
@@ -282,7 +281,7 @@ read_strategy(void *devdata, int rw, daddr_t blk, size_t offset,
* in either case we should return the data in bcache and only
* return error if there is no data.
*/
- result = dd->dv_strategy(dd->dv_devdata, rw, p_blk, 0,
+ result = dd->dv_strategy(dd->dv_devdata, rw, p_blk,
p_size * bcache_blksize, p_buf, &r_size);
r_size /= bcache_blksize;
@@ -305,8 +304,7 @@ read_strategy(void *devdata, int rw, daddr_t blk, size_t offset,
size = i * bcache_blksize;
if (size != 0) {
- bcopy(bc->bcache_data + (bcache_blksize * BHASH(bc, blk)) + offset,
- buf, size);
+ bcopy(bc->bcache_data + (bcache_blksize * BHASH(bc, blk)), buf, size);
result = 0;
}
@@ -321,8 +319,8 @@ read_strategy(void *devdata, int rw, daddr_t blk, size_t offset,
* directly to the disk. XXX tune this.
*/
int
-bcache_strategy(void *devdata, int rw, daddr_t blk, size_t offset,
- size_t size, char *buf, size_t *rsize)
+bcache_strategy(void *devdata, int rw, daddr_t blk, size_t size,
+ char *buf, size_t *rsize)
{
struct bcache_devdata *dd = (struct bcache_devdata *)devdata;
struct bcache *bc = dd->dv_cache;
@@ -337,23 +335,16 @@ bcache_strategy(void *devdata, int rw, daddr_t blk, size_t offset,
/* bypass large requests, or when the cache is inactive */
if (bc == NULL ||
- (offset == 0 && ((size * 2 / bcache_blksize) > bcache_nblks))) {
+ ((size * 2 / bcache_blksize) > bcache_nblks)) {
DEBUG("bypass %d from %d", size / bcache_blksize, blk);
bcache_bypasses++;
- return (dd->dv_strategy(dd->dv_devdata, rw, blk, offset, size, buf,
- rsize));
- }
-
- /* normalize offset */
- while (offset >= bcache_blksize) {
- blk++;
- offset -= bcache_blksize;
+ return (dd->dv_strategy(dd->dv_devdata, rw, blk, size, buf, rsize));
}
switch (rw) {
case F_READ:
nblk = size / bcache_blksize;
- if (offset || (size != 0 && nblk == 0))
+ if (size != 0 && nblk == 0)
nblk++; /* read at least one block */
ret = 0;
@@ -364,14 +355,10 @@ bcache_strategy(void *devdata, int rw, daddr_t blk, size_t offset,
if (size <= bcache_blksize)
csize = size;
- else {
+ else
csize = cblk * bcache_blksize;
- if (offset)
- csize -= (bcache_blksize - offset);
- }
- ret = read_strategy(devdata, rw, blk, offset,
- csize, buf+total, &isize);
+ ret = read_strategy(devdata, rw, blk, csize, buf+total, &isize);
/*
* we may have error from read ahead, if we have read some data
@@ -382,8 +369,7 @@ bcache_strategy(void *devdata, int rw, daddr_t blk, size_t offset,
ret = 0;
break;
}
- blk += (offset+isize) / bcache_blksize;
- offset = 0;
+ blk += isize / bcache_blksize;
total += isize;
size -= isize;
nblk = size / bcache_blksize;
@@ -394,7 +380,7 @@ bcache_strategy(void *devdata, int rw, daddr_t blk, size_t offset,
return (ret);
case F_WRITE:
- return write_strategy(devdata, rw, blk, offset, size, buf, rsize);
+ return write_strategy(devdata, rw, blk, size, buf, rsize);
}
return -1;
}
diff --git a/sys/boot/common/bootstrap.h b/sys/boot/common/bootstrap.h
index e15fc6a..472fc3e 100644
--- a/sys/boot/common/bootstrap.h
+++ b/sys/boot/common/bootstrap.h
@@ -76,8 +76,8 @@ void bcache_init(u_int nblks, size_t bsize);
void bcache_add_dev(int);
void *bcache_allocate(void);
void bcache_free(void *);
-int bcache_strategy(void *devdata, int rw, daddr_t blk, size_t offset,
- size_t size, char *buf, size_t *rsize);
+int bcache_strategy(void *devdata, int rw, daddr_t blk, size_t size,
+ char *buf, size_t *rsize);
/*
* Disk block cache
@@ -85,7 +85,7 @@ int bcache_strategy(void *devdata, int rw, daddr_t blk, size_t offset,
struct bcache_devdata
{
int (*dv_strategy)(void *devdata, int rw, daddr_t blk,
- size_t offset, size_t size, char *buf, size_t *rsize);
+ size_t size, char *buf, size_t *rsize);
void *dv_devdata;
void *dv_cache;
};
diff --git a/sys/boot/common/disk.c b/sys/boot/common/disk.c
index 54626f7..feb0b9d 100644
--- a/sys/boot/common/disk.c
+++ b/sys/boot/common/disk.c
@@ -178,7 +178,7 @@ ptblread(void *d, void *buf, size_t blocks, off_t offset)
dev = (struct disk_devdesc *)d;
od = (struct open_disk *)dev->d_opendata;
- return (dev->d_dev->dv_strategy(dev, F_READ, offset, 0,
+ return (dev->d_dev->dv_strategy(dev, F_READ, offset,
blocks * od->sectorsize, (char *)buf, NULL));
}
@@ -244,7 +244,7 @@ disk_read(struct disk_devdesc *dev, void *buf, off_t offset, u_int blocks)
int ret;
od = (struct open_disk *)dev->d_opendata;
- ret = dev->d_dev->dv_strategy(dev, F_READ, dev->d_offset + offset, 0,
+ ret = dev->d_dev->dv_strategy(dev, F_READ, dev->d_offset + offset,
blocks * od->sectorsize, buf, NULL);
return (ret);
@@ -257,7 +257,7 @@ disk_write(struct disk_devdesc *dev, void *buf, off_t offset, u_int blocks)
int ret;
od = (struct open_disk *)dev->d_opendata;
- ret = dev->d_dev->dv_strategy(dev, F_WRITE, dev->d_offset + offset, 0,
+ ret = dev->d_dev->dv_strategy(dev, F_WRITE, dev->d_offset + offset,
blocks * od->sectorsize, buf, NULL);
return (ret);
diff --git a/sys/boot/common/interp_forth.c b/sys/boot/common/interp_forth.c
index aedac6a..c5763b6 100644
--- a/sys/boot/common/interp_forth.c
+++ b/sys/boot/common/interp_forth.c
@@ -33,7 +33,7 @@ __FBSDID("$FreeBSD$");
#include "bootstrap.h"
#include "ficl.h"
-extern char bootprog_rev[];
+extern unsigned bootprog_rev;
/* #define BFORTH_DEBUG */
@@ -278,8 +278,7 @@ bf_init(const char *rc)
/* Export some version numbers so that code can detect the loader/host version */
ficlSetEnv(bf_sys, "FreeBSD_version", __FreeBSD_version);
- ficlSetEnv(bf_sys, "loader_version",
- (bootprog_rev[0] - '0') * 10 + (bootprog_rev[2] - '0'));
+ ficlSetEnv(bf_sys, "loader_version", bootprog_rev);
pInterp = ficlLookup(bf_sys, "interpret");
diff --git a/sys/boot/common/md.c b/sys/boot/common/md.c
index e5e8a48..2554b04 100644
--- a/sys/boot/common/md.c
+++ b/sys/boot/common/md.c
@@ -60,7 +60,7 @@ static struct {
/* devsw I/F */
static int md_init(void);
-static int md_strategy(void *, int, daddr_t, size_t, size_t, char *, size_t *);
+static int md_strategy(void *, int, daddr_t, size_t, char *, size_t *);
static int md_open(struct open_file *, ...);
static int md_close(struct open_file *);
static void md_print(int);
@@ -84,7 +84,7 @@ md_init(void)
}
static int
-md_strategy(void *devdata, int rw, daddr_t blk, size_t offset, size_t size,
+md_strategy(void *devdata, int rw, daddr_t blk, size_t size,
char *buf, size_t *rsize)
{
struct devdesc *dev = (struct devdesc *)devdata;
diff --git a/sys/boot/common/newvers.sh b/sys/boot/common/newvers.sh
index ee2ac99..9547c85 100755
--- a/sys/boot/common/newvers.sh
+++ b/sys/boot/common/newvers.sh
@@ -35,13 +35,26 @@
tempfile=$(mktemp tmp.XXXXXX) || exit
trap "rm -f $tempfile" EXIT INT TERM
+include_metadata=true
+while getopts r opt; do
+ case "$opt" in
+ r)
+ include_metadata=
+ ;;
+ esac
+done
+shift $((OPTIND - 1))
+
LC_ALL=C; export LC_ALL
u=${USER-root} h=${HOSTNAME-`hostname`} t=`date`
#r=`head -n 6 $1 | tail -n 1 | awk -F: ' { print $1 } '`
r=`awk -F: ' /^[0-9]\.[0-9]+:/ { print $1; exit }' $1`
-echo "char bootprog_name[] = \"FreeBSD/${3} ${2}\";" > $tempfile
-echo "char bootprog_rev[] = \"${r}\";" >> $tempfile
-echo "char bootprog_date[] = \"${t}\";" >> $tempfile
-echo "char bootprog_maker[] = \"${u}@${h}\";" >> $tempfile
+bootprog_info="FreeBSD/${3} ${2}, Revision ${r}\\n"
+if [ -n "${include_metadata}" ]; then
+ bootprog_info="$bootprog_info(${t} ${u}@${h})\\n"
+fi
+
+echo "char bootprog_info[] = \"$bootprog_info\";" > $tempfile
+echo "unsigned bootprog_rev = ${r%%.*}${r##*.};" >> $tempfile
mv $tempfile vers.c
diff --git a/sys/boot/common/reloc_elf.c b/sys/boot/common/reloc_elf.c
index 2b60d18..6d4a00f 100644
--- a/sys/boot/common/reloc_elf.c
+++ b/sys/boot/common/reloc_elf.c
@@ -33,7 +33,6 @@ __FBSDID("$FreeBSD$");
#include <sys/types.h>
#include <machine/elf.h>
-#include <errno.h>
#include <stand.h>
#define FREEBSD_ELF
diff --git a/sys/boot/efi/Makefile.inc b/sys/boot/efi/Makefile.inc
index 9d457f4..15b1f3b 100644
--- a/sys/boot/efi/Makefile.inc
+++ b/sys/boot/efi/Makefile.inc
@@ -22,4 +22,8 @@ CFLAGS+= -mno-aes
CFLAGS+= -fshort-wchar
.endif
+.if ${MACHINE_CPUARCH} == "arm"
+CFLAGS+= -fPIC
+.endif
+
.include "../Makefile.inc"
diff --git a/sys/boot/efi/boot1/boot_module.h b/sys/boot/efi/boot1/boot_module.h
index 296d5a6..3a6b827 100644
--- a/sys/boot/efi/boot1/boot_module.h
+++ b/sys/boot/efi/boot1/boot_module.h
@@ -64,7 +64,7 @@ typedef struct boot_module_t
const char *name;
/* init is the optional initialiser for the module. */
- void (*init)();
+ void (*init)(void);
/*
* probe checks to see if the module can handle dev.
@@ -89,10 +89,10 @@ typedef struct boot_module_t
void **buf, size_t *bufsize);
/* status outputs information about the probed devices. */
- void (*status)();
+ void (*status)(void);
/* valid devices as found by probe. */
- dev_info_t *(*devices)();
+ dev_info_t *(*devices)(void);
} boot_module_t;
/* Standard boot modules. */
diff --git a/sys/boot/efi/include/efiapi.h b/sys/boot/efi/include/efiapi.h
index b1a7b45..92eb513 100644
--- a/sys/boot/efi/include/efiapi.h
+++ b/sys/boot/efi/include/efiapi.h
@@ -532,6 +532,7 @@ EFI_STATUS
typedef
EFI_STATUS
(EFIAPI *EFI_RESERVED_SERVICE) (
+ VOID
);
typedef
diff --git a/sys/boot/efi/libefi/efipart.c b/sys/boot/efi/libefi/efipart.c
index aadacf0..f04f095 100644
--- a/sys/boot/efi/libefi/efipart.c
+++ b/sys/boot/efi/libefi/efipart.c
@@ -41,10 +41,8 @@ __FBSDID("$FreeBSD$");
static EFI_GUID blkio_guid = BLOCK_IO_PROTOCOL;
static int efipart_init(void);
-static int efipart_strategy(void *, int, daddr_t, size_t, size_t, char *,
- size_t *);
-static int efipart_realstrategy(void *, int, daddr_t, size_t, size_t, char *,
- size_t *);
+static int efipart_strategy(void *, int, daddr_t, size_t, char *, size_t *);
+static int efipart_realstrategy(void *, int, daddr_t, size_t, char *, size_t *);
static int efipart_open(struct open_file *, ...);
static int efipart_close(struct open_file *);
static void efipart_print(int);
@@ -284,8 +282,8 @@ efipart_readwrite(EFI_BLOCK_IO *blkio, int rw, daddr_t blk, daddr_t nblks,
}
static int
-efipart_strategy(void *devdata, int rw, daddr_t blk, size_t offset,
- size_t size, char *buf, size_t *rsize)
+efipart_strategy(void *devdata, int rw, daddr_t blk, size_t size,
+ char *buf, size_t *rsize)
{
struct bcache_devdata bcd;
struct devdesc *dev;
@@ -294,13 +292,12 @@ efipart_strategy(void *devdata, int rw, daddr_t blk, size_t offset,
bcd.dv_strategy = efipart_realstrategy;
bcd.dv_devdata = devdata;
bcd.dv_cache = PD(dev).pd_bcache;
- return (bcache_strategy(&bcd, rw, blk, offset, size,
- buf, rsize));
+ return (bcache_strategy(&bcd, rw, blk, size, buf, rsize));
}
static int
-efipart_realstrategy(void *devdata, int rw, daddr_t blk, size_t offset,
- size_t size, char *buf, size_t *rsize)
+efipart_realstrategy(void *devdata, int rw, daddr_t blk, size_t size,
+ char *buf, size_t *rsize)
{
struct devdesc *dev = (struct devdesc *)devdata;
EFI_BLOCK_IO *blkio;
diff --git a/sys/boot/efi/loader/Makefile b/sys/boot/efi/loader/Makefile
index b36bfc1..ffa003e 100644
--- a/sys/boot/efi/loader/Makefile
+++ b/sys/boot/efi/loader/Makefile
@@ -110,13 +110,10 @@ FILESMODE_loader.efi= ${BINMODE}
LDSCRIPT= ${.CURDIR}/arch/${MACHINE}/ldscript.${MACHINE}
LDFLAGS+= -Wl,-T${LDSCRIPT} -Wl,-Bsymbolic -shared
-CLEANFILES+= vers.c loader.efi
+CLEANFILES+= loader.efi
NEWVERSWHAT= "EFI loader" ${MACHINE}
-vers.c: ${.CURDIR}/../../common/newvers.sh ${.CURDIR}/../../efi/loader/version
- sh ${.CURDIR}/../../common/newvers.sh ${.CURDIR}/version ${NEWVERSWHAT}
-
NM?= nm
OBJCOPY?= objcopy
diff --git a/sys/boot/efi/loader/arch/arm/ldscript.arm b/sys/boot/efi/loader/arch/arm/ldscript.arm
index 8b4a6dc..1028a44 100644
--- a/sys/boot/efi/loader/arch/arm/ldscript.arm
+++ b/sys/boot/efi/loader/arch/arm/ldscript.arm
@@ -18,7 +18,7 @@ SECTIONS
. = ALIGN(16);
.data :
{
- *(.data *.data.*)
+ *(.data .data.*)
*(.gnu.linkonce.d*)
*(.rodata)
*(.rodata.*)
diff --git a/sys/boot/efi/loader/arch/arm/start.S b/sys/boot/efi/loader/arch/arm/start.S
index 443de4a..9bc0c9f 100644
--- a/sys/boot/efi/loader/arch/arm/start.S
+++ b/sys/boot/efi/loader/arch/arm/start.S
@@ -161,7 +161,7 @@ _start:
mov r2, #0
1: cmp r0, r1
- bgt 2f
+ bge 2f
str r2, [r0], #4
b 1b
2:
diff --git a/sys/boot/efi/loader/main.c b/sys/boot/efi/loader/main.c
index 9fed1f0..0490d9b 100644
--- a/sys/boot/efi/loader/main.c
+++ b/sys/boot/efi/loader/main.c
@@ -50,10 +50,7 @@ __FBSDID("$FreeBSD$");
#include "loader_efi.h"
-extern char bootprog_name[];
-extern char bootprog_rev[];
-extern char bootprog_date[];
-extern char bootprog_maker[];
+extern char bootprog_info[];
struct arch_switch archsw; /* MI/MD interface boundary */
@@ -387,9 +384,7 @@ main(int argc, CHAR16 *argv[])
printf("EFI Firmware: %S (rev %d.%02d)\n", ST->FirmwareVendor,
ST->FirmwareRevision >> 16, ST->FirmwareRevision & 0xffff);
- printf("\n");
- printf("%s, Revision %s\n", bootprog_name, bootprog_rev);
- printf("(%s, %s)\n", bootprog_maker, bootprog_date);
+ printf("\n%s", bootprog_info);
/*
* Disable the watchdog timer. By default the boot manager sets
diff --git a/sys/boot/fdt/dts/arm/a83t.dtsi b/sys/boot/fdt/dts/arm/a83t.dtsi
index 1ff4d18..b55d768 100644
--- a/sys/boot/fdt/dts/arm/a83t.dtsi
+++ b/sys/boot/fdt/dts/arm/a83t.dtsi
@@ -132,6 +132,9 @@
usbphy: phy@01c19400 {
compatible = "allwinner,sun8i-a83t-usb-phy";
+ reg = <0x01c19400 0x2c>,
+ <0x01c1a800 0x4>,
+ <0x01c1b800 0x4>;
clocks = <&usb_clk 8>,
<&usb_clk 9>,
<&usb_clk 10>,
diff --git a/sys/boot/fdt/dts/arm/ufw.dts b/sys/boot/fdt/dts/arm/ufw.dts
index d02dd3b..8b731a1 100644
--- a/sys/boot/fdt/dts/arm/ufw.dts
+++ b/sys/boot/fdt/dts/arm/ufw.dts
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2016 Rubicon Communications (Netgate)
+ * Copyright (c) 2016, 2017 Rubicon Communications, LLC (Netgate)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -29,11 +29,247 @@
/dts-v1/;
#include "am33xx.dtsi"
-#include "ubmc.dtsi"
/ {
model = "AM335x uFW";
compatible = "ti,am335x-ufw", "ti,am335x-ubmc", "ti,am33xx";
+
+ memory {
+ device_type = "memory";
+ reg = <0x80000000 0x10000000>; /* 256 MB */
+ };
+
+ vmmcsd_fixed: fixedregulator@0 {
+ compatible = "regulator-fixed";
+ regulator-name = "vmmcsd_fixed";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+};
+
+&am33xx_pinmux {
+ pinctrl-names = "default";
+ pinctrl-0 = <&clkout2_pin>;
+
+ i2c0_pins: pinmux_i2c0_pins {
+ pinctrl-single,pins = <
+ AM33XX_IOPAD(0x988, PIN_INPUT_PULLUP | MUX_MODE0) /* i2c0_sda.i2c0_sda */
+ AM33XX_IOPAD(0x98c, PIN_INPUT_PULLUP | MUX_MODE0) /* i2c0_scl.i2c0_scl */
+ >;
+ };
+
+ i2c1_pins: pinmux_i2c1_pins {
+ pinctrl-single,pins = <
+ AM33XX_IOPAD(0x968, PIN_INPUT_PULLUP | MUX_MODE3) /* uart0_ctsn.i2c1_sda */
+ AM33XX_IOPAD(0x96c, PIN_INPUT_PULLUP | MUX_MODE3) /* uart0_rtsn.i2c1_scl */
+ >;
+ };
+
+ uart0_pins: pinmux_uart0_pins {
+ pinctrl-single,pins = <
+ AM33XX_IOPAD(0x970, PIN_INPUT_PULLUP | MUX_MODE0) /* uart0_rxd.uart0_rxd */
+ AM33XX_IOPAD(0x974, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* uart0_txd.uart0_txd */
+ >;
+ };
+
+ clkout2_pin: pinmux_clkout2_pin {
+ pinctrl-single,pins = <
+ AM33XX_IOPAD(0x9b4, PIN_OUTPUT_PULLDOWN | MUX_MODE3) /* xdma_event_intr1.clkout2 */
+ >;
+ };
+
+ cpsw_default: cpsw_default {
+ pinctrl-single,pins = <
+ /* Slave 1 */
+ AM33XX_IOPAD(0x914, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mii1_txen.rgmii_1_txen */
+ AM33XX_IOPAD(0x918, PIN_INPUT_PULLUP | MUX_MODE2) /* mii1_rxdv.rgmii_1_rxdv */
+ AM33XX_IOPAD(0x91c, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mii1_txd3.rgmii_1_txd3 */
+ AM33XX_IOPAD(0x920, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mii1_txd2.rgmii_1_txd2 */
+ AM33XX_IOPAD(0x924, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mii1_txd1.rgmii_1_txd1 */
+ AM33XX_IOPAD(0x928, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mii1_txd0.rgmii_1_txd0 */
+ AM33XX_IOPAD(0x92c, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mii1_txclk.rgmii_1_txclk */
+ AM33XX_IOPAD(0x930, PIN_INPUT_PULLUP | MUX_MODE2) /* mii1_rxclk.rgmii_1_rxclk */
+ AM33XX_IOPAD(0x934, PIN_INPUT_PULLUP | MUX_MODE2) /* mii1_rxd3.rgmii_1_rxd3 */
+ AM33XX_IOPAD(0x938, PIN_INPUT_PULLUP | MUX_MODE2) /* mii1_rxd2.rgmii_1_rxd2 */
+ AM33XX_IOPAD(0x93c, PIN_INPUT_PULLUP | MUX_MODE2) /* mii1_rxd1.rgmii_1_rxd1 */
+ AM33XX_IOPAD(0x940, PIN_INPUT_PULLUP | MUX_MODE2) /* mii1_rxd0.rgmii_1_rxd0 */
+
+ /* Slave 2 */
+ AM33XX_IOPAD(0x840, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* gmpc_a0.rgmii_2_txen */
+ AM33XX_IOPAD(0x844, PIN_INPUT_PULLUP | MUX_MODE2) /* gmpc_a1.rgmii_2_rxdv */
+ AM33XX_IOPAD(0x848, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* gmpc_a2.rgmii_2_txd3 */
+ AM33XX_IOPAD(0x84c, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* gmpc_a3.rgmii_2_txd2 */
+ AM33XX_IOPAD(0x850, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* gmpc_a4.rgmii_2_txd1 */
+ AM33XX_IOPAD(0x854, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* gmpc_a5.rgmii_2_txd0 */
+ AM33XX_IOPAD(0x858, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* gmpc_a6.rgmii_2_txclk */
+ AM33XX_IOPAD(0x85c, PIN_INPUT_PULLUP | MUX_MODE2) /* gmpc_a7.rgmii_2_rxclk */
+ AM33XX_IOPAD(0x860, PIN_INPUT_PULLUP | MUX_MODE2) /* gmpc_a8.rgmii_2_rxd3 */
+ AM33XX_IOPAD(0x864, PIN_INPUT_PULLUP | MUX_MODE2) /* gmpc_a9.rgmii_2_rxd2 */
+ AM33XX_IOPAD(0x868, PIN_INPUT_PULLUP | MUX_MODE2) /* gmpc_a10.rgmii_2_rxd1 */
+ AM33XX_IOPAD(0x86c, PIN_INPUT_PULLUP | MUX_MODE2) /* gmpc_a11.rgmii_2_rxd0 */
+ >;
+ };
+
+ cpsw_sleep: cpsw_sleep {
+ pinctrl-single,pins = <
+ /* Slave 1 reset value */
+ AM33XX_IOPAD(0x914, PIN_INPUT_PULLDOWN | MUX_MODE7)
+ AM33XX_IOPAD(0x918, PIN_INPUT_PULLDOWN | MUX_MODE7)
+ AM33XX_IOPAD(0x91c, PIN_INPUT_PULLDOWN | MUX_MODE7)
+ AM33XX_IOPAD(0x920, PIN_INPUT_PULLDOWN | MUX_MODE7)
+ AM33XX_IOPAD(0x924, PIN_INPUT_PULLDOWN | MUX_MODE7)
+ AM33XX_IOPAD(0x928, PIN_INPUT_PULLDOWN | MUX_MODE7)
+ AM33XX_IOPAD(0x92c, PIN_INPUT_PULLDOWN | MUX_MODE7)
+ AM33XX_IOPAD(0x930, PIN_INPUT_PULLDOWN | MUX_MODE7)
+ AM33XX_IOPAD(0x934, PIN_INPUT_PULLDOWN | MUX_MODE7)
+ AM33XX_IOPAD(0x938, PIN_INPUT_PULLDOWN | MUX_MODE7)
+ AM33XX_IOPAD(0x93c, PIN_INPUT_PULLDOWN | MUX_MODE7)
+ AM33XX_IOPAD(0x940, PIN_INPUT_PULLDOWN | MUX_MODE7)
+
+ /* Slave 2 reset value */
+ AM33XX_IOPAD(0x840, PIN_INPUT_PULLDOWN | MUX_MODE7)
+ AM33XX_IOPAD(0x844, PIN_INPUT_PULLDOWN | MUX_MODE7)
+ AM33XX_IOPAD(0x848, PIN_INPUT_PULLDOWN | MUX_MODE7)
+ AM33XX_IOPAD(0x84c, PIN_INPUT_PULLDOWN | MUX_MODE7)
+ AM33XX_IOPAD(0x850, PIN_INPUT_PULLDOWN | MUX_MODE7)
+ AM33XX_IOPAD(0x854, PIN_INPUT_PULLDOWN | MUX_MODE7)
+ AM33XX_IOPAD(0x858, PIN_INPUT_PULLDOWN | MUX_MODE7)
+ AM33XX_IOPAD(0x85c, PIN_INPUT_PULLDOWN | MUX_MODE7)
+ AM33XX_IOPAD(0x860, PIN_INPUT_PULLDOWN | MUX_MODE7)
+ AM33XX_IOPAD(0x864, PIN_INPUT_PULLDOWN | MUX_MODE7)
+ AM33XX_IOPAD(0x868, PIN_INPUT_PULLDOWN | MUX_MODE7)
+ AM33XX_IOPAD(0x86c, PIN_INPUT_PULLDOWN | MUX_MODE7)
+ >;
+ };
+
+ davinci_mdio_default: davinci_mdio_default {
+ pinctrl-single,pins = <
+ /* MDIO */
+ AM33XX_IOPAD(0x948, PIN_INPUT_PULLUP | SLEWCTRL_FAST | MUX_MODE0) /* mdio_data.mdio_data */
+ AM33XX_IOPAD(0x94c, PIN_OUTPUT_PULLUP | MUX_MODE0) /* mdio_clk.mdio_clk */
+ >;
+ };
+
+ davinci_mdio_sleep: davinci_mdio_sleep {
+ pinctrl-single,pins = <
+ /* MDIO reset value */
+ AM33XX_IOPAD(0x948, PIN_INPUT_PULLDOWN | MUX_MODE7)
+ AM33XX_IOPAD(0x94c, PIN_INPUT_PULLDOWN | MUX_MODE7)
+ >;
+ };
+
+ mmc1_pins: pinmux_mmc1_pins {
+ pinctrl-single,pins = <
+ AM33XX_IOPAD(0x8f0, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_dat3.mmc0_dat3 */
+ AM33XX_IOPAD(0x8f4, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_dat2.mmc0_dat2 */
+ AM33XX_IOPAD(0x8f8, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_dat1.mmc0_dat1 */
+ AM33XX_IOPAD(0x8fc, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_dat0.mmc0_dat0 */
+ AM33XX_IOPAD(0x900, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_clk.mmc0_clk */
+ AM33XX_IOPAD(0x904, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_cmd.mmc0_cmd */
+ AM33XX_IOPAD(0x960, PIN_INPUT_PULLUP | MUX_MODE5) /* spi0_cs1.mmc0_cd */
+ >;
+ };
+
+ emmc_pins: pinmux_emmc_pins {
+ pinctrl-single,pins = <
+ AM33XX_IOPAD(0x994, PIN_INPUT_PULLUP | MUX_MODE4) /* mcasp0_fsx.mmc1_cd */
+ AM33XX_IOPAD(0x880, PIN_INPUT_PULLUP | MUX_MODE2) /* gpmc_csn1.mmc1_clk */
+ AM33XX_IOPAD(0x884, PIN_INPUT_PULLUP | MUX_MODE2) /* gpmc_csn2.mmc1_cmd */
+ AM33XX_IOPAD(0x800, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad0.mmc1_dat0 */
+ AM33XX_IOPAD(0x804, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad1.mmc1_dat1 */
+ AM33XX_IOPAD(0x808, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad2.mmc1_dat2 */
+ AM33XX_IOPAD(0x80c, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad3.mmc1_dat3 */
+ AM33XX_IOPAD(0x810, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad4.mmc1_dat4 */
+ AM33XX_IOPAD(0x814, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad5.mmc1_dat5 */
+ AM33XX_IOPAD(0x818, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad6.mmc1_dat6 */
+ AM33XX_IOPAD(0x81c, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad7.mmc1_dat7 */
+ >;
+ };
+};
+
+&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_pins>;
+
+ status = "okay";
+};
+
+&usb {
+ status = "okay";
+};
+
+&usb_ctrl_mod {
+ status = "okay";
+};
+
+&usb0_phy {
+ status = "okay";
+};
+
+&usb1_phy {
+ status = "okay";
+};
+
+&usb0 {
+ status = "okay";
+ dr_mode = "host";
+};
+
+&usb1 {
+ status = "okay";
+ dr_mode = "host";
+};
+
+&cppi41dma {
+ status = "okay";
+};
+
+&cpsw_emac0 {
+ phy_id = <&davinci_mdio>, <1>;
+ phy-mode = "rgmii";
+ dual_emac_res_vlan = <4071>;
+};
+
+&cpsw_emac1 {
+ phy_id = <&davinci_mdio>, <2>;
+ phy-mode = "rgmii";
+ dual_emac_res_vlan = <4072>;
+};
+
+&mac {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&cpsw_default>;
+ pinctrl-1 = <&cpsw_sleep>;
+ active_slave = <1>;
+ status = "okay";
+ dual_emac;
+ txen-skew-ps = <0>;
+ rxdv-skew-ps = <1400>;
+ rxd0-skew-ps = <1400>;
+ rxd1-skew-ps = <1400>;
+ rxd2-skew-ps = <1400>;
+ rxd3-skew-ps = <1400>;
+ txd0-skew-ps = <0>;
+ txd1-skew-ps = <0>;
+ txd2-skew-ps = <0>;
+ txd3-skew-ps = <0>;
+ rxc-skew-ps = <4400>;
+ txc-skew-ps = <6200>;
+};
+
+&davinci_mdio {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&davinci_mdio_default>;
+ pinctrl-1 = <&davinci_mdio_sleep>;
+ status = "okay";
+};
+
+&aes {
+ status = "okay";
+};
+
+&sham {
+ status = "okay";
};
&mmc1 {
@@ -42,6 +278,7 @@
pinctrl-0 = <&mmc1_pins>;
bus-width = <4>;
non-removable;
+ wp-disable;
status = "okay";
};
@@ -60,6 +297,18 @@
pinctrl-0 = <&i2c0_pins>;
status = "okay";
+ clock-frequency = <400000>;
+
+ baseboard_eeprom: baseboard_eeprom@50 {
+ compatible = "atmel,24c02";
+ reg = <0x50>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+ baseboard_data: baseboard_data@0 {
+ reg = <0 0x100>;
+ };
+ };
};
&i2c1 {
diff --git a/sys/boot/forth/loader.conf b/sys/boot/forth/loader.conf
index 4bfae24..c302526 100644
--- a/sys/boot/forth/loader.conf
+++ b/sys/boot/forth/loader.conf
@@ -101,6 +101,7 @@ module_path="/boot/modules" # Set the module search path
#prompt="\\${interpret}" # Set the command prompt
#root_disk_unit="0" # Force the root disk unit number
#rootdev="disk1s1a" # Set the root filesystem
+#dumpdev="disk1s1b" # Set a dump device early in the boot process
#tftp.blksize="1428" # Set the RFC 2348 TFTP block size.
# If the TFTP server does not support RFC 2348,
# the block size is set to 512. If the value
diff --git a/sys/boot/i386/btx/btxldr/btxldr.S b/sys/boot/i386/btx/btxldr/btxldr.S
index 848b930..7f2b909 100644
--- a/sys/boot/i386/btx/btxldr/btxldr.S
+++ b/sys/boot/i386/btx/btxldr/btxldr.S
@@ -198,7 +198,6 @@ start.3:
call putstr # message
movl $m_segs,%esi # Format string
#endif
- movl $0x2,%edi # Segment count
movl 0x1c(%ebx),%edx # Get e_phoff
addl %ebx,%edx # To pointer
movzwl 0x2c(%ebx),%ecx # Get e_phnum
@@ -216,8 +215,7 @@ start.4: cmpl $0x1,(%edx) # Is p_type PT_LOAD?
call putstr # End message
#endif
pushl %esi # Save
- pushl %edi # working
- pushl %ecx # registers
+ pushl %ecx # working registers
movl 0x4(%edx),%esi # Get p_offset
addl %ebx,%esi # as pointer
movl 0x8(%edx),%edi # Get p_vaddr
@@ -232,13 +230,9 @@ start.4: cmpl $0x1,(%edx) # Is p_type PT_LOAD?
rep # zero
stosb # them
start.5: popl %ecx # Restore
- popl %edi # working
popl %esi # registers
- decl %edi # Segments to do
- je start.7 # If none
start.6: addl $0x20,%edx # To next entry
loop start.4 # Till done
-start.7:
#ifdef BTXLDR_VERBOSE
movl $m_done,%esi # Display done
call putstr # message
diff --git a/sys/boot/i386/common/cons.c b/sys/boot/i386/common/cons.c
index b967d9b..5fb1a93 100644
--- a/sys/boot/i386/common/cons.c
+++ b/sys/boot/i386/common/cons.c
@@ -65,18 +65,17 @@ int
getc(int fn)
{
- /*
- * The extra comparison against zero is an attempt to work around
- * what appears to be a bug in QEMU and Bochs. Both emulators
- * sometimes report a key-press with scancode one and ascii zero
- * when no such key is pressed in reality. As far as I can tell,
- * this only happens shortly after a reboot.
- */
v86.ctl = V86_FLAGS;
v86.addr = 0x16;
v86.eax = fn << 8;
v86int();
- return fn == 0 ? v86.eax & 0xff : (!V86_ZR(v86.efl) && (v86.eax & 0xff));
+
+ if (fn == 0)
+ return (v86.eax);
+
+ if (V86_ZR(v86.efl))
+ return (0);
+ return (v86.eax);
}
int
@@ -106,14 +105,22 @@ getchar(void)
int
keyhit(unsigned int secs)
{
- uint32_t t0, t1;
+ uint32_t t0, t1, c;
if (OPT_CHECK(RBX_NOINTR))
return (0);
secs *= SECOND;
t0 = 0;
for (;;) {
- if (xgetc(1))
+ /*
+ * The extra comparison is an attempt to work around
+ * what appears to be a bug in QEMU and Bochs. Both emulators
+ * sometimes report a key-press with scancode one and ascii zero
+ * when no such key is pressed in reality. As far as I can tell,
+ * this only happens shortly after a reboot.
+ */
+ c = xgetc(1);
+ if (c != 0 && c != 0x0100)
return (1);
if (secs > 0) {
t1 = *(uint32_t *)PTOV(0x46c);
@@ -134,9 +141,19 @@ getstr(char *cmdstr, size_t cmdstrsize)
s = cmdstr;
for (;;) {
- switch (c = xgetc(0)) {
- case 0:
+ c = xgetc(0);
+
+ /* Translate some extended codes. */
+ switch (c) {
+ case 0x5300: /* delete */
+ c = '\177';
break;
+ default:
+ c &= 0xff;
+ break;
+ }
+
+ switch (c) {
case '\177':
case '\b':
if (s > cmdstr) {
@@ -149,9 +166,11 @@ getstr(char *cmdstr, size_t cmdstrsize)
*s = 0;
return;
default:
- if (s - cmdstr < cmdstrsize - 1)
- *s++ = c;
- putchar(c);
+ if (c >= 0x20 && c <= 0x7e) {
+ if (s - cmdstr < cmdstrsize - 1)
+ *s++ = c;
+ putchar(c);
+ }
break;
}
}
diff --git a/sys/boot/i386/libfirewire/firewire.c b/sys/boot/i386/libfirewire/firewire.c
index 2c7ee32..52f3805 100644
--- a/sys/boot/i386/libfirewire/firewire.c
+++ b/sys/boot/i386/libfirewire/firewire.c
@@ -66,7 +66,7 @@ struct crom_src_buf {
static int fw_init(void);
static int fw_strategy(void *devdata, int flag, daddr_t dblk,
- size_t offset, size_t size, char *buf, size_t *rsize);
+ size_t size, char *buf, size_t *rsize);
static int fw_open(struct open_file *f, ...);
static int fw_close(struct open_file *f);
static void fw_print(int verbose);
@@ -201,7 +201,7 @@ fw_cleanup()
}
static int
-fw_strategy(void *devdata, int rw, daddr_t dblk, size_t offset, size_t size,
+fw_strategy(void *devdata, int rw, daddr_t dblk, size_t size,
char *buf, size_t *rsize)
{
return (EIO);
diff --git a/sys/boot/i386/libi386/bioscd.c b/sys/boot/i386/libi386/bioscd.c
index 1ea6906..27d33f4 100644
--- a/sys/boot/i386/libi386/bioscd.c
+++ b/sys/boot/i386/libi386/bioscd.c
@@ -95,9 +95,9 @@ static int nbcinfo = 0;
static int bc_read(int unit, daddr_t dblk, int blks, caddr_t dest);
static int bc_init(void);
static int bc_strategy(void *devdata, int flag, daddr_t dblk,
- size_t offset, size_t size, char *buf, size_t *rsize);
+ size_t size, char *buf, size_t *rsize);
static int bc_realstrategy(void *devdata, int flag, daddr_t dblk,
- size_t offset, size_t size, char *buf, size_t *rsize);
+ size_t size, char *buf, size_t *rsize);
static int bc_open(struct open_file *f, ...);
static int bc_close(struct open_file *f);
static void bc_print(int verbose);
@@ -231,7 +231,7 @@ bc_close(struct open_file *f)
}
static int
-bc_strategy(void *devdata, int rw, daddr_t dblk, size_t offset, size_t size,
+bc_strategy(void *devdata, int rw, daddr_t dblk, size_t size,
char *buf, size_t *rsize)
{
struct bcache_devdata bcd;
@@ -242,11 +242,11 @@ bc_strategy(void *devdata, int rw, daddr_t dblk, size_t offset, size_t size,
bcd.dv_devdata = devdata;
bcd.dv_cache = BC(dev).bc_bcache;
- return (bcache_strategy(&bcd, rw, dblk, offset, size, buf, rsize));
+ return (bcache_strategy(&bcd, rw, dblk, size, buf, rsize));
}
static int
-bc_realstrategy(void *devdata, int rw, daddr_t dblk, size_t offset, size_t size,
+bc_realstrategy(void *devdata, int rw, daddr_t dblk, size_t size,
char *buf, size_t *rsize)
{
struct i386_devdesc *dev;
diff --git a/sys/boot/i386/libi386/biosdisk.c b/sys/boot/i386/libi386/biosdisk.c
index d10555f..2419c6b 100644
--- a/sys/boot/i386/libi386/biosdisk.c
+++ b/sys/boot/i386/libi386/biosdisk.c
@@ -128,10 +128,10 @@ static int bd_write(struct disk_devdesc *dev, daddr_t dblk, int blks,
static int bd_int13probe(struct bdinfo *bd);
static int bd_init(void);
-static int bd_strategy(void *devdata, int flag, daddr_t dblk, size_t offset,
- size_t size, char *buf, size_t *rsize);
-static int bd_realstrategy(void *devdata, int flag, daddr_t dblk, size_t offset,
- size_t size, char *buf, size_t *rsize);
+static int bd_strategy(void *devdata, int flag, daddr_t dblk, size_t size,
+ char *buf, size_t *rsize);
+static int bd_realstrategy(void *devdata, int flag, daddr_t dblk, size_t size,
+ char *buf, size_t *rsize);
static int bd_open(struct open_file *f, ...);
static int bd_close(struct open_file *f);
static int bd_ioctl(struct open_file *f, u_long cmd, void *data);
@@ -478,7 +478,7 @@ bd_ioctl(struct open_file *f, u_long cmd, void *data)
}
static int
-bd_strategy(void *devdata, int rw, daddr_t dblk, size_t offset, size_t size,
+bd_strategy(void *devdata, int rw, daddr_t dblk, size_t size,
char *buf, size_t *rsize)
{
struct bcache_devdata bcd;
@@ -488,12 +488,12 @@ bd_strategy(void *devdata, int rw, daddr_t dblk, size_t offset, size_t size,
bcd.dv_strategy = bd_realstrategy;
bcd.dv_devdata = devdata;
bcd.dv_cache = BD(dev).bd_bcache;
- return (bcache_strategy(&bcd, rw, dblk + dev->d_offset, offset,
+ return (bcache_strategy(&bcd, rw, dblk + dev->d_offset,
size, buf, rsize));
}
static int
-bd_realstrategy(void *devdata, int rw, daddr_t dblk, size_t offset, size_t size,
+bd_realstrategy(void *devdata, int rw, daddr_t dblk, size_t size,
char *buf, size_t *rsize)
{
struct disk_devdesc *dev = (struct disk_devdesc *)devdata;
diff --git a/sys/boot/i386/libi386/pxe.c b/sys/boot/i386/libi386/pxe.c
index 844ebf2..c65b9cc 100644
--- a/sys/boot/i386/libi386/pxe.c
+++ b/sys/boot/i386/libi386/pxe.c
@@ -72,7 +72,7 @@ static void bangpxe_call(int func);
static int pxe_init(void);
static int pxe_strategy(void *devdata, int flag, daddr_t dblk,
- size_t offset, size_t size, char *buf, size_t *rsize);
+ size_t size, char *buf, size_t *rsize);
static int pxe_open(struct open_file *f, ...);
static int pxe_close(struct open_file *f);
static void pxe_print(int verbose);
@@ -247,8 +247,8 @@ pxe_init(void)
static int
-pxe_strategy(void *devdata, int flag, daddr_t dblk, size_t offset, size_t size,
- char *buf, size_t *rsize)
+pxe_strategy(void *devdata, int flag, daddr_t dblk, size_t size,
+ char *buf, size_t *rsize)
{
return (EIO);
}
diff --git a/sys/boot/i386/loader/Makefile b/sys/boot/i386/loader/Makefile
index 3f54454..c8d31b8 100644
--- a/sys/boot/i386/loader/Makefile
+++ b/sys/boot/i386/loader/Makefile
@@ -8,6 +8,7 @@ PROG= ${LOADER}.sym
MAN=
INTERNALPROG=
NEWVERSWHAT?= "bootstrap loader" x86
+VERSION_FILE= ${.CURDIR}/../loader/version
# architecture-specific loader code
SRCS= main.c conf.c vers.c
@@ -72,7 +73,7 @@ CFLAGS+= -I${.CURDIR}/../../.. -D_STAND
CFLAGS+= -I${.CURDIR}/../../common
CFLAGS+= -I.
-CLEANFILES= vers.c ${LOADER} ${LOADER}.bin loader.help
+CLEANFILES= ${LOADER} ${LOADER}.bin loader.help
CFLAGS+= -Wall
LDFLAGS= -static -Ttext 0x0
@@ -93,10 +94,6 @@ CFLAGS+= -I${.CURDIR}/../btx/lib
# Pick up ../Makefile.inc early.
.include <bsd.init.mk>
-vers.c: ${.CURDIR}/../../common/newvers.sh ${.CURDIR}/../loader/version
- sh ${.CURDIR}/../../common/newvers.sh ${.CURDIR}/../loader/version \
- ${NEWVERSWHAT}
-
${LOADER}: ${LOADER}.bin ${BTXLDR} ${BTXKERN}
btxld -v -f aout -e ${LOADER_ADDRESS} -o ${.TARGET} -l ${BTXLDR} \
-b ${BTXKERN} ${LOADER}.bin
diff --git a/sys/boot/i386/loader/main.c b/sys/boot/i386/loader/main.c
index bc32593..9b034ab 100644
--- a/sys/boot/i386/loader/main.c
+++ b/sys/boot/i386/loader/main.c
@@ -77,7 +77,7 @@ static void i386_zfs_probe(void);
#endif
/* from vers.c */
-extern char bootprog_name[], bootprog_rev[], bootprog_date[], bootprog_maker[];
+extern char bootprog_info[];
/* XXX debugging */
extern char end[];
@@ -215,9 +215,7 @@ main(void)
/* detect PCI BIOS for future reference */
biospci_detect();
- printf("\n");
- printf("%s, Revision %s\n", bootprog_name, bootprog_rev);
- printf("(%s, %s)\n", bootprog_maker, bootprog_date);
+ printf("\n%s", bootprog_info);
extract_currdev(); /* set $currdev and $loaddev */
setenv("LINES", "24", 1); /* optional */
diff --git a/sys/boot/mips/beri/loader/Makefile b/sys/boot/mips/beri/loader/Makefile
index d20c01f..77262f3 100644
--- a/sys/boot/mips/beri/loader/Makefile
+++ b/sys/boot/mips/beri/loader/Makefile
@@ -92,7 +92,7 @@ CFLAGS+= -I${.CURDIR}/../common
# Loader-specific MD headers
CFLAGS+= -I${.CURDIR}
-CLEANFILES+= vers.c loader.help
+CLEANFILES+= loader.help
# Generate code appropriate for the loader environment
CFLAGS+= -G0 \
@@ -114,10 +114,6 @@ LIBSTAND= ${.OBJDIR}/../../../../../lib/libstand/libstand.a
DPADD= ${LIBFICL} ${LIBSTAND}
LDADD= ${LIBFICL} ${LIBSTAND}
-vers.c: ${.CURDIR}/../../../common/newvers.sh ${.CURDIR}/version
- sh ${.CURDIR}/../../../common/newvers.sh ${.CURDIR}/version \
- ${NEWVERSWHAT}
-
loader.help: help.common help.mips
cat ${.ALLSRC} | \
awk -f ${.CURDIR}/../../../common/merge_help.awk > ${.TARGET}
diff --git a/sys/boot/mips/beri/loader/beri_disk_cfi.c b/sys/boot/mips/beri/loader/beri_disk_cfi.c
index b2b6d00..d16e01a 100644
--- a/sys/boot/mips/beri/loader/beri_disk_cfi.c
+++ b/sys/boot/mips/beri/loader/beri_disk_cfi.c
@@ -45,7 +45,7 @@ static int beri_cfi_disk_init(void);
static int beri_cfi_disk_open(struct open_file *, ...);
static int beri_cfi_disk_close(struct open_file *);
static void beri_cfi_disk_cleanup(void);
-static int beri_cfi_disk_strategy(void *, int, daddr_t, size_t, size_t,
+static int beri_cfi_disk_strategy(void *, int, daddr_t, size_t,
char *, size_t *);
static void beri_cfi_disk_print(int);
@@ -69,8 +69,8 @@ beri_cfi_disk_init(void)
}
static int
-beri_cfi_disk_strategy(void *devdata, int flag, daddr_t dblk, size_t offset,
- size_t size, char *buf, size_t *rsizep)
+beri_cfi_disk_strategy(void *devdata, int flag, daddr_t dblk, size_t size,
+ char *buf, size_t *rsizep)
{
int error;
diff --git a/sys/boot/mips/beri/loader/beri_disk_sdcard.c b/sys/boot/mips/beri/loader/beri_disk_sdcard.c
index 2577e14..67ba78a 100644
--- a/sys/boot/mips/beri/loader/beri_disk_sdcard.c
+++ b/sys/boot/mips/beri/loader/beri_disk_sdcard.c
@@ -45,7 +45,7 @@ static int beri_sdcard_disk_init(void);
static int beri_sdcard_disk_open(struct open_file *, ...);
static int beri_sdcard_disk_close(struct open_file *);
static void beri_sdcard_disk_cleanup(void);
-static int beri_sdcard_disk_strategy(void *, int, daddr_t, size_t, size_t,
+static int beri_sdcard_disk_strategy(void *, int, daddr_t, size_t,
char *, size_t *);
static void beri_sdcard_disk_print(int);
@@ -69,8 +69,8 @@ beri_sdcard_disk_init(void)
}
static int
-beri_sdcard_disk_strategy(void *devdata, int flag, daddr_t dblk, size_t offset,
- size_t size, char *buf, size_t *rsizep)
+beri_sdcard_disk_strategy(void *devdata, int flag, daddr_t dblk, size_t size,
+ char *buf, size_t *rsizep)
{
int error;
diff --git a/sys/boot/mips/beri/loader/loader.h b/sys/boot/mips/beri/loader/loader.h
index 31a4215..e4152e7 100644
--- a/sys/boot/mips/beri/loader/loader.h
+++ b/sys/boot/mips/beri/loader/loader.h
@@ -58,7 +58,6 @@ extern struct bootinfo boot2_bootinfo;
int md_load64(char *args, vm_offset_t *modulep);
/* vers.c */
-extern char bootprog_name[], bootprog_rev[];
-extern char bootprog_date[], bootprog_maker[];
+extern char bootprog_info[];
#endif /* !_BOOT_LOADER_H_ */
diff --git a/sys/boot/mips/beri/loader/main.c b/sys/boot/mips/beri/loader/main.c
index 8253fb2..2d201d8 100644
--- a/sys/boot/mips/beri/loader/main.c
+++ b/sys/boot/mips/beri/loader/main.c
@@ -144,9 +144,7 @@ main(int argc, char *argv[], char *envv[], struct bootinfo *bootinfop)
}
extract_currdev(bootinfop);
- printf("\n");
- printf("%s, Revision %s\n", bootprog_name, bootprog_rev);
- printf("(%s, %s)\n", bootprog_maker, bootprog_date);
+ printf("\n%s", bootprog_info);
#if 0
printf("bootpath=\"%s\"\n", bootpath);
#endif
diff --git a/sys/boot/mips/uboot/Makefile b/sys/boot/mips/uboot/Makefile
index 6be362c..c5d06f0 100644
--- a/sys/boot/mips/uboot/Makefile
+++ b/sys/boot/mips/uboot/Makefile
@@ -99,7 +99,7 @@ LIBFICL= ${.OBJDIR}/../../ficl/libficl.a
CFLAGS+= -I${.CURDIR}/../../common
CFLAGS+= -I.
-CLEANFILES+= vers.c loader.help
+CLEANFILES+= loader.help
CFLAGS+= -ffreestanding -msoft-float -g
@@ -128,9 +128,6 @@ LDADD= ${LIBFICL} ${LIBUBOOT} ${LIBFDT} ${LIBUBOOT_FDT} ${LIBSTAND}
OBJS+= ${SRCS:N*.h:R:S/$/.o/g}
-vers.c: ${.CURDIR}/../../common/newvers.sh ${.CURDIR}/version
- sh ${.CURDIR}/../../common/newvers.sh ${.CURDIR}/version ${NEWVERSWHAT}
-
loader.help: help.common help.uboot ${.CURDIR}/../../fdt/help.fdt
cat ${.ALLSRC} | \
awk -f ${.CURDIR}/../../common/merge_help.awk > ${.TARGET}
diff --git a/sys/boot/ofw/common/main.c b/sys/boot/ofw/common/main.c
index b15df5f..3c0bbdf 100644
--- a/sys/boot/ofw/common/main.c
+++ b/sys/boot/ofw/common/main.c
@@ -36,10 +36,7 @@ __FBSDID("$FreeBSD$");
struct arch_switch archsw; /* MI/MD interface boundary */
extern char end[];
-extern char bootprog_name[];
-extern char bootprog_rev[];
-extern char bootprog_date[];
-extern char bootprog_maker[];
+extern char bootprog_info[];
u_int32_t acells, scells;
@@ -127,9 +124,7 @@ main(int (*openfirm)(void *))
if (devsw[i]->dv_init != NULL)
(devsw[i]->dv_init)();
- printf("\n");
- printf("%s, Revision %s\n", bootprog_name, bootprog_rev);
- printf("(%s, %s)\n", bootprog_maker, bootprog_date);
+ printf("\n%s", bootprog_info);
printf("Memory: %lldKB\n", memsize() / 1024);
OF_getprop(chosen, "bootpath", bootpath, 64);
diff --git a/sys/boot/ofw/libofw/ofw_disk.c b/sys/boot/ofw/libofw/ofw_disk.c
index 9c46ccc..9b2e11c 100644
--- a/sys/boot/ofw/libofw/ofw_disk.c
+++ b/sys/boot/ofw/libofw/ofw_disk.c
@@ -43,7 +43,7 @@ __FBSDID("$FreeBSD$");
static int ofwd_init(void);
static int ofwd_strategy(void *devdata, int flag, daddr_t dblk,
- size_t offset, size_t size, char *buf, size_t *rsize);
+ size_t size, char *buf, size_t *rsize);
static int ofwd_open(struct open_file *f, ...);
static int ofwd_close(struct open_file *f);
static int ofwd_ioctl(struct open_file *f, u_long cmd, void *data);
@@ -83,8 +83,8 @@ ofwd_init(void)
}
static int
-ofwd_strategy(void *devdata, int flag __unused, daddr_t dblk, size_t offset,
- size_t size, char *buf, size_t *rsize)
+ofwd_strategy(void *devdata, int flag __unused, daddr_t dblk, size_t size,
+ char *buf, size_t *rsize)
{
struct ofw_devdesc *dp = (struct ofw_devdesc *)devdata;
daddr_t pos;
diff --git a/sys/boot/pc98/libpc98/bioscd.c b/sys/boot/pc98/libpc98/bioscd.c
index 15758cc..f358dab 100644
--- a/sys/boot/pc98/libpc98/bioscd.c
+++ b/sys/boot/pc98/libpc98/bioscd.c
@@ -94,9 +94,9 @@ static int nbcinfo = 0;
static int bc_read(int unit, daddr_t dblk, int blks, caddr_t dest);
static int bc_init(void);
static int bc_strategy(void *devdata, int flag, daddr_t dblk,
- size_t offset, size_t size, char *buf, size_t *rsize);
+ size_t size, char *buf, size_t *rsize);
static int bc_realstrategy(void *devdata, int flag, daddr_t dblk,
- size_t offset, size_t size, char *buf, size_t *rsize);
+ size_t size, char *buf, size_t *rsize);
static int bc_open(struct open_file *f, ...);
static int bc_close(struct open_file *f);
static void bc_print(int verbose);
@@ -227,7 +227,7 @@ bc_close(struct open_file *f)
}
static int
-bc_strategy(void *devdata, int rw, daddr_t dblk, size_t offset, size_t size,
+bc_strategy(void *devdata, int rw, daddr_t dblk, size_t size,
char *buf, size_t *rsize)
{
struct bcache_devdata bcd;
@@ -238,11 +238,11 @@ bc_strategy(void *devdata, int rw, daddr_t dblk, size_t offset, size_t size,
bcd.dv_devdata = devdata;
bcd.dv_cache = BC(dev).bc_bcache;
- return (bcache_strategy(&bcd, rw, dblk, offset, size, buf, rsize));
+ return (bcache_strategy(&bcd, rw, dblk, size, buf, rsize));
}
static int
-bc_realstrategy(void *devdata, int rw, daddr_t dblk, size_t offset, size_t size,
+bc_realstrategy(void *devdata, int rw, daddr_t dblk, size_t size,
char *buf, size_t *rsize)
{
struct i386_devdesc *dev;
diff --git a/sys/boot/pc98/libpc98/biosdisk.c b/sys/boot/pc98/libpc98/biosdisk.c
index 0ceeb7b..01971e7 100644
--- a/sys/boot/pc98/libpc98/biosdisk.c
+++ b/sys/boot/pc98/libpc98/biosdisk.c
@@ -118,9 +118,9 @@ static int bd_printbsdslice(struct open_disk *od, daddr_t offset,
static int bd_init(void);
static int bd_strategy(void *devdata, int flag, daddr_t dblk,
- size_t offset, size_t size, char *buf, size_t *rsize);
+ size_t size, char *buf, size_t *rsize);
static int bd_realstrategy(void *devdata, int flag, daddr_t dblk,
- size_t offset, size_t size, char *buf, size_t *rsize);
+ size_t size, char *buf, size_t *rsize);
static int bd_open(struct open_file *f, ...);
static int bd_close(struct open_file *f);
static void bd_print(int verbose);
@@ -740,7 +740,7 @@ bd_closedisk(struct open_disk *od)
}
static int
-bd_strategy(void *devdata, int rw, daddr_t dblk, size_t offset, size_t size,
+bd_strategy(void *devdata, int rw, daddr_t dblk, size_t size,
char *buf, size_t *rsize)
{
struct bcache_devdata bcd;
@@ -750,12 +750,11 @@ bd_strategy(void *devdata, int rw, daddr_t dblk, size_t offset, size_t size,
bcd.dv_strategy = bd_realstrategy;
bcd.dv_devdata = devdata;
bcd.dv_cache = BD(dev).bd_bcache;
- return(bcache_strategy(&bcd, rw, dblk+od->od_boff, offset,
- size, buf, rsize));
+ return(bcache_strategy(&bcd, rw, dblk+od->od_boff, size, buf, rsize));
}
static int
-bd_realstrategy(void *devdata, int rw, daddr_t dblk, size_t offset,
+bd_realstrategy(void *devdata, int rw, daddr_t dblk,
size_t size, char *buf, size_t *rsize)
{
struct open_disk *od = (struct open_disk *)(((struct i386_devdesc *)devdata)->d_kind.biosdisk.data);
diff --git a/sys/boot/pc98/loader/Makefile b/sys/boot/pc98/loader/Makefile
index 319600f..d75e8d0 100644
--- a/sys/boot/pc98/loader/Makefile
+++ b/sys/boot/pc98/loader/Makefile
@@ -8,6 +8,7 @@ LOADER?= loader
PROG= ${LOADER}.sym
INTERNALPROG=
NEWVERSWHAT= "bootstrap loader" pc98
+VERSION_FILE= ${.CURDIR}/../../i386/loader/version
# architecture-specific loader code
SRCS= main.c conf.c vers.c
@@ -48,7 +49,7 @@ CFLAGS+= -I${.CURDIR}/../../common
CFLAGS+= -I${.CURDIR}/../../i386
CFLAGS+= -I.
-CLEANFILES= vers.c ${LOADER} ${LOADER}.bin loader.help
+CLEANFILES= ${LOADER} ${LOADER}.bin loader.help
CFLAGS+= -Wall
LDFLAGS= -static -Ttext 0x0
@@ -69,9 +70,6 @@ CFLAGS+= -I${.CURDIR}/../btx/lib
# Pick up ../Makefile.inc early.
.include <bsd.init.mk>
-vers.c: ${.CURDIR}/../../common/newvers.sh ${.CURDIR}/../../i386/loader/version
- sh ${.CURDIR}/../../common/newvers.sh ${.CURDIR}/../../i386/loader/version ${NEWVERSWHAT}
-
${LOADER}: ${LOADER}.bin ${BTXLDR} ${BTXKERN}
btxld -v -f aout -e ${LOADER_ADDRESS} -o ${.TARGET} -l ${BTXLDR} \
-b ${BTXKERN} ${LOADER}.bin
diff --git a/sys/boot/pc98/loader/main.c b/sys/boot/pc98/loader/main.c
index 39444ad..c31cc84 100644
--- a/sys/boot/pc98/loader/main.c
+++ b/sys/boot/pc98/loader/main.c
@@ -66,7 +66,7 @@ static void isa_outb(int port, int value);
void exit(int code);
/* from vers.c */
-extern char bootprog_name[], bootprog_rev[], bootprog_date[], bootprog_maker[];
+extern char bootprog_info[];
/* XXX debugging */
extern char end[];
@@ -186,9 +186,7 @@ main(void)
initial_bootinfo->bi_extmem = bios_extmem / 1024;
}
- printf("\n");
- printf("%s, Revision %s\n", bootprog_name, bootprog_rev);
- printf("(%s, %s)\n", bootprog_maker, bootprog_date);
+ printf("\n%s", bootprog_info);
extract_currdev(); /* set $currdev and $loaddev */
setenv("LINES", "24", 1); /* optional */
diff --git a/sys/boot/powerpc/kboot/Makefile b/sys/boot/powerpc/kboot/Makefile
index c161a9c..ca46398 100644
--- a/sys/boot/powerpc/kboot/Makefile
+++ b/sys/boot/powerpc/kboot/Makefile
@@ -76,7 +76,7 @@ CFLAGS+= -mcpu=powerpc64
CFLAGS+= -I${.CURDIR}/../../common -I${.CURDIR}/../../..
CFLAGS+= -I.
-CLEANFILES+= vers.c loader.help
+CLEANFILES+= loader.help
CFLAGS+= -Wall -ffreestanding -msoft-float -DAIM
# load address. set in linker script
@@ -99,9 +99,6 @@ CFLAGS+= -I${.CURDIR}/../../../../lib/libstand/
DPADD= ${LIBFICL} ${LIBOFW} ${LIBFDT} ${LIBSTAND}
LDADD= ${LIBFICL} ${LIBOFW} ${LIBFDT} ${LIBSTAND}
-vers.c: ${.CURDIR}/../../common/newvers.sh ${.CURDIR}/version
- sh ${.CURDIR}/../../common/newvers.sh ${.CURDIR}/version ${NEWVERSWHAT}
-
loader.help: help.common help.kboot ${.CURDIR}/../../fdt/help.fdt
cat ${.ALLSRC} | \
awk -f ${.CURDIR}/../../common/merge_help.awk > ${.TARGET}
diff --git a/sys/boot/powerpc/kboot/hostdisk.c b/sys/boot/powerpc/kboot/hostdisk.c
index ac4534c..2deb956 100644
--- a/sys/boot/powerpc/kboot/hostdisk.c
+++ b/sys/boot/powerpc/kboot/hostdisk.c
@@ -33,7 +33,7 @@ __FBSDID("$FreeBSD$");
static int hostdisk_init(void);
static int hostdisk_strategy(void *devdata, int flag, daddr_t dblk,
- size_t offset, size_t size, char *buf, size_t *rsize);
+ size_t size, char *buf, size_t *rsize);
static int hostdisk_open(struct open_file *f, ...);
static int hostdisk_close(struct open_file *f);
static int hostdisk_ioctl(struct open_file *f, u_long cmd, void *data);
@@ -58,8 +58,8 @@ hostdisk_init(void)
}
static int
-hostdisk_strategy(void *devdata, int flag, daddr_t dblk, size_t offset,
- size_t size, char *buf, size_t *rsize)
+hostdisk_strategy(void *devdata, int flag, daddr_t dblk, size_t size,
+ char *buf, size_t *rsize)
{
struct devdesc *desc = devdata;
daddr_t pos;
diff --git a/sys/boot/powerpc/kboot/main.c b/sys/boot/powerpc/kboot/main.c
index ec5ba6b..ecc2622 100644
--- a/sys/boot/powerpc/kboot/main.c
+++ b/sys/boot/powerpc/kboot/main.c
@@ -38,10 +38,7 @@ __FBSDID("$FreeBSD$");
struct arch_switch archsw;
extern void *_end;
-extern char bootprog_name[];
-extern char bootprog_rev[];
-extern char bootprog_date[];
-extern char bootprog_maker[];
+extern char bootprog_info[];
int kboot_getdev(void **vdev, const char *devspec, const char **path);
ssize_t kboot_copyin(const void *src, vm_offset_t dest, const size_t len);
@@ -119,9 +116,7 @@ main(int argc, const char **argv)
archsw.arch_autoload = kboot_autoload;
archsw.arch_loadaddr = kboot_loadaddr;
- printf("\n");
- printf("%s, Revision %s\n", bootprog_name, bootprog_rev);
- printf("(%s, %s)\n", bootprog_maker, bootprog_date);
+ printf("\n%s", bootprog_info);
setenv("currdev", bootdev, 1);
setenv("loaddev", bootdev, 1);
diff --git a/sys/boot/powerpc/ofw/Makefile b/sys/boot/powerpc/ofw/Makefile
index eccf3c5..5ce47cf 100644
--- a/sys/boot/powerpc/ofw/Makefile
+++ b/sys/boot/powerpc/ofw/Makefile
@@ -73,7 +73,7 @@ LIBFICL= ${.OBJDIR}/../../ficl/libficl.a
CFLAGS+= -I${.CURDIR}/../../common -I${.CURDIR}/../../..
CFLAGS+= -I.
-CLEANFILES+= vers.c loader.help
+CLEANFILES+= loader.help
CFLAGS+= -ffreestanding -msoft-float
# load address. set in linker script
@@ -97,9 +97,6 @@ CFLAGS+= -I${.CURDIR}/../../../../lib/libstand/
DPADD= ${LIBFICL} ${LIBOFW} ${LIBFDT} ${LIBSTAND}
LDADD= ${LIBFICL} ${LIBOFW} ${LIBFDT} ${LIBSTAND}
-vers.c: ${.CURDIR}/../../common/newvers.sh ${.CURDIR}/version
- sh ${.CURDIR}/../../common/newvers.sh ${.CURDIR}/version ${NEWVERSWHAT}
-
loader.help: help.common help.ofw ${.CURDIR}/../../fdt/help.fdt
cat ${.ALLSRC} | \
awk -f ${.CURDIR}/../../common/merge_help.awk > ${.TARGET}
diff --git a/sys/boot/powerpc/ps3/Makefile b/sys/boot/powerpc/ps3/Makefile
index 0cbc166..b2f2ef6 100644
--- a/sys/boot/powerpc/ps3/Makefile
+++ b/sys/boot/powerpc/ps3/Makefile
@@ -76,7 +76,7 @@ CFLAGS+= -mcpu=powerpc64
CFLAGS+= -I${.CURDIR}/../../common -I${.CURDIR}/../../..
CFLAGS+= -I.
-CLEANFILES+= vers.c loader.help
+CLEANFILES+= loader.help
CFLAGS+= -Wall -ffreestanding -msoft-float -DAIM
# load address. set in linker script
@@ -101,9 +101,6 @@ SC_DFLT_FONT=cp437
font.h:
uudecode < /usr/share/syscons/fonts/${SC_DFLT_FONT}-8x16.fnt && file2c 'u_char dflt_font_16[16*256] = {' '};' < ${SC_DFLT_FONT}-8x16 > font.h && uudecode < /usr/share/syscons/fonts/${SC_DFLT_FONT}-8x14.fnt && file2c 'u_char dflt_font_14[14*256] = {' '};' < ${SC_DFLT_FONT}-8x14 >> font.h && uudecode < /usr/share/syscons/fonts/${SC_DFLT_FONT}-8x8.fnt && file2c 'u_char dflt_font_8[8*256] = {' '};' < ${SC_DFLT_FONT}-8x8 >> font.h
-vers.c: ${.CURDIR}/../../common/newvers.sh ${.CURDIR}/version
- sh ${.CURDIR}/../../common/newvers.sh ${.CURDIR}/version ${NEWVERSWHAT}
-
loader.help: help.common help.ps3 ${.CURDIR}/../../fdt/help.fdt
cat ${.ALLSRC} | \
awk -f ${.CURDIR}/../../common/merge_help.awk > ${.TARGET}
diff --git a/sys/boot/powerpc/ps3/main.c b/sys/boot/powerpc/ps3/main.c
index bb1fd84..76fdfa4 100644
--- a/sys/boot/powerpc/ps3/main.c
+++ b/sys/boot/powerpc/ps3/main.c
@@ -41,10 +41,7 @@ __FBSDID("$FreeBSD$");
struct arch_switch archsw;
extern void *_end;
-extern char bootprog_name[];
-extern char bootprog_rev[];
-extern char bootprog_date[];
-extern char bootprog_maker[];
+extern char bootprog_info[];
int ps3_getdev(void **vdev, const char *devspec, const char **path);
ssize_t ps3_copyin(const void *src, vm_offset_t dest, const size_t len);
@@ -133,9 +130,7 @@ main(void)
archsw.arch_readin = ps3_readin;
archsw.arch_autoload = ps3_autoload;
- printf("\n");
- printf("%s, Revision %s\n", bootprog_name, bootprog_rev);
- printf("(%s, %s)\n", bootprog_maker, bootprog_date);
+ printf("\n%s", bootprog_info);
printf("Memory: %lldKB\n", maxmem / 1024);
env_setenv("currdev", EV_VOLATILE, ps3_fmtdev(&currdev),
diff --git a/sys/boot/powerpc/ps3/ps3cdrom.c b/sys/boot/powerpc/ps3/ps3cdrom.c
index c5019e0..843ecd5 100644
--- a/sys/boot/powerpc/ps3/ps3cdrom.c
+++ b/sys/boot/powerpc/ps3/ps3cdrom.c
@@ -46,7 +46,7 @@
static int ps3cdrom_init(void);
static int ps3cdrom_strategy(void *devdata, int flag, daddr_t dblk,
- size_t offset, size_t size, char *buf, size_t *rsize);
+ size_t size, char *buf, size_t *rsize);
static int ps3cdrom_open(struct open_file *f, ...);
static int ps3cdrom_close(struct open_file *f);
static void ps3cdrom_print(int verbose);
@@ -76,7 +76,7 @@ static int ps3cdrom_init(void)
}
static int ps3cdrom_strategy(void *devdata, int flag, daddr_t dblk,
- size_t offset, size_t size, char *buf, size_t *rsize)
+ size_t size, char *buf, size_t *rsize)
{
struct ps3_devdesc *dev = (struct ps3_devdesc *) devdata;
int err;
diff --git a/sys/boot/powerpc/ps3/ps3disk.c b/sys/boot/powerpc/ps3/ps3disk.c
index 52a43f1..2247c23 100644
--- a/sys/boot/powerpc/ps3/ps3disk.c
+++ b/sys/boot/powerpc/ps3/ps3disk.c
@@ -58,7 +58,7 @@ static void ps3disk_uuid_letoh(uuid_t *uuid);
static int ps3disk_init(void);
static int ps3disk_strategy(void *devdata, int flag, daddr_t dblk,
- size_t offset, size_t size, char *buf, size_t *rsize);
+ size_t size, char *buf, size_t *rsize);
static int ps3disk_open(struct open_file *f, ...);
static int ps3disk_close(struct open_file *f);
static void ps3disk_print(int verbose);
@@ -109,7 +109,7 @@ static int ps3disk_init(void)
}
static int ps3disk_strategy(void *devdata, int flag, daddr_t dblk,
- size_t offset, size_t size, char *buf, size_t *rsize)
+ size_t size, char *buf, size_t *rsize)
{
struct ps3_devdesc *dev = (struct ps3_devdesc *) devdata;
struct open_dev *od = (struct open_dev *) dev->d_disk.data;
diff --git a/sys/boot/powerpc/uboot/Makefile b/sys/boot/powerpc/uboot/Makefile
index 681249a..6ff3acf 100644
--- a/sys/boot/powerpc/uboot/Makefile
+++ b/sys/boot/powerpc/uboot/Makefile
@@ -79,7 +79,7 @@ LIBFICL= ${.OBJDIR}/../../ficl/libficl.a
CFLAGS+= -I${.CURDIR}/../../common -I${.CURDIR}/../../..
CFLAGS+= -I.
-CLEANFILES+= vers.c ${PROG}.help
+CLEANFILES+= ${PROG}.help
CFLAGS+= -ffreestanding
@@ -102,9 +102,6 @@ CFLAGS+= -I${.CURDIR}/../../../../lib/libstand/
DPADD= ${LIBFICL} ${LIBUBOOT} ${LIBFDT} ${LIBUBOOT_FDT} ${LIBSTAND}
LDADD= ${LIBFICL} ${LIBUBOOT} ${LIBFDT} ${LIBUBOOT_FDT} ${LIBSTAND}
-vers.c: ${.CURDIR}/../../common/newvers.sh ${.CURDIR}/version
- sh ${.CURDIR}/../../common/newvers.sh ${.CURDIR}/version ${NEWVERSWHAT}
-
loader.help: help.common help.uboot ${.CURDIR}/../../fdt/help.fdt
cat ${.ALLSRC} | \
awk -f ${.CURDIR}/../../common/merge_help.awk > ${.TARGET}
diff --git a/sys/boot/sparc64/loader/Makefile b/sys/boot/sparc64/loader/Makefile
index 2884cf0..5cdbdbb 100644
--- a/sys/boot/sparc64/loader/Makefile
+++ b/sys/boot/sparc64/loader/Makefile
@@ -6,6 +6,7 @@ MAN=
PROG?= loader
NEWVERSWHAT?= "bootstrap loader" sparc64
+VERSION_FILE= ${.CURDIR}/../loader/version
INSTALLFLAGS= -b
# Architecture-specific loader code
@@ -70,7 +71,7 @@ LIBFICL= ${.OBJDIR}/../../ficl/libficl.a
CFLAGS+= -I${.CURDIR}/../../common
CFLAGS+= -I.
-CLEANFILES+= vers.c loader.help
+CLEANFILES+= loader.help
LDFLAGS= -static
@@ -84,10 +85,6 @@ CFLAGS+= -I${.CURDIR}/../../../../lib/libstand/
DPADD= ${LIBFICL} ${LIBZFSBOOT} ${LIBOFW} ${LIBSTAND}
LDADD= ${LIBFICL} ${LIBZFSBOOT} ${LIBOFW} -lstand
-vers.c: ${.CURDIR}/../../common/newvers.sh ${.CURDIR}/../loader/version
- sh ${.CURDIR}/../../common/newvers.sh ${.CURDIR}/../loader/version \
- ${NEWVERSWHAT}
-
loader.help: help.common help.sparc64
cat ${.ALLSRC} | \
awk -f ${.CURDIR}/../../common/merge_help.awk > ${.TARGET}
diff --git a/sys/boot/sparc64/loader/main.c b/sys/boot/sparc64/loader/main.c
index ae1e559..d7b4e1e 100644
--- a/sys/boot/sparc64/loader/main.c
+++ b/sys/boot/sparc64/loader/main.c
@@ -75,7 +75,7 @@ __FBSDID("$FreeBSD$");
#include "libofw.h"
#include "dev_net.h"
-extern char bootprog_name[], bootprog_rev[], bootprog_date[], bootprog_maker[];
+extern char bootprog_info[];
enum {
HEAPVA = 0x800000,
@@ -891,9 +891,7 @@ main(int (*openfirm)(void *))
env_setenv("loaddev", EV_VOLATILE, bootpath,
env_noset, env_nounset);
- printf("\n");
- printf("%s, Revision %s\n", bootprog_name, bootprog_rev);
- printf("(%s, %s)\n", bootprog_maker, bootprog_date);
+ printf("\n%s", bootprog_info);
printf("bootpath=\"%s\"\n", bootpath);
/* Give control to the machine independent loader code. */
diff --git a/sys/boot/uboot/common/main.c b/sys/boot/uboot/common/main.c
index d5c7664..3b97a6e 100644
--- a/sys/boot/uboot/common/main.c
+++ b/sys/boot/uboot/common/main.c
@@ -62,10 +62,7 @@ struct device_type {
};
extern char end[];
-extern char bootprog_name[];
-extern char bootprog_rev[];
-extern char bootprog_date[];
-extern char bootprog_maker[];
+extern char bootprog_info[];
extern unsigned char _etext[];
extern unsigned char _edata[];
@@ -431,9 +428,7 @@ main(int argc, char **argv)
cons_probe();
printf("Compatible U-Boot API signature found @%p\n", sig);
- printf("\n");
- printf("%s, Revision %s\n", bootprog_name, bootprog_rev);
- printf("(%s, %s)\n", bootprog_maker, bootprog_date);
+ printf("\n%s", bootprog_info);
printf("\n");
dump_sig(sig);
diff --git a/sys/boot/uboot/lib/disk.c b/sys/boot/uboot/lib/disk.c
index 741b1f8..1963274 100644
--- a/sys/boot/uboot/lib/disk.c
+++ b/sys/boot/uboot/lib/disk.c
@@ -73,8 +73,7 @@ static int stor_readdev(struct disk_devdesc *, daddr_t, size_t, char *);
/* devsw I/F */
static int stor_init(void);
-static int stor_strategy(void *, int, daddr_t, size_t, size_t, char *,
- size_t *);
+static int stor_strategy(void *, int, daddr_t, size_t, char *, size_t *);
static int stor_open(struct open_file *, ...);
static int stor_close(struct open_file *);
static int stor_ioctl(struct open_file *f, u_long cmd, void *data);
@@ -144,7 +143,7 @@ stor_cleanup(void)
}
static int
-stor_strategy(void *devdata, int rw, daddr_t blk, size_t offset, size_t size,
+stor_strategy(void *devdata, int rw, daddr_t blk, size_t size,
char *buf, size_t *rsize)
{
struct disk_devdesc *dev = (struct disk_devdesc *)devdata;
diff --git a/sys/boot/usb/storage/umass_loader.c b/sys/boot/usb/storage/umass_loader.c
index 85b89e8..4311b2b 100644
--- a/sys/boot/usb/storage/umass_loader.c
+++ b/sys/boot/usb/storage/umass_loader.c
@@ -48,8 +48,7 @@ static int umass_disk_open(struct open_file *,...);
static int umass_disk_close(struct open_file *);
static void umass_disk_cleanup(void);
static int umass_disk_ioctl(struct open_file *, u_long, void *);
-static int umass_disk_strategy(void *, int, daddr_t, size_t, size_t, char *,
- size_t *);
+static int umass_disk_strategy(void *, int, daddr_t, size_t, char *, size_t *);
static void umass_disk_print(int);
struct devsw umass_disk = {
@@ -85,8 +84,8 @@ umass_disk_init(void)
}
static int
-umass_disk_strategy(void *devdata, int flag, daddr_t dblk, size_t offset,
- size_t size, char *buf, size_t *rsizep)
+umass_disk_strategy(void *devdata, int flag, daddr_t dblk, size_t size,
+ char *buf, size_t *rsizep)
{
if (umass_uaa.device == NULL)
return (ENXIO);
diff --git a/sys/boot/userboot/userboot/Makefile b/sys/boot/userboot/userboot/Makefile
index ac3db42..043dbfb 100644
--- a/sys/boot/userboot/userboot/Makefile
+++ b/sys/boot/userboot/userboot/Makefile
@@ -39,11 +39,6 @@ LDFLAGS+= -nostdlib -Wl,-Bsymbolic
NEWVERSWHAT= "User boot" ${MACHINE_CPUARCH}
-vers.c: ${.CURDIR}/../../common/newvers.sh ${.CURDIR}/version
- sh ${.CURDIR}/../../common/newvers.sh ${.CURDIR}/version ${NEWVERSWHAT}
-
-CLEANFILES= vers.c
-
.if ${MK_FORTH} != "no"
BOOT_FORTH= yes
CFLAGS+= -DBOOT_FORTH -I${.CURDIR}/../../ficl -I${.CURDIR}/../../ficl/i386
diff --git a/sys/boot/userboot/userboot/host.c b/sys/boot/userboot/userboot/host.c
index 3320a7f..81858a9 100644
--- a/sys/boot/userboot/userboot/host.c
+++ b/sys/boot/userboot/userboot/host.c
@@ -167,8 +167,8 @@ host_dev_close(struct open_file *f)
}
static int
-host_dev_strategy(void *devdata, int rw, daddr_t dblk, size_t offset,
- size_t size, char *buf, size_t *rsize)
+host_dev_strategy(void *devdata, int rw, daddr_t dblk, size_t size,
+ char *buf, size_t *rsize)
{
return (ENOSYS);
diff --git a/sys/boot/userboot/userboot/main.c b/sys/boot/userboot/userboot/main.c
index 4c50400..a8ff315 100644
--- a/sys/boot/userboot/userboot/main.c
+++ b/sys/boot/userboot/userboot/main.c
@@ -51,10 +51,7 @@ static int userboot_zfs_found;
struct loader_callbacks *callbacks;
void *callbacks_arg;
-extern char bootprog_name[];
-extern char bootprog_rev[];
-extern char bootprog_date[];
-extern char bootprog_maker[];
+extern char bootprog_info[];
static jmp_buf jb;
struct arch_switch archsw; /* MI/MD interface boundary */
@@ -101,9 +98,7 @@ loader_main(struct loader_callbacks *cb, void *arg, int version, int ndisks)
*/
cons_probe();
- printf("\n");
- printf("%s, Revision %s\n", bootprog_name, bootprog_rev);
- printf("(%s, %s)\n", bootprog_maker, bootprog_date);
+ printf("\n%s", bootprog_info);
#if 0
printf("Memory: %ld k\n", memsize() / 1024);
#endif
diff --git a/sys/boot/userboot/userboot/userboot_disk.c b/sys/boot/userboot/userboot/userboot_disk.c
index 56fe7e3..ce7dee5 100644
--- a/sys/boot/userboot/userboot/userboot_disk.c
+++ b/sys/boot/userboot/userboot/userboot_disk.c
@@ -54,9 +54,9 @@ static struct userdisk_info *ud_info;
static int userdisk_init(void);
static void userdisk_cleanup(void);
static int userdisk_strategy(void *devdata, int flag, daddr_t dblk,
- size_t offset, size_t size, char *buf, size_t *rsize);
+ size_t size, char *buf, size_t *rsize);
static int userdisk_realstrategy(void *devdata, int flag, daddr_t dblk,
- size_t offset, size_t size, char *buf, size_t *rsize);
+ size_t size, char *buf, size_t *rsize);
static int userdisk_open(struct open_file *f, ...);
static int userdisk_close(struct open_file *f);
static int userdisk_ioctl(struct open_file *f, u_long cmd, void *data);
@@ -176,8 +176,8 @@ userdisk_close(struct open_file *f)
}
static int
-userdisk_strategy(void *devdata, int rw, daddr_t dblk, size_t offset,
- size_t size, char *buf, size_t *rsize)
+userdisk_strategy(void *devdata, int rw, daddr_t dblk, size_t size,
+ char *buf, size_t *rsize)
{
struct bcache_devdata bcd;
struct disk_devdesc *dev;
@@ -186,13 +186,13 @@ userdisk_strategy(void *devdata, int rw, daddr_t dblk, size_t offset,
bcd.dv_strategy = userdisk_realstrategy;
bcd.dv_devdata = devdata;
bcd.dv_cache = ud_info[dev->d_unit].ud_bcache;
- return (bcache_strategy(&bcd, rw, dblk + dev->d_offset, offset,
+ return (bcache_strategy(&bcd, rw, dblk + dev->d_offset,
size, buf, rsize));
}
static int
-userdisk_realstrategy(void *devdata, int rw, daddr_t dblk, size_t offset,
- size_t size, char *buf, size_t *rsize)
+userdisk_realstrategy(void *devdata, int rw, daddr_t dblk, size_t size,
+ char *buf, size_t *rsize)
{
struct disk_devdesc *dev = devdata;
uint64_t off;
diff --git a/sys/boot/zfs/zfs.c b/sys/boot/zfs/zfs.c
index 229bcac..466d18f 100644
--- a/sys/boot/zfs/zfs.c
+++ b/sys/boot/zfs/zfs.c
@@ -579,7 +579,7 @@ zfs_dev_close(struct open_file *f)
}
static int
-zfs_dev_strategy(void *devdata, int rw, daddr_t dblk, size_t offset, size_t size, char *buf, size_t *rsize)
+zfs_dev_strategy(void *devdata, int rw, daddr_t dblk, size_t size, char *buf, size_t *rsize)
{
return (ENOSYS);
diff --git a/sys/cam/cam_ccb.h b/sys/cam/cam_ccb.h
index 1745644..afffb91 100644
--- a/sys/cam/cam_ccb.h
+++ b/sys/cam/cam_ccb.h
@@ -780,6 +780,13 @@ struct ccb_accept_tio {
struct scsi_sense_data sense_data;
};
+static __inline uint8_t *
+atio_cdb_ptr(struct ccb_accept_tio *ccb)
+{
+ return ((ccb->ccb_h.flags & CAM_CDB_POINTER) ?
+ ccb->cdb_io.cdb_ptr : ccb->cdb_io.cdb_bytes);
+}
+
/* Release SIM Queue */
struct ccb_relsim {
struct ccb_hdr ccb_h;
diff --git a/sys/cam/cam_periph.c b/sys/cam/cam_periph.c
index 942a6c3..78b5541 100644
--- a/sys/cam/cam_periph.c
+++ b/sys/cam/cam_periph.c
@@ -1925,10 +1925,7 @@ cam_periph_devctl_notify(union ccb *ccb)
if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
sbuf_printf(&sb, "CDB=\"");
- if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0)
- scsi_cdb_sbuf(ccb->csio.cdb_io.cdb_ptr, &sb);
- else
- scsi_cdb_sbuf(ccb->csio.cdb_io.cdb_bytes, &sb);
+ scsi_cdb_sbuf(scsiio_cdb_ptr(&ccb->csio), &sb);
sbuf_printf(&sb, "\" ");
} else if (ccb->ccb_h.func_code == XPT_ATA_IO) {
sbuf_printf(&sb, "ACB=\"");
diff --git a/sys/cam/ctl/ctl.c b/sys/cam/ctl/ctl.c
index 63a8e36..b80a7d8 100644
--- a/sys/cam/ctl/ctl.c
+++ b/sys/cam/ctl/ctl.c
@@ -1,7 +1,7 @@
/*-
* Copyright (c) 2003-2009 Silicon Graphics International Corp.
* Copyright (c) 2012 The FreeBSD Foundation
- * Copyright (c) 2015 Alexander Motin <mav@FreeBSD.org>
+ * Copyright (c) 2014-2017 Alexander Motin <mav@FreeBSD.org>
* All rights reserved.
*
* Portions of this software were developed by Edward Tomasz Napierala
@@ -407,6 +407,9 @@ SYSCTL_INT(_kern_cam_ctl, OID_AUTO, worker_threads, CTLFLAG_RDTUN,
static int ctl_debug = CTL_DEBUG_NONE;
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, debug, CTLFLAG_RWTUN,
&ctl_debug, 0, "Enabled debug flags");
+static int ctl_lun_map_size = 1024;
+SYSCTL_INT(_kern_cam_ctl, OID_AUTO, lun_map_size, CTLFLAG_RWTUN,
+ &ctl_lun_map_size, 0, "Size of per-port LUN map (max LUN + 1)");
/*
* Supported pages (0x00), Serial number (0x80), Device ID (0x83),
@@ -421,10 +424,10 @@ static void ctl_isc_event_handler(ctl_ha_channel chanel, ctl_ha_event event,
static void ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest);
static void ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest);
static int ctl_init(void);
-void ctl_shutdown(void);
+static int ctl_shutdown(void);
static int ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td);
static int ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td);
-static int ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio);
+static void ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio);
static void ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
struct ctl_ooa *ooa_hdr,
struct ctl_ooa_entry *kern_entries);
@@ -434,7 +437,6 @@ static int ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *lun,
struct ctl_be_lun *be_lun);
static int ctl_free_lun(struct ctl_lun *lun);
static void ctl_create_lun(struct ctl_be_lun *be_lun);
-static struct ctl_port * ctl_io_port(struct ctl_io_hdr *io_hdr);
static int ctl_do_mode_select(union ctl_io *io);
static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun,
@@ -445,7 +447,7 @@ static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun,
struct scsi_per_res_out_parms* param);
static void ctl_pro_preempt_other(struct ctl_lun *lun,
union ctl_ha_msg *msg);
-static void ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg);
+static void ctl_hndl_per_res_out_on_other_sc(union ctl_io *io);
static int ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len);
@@ -518,6 +520,8 @@ static const struct ctl_cmd_entry *
ctl_validate_command(struct ctl_scsiio *ctsio);
static int ctl_cmd_applicable(uint8_t lun_type,
const struct ctl_cmd_entry *entry);
+static int ctl_ha_init(void);
+static int ctl_ha_shutdown(void);
static uint64_t ctl_get_prkey(struct ctl_lun *lun, uint32_t residx);
static void ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx);
@@ -559,18 +563,60 @@ MODULE_VERSION(ctl, 1);
static struct ctl_frontend ha_frontend =
{
.name = "ha",
+ .init = ctl_ha_init,
+ .shutdown = ctl_ha_shutdown,
+};
+
+static int
+ctl_ha_init(void)
+{
+ struct ctl_softc *softc = control_softc;
+
+ if (ctl_pool_create(softc, "othersc", CTL_POOL_ENTRIES_OTHER_SC,
+ &softc->othersc_pool) != 0)
+ return (ENOMEM);
+ if (ctl_ha_msg_init(softc) != CTL_HA_STATUS_SUCCESS) {
+ ctl_pool_free(softc->othersc_pool);
+ return (EIO);
+ }
+ if (ctl_ha_msg_register(CTL_HA_CHAN_CTL, ctl_isc_event_handler)
+ != CTL_HA_STATUS_SUCCESS) {
+ ctl_ha_msg_destroy(softc);
+ ctl_pool_free(softc->othersc_pool);
+ return (EIO);
+ }
+ return (0);
+};
+
+static int
+ctl_ha_shutdown(void)
+{
+ struct ctl_softc *softc = control_softc;
+ struct ctl_port *port;
+
+ ctl_ha_msg_shutdown(softc);
+ if (ctl_ha_msg_deregister(CTL_HA_CHAN_CTL) != CTL_HA_STATUS_SUCCESS)
+ return (EIO);
+ if (ctl_ha_msg_destroy(softc) != CTL_HA_STATUS_SUCCESS)
+ return (EIO);
+ ctl_pool_free(softc->othersc_pool);
+ while ((port = STAILQ_FIRST(&ha_frontend.port_list)) != NULL) {
+ ctl_port_deregister(port);
+ free(port->port_name, M_CTL);
+ free(port, M_CTL);
+ }
+ return (0);
};
static void
ctl_ha_datamove(union ctl_io *io)
{
- struct ctl_lun *lun;
+ struct ctl_lun *lun = CTL_LUN(io);
struct ctl_sg_entry *sgl;
union ctl_ha_msg msg;
uint32_t sg_entries_sent;
int do_sg_copy, i, j;
- lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
memset(&msg.dt, 0, sizeof(msg.dt));
msg.hdr.msg_type = CTL_MSG_DATAMOVE;
msg.hdr.original_sc = io->io_hdr.original_sc;
@@ -695,8 +741,6 @@ ctl_ha_done(union ctl_io *io)
msg.scsi.tag_num = io->scsiio.tag_num;
msg.scsi.tag_type = io->scsiio.tag_type;
msg.scsi.sense_len = io->scsiio.sense_len;
- msg.scsi.sense_residual = io->scsiio.sense_residual;
- msg.scsi.residual = io->scsiio.residual;
memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data,
io->scsiio.sense_len);
ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
@@ -724,8 +768,6 @@ ctl_isc_handler_finish_xfer(struct ctl_softc *ctl_softc,
ctsio->io_hdr.status = msg_info->hdr.status;
ctsio->scsi_status = msg_info->scsi.scsi_status;
ctsio->sense_len = msg_info->scsi.sense_len;
- ctsio->sense_residual = msg_info->scsi.sense_residual;
- ctsio->residual = msg_info->scsi.residual;
memcpy(&ctsio->sense_data, &msg_info->scsi.sense_data,
msg_info->scsi.sense_len);
ctl_enqueue_isc((union ctl_io *)ctsio);
@@ -826,7 +868,7 @@ ctl_isc_announce_port(struct ctl_port *port)
return;
i = sizeof(msg->port) + strlen(port->port_name) + 1;
if (port->lun_map)
- i += sizeof(uint32_t) * CTL_MAX_LUNS;
+ i += port->lun_map_size * sizeof(uint32_t);
if (port->port_devid)
i += port->port_devid->len;
if (port->target_devid)
@@ -846,7 +888,7 @@ ctl_isc_announce_port(struct ctl_port *port)
"%d:%s", softc->ha_id, port->port_name) + 1;
i += msg->port.name_len;
if (port->lun_map) {
- msg->port.lun_map_len = sizeof(uint32_t) * CTL_MAX_LUNS;
+ msg->port.lun_map_len = port->lun_map_size * sizeof(uint32_t);
memcpy(&msg->port.data[i], port->lun_map,
msg->port.lun_map_len);
i += msg->port.lun_map_len;
@@ -1024,27 +1066,27 @@ ctl_isc_ua(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
uint32_t iid = ctl_get_initindex(&msg->hdr.nexus);
mtx_lock(&softc->ctl_lock);
- if (msg->hdr.nexus.targ_lun < CTL_MAX_LUNS &&
- (lun = softc->ctl_luns[msg->hdr.nexus.targ_mapped_lun]) != NULL) {
- mtx_lock(&lun->lun_lock);
- mtx_unlock(&softc->ctl_lock);
- if (msg->ua.ua_type == CTL_UA_THIN_PROV_THRES &&
- msg->ua.ua_set)
- memcpy(lun->ua_tpt_info, msg->ua.ua_info, 8);
- if (msg->ua.ua_all) {
- if (msg->ua.ua_set)
- ctl_est_ua_all(lun, iid, msg->ua.ua_type);
- else
- ctl_clr_ua_all(lun, iid, msg->ua.ua_type);
- } else {
- if (msg->ua.ua_set)
- ctl_est_ua(lun, iid, msg->ua.ua_type);
- else
- ctl_clr_ua(lun, iid, msg->ua.ua_type);
- }
- mtx_unlock(&lun->lun_lock);
- } else
+ if (msg->hdr.nexus.targ_mapped_lun >= CTL_MAX_LUNS ||
+ (lun = softc->ctl_luns[msg->hdr.nexus.targ_mapped_lun]) == NULL) {
mtx_unlock(&softc->ctl_lock);
+ return;
+ }
+ mtx_lock(&lun->lun_lock);
+ mtx_unlock(&softc->ctl_lock);
+ if (msg->ua.ua_type == CTL_UA_THIN_PROV_THRES && msg->ua.ua_set)
+ memcpy(lun->ua_tpt_info, msg->ua.ua_info, 8);
+ if (msg->ua.ua_all) {
+ if (msg->ua.ua_set)
+ ctl_est_ua_all(lun, iid, msg->ua.ua_type);
+ else
+ ctl_clr_ua_all(lun, iid, msg->ua.ua_type);
+ } else {
+ if (msg->ua.ua_set)
+ ctl_est_ua(lun, iid, msg->ua.ua_type);
+ else
+ ctl_clr_ua(lun, iid, msg->ua.ua_type);
+ }
+ mtx_unlock(&lun->lun_lock);
}
static void
@@ -1058,8 +1100,8 @@ ctl_isc_lun_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
targ_lun = msg->hdr.nexus.targ_mapped_lun;
mtx_lock(&softc->ctl_lock);
- if ((targ_lun >= CTL_MAX_LUNS) ||
- ((lun = softc->ctl_luns[targ_lun]) == NULL)) {
+ if (targ_lun >= CTL_MAX_LUNS ||
+ (lun = softc->ctl_luns[targ_lun]) == NULL) {
mtx_unlock(&softc->ctl_lock);
return;
}
@@ -1074,7 +1116,7 @@ ctl_isc_lun_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
memcmp(&msg->lun.data[0], lun->lun_devid->data, i) != 0)) {
mtx_unlock(&lun->lun_lock);
printf("%s: Received conflicting HA LUN %d\n",
- __func__, msg->hdr.nexus.targ_lun);
+ __func__, targ_lun);
return;
} else {
/* Record whether peer is primary. */
@@ -1108,7 +1150,7 @@ ctl_isc_lun_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
mtx_unlock(&lun->lun_lock);
CTL_DEBUG_PRINT(("%s: Known LUN %d, peer is %s\n",
- __func__, msg->hdr.nexus.targ_lun,
+ __func__, targ_lun,
(msg->lun.flags & CTL_LUN_PRIMARY_SC) ?
"primary" : "secondary"));
@@ -1155,19 +1197,25 @@ ctl_isc_port_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
M_CTL);
i += msg->port.name_len;
if (msg->port.lun_map_len != 0) {
- if (port->lun_map == NULL)
- port->lun_map = malloc(sizeof(uint32_t) * CTL_MAX_LUNS,
+ if (port->lun_map == NULL ||
+ port->lun_map_size * sizeof(uint32_t) <
+ msg->port.lun_map_len) {
+ port->lun_map_size = 0;
+ free(port->lun_map, M_CTL);
+ port->lun_map = malloc(msg->port.lun_map_len,
M_CTL, M_WAITOK);
- memcpy(port->lun_map, &msg->port.data[i],
- sizeof(uint32_t) * CTL_MAX_LUNS);
+ }
+ memcpy(port->lun_map, &msg->port.data[i], msg->port.lun_map_len);
+ port->lun_map_size = msg->port.lun_map_len / sizeof(uint32_t);
i += msg->port.lun_map_len;
} else {
+ port->lun_map_size = 0;
free(port->lun_map, M_CTL);
port->lun_map = NULL;
}
if (msg->port.port_devid_len != 0) {
if (port->port_devid == NULL ||
- port->port_devid->len != msg->port.port_devid_len) {
+ port->port_devid->len < msg->port.port_devid_len) {
free(port->port_devid, M_CTL);
port->port_devid = malloc(sizeof(struct ctl_devid) +
msg->port.port_devid_len, M_CTL, M_WAITOK);
@@ -1182,7 +1230,7 @@ ctl_isc_port_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
}
if (msg->port.target_devid_len != 0) {
if (port->target_devid == NULL ||
- port->target_devid->len != msg->port.target_devid_len) {
+ port->target_devid->len < msg->port.target_devid_len) {
free(port->target_devid, M_CTL);
port->target_devid = malloc(sizeof(struct ctl_devid) +
msg->port.target_devid_len, M_CTL, M_WAITOK);
@@ -1197,7 +1245,7 @@ ctl_isc_port_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
}
if (msg->port.init_devid_len != 0) {
if (port->init_devid == NULL ||
- port->init_devid->len != msg->port.init_devid_len) {
+ port->init_devid->len < msg->port.init_devid_len) {
free(port->init_devid, M_CTL);
port->init_devid = malloc(sizeof(struct ctl_devid) +
msg->port.init_devid_len, M_CTL, M_WAITOK);
@@ -1218,7 +1266,7 @@ ctl_isc_port_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
}
mtx_lock(&softc->ctl_lock);
STAILQ_FOREACH(lun, &softc->lun_list, links) {
- if (ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS)
+ if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX)
continue;
mtx_lock(&lun->lun_lock);
ctl_est_ua_all(lun, -1, CTL_UA_INQ_CHANGE);
@@ -1289,8 +1337,8 @@ ctl_isc_mode_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
targ_lun = msg->hdr.nexus.targ_mapped_lun;
mtx_lock(&softc->ctl_lock);
- if ((targ_lun >= CTL_MAX_LUNS) ||
- ((lun = softc->ctl_luns[targ_lun]) == NULL)) {
+ if (targ_lun >= CTL_MAX_LUNS ||
+ (lun = softc->ctl_luns[targ_lun]) == NULL) {
mtx_unlock(&softc->ctl_lock);
return;
}
@@ -1488,13 +1536,12 @@ ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
io->io_hdr.msg_type = CTL_MSG_DATAMOVE_DONE;
io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG;
io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
- io->io_hdr.port_status = msg->scsi.fetd_status;
- io->scsiio.residual = msg->scsi.residual;
+ io->io_hdr.port_status = msg->scsi.port_status;
+ io->scsiio.kern_data_resid = msg->scsi.kern_data_resid;
if (msg->hdr.status != CTL_STATUS_NONE) {
io->io_hdr.status = msg->hdr.status;
io->scsiio.scsi_status = msg->scsi.scsi_status;
io->scsiio.sense_len = msg->scsi.sense_len;
- io->scsiio.sense_residual =msg->scsi.sense_residual;
memcpy(&io->scsiio.sense_data,
&msg->scsi.sense_data,
msg->scsi.sense_len);
@@ -1780,7 +1827,6 @@ ctl_init(void)
{
struct make_dev_args args;
struct ctl_softc *softc;
- void *other_pool;
int i, error;
softc = control_softc = malloc(sizeof(*control_softc), M_DEVBUF,
@@ -1794,7 +1840,8 @@ ctl_init(void)
args.mda_si_drv1 = softc;
error = make_dev_s(&args, &softc->dev, "cam/ctl");
if (error != 0) {
- free(control_softc, M_DEVBUF);
+ free(softc, M_DEVBUF);
+ control_softc = NULL;
return (error);
}
@@ -1806,7 +1853,7 @@ ctl_init(void)
if (softc->sysctl_tree == NULL) {
printf("%s: unable to allocate sysctl tree\n", __func__);
destroy_dev(softc->dev);
- free(control_softc, M_DEVBUF);
+ free(softc, M_DEVBUF);
control_softc = NULL;
return (ENOMEM);
}
@@ -1852,15 +1899,6 @@ ctl_init(void)
STAILQ_INIT(&softc->be_list);
ctl_tpc_init(softc);
- if (ctl_pool_create(softc, "othersc", CTL_POOL_ENTRIES_OTHER_SC,
- &other_pool) != 0)
- {
- printf("ctl: can't allocate %d entry other SC pool, "
- "exiting\n", CTL_POOL_ENTRIES_OTHER_SC);
- return (ENOMEM);
- }
- softc->othersc_pool = other_pool;
-
if (worker_threads <= 0)
worker_threads = max(1, mp_ncpus / 4);
if (worker_threads > CTL_MAX_THREADS)
@@ -1880,22 +1918,19 @@ ctl_init(void)
&softc->ctl_proc, &thr->thread, 0, 0, "ctl", "work%d", i);
if (error != 0) {
printf("error creating CTL work thread!\n");
- ctl_pool_free(other_pool);
return (error);
}
}
error = kproc_kthread_add(ctl_lun_thread, softc,
- &softc->ctl_proc, NULL, 0, 0, "ctl", "lun");
+ &softc->ctl_proc, &softc->lun_thread, 0, 0, "ctl", "lun");
if (error != 0) {
printf("error creating CTL lun thread!\n");
- ctl_pool_free(other_pool);
return (error);
}
error = kproc_kthread_add(ctl_thresh_thread, softc,
- &softc->ctl_proc, NULL, 0, 0, "ctl", "thresh");
+ &softc->ctl_proc, &softc->thresh_thread, 0, 0, "ctl", "thresh");
if (error != 0) {
printf("error creating CTL threshold thread!\n");
- ctl_pool_free(other_pool);
return (error);
}
@@ -1904,58 +1939,54 @@ ctl_init(void)
softc, 0, ctl_ha_role_sysctl, "I", "HA role for this head");
if (softc->is_single == 0) {
- ctl_frontend_register(&ha_frontend);
- if (ctl_ha_msg_init(softc) != CTL_HA_STATUS_SUCCESS) {
- printf("ctl_init: ctl_ha_msg_init failed.\n");
- softc->is_single = 1;
- } else
- if (ctl_ha_msg_register(CTL_HA_CHAN_CTL, ctl_isc_event_handler)
- != CTL_HA_STATUS_SUCCESS) {
- printf("ctl_init: ctl_ha_msg_register failed.\n");
+ if (ctl_frontend_register(&ha_frontend) != 0)
softc->is_single = 1;
- }
}
return (0);
}
-void
+static int
ctl_shutdown(void)
{
struct ctl_softc *softc = control_softc;
- struct ctl_lun *lun, *next_lun;
+ int i;
- if (softc->is_single == 0) {
- ctl_ha_msg_shutdown(softc);
- if (ctl_ha_msg_deregister(CTL_HA_CHAN_CTL)
- != CTL_HA_STATUS_SUCCESS)
- printf("%s: ctl_ha_msg_deregister failed.\n", __func__);
- if (ctl_ha_msg_destroy(softc) != CTL_HA_STATUS_SUCCESS)
- printf("%s: ctl_ha_msg_destroy failed.\n", __func__);
+ if (softc->is_single == 0)
ctl_frontend_deregister(&ha_frontend);
- }
- mtx_lock(&softc->ctl_lock);
-
- STAILQ_FOREACH_SAFE(lun, &softc->lun_list, links, next_lun)
- ctl_free_lun(lun);
-
- mtx_unlock(&softc->ctl_lock);
+ destroy_dev(softc->dev);
-#if 0
- ctl_shutdown_thread(softc->work_thread);
- mtx_destroy(&softc->queue_lock);
-#endif
+ /* Shutdown CTL threads. */
+ softc->shutdown = 1;
+ for (i = 0; i < worker_threads; i++) {
+ struct ctl_thread *thr = &softc->threads[i];
+ while (thr->thread != NULL) {
+ wakeup(thr);
+ if (thr->thread != NULL)
+ pause("CTL thr shutdown", 1);
+ }
+ mtx_destroy(&thr->queue_lock);
+ }
+ while (softc->lun_thread != NULL) {
+ wakeup(&softc->pending_lun_queue);
+ if (softc->lun_thread != NULL)
+ pause("CTL thr shutdown", 1);
+ }
+ while (softc->thresh_thread != NULL) {
+ wakeup(softc->thresh_thread);
+ if (softc->thresh_thread != NULL)
+ pause("CTL thr shutdown", 1);
+ }
ctl_tpc_shutdown(softc);
uma_zdestroy(softc->io_zone);
mtx_destroy(&softc->ctl_lock);
- destroy_dev(softc->dev);
-
sysctl_ctx_free(&softc->sysctl_ctx);
- free(control_softc, M_DEVBUF);
+ free(softc, M_DEVBUF);
control_softc = NULL;
+ return (0);
}
static int
@@ -1966,7 +1997,7 @@ ctl_module_event_handler(module_t mod, int what, void *arg)
case MOD_LOAD:
return (ctl_init());
case MOD_UNLOAD:
- return (EBUSY);
+ return (ctl_shutdown());
default:
return (EOPNOTSUPP);
}
@@ -2193,22 +2224,19 @@ ctl_create_iid(struct ctl_port *port, int iid, uint8_t *buf)
* command on this side (XFER mode) or tell the other side to execute it
* (SER_ONLY mode).
*/
-static int
+static void
ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio)
{
- struct ctl_softc *softc = control_softc;
+ struct ctl_softc *softc = CTL_SOFTC(ctsio);
+ struct ctl_port *port = CTL_PORT(ctsio);
union ctl_ha_msg msg_info;
- struct ctl_port *port;
struct ctl_lun *lun;
const struct ctl_cmd_entry *entry;
- int retval = 0;
uint32_t targ_lun;
targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun;
- mtx_lock(&softc->ctl_lock);
/* Make sure that we know about this port. */
- port = ctl_io_port(&ctsio->io_hdr);
if (port == NULL || (port->status & CTL_PORT_STATUS_ONLINE) == 0) {
ctl_set_internal_failure(ctsio, /*sks_valid*/ 0,
/*retry_count*/ 1);
@@ -2216,24 +2244,11 @@ ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio)
}
/* Make sure that we know about this LUN. */
- if ((targ_lun < CTL_MAX_LUNS) &&
- ((lun = softc->ctl_luns[targ_lun]) != NULL)) {
- mtx_lock(&lun->lun_lock);
- mtx_unlock(&softc->ctl_lock);
- /*
- * If the LUN is invalid, pretend that it doesn't exist.
- * It will go away as soon as all pending I/O has been
- * completed.
- */
- if (lun->flags & CTL_LUN_DISABLED) {
- mtx_unlock(&lun->lun_lock);
- lun = NULL;
- }
- } else {
+ mtx_lock(&softc->ctl_lock);
+ if (targ_lun >= CTL_MAX_LUNS ||
+ (lun = softc->ctl_luns[targ_lun]) == NULL) {
mtx_unlock(&softc->ctl_lock);
- lun = NULL;
- }
- if (lun == NULL) {
+
/*
* The other node would not send this request to us unless
* received announce that we are primary node for this LUN.
@@ -2243,6 +2258,18 @@ ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio)
ctl_set_busy(ctsio);
goto badjuju;
}
+ mtx_lock(&lun->lun_lock);
+ mtx_unlock(&softc->ctl_lock);
+
+ /*
+ * If the LUN is invalid, pretend that it doesn't exist.
+ * It will go away as soon as all pending I/Os completed.
+ */
+ if (lun->flags & CTL_LUN_DISABLED) {
+ mtx_unlock(&lun->lun_lock);
+ ctl_set_busy(ctsio);
+ goto badjuju;
+ }
entry = ctl_get_cmd_entry(ctsio, NULL);
if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) {
@@ -2250,8 +2277,8 @@ ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio)
goto badjuju;
}
- ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = lun;
- ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = lun->be_lun;
+ CTL_LUN(ctsio) = lun;
+ CTL_BACKEND_LUN(ctsio) = lun->be_lun;
/*
* Every I/O goes into the OOA queue for a
@@ -2314,10 +2341,9 @@ badjuju:
msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
sizeof(msg_info.scsi), M_WAITOK);
- retval = 1;
+ ctl_free_io((union ctl_io *)ctsio);
break;
}
- return (retval);
}
/*
@@ -2564,6 +2590,7 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
struct thread *td)
{
struct ctl_softc *softc = dev->si_drv1;
+ struct ctl_port *port;
struct ctl_lun *lun;
int retval;
@@ -2685,9 +2712,9 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
}
mtx_lock(&softc->ctl_lock);
- if (((ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) == 0)
- && ((ooa_hdr->lun_num >= CTL_MAX_LUNS)
- || (softc->ctl_luns[ooa_hdr->lun_num] == NULL))) {
+ if ((ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) == 0 &&
+ (ooa_hdr->lun_num >= CTL_MAX_LUNS ||
+ softc->ctl_luns[ooa_hdr->lun_num] == NULL)) {
mtx_unlock(&softc->ctl_lock);
free(entries, M_CTL);
printf("%s: CTL_GET_OOA: invalid LUN %ju\n",
@@ -2739,89 +2766,75 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
#ifdef CTL_IO_DELAY
mtx_lock(&softc->ctl_lock);
-
- if ((delay_info->lun_id >= CTL_MAX_LUNS)
- || (softc->ctl_luns[delay_info->lun_id] == NULL)) {
+ if (delay_info->lun_id >= CTL_MAX_LUNS ||
+ (lun = softc->ctl_luns[delay_info->lun_id]) == NULL) {
+ mtx_unlock(&softc->ctl_lock);
delay_info->status = CTL_DELAY_STATUS_INVALID_LUN;
- } else {
- lun = softc->ctl_luns[delay_info->lun_id];
- mtx_lock(&lun->lun_lock);
-
- delay_info->status = CTL_DELAY_STATUS_OK;
-
- switch (delay_info->delay_type) {
- case CTL_DELAY_TYPE_CONT:
- break;
- case CTL_DELAY_TYPE_ONESHOT:
- break;
- default:
- delay_info->status =
- CTL_DELAY_STATUS_INVALID_TYPE;
- break;
- }
-
- switch (delay_info->delay_loc) {
- case CTL_DELAY_LOC_DATAMOVE:
- lun->delay_info.datamove_type =
- delay_info->delay_type;
- lun->delay_info.datamove_delay =
- delay_info->delay_secs;
- break;
- case CTL_DELAY_LOC_DONE:
- lun->delay_info.done_type =
- delay_info->delay_type;
- lun->delay_info.done_delay =
- delay_info->delay_secs;
- break;
- default:
- delay_info->status =
- CTL_DELAY_STATUS_INVALID_LOC;
- break;
- }
- mtx_unlock(&lun->lun_lock);
+ break;
}
-
+ mtx_lock(&lun->lun_lock);
mtx_unlock(&softc->ctl_lock);
+ delay_info->status = CTL_DELAY_STATUS_OK;
+ switch (delay_info->delay_type) {
+ case CTL_DELAY_TYPE_CONT:
+ case CTL_DELAY_TYPE_ONESHOT:
+ break;
+ default:
+ delay_info->status = CTL_DELAY_STATUS_INVALID_TYPE;
+ break;
+ }
+ switch (delay_info->delay_loc) {
+ case CTL_DELAY_LOC_DATAMOVE:
+ lun->delay_info.datamove_type = delay_info->delay_type;
+ lun->delay_info.datamove_delay = delay_info->delay_secs;
+ break;
+ case CTL_DELAY_LOC_DONE:
+ lun->delay_info.done_type = delay_info->delay_type;
+ lun->delay_info.done_delay = delay_info->delay_secs;
+ break;
+ default:
+ delay_info->status = CTL_DELAY_STATUS_INVALID_LOC;
+ break;
+ }
+ mtx_unlock(&lun->lun_lock);
#else
delay_info->status = CTL_DELAY_STATUS_NOT_IMPLEMENTED;
#endif /* CTL_IO_DELAY */
break;
}
+#ifdef CTL_LEGACY_STATS
case CTL_GETSTATS: {
- struct ctl_stats *stats;
+ struct ctl_stats *stats = (struct ctl_stats *)addr;
int i;
- stats = (struct ctl_stats *)addr;
-
- if ((sizeof(struct ctl_lun_io_stats) * softc->num_luns) >
- stats->alloc_len) {
- stats->status = CTL_SS_NEED_MORE_SPACE;
- stats->num_luns = softc->num_luns;
- break;
- }
/*
* XXX KDM no locking here. If the LUN list changes,
* things can blow up.
*/
i = 0;
+ stats->status = CTL_SS_OK;
+ stats->fill_len = 0;
STAILQ_FOREACH(lun, &softc->lun_list, links) {
- retval = copyout(&lun->stats, &stats->lun_stats[i++],
- sizeof(lun->stats));
+ if (stats->fill_len + sizeof(lun->legacy_stats) >
+ stats->alloc_len) {
+ stats->status = CTL_SS_NEED_MORE_SPACE;
+ break;
+ }
+ retval = copyout(&lun->legacy_stats, &stats->lun_stats[i++],
+ sizeof(lun->legacy_stats));
if (retval != 0)
break;
+ stats->fill_len += sizeof(lun->legacy_stats);
}
stats->num_luns = softc->num_luns;
- stats->fill_len = sizeof(struct ctl_lun_io_stats) *
- softc->num_luns;
- stats->status = CTL_SS_OK;
-#ifdef CTL_TIME_IO
- stats->flags = CTL_STATS_FLAG_TIME_VALID;
-#else
stats->flags = CTL_STATS_FLAG_NONE;
+#ifdef CTL_TIME_IO
+ stats->flags |= CTL_STATS_FLAG_TIME_VALID;
#endif
getnanouptime(&stats->timestamp);
break;
}
+#endif /* CTL_LEGACY_STATS */
case CTL_ERROR_INJECT: {
struct ctl_error_desc *err_desc, *new_err_desc;
@@ -2832,8 +2845,8 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
bcopy(err_desc, new_err_desc, sizeof(*new_err_desc));
mtx_lock(&softc->ctl_lock);
- lun = softc->ctl_luns[err_desc->lun_id];
- if (lun == NULL) {
+ if (err_desc->lun_id >= CTL_MAX_LUNS ||
+ (lun = softc->ctl_luns[err_desc->lun_id]) == NULL) {
mtx_unlock(&softc->ctl_lock);
free(new_err_desc, M_CTL);
printf("%s: CTL_ERROR_INJECT: invalid LUN %ju\n",
@@ -2876,8 +2889,8 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
delete_done = 0;
mtx_lock(&softc->ctl_lock);
- lun = softc->ctl_luns[delete_desc->lun_id];
- if (lun == NULL) {
+ if (delete_desc->lun_id >= CTL_MAX_LUNS ||
+ (lun = softc->ctl_luns[delete_desc->lun_id]) == NULL) {
mtx_unlock(&softc->ctl_lock);
printf("%s: CTL_ERROR_INJECT_DELETE: invalid LUN %ju\n",
__func__, (uintmax_t)delete_desc->lun_id);
@@ -2906,18 +2919,18 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
break;
}
case CTL_DUMP_STRUCTS: {
- int i, j, k;
+ int j, k;
struct ctl_port *port;
struct ctl_frontend *fe;
mtx_lock(&softc->ctl_lock);
printf("CTL Persistent Reservation information start:\n");
- for (i = 0; i < CTL_MAX_LUNS; i++) {
- lun = softc->ctl_luns[i];
-
- if ((lun == NULL)
- || ((lun->flags & CTL_LUN_DISABLED) != 0))
+ STAILQ_FOREACH(lun, &softc->lun_list, links) {
+ mtx_lock(&lun->lun_lock);
+ if ((lun->flags & CTL_LUN_DISABLED) != 0) {
+ mtx_unlock(&lun->lun_lock);
continue;
+ }
for (j = 0; j < CTL_MAX_PORTS; j++) {
if (lun->pr_keys[j] == NULL)
@@ -2925,11 +2938,12 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
for (k = 0; k < CTL_MAX_INIT_PER_PORT; k++){
if (lun->pr_keys[j][k] == 0)
continue;
- printf(" LUN %d port %d iid %d key "
- "%#jx\n", i, j, k,
+ printf(" LUN %ju port %d iid %d key "
+ "%#jx\n", lun->lun, j, k,
(uintmax_t)lun->pr_keys[j][k]);
}
}
+ mtx_unlock(&lun->lun_lock);
}
printf("CTL Persistent Reservation information end\n");
printf("CTL Ports:\n");
@@ -3310,9 +3324,9 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
if (port->lun_map != NULL) {
sbuf_printf(sb, "\t<lun_map>on</lun_map>\n");
- for (j = 0; j < CTL_MAX_LUNS; j++) {
+ for (j = 0; j < port->lun_map_size; j++) {
plun = ctl_lun_map_from_port(port, j);
- if (plun >= CTL_MAX_LUNS)
+ if (plun == UINT32_MAX)
continue;
sbuf_printf(sb,
"\t<lun id=\"%u\">%u</lun>\n",
@@ -3380,8 +3394,8 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
}
if (port->status & CTL_PORT_STATUS_ONLINE) {
STAILQ_FOREACH(lun, &softc->lun_list, links) {
- if (ctl_lun_map_to_port(port, lun->lun) >=
- CTL_MAX_LUNS)
+ if (ctl_lun_map_to_port(port, lun->lun) ==
+ UINT32_MAX)
continue;
mtx_lock(&lun->lun_lock);
ctl_est_ua_port(lun, lm->port, -1,
@@ -3390,7 +3404,7 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
}
}
mtx_unlock(&softc->ctl_lock); // XXX: port_enable sleeps
- if (lm->plun < CTL_MAX_LUNS) {
+ if (lm->plun != UINT32_MAX) {
if (lm->lun == UINT32_MAX)
retval = ctl_lun_map_unset(port, lm->plun);
else if (lm->lun < CTL_MAX_LUNS &&
@@ -3398,17 +3412,82 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
retval = ctl_lun_map_set(port, lm->plun, lm->lun);
else
return (ENXIO);
- } else if (lm->plun == UINT32_MAX) {
+ } else {
if (lm->lun == UINT32_MAX)
retval = ctl_lun_map_deinit(port);
else
retval = ctl_lun_map_init(port);
- } else
- return (ENXIO);
+ }
if (port->status & CTL_PORT_STATUS_ONLINE)
ctl_isc_announce_port(port);
break;
}
+ case CTL_GET_LUN_STATS: {
+ struct ctl_get_io_stats *stats = (struct ctl_get_io_stats *)addr;
+ int i;
+
+ /*
+ * XXX KDM no locking here. If the LUN list changes,
+ * things can blow up.
+ */
+ i = 0;
+ stats->status = CTL_SS_OK;
+ stats->fill_len = 0;
+ STAILQ_FOREACH(lun, &softc->lun_list, links) {
+ if (lun->lun < stats->first_item)
+ continue;
+ if (stats->fill_len + sizeof(lun->stats) >
+ stats->alloc_len) {
+ stats->status = CTL_SS_NEED_MORE_SPACE;
+ break;
+ }
+ retval = copyout(&lun->stats, &stats->stats[i++],
+ sizeof(lun->stats));
+ if (retval != 0)
+ break;
+ stats->fill_len += sizeof(lun->stats);
+ }
+ stats->num_items = softc->num_luns;
+ stats->flags = CTL_STATS_FLAG_NONE;
+#ifdef CTL_TIME_IO
+ stats->flags |= CTL_STATS_FLAG_TIME_VALID;
+#endif
+ getnanouptime(&stats->timestamp);
+ break;
+ }
+ case CTL_GET_PORT_STATS: {
+ struct ctl_get_io_stats *stats = (struct ctl_get_io_stats *)addr;
+ int i;
+
+ /*
+ * XXX KDM no locking here. If the LUN list changes,
+ * things can blow up.
+ */
+ i = 0;
+ stats->status = CTL_SS_OK;
+ stats->fill_len = 0;
+ STAILQ_FOREACH(port, &softc->port_list, links) {
+ if (port->targ_port < stats->first_item)
+ continue;
+ if (stats->fill_len + sizeof(port->stats) >
+ stats->alloc_len) {
+ stats->status = CTL_SS_NEED_MORE_SPACE;
+ break;
+ }
+ retval = copyout(&port->stats, &stats->stats[i++],
+ sizeof(port->stats));
+ if (retval != 0)
+ break;
+ stats->fill_len += sizeof(port->stats);
+ }
+ stats->num_items = softc->num_ports;
+ stats->flags = CTL_STATS_FLAG_NONE;
+#ifdef CTL_TIME_IO
+ stats->flags |= CTL_STATS_FLAG_TIME_VALID;
+#endif
+ getnanouptime(&stats->timestamp);
+ break;
+ }
default: {
/* XXX KDM should we fix this? */
#if 0
@@ -3457,15 +3536,20 @@ ctl_lun_map_init(struct ctl_port *port)
{
struct ctl_softc *softc = port->ctl_softc;
struct ctl_lun *lun;
+ int size = ctl_lun_map_size;
uint32_t i;
- if (port->lun_map == NULL)
- port->lun_map = malloc(sizeof(uint32_t) * CTL_MAX_LUNS,
+ if (port->lun_map == NULL || port->lun_map_size < size) {
+ port->lun_map_size = 0;
+ free(port->lun_map, M_CTL);
+ port->lun_map = malloc(size * sizeof(uint32_t),
M_CTL, M_NOWAIT);
+ }
if (port->lun_map == NULL)
return (ENOMEM);
- for (i = 0; i < CTL_MAX_LUNS; i++)
+ for (i = 0; i < size; i++)
port->lun_map[i] = UINT32_MAX;
+ port->lun_map_size = size;
if (port->status & CTL_PORT_STATUS_ONLINE) {
if (port->lun_disable != NULL) {
STAILQ_FOREACH(lun, &softc->lun_list, links)
@@ -3484,6 +3568,7 @@ ctl_lun_map_deinit(struct ctl_port *port)
if (port->lun_map == NULL)
return (0);
+ port->lun_map_size = 0;
free(port->lun_map, M_CTL);
port->lun_map = NULL;
if (port->status & CTL_PORT_STATUS_ONLINE) {
@@ -3507,9 +3592,11 @@ ctl_lun_map_set(struct ctl_port *port, uint32_t plun, uint32_t glun)
if (status != 0)
return (status);
}
+ if (plun >= port->lun_map_size)
+ return (EINVAL);
old = port->lun_map[plun];
port->lun_map[plun] = glun;
- if ((port->status & CTL_PORT_STATUS_ONLINE) && old >= CTL_MAX_LUNS) {
+ if ((port->status & CTL_PORT_STATUS_ONLINE) && old == UINT32_MAX) {
if (port->lun_enable != NULL)
port->lun_enable(port->targ_lun_arg, plun);
ctl_isc_announce_port(port);
@@ -3522,11 +3609,11 @@ ctl_lun_map_unset(struct ctl_port *port, uint32_t plun)
{
uint32_t old;
- if (port->lun_map == NULL)
+ if (port->lun_map == NULL || plun >= port->lun_map_size)
return (0);
old = port->lun_map[plun];
port->lun_map[plun] = UINT32_MAX;
- if ((port->status & CTL_PORT_STATUS_ONLINE) && old < CTL_MAX_LUNS) {
+ if ((port->status & CTL_PORT_STATUS_ONLINE) && old != UINT32_MAX) {
if (port->lun_disable != NULL)
port->lun_disable(port->targ_lun_arg, plun);
ctl_isc_announce_port(port);
@@ -3540,8 +3627,10 @@ ctl_lun_map_from_port(struct ctl_port *port, uint32_t lun_id)
if (port == NULL)
return (UINT32_MAX);
- if (port->lun_map == NULL || lun_id >= CTL_MAX_LUNS)
+ if (port->lun_map == NULL)
return (lun_id);
+ if (lun_id > port->lun_map_size)
+ return (UINT32_MAX);
return (port->lun_map[lun_id]);
}
@@ -3554,7 +3643,7 @@ ctl_lun_map_to_port(struct ctl_port *port, uint32_t lun_id)
return (UINT32_MAX);
if (port->lun_map == NULL)
return (lun_id);
- for (i = 0; i < CTL_MAX_LUNS; i++) {
+ for (i = 0; i < port->lun_map_size; i++) {
if (port->lun_map[i] == lun_id)
return (i);
}
@@ -3622,13 +3711,6 @@ ctl_encode_lun(uint32_t decoded)
return ((((uint64_t)RPL_LUNDATA_ATYP_EXTLUN | 0x22) << 56) | (l << 16));
}
-static struct ctl_port *
-ctl_io_port(struct ctl_io_hdr *io_hdr)
-{
-
- return (control_softc->ctl_ports[io_hdr->nexus.targ_port]);
-}
-
int
ctl_ffz(uint32_t *mask, uint32_t first, uint32_t last)
{
@@ -3746,7 +3828,6 @@ int
ctl_pool_create(struct ctl_softc *ctl_softc, const char *pool_name,
uint32_t total_ctl_io, void **npool)
{
-#ifdef IO_POOLS
struct ctl_io_pool *pool;
pool = (struct ctl_io_pool *)malloc(sizeof(*pool), M_CTL,
@@ -3756,14 +3837,15 @@ ctl_pool_create(struct ctl_softc *ctl_softc, const char *pool_name,
snprintf(pool->name, sizeof(pool->name), "CTL IO %s", pool_name);
pool->ctl_softc = ctl_softc;
+#ifdef IO_POOLS
pool->zone = uma_zsecond_create(pool->name, NULL,
NULL, NULL, NULL, ctl_softc->io_zone);
/* uma_prealloc(pool->zone, total_ctl_io); */
-
- *npool = pool;
#else
- *npool = ctl_softc->io_zone;
+ pool->zone = ctl_softc->io_zone;
#endif
+
+ *npool = pool;
return (0);
}
@@ -3776,64 +3858,54 @@ ctl_pool_free(struct ctl_io_pool *pool)
#ifdef IO_POOLS
uma_zdestroy(pool->zone);
- free(pool, M_CTL);
#endif
+ free(pool, M_CTL);
}
union ctl_io *
ctl_alloc_io(void *pool_ref)
{
- union ctl_io *io;
-#ifdef IO_POOLS
struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref;
+ union ctl_io *io;
io = uma_zalloc(pool->zone, M_WAITOK);
-#else
- io = uma_zalloc((uma_zone_t)pool_ref, M_WAITOK);
-#endif
- if (io != NULL)
+ if (io != NULL) {
io->io_hdr.pool = pool_ref;
+ CTL_SOFTC(io) = pool->ctl_softc;
+ }
return (io);
}
union ctl_io *
ctl_alloc_io_nowait(void *pool_ref)
{
- union ctl_io *io;
-#ifdef IO_POOLS
struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref;
+ union ctl_io *io;
io = uma_zalloc(pool->zone, M_NOWAIT);
-#else
- io = uma_zalloc((uma_zone_t)pool_ref, M_NOWAIT);
-#endif
- if (io != NULL)
+ if (io != NULL) {
io->io_hdr.pool = pool_ref;
+ CTL_SOFTC(io) = pool->ctl_softc;
+ }
return (io);
}
void
ctl_free_io(union ctl_io *io)
{
-#ifdef IO_POOLS
struct ctl_io_pool *pool;
-#endif
if (io == NULL)
return;
-#ifdef IO_POOLS
pool = (struct ctl_io_pool *)io->io_hdr.pool;
uma_zfree(pool->zone, io);
-#else
- uma_zfree((uma_zone_t)io->io_hdr.pool, io);
-#endif
}
void
ctl_zero_io(union ctl_io *io)
{
- void *pool_ref;
+ struct ctl_io_pool *pool;
if (io == NULL)
return;
@@ -3841,9 +3913,10 @@ ctl_zero_io(union ctl_io *io)
/*
* May need to preserve linked list pointers at some point too.
*/
- pool_ref = io->io_hdr.pool;
+ pool = io->io_hdr.pool;
memset(io, 0, sizeof(*io));
- io->io_hdr.pool = pool_ref;
+ io->io_hdr.pool = pool;
+ CTL_SOFTC(io) = pool->ctl_softc;
}
int
@@ -4409,7 +4482,7 @@ ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun,
struct scsi_vpd_id_descriptor *desc;
struct scsi_vpd_id_t10 *t10id;
const char *eui, *naa, *scsiname, *uuid, *vendor, *value;
- int lun_number, i, lun_malloced;
+ int lun_number, lun_malloced;
int devidlen, idlen1, idlen2 = 0, len;
if (be_lun == NULL)
@@ -4543,6 +4616,8 @@ ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun,
printf("ctl: requested LUN ID %d is already "
"in use\n", be_lun->req_lun_id);
}
+fail:
+ free(lun->lun_devid, M_CTL);
if (lun->flags & CTL_LUN_MALLOCED)
free(lun, M_CTL);
be_lun->lun_config_status(be_lun->be_lun,
@@ -4555,14 +4630,11 @@ ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun,
if (lun_number == -1) {
mtx_unlock(&ctl_softc->ctl_lock);
printf("ctl: can't allocate LUN, out of LUNs\n");
- if (lun->flags & CTL_LUN_MALLOCED)
- free(lun, M_CTL);
- be_lun->lun_config_status(be_lun->be_lun,
- CTL_LUN_CONFIG_FAILURE);
- return (ENOSPC);
+ goto fail;
}
}
ctl_set_mask(ctl_softc->ctl_lun_mask, lun_number);
+ mtx_unlock(&ctl_softc->ctl_lock);
mtx_init(&lun->lun_lock, "CTL LUN", NULL, MTX_DEF);
lun->lun = lun_number;
@@ -4603,6 +4675,10 @@ ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun,
lun->ie_reported = 1;
callout_init_mtx(&lun->ie_callout, &lun->lun_lock, 0);
ctl_tpc_lun_init(lun);
+ if (lun->flags & CTL_LUN_REMOVABLE) {
+ lun->prevent = malloc((CTL_MAX_INITIATORS + 31) / 32 * 4,
+ M_CTL, M_WAITOK);
+ }
/*
* Initialize the mode and log page index.
@@ -4610,31 +4686,31 @@ ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun,
ctl_init_page_index(lun);
ctl_init_log_page_index(lun);
+ /* Setup statistics gathering */
+#ifdef CTL_LEGACY_STATS
+ lun->legacy_stats.device_type = be_lun->lun_type;
+ lun->legacy_stats.lun_number = lun_number;
+ lun->legacy_stats.blocksize = be_lun->blocksize;
+ if (be_lun->blocksize == 0)
+ lun->legacy_stats.flags = CTL_LUN_STATS_NO_BLOCKSIZE;
+ for (len = 0; len < CTL_MAX_PORTS; len++)
+ lun->legacy_stats.ports[len].targ_port = len;
+#endif /* CTL_LEGACY_STATS */
+ lun->stats.item = lun_number;
+
/*
* Now, before we insert this lun on the lun list, set the lun
* inventory changed UA for all other luns.
*/
+ mtx_lock(&ctl_softc->ctl_lock);
STAILQ_FOREACH(nlun, &ctl_softc->lun_list, links) {
mtx_lock(&nlun->lun_lock);
ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE);
mtx_unlock(&nlun->lun_lock);
}
-
STAILQ_INSERT_TAIL(&ctl_softc->lun_list, lun, links);
-
ctl_softc->ctl_luns[lun_number] = lun;
-
ctl_softc->num_luns++;
-
- /* Setup statistics gathering */
- lun->stats.device_type = be_lun->lun_type;
- lun->stats.lun_number = lun_number;
- lun->stats.blocksize = be_lun->blocksize;
- if (be_lun->blocksize == 0)
- lun->stats.flags = CTL_LUN_STATS_NO_BLOCKSIZE;
- for (i = 0;i < CTL_MAX_PORTS;i++)
- lun->stats.ports[i].targ_port = i;
-
mtx_unlock(&ctl_softc->ctl_lock);
lun->be_lun->lun_config_status(lun->be_lun->be_lun, CTL_LUN_CONFIG_OK);
@@ -4650,12 +4726,10 @@ ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun,
static int
ctl_free_lun(struct ctl_lun *lun)
{
- struct ctl_softc *softc;
+ struct ctl_softc *softc = lun->ctl_softc;
struct ctl_lun *nlun;
int i;
- softc = lun->ctl_softc;
-
mtx_assert(&softc->ctl_lock, MA_OWNED);
STAILQ_REMOVE(&softc->lun_list, lun, ctl_lun, links);
@@ -4686,6 +4760,7 @@ ctl_free_lun(struct ctl_lun *lun)
for (i = 0; i < CTL_MAX_PORTS; i++)
free(lun->pr_keys[i], M_CTL);
free(lun->write_buffer, M_CTL);
+ free(lun->prevent, M_CTL);
if (lun->flags & CTL_LUN_MALLOCED)
free(lun, M_CTL);
@@ -5001,18 +5076,13 @@ ctl_config_move_done(union ctl_io *io)
if ((io->io_hdr.port_status != 0) &&
((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
(io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
- /*
- * For hardware error sense keys, the sense key
- * specific value is defined to be a retry count,
- * but we use it to pass back an internal FETD
- * error code. XXX KDM Hopefully the FETD is only
- * using 16 bits for an error code, since that's
- * all the space we have in the sks field.
- */
- ctl_set_internal_failure(&io->scsiio,
- /*sks_valid*/ 1,
- /*retry_count*/
- io->io_hdr.port_status);
+ ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1,
+ /*retry_count*/ io->io_hdr.port_status);
+ } else if (io->scsiio.kern_data_resid != 0 &&
+ (io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT &&
+ ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
+ (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
+ ctl_set_invalid_field_ciu(&io->scsiio);
}
if (ctl_debug & CTL_DEBUG_CDB_DATA)
@@ -5156,13 +5226,12 @@ ctl_config_read_done(union ctl_io *io)
int
ctl_scsi_release(struct ctl_scsiio *ctsio)
{
- struct ctl_lun *lun;
+ struct ctl_lun *lun = CTL_LUN(ctsio);
uint32_t residx;
CTL_DEBUG_PRINT(("ctl_scsi_release\n"));
residx = ctl_get_initindex(&ctsio->io_hdr.nexus);
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
/*
* XXX KDM right now, we only support LUN reservation. We don't
@@ -5194,13 +5263,12 @@ ctl_scsi_release(struct ctl_scsiio *ctsio)
int
ctl_scsi_reserve(struct ctl_scsiio *ctsio)
{
- struct ctl_lun *lun;
+ struct ctl_lun *lun = CTL_LUN(ctsio);
uint32_t residx;
CTL_DEBUG_PRINT(("ctl_reserve\n"));
residx = ctl_get_initindex(&ctsio->io_hdr.nexus);
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
/*
* XXX KDM right now, we only support LUN reservation. We don't
@@ -5235,13 +5303,12 @@ bailout:
int
ctl_start_stop(struct ctl_scsiio *ctsio)
{
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_start_stop_unit *cdb;
- struct ctl_lun *lun;
int retval;
CTL_DEBUG_PRINT(("ctl_start_stop\n"));
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
cdb = (struct scsi_start_stop_unit *)ctsio->cdb;
if ((cdb->how & SSS_PC_MASK) == 0) {
@@ -5290,17 +5357,16 @@ ctl_start_stop(struct ctl_scsiio *ctsio)
int
ctl_prevent_allow(struct ctl_scsiio *ctsio)
{
- struct ctl_lun *lun;
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_prevent *cdb;
int retval;
uint32_t initidx;
CTL_DEBUG_PRINT(("ctl_prevent_allow\n"));
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
cdb = (struct scsi_prevent *)ctsio->cdb;
- if ((lun->flags & CTL_LUN_REMOVABLE) == 0) {
+ if ((lun->flags & CTL_LUN_REMOVABLE) == 0 || lun->prevent == NULL) {
ctl_set_invalid_opcode(ctsio);
ctl_done((union ctl_io *)ctsio);
return (CTL_RETVAL_COMPLETE);
@@ -5331,8 +5397,7 @@ ctl_prevent_allow(struct ctl_scsiio *ctsio)
int
ctl_sync_cache(struct ctl_scsiio *ctsio)
{
- struct ctl_lun *lun;
- struct ctl_softc *softc;
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct ctl_lba_len_flags *lbalen;
uint64_t starting_lba;
uint32_t block_count;
@@ -5341,8 +5406,6 @@ ctl_sync_cache(struct ctl_scsiio *ctsio)
CTL_DEBUG_PRINT(("ctl_sync_cache\n"));
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
- softc = lun->ctl_softc;
retval = 0;
switch (ctsio->cdb[0]) {
@@ -5398,13 +5461,10 @@ int
ctl_format(struct ctl_scsiio *ctsio)
{
struct scsi_format *cdb;
- struct ctl_lun *lun;
int length, defect_list_len;
CTL_DEBUG_PRINT(("ctl_format\n"));
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
cdb = (struct scsi_format *)ctsio->cdb;
length = 0;
@@ -5420,7 +5480,6 @@ ctl_format(struct ctl_scsiio *ctsio)
ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK);
ctsio->kern_data_len = length;
ctsio->kern_total_len = length;
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
@@ -5483,7 +5542,7 @@ bailout:
int
ctl_read_buffer(struct ctl_scsiio *ctsio)
{
- struct ctl_lun *lun;
+ struct ctl_lun *lun = CTL_LUN(ctsio);
uint64_t buffer_offset;
uint32_t len;
uint8_t byte2;
@@ -5491,7 +5550,7 @@ ctl_read_buffer(struct ctl_scsiio *ctsio)
static uint8_t echo_descr[4] = { 0 };
CTL_DEBUG_PRINT(("ctl_read_buffer\n"));
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+
switch (ctsio->cdb[0]) {
case READ_BUFFER: {
struct scsi_read_buffer *cdb;
@@ -5546,7 +5605,6 @@ ctl_read_buffer(struct ctl_scsiio *ctsio)
}
ctsio->kern_data_len = len;
ctsio->kern_total_len = len;
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
ctl_set_success(ctsio);
@@ -5558,13 +5616,12 @@ ctl_read_buffer(struct ctl_scsiio *ctsio)
int
ctl_write_buffer(struct ctl_scsiio *ctsio)
{
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_write_buffer *cdb;
- struct ctl_lun *lun;
int buffer_offset, len;
CTL_DEBUG_PRINT(("ctl_write_buffer\n"));
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
cdb = (struct scsi_write_buffer *)ctsio->cdb;
len = scsi_3btoul(cdb->length);
@@ -5593,7 +5650,6 @@ ctl_write_buffer(struct ctl_scsiio *ctsio)
ctsio->kern_data_ptr = lun->write_buffer + buffer_offset;
ctsio->kern_data_len = len;
ctsio->kern_total_len = len;
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
@@ -5611,7 +5667,7 @@ ctl_write_buffer(struct ctl_scsiio *ctsio)
int
ctl_write_same(struct ctl_scsiio *ctsio)
{
- struct ctl_lun *lun;
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct ctl_lba_len_flags *lbalen;
uint64_t lba;
uint32_t num_blocks;
@@ -5620,8 +5676,6 @@ ctl_write_same(struct ctl_scsiio *ctsio)
CTL_DEBUG_PRINT(("ctl_write_same\n"));
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
switch (ctsio->cdb[0]) {
case WRITE_SAME_10: {
struct scsi_write_same_10 *cdb;
@@ -5703,7 +5757,6 @@ ctl_write_same(struct ctl_scsiio *ctsio)
ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
ctsio->kern_data_len = len;
ctsio->kern_total_len = len;
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
@@ -5725,7 +5778,7 @@ ctl_write_same(struct ctl_scsiio *ctsio)
int
ctl_unmap(struct ctl_scsiio *ctsio)
{
- struct ctl_lun *lun;
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_unmap *cdb;
struct ctl_ptr_len_flags *ptrlen;
struct scsi_unmap_header *hdr;
@@ -5737,9 +5790,7 @@ ctl_unmap(struct ctl_scsiio *ctsio)
CTL_DEBUG_PRINT(("ctl_unmap\n"));
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
cdb = (struct scsi_unmap *)ctsio->cdb;
-
len = scsi_2btoul(cdb->length);
byte2 = cdb->byte2;
@@ -5751,7 +5802,6 @@ ctl_unmap(struct ctl_scsiio *ctsio)
ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
ctsio->kern_data_len = len;
ctsio->kern_total_len = len;
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
@@ -5829,24 +5879,20 @@ int
ctl_default_page_handler(struct ctl_scsiio *ctsio,
struct ctl_page_index *page_index, uint8_t *page_ptr)
{
- struct ctl_lun *lun;
- uint8_t *current_cp, *saved_cp;
+ struct ctl_lun *lun = CTL_LUN(ctsio);
+ uint8_t *current_cp;
int set_ua;
uint32_t initidx;
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
set_ua = 0;
current_cp = (page_index->page_data + (page_index->page_len *
CTL_PAGE_CURRENT));
- saved_cp = (page_index->page_data + (page_index->page_len *
- CTL_PAGE_SAVED));
mtx_lock(&lun->lun_lock);
if (memcmp(current_cp, page_ptr, page_index->page_len)) {
memcpy(current_cp, page_ptr, page_index->page_len);
- memcpy(saved_cp, page_ptr, page_index->page_len);
set_ua = 1;
}
if (set_ua != 0)
@@ -5887,13 +5933,12 @@ int
ctl_ie_page_handler(struct ctl_scsiio *ctsio,
struct ctl_page_index *page_index, uint8_t *page_ptr)
{
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_info_exceptions_page *pg;
- struct ctl_lun *lun;
uint64_t t;
(void)ctl_default_page_handler(ctsio, page_index, page_ptr);
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
pg = (struct scsi_info_exceptions_page *)page_ptr;
mtx_lock(&lun->lun_lock);
if (pg->info_flags & SIEP_FLAGS_TEST) {
@@ -5930,19 +5975,18 @@ ctl_ie_page_handler(struct ctl_scsiio *ctsio,
static int
ctl_do_mode_select(union ctl_io *io)
{
+ struct ctl_lun *lun = CTL_LUN(io);
struct scsi_mode_page_header *page_header;
struct ctl_page_index *page_index;
struct ctl_scsiio *ctsio;
int page_len, page_len_offset, page_len_size;
union ctl_modepage_info *modepage_info;
- struct ctl_lun *lun;
- int *len_left, *len_used;
+ uint16_t *len_left, *len_used;
int retval, i;
ctsio = &io->scsiio;
page_index = NULL;
page_len = 0;
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
modepage_info = (union ctl_modepage_info *)
ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes;
@@ -6155,10 +6199,12 @@ bailout_no_done:
int
ctl_mode_select(struct ctl_scsiio *ctsio)
{
- int param_len, pf, sp;
- int header_size, bd_len;
+ struct ctl_lun *lun = CTL_LUN(ctsio);
union ctl_modepage_info *modepage_info;
+ int bd_len, i, header_size, param_len, pf, rtd, sp;
+ uint32_t initidx;
+ initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
switch (ctsio->cdb[0]) {
case MODE_SELECT_6: {
struct scsi_mode_select_6 *cdb;
@@ -6166,6 +6212,7 @@ ctl_mode_select(struct ctl_scsiio *ctsio)
cdb = (struct scsi_mode_select_6 *)ctsio->cdb;
pf = (cdb->byte2 & SMS_PF) ? 1 : 0;
+ rtd = (cdb->byte2 & SMS_RTD) ? 1 : 0;
sp = (cdb->byte2 & SMS_SP) ? 1 : 0;
param_len = cdb->length;
header_size = sizeof(struct scsi_mode_header_6);
@@ -6177,6 +6224,7 @@ ctl_mode_select(struct ctl_scsiio *ctsio)
cdb = (struct scsi_mode_select_10 *)ctsio->cdb;
pf = (cdb->byte2 & SMS_PF) ? 1 : 0;
+ rtd = (cdb->byte2 & SMS_RTD) ? 1 : 0;
sp = (cdb->byte2 & SMS_SP) ? 1 : 0;
param_len = scsi_2btoul(cdb->length);
header_size = sizeof(struct scsi_mode_header_10);
@@ -6188,6 +6236,30 @@ ctl_mode_select(struct ctl_scsiio *ctsio)
return (CTL_RETVAL_COMPLETE);
}
+ if (rtd) {
+ if (param_len != 0) {
+ ctl_set_invalid_field(ctsio, /*sks_valid*/ 0,
+ /*command*/ 1, /*field*/ 0,
+ /*bit_valid*/ 0, /*bit*/ 0);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+
+ /* Revert to defaults. */
+ ctl_init_page_index(lun);
+ mtx_lock(&lun->lun_lock);
+ ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE);
+ mtx_unlock(&lun->lun_lock);
+ for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
+ ctl_isc_announce_mode(lun, -1,
+ lun->mode_pages.index[i].page_code & SMPH_PC_MASK,
+ lun->mode_pages.index[i].subpage);
+ }
+ ctl_set_success(ctsio);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+
/*
* From SPC-3:
* "A parameter list length of zero indicates that the Data-Out Buffer
@@ -6219,7 +6291,6 @@ ctl_mode_select(struct ctl_scsiio *ctsio)
ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK);
ctsio->kern_data_len = param_len;
ctsio->kern_total_len = param_len;
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
@@ -6276,7 +6347,7 @@ ctl_mode_select(struct ctl_scsiio *ctsio)
int
ctl_mode_sense(struct ctl_scsiio *ctsio)
{
- struct ctl_lun *lun;
+ struct ctl_lun *lun = CTL_LUN(ctsio);
int pc, page_code, dbd, llba, subpage;
int alloc_len, page_len, header_len, total_len;
struct scsi_mode_block_descr *block_desc;
@@ -6288,7 +6359,6 @@ ctl_mode_sense(struct ctl_scsiio *ctsio)
CTL_DEBUG_PRINT(("ctl_mode_sense\n"));
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
switch (ctsio->cdb[0]) {
case MODE_SENSE_6: {
struct scsi_mode_sense_6 *cdb;
@@ -6450,17 +6520,9 @@ ctl_mode_sense(struct ctl_scsiio *ctsio)
ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
ctsio->kern_sg_entries = 0;
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
- if (total_len < alloc_len) {
- ctsio->residual = alloc_len - total_len;
- ctsio->kern_data_len = total_len;
- ctsio->kern_total_len = total_len;
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
+ ctsio->kern_data_len = min(total_len, alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
switch (ctsio->cdb[0]) {
case MODE_SENSE_6: {
@@ -6624,12 +6686,11 @@ ctl_lbp_log_sense_handler(struct ctl_scsiio *ctsio,
struct ctl_page_index *page_index,
int pc)
{
- struct ctl_lun *lun;
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_log_param_header *phdr;
uint8_t *data;
uint64_t val;
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
data = page_index->page_data;
if (lun->backend->lun_attr != NULL &&
@@ -6693,41 +6754,31 @@ ctl_sap_log_sense_handler(struct ctl_scsiio *ctsio,
struct ctl_page_index *page_index,
int pc)
{
- struct ctl_lun *lun;
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct stat_page *data;
- uint64_t rn, wn, rb, wb;
- struct bintime rt, wt;
- int i;
+ struct bintime *t;
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
data = (struct stat_page *)page_index->page_data;
scsi_ulto2b(SLP_SAP, data->sap.hdr.param_code);
data->sap.hdr.param_control = SLP_LBIN;
data->sap.hdr.param_len = sizeof(struct scsi_log_stat_and_perf) -
sizeof(struct scsi_log_param_header);
- rn = wn = rb = wb = 0;
- bintime_clear(&rt);
- bintime_clear(&wt);
- for (i = 0; i < CTL_MAX_PORTS; i++) {
- rn += lun->stats.ports[i].operations[CTL_STATS_READ];
- wn += lun->stats.ports[i].operations[CTL_STATS_WRITE];
- rb += lun->stats.ports[i].bytes[CTL_STATS_READ];
- wb += lun->stats.ports[i].bytes[CTL_STATS_WRITE];
- bintime_add(&rt, &lun->stats.ports[i].time[CTL_STATS_READ]);
- bintime_add(&wt, &lun->stats.ports[i].time[CTL_STATS_WRITE]);
- }
- scsi_u64to8b(rn, data->sap.read_num);
- scsi_u64to8b(wn, data->sap.write_num);
- if (lun->stats.blocksize > 0) {
- scsi_u64to8b(wb / lun->stats.blocksize,
- data->sap.recvieved_lba);
- scsi_u64to8b(rb / lun->stats.blocksize,
- data->sap.transmitted_lba);
- }
- scsi_u64to8b((uint64_t)rt.sec * 1000 + rt.frac / (UINT64_MAX / 1000),
+ scsi_u64to8b(lun->stats.operations[CTL_STATS_READ],
+ data->sap.read_num);
+ scsi_u64to8b(lun->stats.operations[CTL_STATS_WRITE],
+ data->sap.write_num);
+ if (lun->be_lun->blocksize > 0) {
+ scsi_u64to8b(lun->stats.bytes[CTL_STATS_WRITE] /
+ lun->be_lun->blocksize, data->sap.recvieved_lba);
+ scsi_u64to8b(lun->stats.bytes[CTL_STATS_READ] /
+ lun->be_lun->blocksize, data->sap.transmitted_lba);
+ }
+ t = &lun->stats.time[CTL_STATS_READ];
+ scsi_u64to8b((uint64_t)t->sec * 1000 + t->frac / (UINT64_MAX / 1000),
data->sap.read_int);
- scsi_u64to8b((uint64_t)wt.sec * 1000 + wt.frac / (UINT64_MAX / 1000),
+ t = &lun->stats.time[CTL_STATS_WRITE];
+ scsi_u64to8b((uint64_t)t->sec * 1000 + t->frac / (UINT64_MAX / 1000),
data->sap.write_int);
scsi_u64to8b(0, data->sap.weighted_num);
scsi_u64to8b(0, data->sap.weighted_int);
@@ -6752,10 +6803,9 @@ ctl_ie_log_sense_handler(struct ctl_scsiio *ctsio,
struct ctl_page_index *page_index,
int pc)
{
- struct ctl_lun *lun;
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_log_informational_exceptions *data;
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
data = (struct scsi_log_informational_exceptions *)page_index->page_data;
scsi_ulto2b(SLP_IE_GEN, data->hdr.param_code);
@@ -6771,7 +6821,7 @@ ctl_ie_log_sense_handler(struct ctl_scsiio *ctsio,
int
ctl_log_sense(struct ctl_scsiio *ctsio)
{
- struct ctl_lun *lun;
+ struct ctl_lun *lun = CTL_LUN(ctsio);
int i, pc, page_code, subpage;
int alloc_len, total_len;
struct ctl_page_index *page_index;
@@ -6780,7 +6830,6 @@ ctl_log_sense(struct ctl_scsiio *ctsio)
CTL_DEBUG_PRINT(("ctl_log_sense\n"));
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
cdb = (struct scsi_log_sense *)ctsio->cdb;
pc = (cdb->page & SLS_PAGE_CTRL_MASK) >> 6;
page_code = cdb->page & SLS_PAGE_CODE;
@@ -6816,17 +6865,9 @@ ctl_log_sense(struct ctl_scsiio *ctsio)
ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
ctsio->kern_sg_entries = 0;
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
- if (total_len < alloc_len) {
- ctsio->residual = alloc_len - total_len;
- ctsio->kern_data_len = total_len;
- ctsio->kern_total_len = total_len;
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
+ ctsio->kern_data_len = min(total_len, alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
header = (struct scsi_log_header *)ctsio->kern_data_ptr;
header->page = page_index->page_code;
@@ -6857,9 +6898,9 @@ ctl_log_sense(struct ctl_scsiio *ctsio)
int
ctl_read_capacity(struct ctl_scsiio *ctsio)
{
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_read_capacity *cdb;
struct scsi_read_capacity_data *data;
- struct ctl_lun *lun;
uint32_t lba;
CTL_DEBUG_PRINT(("ctl_read_capacity\n"));
@@ -6879,14 +6920,10 @@ ctl_read_capacity(struct ctl_scsiio *ctsio)
return (CTL_RETVAL_COMPLETE);
}
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO);
data = (struct scsi_read_capacity_data *)ctsio->kern_data_ptr;
- ctsio->residual = 0;
ctsio->kern_data_len = sizeof(*data);
ctsio->kern_total_len = sizeof(*data);
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
@@ -6915,9 +6952,9 @@ ctl_read_capacity(struct ctl_scsiio *ctsio)
int
ctl_read_capacity_16(struct ctl_scsiio *ctsio)
{
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_read_capacity_16 *cdb;
struct scsi_read_capacity_data_long *data;
- struct ctl_lun *lun;
uint64_t lba;
uint32_t alloc_len;
@@ -6940,23 +6977,12 @@ ctl_read_capacity_16(struct ctl_scsiio *ctsio)
return (CTL_RETVAL_COMPLETE);
}
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO);
data = (struct scsi_read_capacity_data_long *)ctsio->kern_data_ptr;
-
- if (sizeof(*data) < alloc_len) {
- ctsio->residual = alloc_len - sizeof(*data);
- ctsio->kern_data_len = sizeof(*data);
- ctsio->kern_total_len = sizeof(*data);
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
+ ctsio->kern_data_len = min(sizeof(*data), alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
scsi_u64to8b(lun->be_lun->maxlba, data->addr);
/* XXX KDM this may not be 512 bytes... */
@@ -6976,9 +7002,9 @@ ctl_read_capacity_16(struct ctl_scsiio *ctsio)
int
ctl_get_lba_status(struct ctl_scsiio *ctsio)
{
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_get_lba_status *cdb;
struct scsi_get_lba_status_data *data;
- struct ctl_lun *lun;
struct ctl_lba_len_flags *lbalen;
uint64_t lba;
uint32_t alloc_len, total_len;
@@ -6986,7 +7012,6 @@ ctl_get_lba_status(struct ctl_scsiio *ctsio)
CTL_DEBUG_PRINT(("ctl_get_lba_status\n"));
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
cdb = (struct scsi_get_lba_status *)ctsio->cdb;
lba = scsi_8btou64(cdb->addr);
alloc_len = scsi_4btoul(cdb->alloc_len);
@@ -7000,19 +7025,10 @@ ctl_get_lba_status(struct ctl_scsiio *ctsio)
total_len = sizeof(*data) + sizeof(data->descr[0]);
ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
data = (struct scsi_get_lba_status_data *)ctsio->kern_data_ptr;
-
- if (total_len < alloc_len) {
- ctsio->residual = alloc_len - total_len;
- ctsio->kern_data_len = total_len;
- ctsio->kern_total_len = total_len;
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
+ ctsio->kern_data_len = min(total_len, alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
/* Fill dummy data in case backend can't tell anything. */
scsi_ulto4b(4 + sizeof(data->descr[0]), data->length);
@@ -7063,18 +7079,10 @@ ctl_read_defect(struct ctl_scsiio *ctsio)
}
ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
- if (data_len < alloc_len) {
- ctsio->residual = alloc_len - data_len;
- ctsio->kern_data_len = data_len;
- ctsio->kern_total_len = data_len;
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
+ ctsio->kern_data_len = min(data_len, alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
if (ctsio->cdb[0] == READ_DEFECT_DATA_10) {
data10 = (struct scsi_read_defect_data_hdr_10 *)
@@ -7099,12 +7107,12 @@ ctl_read_defect(struct ctl_scsiio *ctsio)
int
ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio)
{
+ struct ctl_softc *softc = CTL_SOFTC(ctsio);
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_maintenance_in *cdb;
int retval;
int alloc_len, ext, total_len = 0, g, pc, pg, ts, os;
int num_ha_groups, num_target_ports, shared_group;
- struct ctl_lun *lun;
- struct ctl_softc *softc;
struct ctl_port *port;
struct scsi_target_group_data *rtg_ptr;
struct scsi_target_group_data_extended *rtg_ext_ptr;
@@ -7113,9 +7121,6 @@ ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio)
CTL_DEBUG_PRINT(("ctl_report_tagret_port_groups\n"));
cdb = (struct scsi_maintenance_in *)ctsio->cdb;
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
- softc = lun->ctl_softc;
-
retval = CTL_RETVAL_COMPLETE;
switch (cdb->byte2 & STG_PDF_MASK) {
@@ -7142,7 +7147,7 @@ ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio)
STAILQ_FOREACH(port, &softc->port_list, links) {
if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
continue;
- if (ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS)
+ if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX)
continue;
num_target_ports++;
if (port->status & CTL_PORT_STATUS_HA_SHARED)
@@ -7162,20 +7167,10 @@ ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio)
alloc_len = scsi_4btoul(cdb->length);
ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
-
ctsio->kern_sg_entries = 0;
-
- if (total_len < alloc_len) {
- ctsio->residual = alloc_len - total_len;
- ctsio->kern_data_len = total_len;
- ctsio->kern_total_len = total_len;
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
+ ctsio->kern_data_len = min(total_len, alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
if (ext) {
rtg_ext_ptr = (struct scsi_target_group_data_extended *)
@@ -7234,7 +7229,7 @@ ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio)
if (!softc->is_single &&
(port->status & CTL_PORT_STATUS_HA_SHARED) == 0)
continue;
- if (ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS)
+ if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX)
continue;
scsi_ulto2b(port->targ_port, tpg_desc->descriptors[pc].
relative_target_port_identifier);
@@ -7259,7 +7254,7 @@ ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio)
continue;
if (port->status & CTL_PORT_STATUS_HA_SHARED)
continue;
- if (ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS)
+ if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX)
continue;
scsi_ulto2b(port->targ_port, tpg_desc->descriptors[pc].
relative_target_port_identifier);
@@ -7281,7 +7276,7 @@ ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio)
int
ctl_report_supported_opcodes(struct ctl_scsiio *ctsio)
{
- struct ctl_lun *lun;
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_report_supported_opcodes *cdb;
const struct ctl_cmd_entry *entry, *sentry;
struct scsi_report_supported_opcodes_all *all;
@@ -7294,8 +7289,6 @@ ctl_report_supported_opcodes(struct ctl_scsiio *ctsio)
CTL_DEBUG_PRINT(("ctl_report_supported_opcodes\n"));
cdb = (struct scsi_report_supported_opcodes *)ctsio->cdb;
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
retval = CTL_RETVAL_COMPLETE;
opcode = cdb->requested_opcode;
@@ -7365,20 +7358,10 @@ ctl_report_supported_opcodes(struct ctl_scsiio *ctsio)
alloc_len = scsi_4btoul(cdb->length);
ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
-
ctsio->kern_sg_entries = 0;
-
- if (total_len < alloc_len) {
- ctsio->residual = alloc_len - total_len;
- ctsio->kern_data_len = total_len;
- ctsio->kern_total_len = total_len;
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
+ ctsio->kern_data_len = min(total_len, alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
switch (cdb->options & RSO_OPTIONS_MASK) {
case RSO_OPTIONS_ALL:
@@ -7479,20 +7462,10 @@ ctl_report_supported_tmf(struct ctl_scsiio *ctsio)
alloc_len = scsi_4btoul(cdb->length);
ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
-
ctsio->kern_sg_entries = 0;
-
- if (total_len < alloc_len) {
- ctsio->residual = alloc_len - total_len;
- ctsio->kern_data_len = total_len;
- ctsio->kern_total_len = total_len;
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
+ ctsio->kern_data_len = min(total_len, alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
data = (struct scsi_report_supported_tmf_ext_data *)ctsio->kern_data_ptr;
data->byte1 |= RST_ATS | RST_ATSS | RST_CTSS | RST_LURS | RST_QTS |
@@ -7527,20 +7500,10 @@ ctl_report_timestamp(struct ctl_scsiio *ctsio)
alloc_len = scsi_4btoul(cdb->length);
ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
-
ctsio->kern_sg_entries = 0;
-
- if (total_len < alloc_len) {
- ctsio->residual = alloc_len - total_len;
- ctsio->kern_data_len = total_len;
- ctsio->kern_total_len = total_len;
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
+ ctsio->kern_data_len = min(total_len, alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
data = (struct scsi_report_timestamp_data *)ctsio->kern_data_ptr;
scsi_ulto2b(sizeof(*data) - 2, data->length);
@@ -7560,11 +7523,11 @@ ctl_report_timestamp(struct ctl_scsiio *ctsio)
int
ctl_persistent_reserve_in(struct ctl_scsiio *ctsio)
{
+ struct ctl_softc *softc = CTL_SOFTC(ctsio);
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_per_res_in *cdb;
int alloc_len, total_len = 0;
/* struct scsi_per_res_in_rsrv in_data; */
- struct ctl_lun *lun;
- struct ctl_softc *softc;
uint64_t key;
CTL_DEBUG_PRINT(("ctl_persistent_reserve_in\n"));
@@ -7573,9 +7536,6 @@ ctl_persistent_reserve_in(struct ctl_scsiio *ctsio)
alloc_len = scsi_2btoul(cdb->length);
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
- softc = lun->ctl_softc;
-
retry:
mtx_lock(&lun->lun_lock);
switch (cdb->action) {
@@ -7604,20 +7564,10 @@ retry:
mtx_unlock(&lun->lun_lock);
ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
-
- if (total_len < alloc_len) {
- ctsio->residual = alloc_len - total_len;
- ctsio->kern_data_len = total_len;
- ctsio->kern_total_len = total_len;
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
-
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
+ ctsio->kern_data_len = min(total_len, alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
mtx_lock(&lun->lun_lock);
switch (cdb->action) {
@@ -8138,12 +8088,12 @@ ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg)
int
ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
{
+ struct ctl_softc *softc = CTL_SOFTC(ctsio);
+ struct ctl_lun *lun = CTL_LUN(ctsio);
int retval;
u_int32_t param_len;
struct scsi_per_res_out *cdb;
- struct ctl_lun *lun;
struct scsi_per_res_out_parms* param;
- struct ctl_softc *softc;
uint32_t residx;
uint64_t res_key, sa_res_key, key;
uint8_t type;
@@ -8152,11 +8102,8 @@ ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
CTL_DEBUG_PRINT(("ctl_persistent_reserve_out\n"));
- retval = CTL_RETVAL_COMPLETE;
-
cdb = (struct scsi_per_res_out *)ctsio->cdb;
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
- softc = lun->ctl_softc;
+ retval = CTL_RETVAL_COMPLETE;
/*
* We only support whole-LUN scope. The scope & type are ignored for
@@ -8196,7 +8143,6 @@ ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK);
ctsio->kern_data_len = param_len;
ctsio->kern_total_len = param_len;
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
@@ -8530,17 +8476,18 @@ done:
* in sync.
*/
static void
-ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg)
+ctl_hndl_per_res_out_on_other_sc(union ctl_io *io)
{
- struct ctl_softc *softc = control_softc;
+ struct ctl_softc *softc = CTL_SOFTC(io);
+ union ctl_ha_msg *msg = (union ctl_ha_msg *)&io->presio.pr_msg;
struct ctl_lun *lun;
int i;
uint32_t residx, targ_lun;
targ_lun = msg->hdr.nexus.targ_mapped_lun;
mtx_lock(&softc->ctl_lock);
- if ((targ_lun >= CTL_MAX_LUNS) ||
- ((lun = softc->ctl_luns[targ_lun]) == NULL)) {
+ if (targ_lun >= CTL_MAX_LUNS ||
+ (lun = softc->ctl_luns[targ_lun]) == NULL) {
mtx_unlock(&softc->ctl_lock);
return;
}
@@ -8651,15 +8598,13 @@ ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg)
int
ctl_read_write(struct ctl_scsiio *ctsio)
{
- struct ctl_lun *lun;
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct ctl_lba_len_flags *lbalen;
uint64_t lba;
uint32_t num_blocks;
int flags, retval;
int isread;
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
CTL_DEBUG_PRINT(("ctl_read_write: command: %#x\n", ctsio->cdb[0]));
flags = 0;
@@ -8844,15 +8789,14 @@ ctl_read_write(struct ctl_scsiio *ctsio)
static int
ctl_cnw_cont(union ctl_io *io)
{
+ struct ctl_lun *lun = CTL_LUN(io);
struct ctl_scsiio *ctsio;
- struct ctl_lun *lun;
struct ctl_lba_len_flags *lbalen;
int retval;
ctsio = &io->scsiio;
ctsio->io_hdr.status = CTL_STATUS_NONE;
ctsio->io_hdr.flags &= ~CTL_FLAG_IO_CONT;
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
lbalen = (struct ctl_lba_len_flags *)
&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
lbalen->flags &= ~CTL_LLF_COMPARE;
@@ -8866,14 +8810,12 @@ ctl_cnw_cont(union ctl_io *io)
int
ctl_cnw(struct ctl_scsiio *ctsio)
{
- struct ctl_lun *lun;
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct ctl_lba_len_flags *lbalen;
uint64_t lba;
uint32_t num_blocks;
int flags, retval;
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
CTL_DEBUG_PRINT(("ctl_cnw: command: %#x\n", ctsio->cdb[0]));
flags = 0;
@@ -8954,15 +8896,13 @@ ctl_cnw(struct ctl_scsiio *ctsio)
int
ctl_verify(struct ctl_scsiio *ctsio)
{
- struct ctl_lun *lun;
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct ctl_lba_len_flags *lbalen;
uint64_t lba;
uint32_t num_blocks;
int bytchk, flags;
int retval;
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
CTL_DEBUG_PRINT(("ctl_verify: command: %#x\n", ctsio->cdb[0]));
bytchk = 0;
@@ -9058,27 +8998,25 @@ ctl_verify(struct ctl_scsiio *ctsio)
int
ctl_report_luns(struct ctl_scsiio *ctsio)
{
- struct ctl_softc *softc;
+ struct ctl_softc *softc = CTL_SOFTC(ctsio);
+ struct ctl_port *port = CTL_PORT(ctsio);
+ struct ctl_lun *lun, *request_lun = CTL_LUN(ctsio);
struct scsi_report_luns *cdb;
struct scsi_report_luns_data *lun_data;
- struct ctl_lun *lun, *request_lun;
- struct ctl_port *port;
- int num_luns, retval;
+ int num_filled, num_luns, num_port_luns, retval;
uint32_t alloc_len, lun_datalen;
- int num_filled;
uint32_t initidx, targ_lun_id, lun_id;
retval = CTL_RETVAL_COMPLETE;
cdb = (struct scsi_report_luns *)ctsio->cdb;
- port = ctl_io_port(&ctsio->io_hdr);
- softc = port->ctl_softc;
CTL_DEBUG_PRINT(("ctl_report_luns\n"));
- mtx_lock(&softc->ctl_lock);
num_luns = 0;
- for (targ_lun_id = 0; targ_lun_id < CTL_MAX_LUNS; targ_lun_id++) {
- if (ctl_lun_map_from_port(port, targ_lun_id) < CTL_MAX_LUNS)
+ num_port_luns = port->lun_map ? port->lun_map_size : CTL_MAX_LUNS;
+ mtx_lock(&softc->ctl_lock);
+ for (targ_lun_id = 0; targ_lun_id < num_port_luns; targ_lun_id++) {
+ if (ctl_lun_map_from_port(port, targ_lun_id) != UINT32_MAX)
num_luns++;
}
mtx_unlock(&softc->ctl_lock);
@@ -9123,9 +9061,6 @@ ctl_report_luns(struct ctl_scsiio *ctsio)
return (retval);
}
- request_lun = (struct ctl_lun *)
- ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
lun_datalen = sizeof(*lun_data) +
(num_luns * sizeof(struct scsi_report_luns_lundata));
@@ -9136,9 +9071,11 @@ ctl_report_luns(struct ctl_scsiio *ctsio)
initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
mtx_lock(&softc->ctl_lock);
- for (targ_lun_id = 0, num_filled = 0; targ_lun_id < CTL_MAX_LUNS && num_filled < num_luns; targ_lun_id++) {
+ for (targ_lun_id = 0, num_filled = 0;
+ targ_lun_id < num_port_luns && num_filled < num_luns;
+ targ_lun_id++) {
lun_id = ctl_lun_map_from_port(port, targ_lun_id);
- if (lun_id >= CTL_MAX_LUNS)
+ if (lun_id == UINT32_MAX)
continue;
lun = softc->ctl_luns[lun_id];
if (lun == NULL)
@@ -9177,19 +9114,10 @@ ctl_report_luns(struct ctl_scsiio *ctsio)
*/
lun_datalen = sizeof(*lun_data) +
(num_filled * sizeof(struct scsi_report_luns_lundata));
-
- if (lun_datalen < alloc_len) {
- ctsio->residual = alloc_len - lun_datalen;
- ctsio->kern_data_len = lun_datalen;
- ctsio->kern_total_len = lun_datalen;
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
+ ctsio->kern_data_len = min(lun_datalen, alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
/*
* We set this to the actual data length, regardless of how much
@@ -9214,10 +9142,10 @@ ctl_report_luns(struct ctl_scsiio *ctsio)
int
ctl_request_sense(struct ctl_scsiio *ctsio)
{
+ struct ctl_softc *softc = CTL_SOFTC(ctsio);
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_request_sense *cdb;
struct scsi_sense_data *sense_ptr;
- struct ctl_softc *softc;
- struct ctl_lun *lun;
uint32_t initidx;
int have_error;
u_int sense_len = SSD_FULL_SIZE;
@@ -9227,9 +9155,6 @@ ctl_request_sense(struct ctl_scsiio *ctsio)
cdb = (struct scsi_request_sense *)ctsio->cdb;
- softc = control_softc;
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
CTL_DEBUG_PRINT(("ctl_request_sense\n"));
/*
@@ -9243,20 +9168,16 @@ ctl_request_sense(struct ctl_scsiio *ctsio)
ctsio->kern_data_ptr = malloc(sizeof(*sense_ptr), M_CTL, M_WAITOK);
sense_ptr = (struct scsi_sense_data *)ctsio->kern_data_ptr;
ctsio->kern_sg_entries = 0;
+ ctsio->kern_rel_offset = 0;
/*
* struct scsi_sense_data, which is currently set to 256 bytes, is
* larger than the largest allowed value for the length field in the
* REQUEST SENSE CDB, which is 252 bytes as of SPC-4.
*/
- ctsio->residual = 0;
ctsio->kern_data_len = cdb->length;
ctsio->kern_total_len = cdb->length;
- ctsio->kern_data_resid = 0;
- ctsio->kern_rel_offset = 0;
- ctsio->kern_sg_entries = 0;
-
/*
* If we don't have a LUN, we don't have any pending sense.
*/
@@ -9372,31 +9293,19 @@ ctl_tur(struct ctl_scsiio *ctsio)
static int
ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len)
{
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_vpd_supported_pages *pages;
int sup_page_size;
- struct ctl_lun *lun;
int p;
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
sup_page_size = sizeof(struct scsi_vpd_supported_pages) *
SCSI_EVPD_NUM_SUPPORTED_PAGES;
ctsio->kern_data_ptr = malloc(sup_page_size, M_CTL, M_WAITOK | M_ZERO);
pages = (struct scsi_vpd_supported_pages *)ctsio->kern_data_ptr;
- ctsio->kern_sg_entries = 0;
-
- if (sup_page_size < alloc_len) {
- ctsio->residual = alloc_len - sup_page_size;
- ctsio->kern_data_len = sup_page_size;
- ctsio->kern_total_len = sup_page_size;
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
+ ctsio->kern_data_len = min(sup_page_size, alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
/*
* The control device is always connected. The disk device, on the
@@ -9447,27 +9356,17 @@ ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len)
static int
ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len)
{
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_vpd_unit_serial_number *sn_ptr;
- struct ctl_lun *lun;
int data_len;
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
data_len = 4 + CTL_SN_LEN;
ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
sn_ptr = (struct scsi_vpd_unit_serial_number *)ctsio->kern_data_ptr;
- if (data_len < alloc_len) {
- ctsio->residual = alloc_len - data_len;
- ctsio->kern_data_len = data_len;
- ctsio->kern_total_len = data_len;
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
+ ctsio->kern_data_len = min(data_len, alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
/*
* The control device is always connected. The disk device, on the
@@ -9506,29 +9405,17 @@ ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len)
static int
ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len)
{
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_vpd_extended_inquiry_data *eid_ptr;
- struct ctl_lun *lun;
int data_len;
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
data_len = sizeof(struct scsi_vpd_extended_inquiry_data);
ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
eid_ptr = (struct scsi_vpd_extended_inquiry_data *)ctsio->kern_data_ptr;
ctsio->kern_sg_entries = 0;
-
- if (data_len < alloc_len) {
- ctsio->residual = alloc_len - data_len;
- ctsio->kern_data_len = data_len;
- ctsio->kern_total_len = data_len;
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
- ctsio->kern_sg_entries = 0;
+ ctsio->kern_data_len = min(data_len, alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
/*
* The control device is always connected. The disk device, on the
@@ -9558,6 +9445,11 @@ ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len)
eid_ptr->flags4 = SVPD_EID_LUICLR;
/*
+ * We support revert to defaults (RTD) bit in MODE SELECT.
+ */
+ eid_ptr->flags5 = SVPD_EID_RTD_SUP;
+
+ /*
* XXX KDM in order to correctly answer this, we would need
* information from the SIM to determine how much sense data it
* can send. So this would really be a path inquiry field, most
@@ -9577,31 +9469,19 @@ ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len)
static int
ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len)
{
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_vpd_mode_page_policy *mpp_ptr;
- struct ctl_lun *lun;
int data_len;
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
data_len = sizeof(struct scsi_vpd_mode_page_policy) +
sizeof(struct scsi_vpd_mode_page_policy_descr);
ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
mpp_ptr = (struct scsi_vpd_mode_page_policy *)ctsio->kern_data_ptr;
- ctsio->kern_sg_entries = 0;
-
- if (data_len < alloc_len) {
- ctsio->residual = alloc_len - data_len;
- ctsio->kern_data_len = data_len;
- ctsio->kern_total_len = data_len;
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
+ ctsio->kern_data_len = min(data_len, alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
/*
* The control device is always connected. The disk device, on the
@@ -9631,19 +9511,14 @@ ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len)
static int
ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len)
{
+ struct ctl_softc *softc = CTL_SOFTC(ctsio);
+ struct ctl_port *port = CTL_PORT(ctsio);
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_vpd_device_id *devid_ptr;
struct scsi_vpd_id_descriptor *desc;
- struct ctl_softc *softc;
- struct ctl_lun *lun;
- struct ctl_port *port;
int data_len, g;
uint8_t proto;
- softc = control_softc;
-
- port = ctl_io_port(&ctsio->io_hdr);
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
data_len = sizeof(struct scsi_vpd_device_id) +
sizeof(struct scsi_vpd_id_descriptor) +
sizeof(struct scsi_vpd_id_rel_trgt_port_id) +
@@ -9659,19 +9534,10 @@ ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len)
ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
devid_ptr = (struct scsi_vpd_device_id *)ctsio->kern_data_ptr;
ctsio->kern_sg_entries = 0;
-
- if (data_len < alloc_len) {
- ctsio->residual = alloc_len - data_len;
- ctsio->kern_data_len = data_len;
- ctsio->kern_total_len = data_len;
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
+ ctsio->kern_data_len = min(data_len, alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
/*
* The control device is always connected. The disk device, on the
@@ -9756,16 +9622,14 @@ ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len)
static int
ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len)
{
- struct ctl_softc *softc = control_softc;
+ struct ctl_softc *softc = CTL_SOFTC(ctsio);
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_vpd_scsi_ports *sp;
struct scsi_vpd_port_designation *pd;
struct scsi_vpd_port_designation_cont *pdc;
- struct ctl_lun *lun;
struct ctl_port *port;
int data_len, num_target_ports, iid_len, id_len;
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
num_target_ports = 0;
iid_len = 0;
id_len = 0;
@@ -9774,7 +9638,7 @@ ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len)
if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
continue;
if (lun != NULL &&
- ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS)
+ ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX)
continue;
num_target_ports++;
if (port->init_devid)
@@ -9790,19 +9654,10 @@ ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len)
ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
sp = (struct scsi_vpd_scsi_ports *)ctsio->kern_data_ptr;
ctsio->kern_sg_entries = 0;
-
- if (data_len < alloc_len) {
- ctsio->residual = alloc_len - data_len;
- ctsio->kern_data_len = data_len;
- ctsio->kern_total_len = data_len;
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
+ ctsio->kern_data_len = min(data_len, alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
/*
* The control device is always connected. The disk device, on the
@@ -9825,7 +9680,7 @@ ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len)
if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
continue;
if (lun != NULL &&
- ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS)
+ ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX)
continue;
scsi_ulto2b(port->targ_port, pd->relative_port_id);
if (port->init_devid) {
@@ -9859,28 +9714,17 @@ ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len)
static int
ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, int alloc_len)
{
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_vpd_block_limits *bl_ptr;
- struct ctl_lun *lun;
uint64_t ival;
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
ctsio->kern_data_ptr = malloc(sizeof(*bl_ptr), M_CTL, M_WAITOK | M_ZERO);
bl_ptr = (struct scsi_vpd_block_limits *)ctsio->kern_data_ptr;
ctsio->kern_sg_entries = 0;
-
- if (sizeof(*bl_ptr) < alloc_len) {
- ctsio->residual = alloc_len - sizeof(*bl_ptr);
- ctsio->kern_data_len = sizeof(*bl_ptr);
- ctsio->kern_total_len = sizeof(*bl_ptr);
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
+ ctsio->kern_data_len = min(sizeof(*bl_ptr), alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
/*
* The control device is always connected. The disk device, on the
@@ -9936,29 +9780,17 @@ ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, int alloc_len)
static int
ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len)
{
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_vpd_block_device_characteristics *bdc_ptr;
- struct ctl_lun *lun;
const char *value;
u_int i;
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
ctsio->kern_data_ptr = malloc(sizeof(*bdc_ptr), M_CTL, M_WAITOK | M_ZERO);
bdc_ptr = (struct scsi_vpd_block_device_characteristics *)ctsio->kern_data_ptr;
ctsio->kern_sg_entries = 0;
-
- if (sizeof(*bdc_ptr) < alloc_len) {
- ctsio->residual = alloc_len - sizeof(*bdc_ptr);
- ctsio->kern_data_len = sizeof(*bdc_ptr);
- ctsio->kern_total_len = sizeof(*bdc_ptr);
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
- ctsio->kern_sg_entries = 0;
+ ctsio->kern_data_len = min(sizeof(*bdc_ptr), alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
/*
* The control device is always connected. The disk device, on the
@@ -9996,28 +9828,16 @@ ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len)
static int
ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len)
{
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_vpd_logical_block_prov *lbp_ptr;
- struct ctl_lun *lun;
const char *value;
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
ctsio->kern_data_ptr = malloc(sizeof(*lbp_ptr), M_CTL, M_WAITOK | M_ZERO);
lbp_ptr = (struct scsi_vpd_logical_block_prov *)ctsio->kern_data_ptr;
ctsio->kern_sg_entries = 0;
-
- if (sizeof(*lbp_ptr) < alloc_len) {
- ctsio->residual = alloc_len - sizeof(*lbp_ptr);
- ctsio->kern_data_len = sizeof(*lbp_ptr);
- ctsio->kern_total_len = sizeof(*lbp_ptr);
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
- ctsio->kern_sg_entries = 0;
+ ctsio->kern_data_len = min(sizeof(*lbp_ptr), alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
/*
* The control device is always connected. The disk device, on the
@@ -10059,11 +9879,10 @@ ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len)
static int
ctl_inquiry_evpd(struct ctl_scsiio *ctsio)
{
- struct ctl_lun *lun;
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_inquiry *cdb;
int alloc_len, retval;
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
cdb = (struct scsi_inquiry *)ctsio->cdb;
alloc_len = scsi_2btoul(cdb->length);
@@ -10126,21 +9945,19 @@ err:
static int
ctl_inquiry_std(struct ctl_scsiio *ctsio)
{
+ struct ctl_softc *softc = CTL_SOFTC(ctsio);
+ struct ctl_port *port = CTL_PORT(ctsio);
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_inquiry_data *inq_ptr;
struct scsi_inquiry *cdb;
- struct ctl_softc *softc = control_softc;
- struct ctl_port *port;
- struct ctl_lun *lun;
char *val;
uint32_t alloc_len, data_len;
ctl_port_type port_type;
- port = ctl_io_port(&ctsio->io_hdr);
port_type = port->port_type;
if (port_type == CTL_PORT_IOCTL || port_type == CTL_PORT_INTERNAL)
port_type = CTL_PORT_SCSI;
- lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
cdb = (struct scsi_inquiry *)ctsio->cdb;
alloc_len = scsi_2btoul(cdb->length);
@@ -10153,18 +9970,9 @@ ctl_inquiry_std(struct ctl_scsiio *ctsio)
ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
inq_ptr = (struct scsi_inquiry_data *)ctsio->kern_data_ptr;
ctsio->kern_sg_entries = 0;
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
-
- if (data_len < alloc_len) {
- ctsio->residual = alloc_len - data_len;
- ctsio->kern_data_len = data_len;
- ctsio->kern_total_len = data_len;
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
+ ctsio->kern_data_len = min(data_len, alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
if (lun != NULL) {
if ((lun->flags & CTL_LUN_PRIMARY_SC) ||
@@ -10354,14 +10162,13 @@ ctl_inquiry(struct ctl_scsiio *ctsio)
int
ctl_get_config(struct ctl_scsiio *ctsio)
{
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_get_config_header *hdr;
struct scsi_get_config_feature *feature;
struct scsi_get_config *cdb;
- struct ctl_lun *lun;
uint32_t alloc_len, data_len;
int rt, starting;
- lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
cdb = (struct scsi_get_config *)ctsio->cdb;
rt = (cdb->rt & SGC_RT_MASK);
starting = scsi_2btoul(cdb->starting_feature);
@@ -10382,7 +10189,6 @@ ctl_get_config(struct ctl_scsiio *ctsio)
sizeof(struct scsi_get_config_feature) + 4;
ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
ctsio->kern_sg_entries = 0;
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
hdr = (struct scsi_get_config_header *)ctsio->kern_data_ptr;
@@ -10550,15 +10356,8 @@ done:
data_len = (uint8_t *)feature - (uint8_t *)hdr;
}
scsi_ulto4b(data_len - 4, hdr->data_length);
- if (data_len < alloc_len) {
- ctsio->residual = alloc_len - data_len;
- ctsio->kern_data_len = data_len;
- ctsio->kern_total_len = data_len;
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
+ ctsio->kern_data_len = min(data_len, alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
ctl_set_success(ctsio);
ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
@@ -10572,11 +10371,9 @@ ctl_get_event_status(struct ctl_scsiio *ctsio)
{
struct scsi_get_event_status_header *hdr;
struct scsi_get_event_status *cdb;
- struct ctl_lun *lun;
uint32_t alloc_len, data_len;
int notif_class;
- lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
cdb = (struct scsi_get_event_status *)ctsio->cdb;
if ((cdb->byte2 & SGESN_POLLED) == 0) {
ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
@@ -10590,18 +10387,9 @@ ctl_get_event_status(struct ctl_scsiio *ctsio)
data_len = sizeof(struct scsi_get_event_status_header);
ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
ctsio->kern_sg_entries = 0;
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
-
- if (data_len < alloc_len) {
- ctsio->residual = alloc_len - data_len;
- ctsio->kern_data_len = data_len;
- ctsio->kern_total_len = data_len;
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
+ ctsio->kern_data_len = min(data_len, alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
hdr = (struct scsi_get_event_status_header *)ctsio->kern_data_ptr;
scsi_ulto2b(0, hdr->descr_length);
@@ -10620,28 +10408,17 @@ ctl_mechanism_status(struct ctl_scsiio *ctsio)
{
struct scsi_mechanism_status_header *hdr;
struct scsi_mechanism_status *cdb;
- struct ctl_lun *lun;
uint32_t alloc_len, data_len;
- lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
cdb = (struct scsi_mechanism_status *)ctsio->cdb;
alloc_len = scsi_2btoul(cdb->length);
data_len = sizeof(struct scsi_mechanism_status_header);
ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
ctsio->kern_sg_entries = 0;
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
-
- if (data_len < alloc_len) {
- ctsio->residual = alloc_len - data_len;
- ctsio->kern_data_len = data_len;
- ctsio->kern_total_len = data_len;
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
+ ctsio->kern_data_len = min(data_len, alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
hdr = (struct scsi_mechanism_status_header *)ctsio->kern_data_ptr;
hdr->state1 = 0x00;
@@ -10671,14 +10448,13 @@ ctl_ultomsf(uint32_t lba, uint8_t *buf)
int
ctl_read_toc(struct ctl_scsiio *ctsio)
{
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_read_toc_hdr *hdr;
struct scsi_read_toc_type01_descr *descr;
struct scsi_read_toc *cdb;
- struct ctl_lun *lun;
uint32_t alloc_len, data_len;
int format, msf;
- lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
cdb = (struct scsi_read_toc *)ctsio->cdb;
msf = (cdb->byte2 & CD_MSF) != 0;
format = cdb->format;
@@ -10691,18 +10467,9 @@ ctl_read_toc(struct ctl_scsiio *ctsio)
data_len += sizeof(struct scsi_read_toc_type01_descr);
ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
ctsio->kern_sg_entries = 0;
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
-
- if (data_len < alloc_len) {
- ctsio->residual = alloc_len - data_len;
- ctsio->kern_data_len = data_len;
- ctsio->kern_total_len = data_len;
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
+ ctsio->kern_data_len = min(data_len, alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
hdr = (struct scsi_read_toc_hdr *)ctsio->kern_data_ptr;
if (format == 0) {
@@ -11425,7 +11192,7 @@ ctl_failover_io(union ctl_io *io, int have_lock)
static void
ctl_failover_lun(union ctl_io *rio)
{
- struct ctl_softc *softc = control_softc;
+ struct ctl_softc *softc = CTL_SOFTC(rio);
struct ctl_lun *lun;
struct ctl_io_hdr *io, *next_io;
uint32_t targ_lun;
@@ -11435,18 +11202,17 @@ ctl_failover_lun(union ctl_io *rio)
/* Find and lock the LUN. */
mtx_lock(&softc->ctl_lock);
- if ((targ_lun < CTL_MAX_LUNS) &&
- ((lun = softc->ctl_luns[targ_lun]) != NULL)) {
- mtx_lock(&lun->lun_lock);
- mtx_unlock(&softc->ctl_lock);
- if (lun->flags & CTL_LUN_DISABLED) {
- mtx_unlock(&lun->lun_lock);
- return;
- }
- } else {
+ if (targ_lun > CTL_MAX_LUNS ||
+ (lun = softc->ctl_luns[targ_lun]) == NULL) {
mtx_unlock(&softc->ctl_lock);
return;
}
+ mtx_lock(&lun->lun_lock);
+ mtx_unlock(&softc->ctl_lock);
+ if (lun->flags & CTL_LUN_DISABLED) {
+ mtx_unlock(&lun->lun_lock);
+ return;
+ }
if (softc->ha_mode == CTL_HA_MODE_XFER) {
TAILQ_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) {
@@ -11514,15 +11280,13 @@ ctl_scsiio_precheck(struct ctl_softc *softc, struct ctl_scsiio *ctsio)
struct ctl_lun *lun;
const struct ctl_cmd_entry *entry;
uint32_t initidx, targ_lun;
- int retval;
-
- retval = 0;
+ int retval = 0;
lun = NULL;
-
targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun;
- if ((targ_lun < CTL_MAX_LUNS)
- && ((lun = softc->ctl_luns[targ_lun]) != NULL)) {
+ if (targ_lun < CTL_MAX_LUNS)
+ lun = softc->ctl_luns[targ_lun];
+ if (lun) {
/*
* If the LUN is invalid, pretend that it doesn't exist.
* It will go away as soon as all pending I/O has been
@@ -11532,29 +11296,21 @@ ctl_scsiio_precheck(struct ctl_softc *softc, struct ctl_scsiio *ctsio)
if (lun->flags & CTL_LUN_DISABLED) {
mtx_unlock(&lun->lun_lock);
lun = NULL;
- ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = NULL;
- ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = NULL;
- } else {
- ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = lun;
- ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr =
- lun->be_lun;
+ }
+ }
+ CTL_LUN(ctsio) = lun;
+ if (lun) {
+ CTL_BACKEND_LUN(ctsio) = lun->be_lun;
- /*
- * Every I/O goes into the OOA queue for a
- * particular LUN, and stays there until completion.
- */
+ /*
+ * Every I/O goes into the OOA queue for a particular LUN,
+ * and stays there until completion.
+ */
#ifdef CTL_TIME_IO
- if (TAILQ_EMPTY(&lun->ooa_queue)) {
- lun->idle_time += getsbinuptime() -
- lun->last_busy;
- }
+ if (TAILQ_EMPTY(&lun->ooa_queue))
+ lun->idle_time += getsbinuptime() - lun->last_busy;
#endif
- TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr,
- ooa_links);
- }
- } else {
- ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = NULL;
- ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = NULL;
+ TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
}
/* Get command entry and return error if it is unsuppotyed. */
@@ -11858,7 +11614,7 @@ static int
ctl_target_reset(struct ctl_softc *softc, union ctl_io *io,
ctl_ua_type ua_type)
{
- struct ctl_port *port;
+ struct ctl_port *port = CTL_PORT(io);
struct ctl_lun *lun;
int retval;
@@ -11879,10 +11635,9 @@ ctl_target_reset(struct ctl_softc *softc, union ctl_io *io,
retval = 0;
mtx_lock(&softc->ctl_lock);
- port = ctl_io_port(&io->io_hdr);
STAILQ_FOREACH(lun, &softc->lun_list, links) {
if (port != NULL &&
- ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS)
+ ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX)
continue;
retval += ctl_do_lun_reset(lun, io, ua_type);
}
@@ -11955,8 +11710,10 @@ ctl_do_lun_reset(struct ctl_lun *lun, union ctl_io *io, ctl_ua_type ua_type)
ctl_clear_mask(lun->have_ca, i);
#endif
lun->prevent_count = 0;
- for (i = 0; i < CTL_MAX_INITIATORS; i++)
- ctl_clear_mask(lun->prevent, i);
+ if (lun->prevent) {
+ for (i = 0; i < CTL_MAX_INITIATORS; i++)
+ ctl_clear_mask(lun->prevent, i);
+ }
mtx_unlock(&lun->lun_lock);
return (0);
@@ -11971,7 +11728,7 @@ ctl_lun_reset(struct ctl_softc *softc, union ctl_io *io)
targ_lun = io->io_hdr.nexus.targ_mapped_lun;
mtx_lock(&softc->ctl_lock);
- if ((targ_lun >= CTL_MAX_LUNS) ||
+ if (targ_lun >= CTL_MAX_LUNS ||
(lun = softc->ctl_luns[targ_lun]) == NULL) {
mtx_unlock(&softc->ctl_lock);
io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
@@ -12041,7 +11798,7 @@ ctl_abort_tasks_lun(struct ctl_lun *lun, uint32_t targ_port, uint32_t init_id,
static int
ctl_abort_task_set(union ctl_io *io)
{
- struct ctl_softc *softc = control_softc;
+ struct ctl_softc *softc = CTL_SOFTC(io);
struct ctl_lun *lun;
uint32_t targ_lun;
@@ -12050,7 +11807,7 @@ ctl_abort_task_set(union ctl_io *io)
*/
targ_lun = io->io_hdr.nexus.targ_mapped_lun;
mtx_lock(&softc->ctl_lock);
- if ((targ_lun >= CTL_MAX_LUNS) ||
+ if (targ_lun >= CTL_MAX_LUNS ||
(lun = softc->ctl_luns[targ_lun]) == NULL) {
mtx_unlock(&softc->ctl_lock);
io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
@@ -12075,7 +11832,7 @@ ctl_abort_task_set(union ctl_io *io)
static int
ctl_i_t_nexus_reset(union ctl_io *io)
{
- struct ctl_softc *softc = control_softc;
+ struct ctl_softc *softc = CTL_SOFTC(io);
struct ctl_lun *lun;
uint32_t initidx;
@@ -12102,7 +11859,7 @@ ctl_i_t_nexus_reset(union ctl_io *io)
#endif
if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == initidx))
lun->flags &= ~CTL_LUN_RESERVED;
- if (ctl_is_set(lun->prevent, initidx)) {
+ if (lun->prevent && ctl_is_set(lun->prevent, initidx)) {
ctl_clear_mask(lun->prevent, initidx);
lun->prevent_count--;
}
@@ -12117,9 +11874,9 @@ ctl_i_t_nexus_reset(union ctl_io *io)
static int
ctl_abort_task(union ctl_io *io)
{
+ struct ctl_softc *softc = CTL_SOFTC(io);
union ctl_io *xio;
struct ctl_lun *lun;
- struct ctl_softc *softc;
#if 0
struct sbuf sb;
char printbuf[128];
@@ -12127,7 +11884,6 @@ ctl_abort_task(union ctl_io *io)
int found;
uint32_t targ_lun;
- softc = control_softc;
found = 0;
/*
@@ -12135,7 +11891,7 @@ ctl_abort_task(union ctl_io *io)
*/
targ_lun = io->io_hdr.nexus.targ_mapped_lun;
mtx_lock(&softc->ctl_lock);
- if ((targ_lun >= CTL_MAX_LUNS) ||
+ if (targ_lun >= CTL_MAX_LUNS ||
(lun = softc->ctl_luns[targ_lun]) == NULL) {
mtx_unlock(&softc->ctl_lock);
io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
@@ -12251,16 +12007,15 @@ ctl_abort_task(union ctl_io *io)
static int
ctl_query_task(union ctl_io *io, int task_set)
{
+ struct ctl_softc *softc = CTL_SOFTC(io);
union ctl_io *xio;
struct ctl_lun *lun;
- struct ctl_softc *softc;
int found = 0;
uint32_t targ_lun;
- softc = control_softc;
targ_lun = io->io_hdr.nexus.targ_mapped_lun;
mtx_lock(&softc->ctl_lock);
- if ((targ_lun >= CTL_MAX_LUNS) ||
+ if (targ_lun >= CTL_MAX_LUNS ||
(lun = softc->ctl_luns[targ_lun]) == NULL) {
mtx_unlock(&softc->ctl_lock);
io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
@@ -12292,15 +12047,14 @@ ctl_query_task(union ctl_io *io, int task_set)
static int
ctl_query_async_event(union ctl_io *io)
{
+ struct ctl_softc *softc = CTL_SOFTC(io);
struct ctl_lun *lun;
- struct ctl_softc *softc;
ctl_ua_type ua;
uint32_t targ_lun, initidx;
- softc = control_softc;
targ_lun = io->io_hdr.nexus.targ_mapped_lun;
mtx_lock(&softc->ctl_lock);
- if ((targ_lun >= CTL_MAX_LUNS) ||
+ if (targ_lun >= CTL_MAX_LUNS ||
(lun = softc->ctl_luns[targ_lun]) == NULL) {
mtx_unlock(&softc->ctl_lock);
io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
@@ -12321,7 +12075,7 @@ ctl_query_async_event(union ctl_io *io)
static void
ctl_run_task(union ctl_io *io)
{
- struct ctl_softc *softc = control_softc;
+ struct ctl_softc *softc = CTL_SOFTC(io);
int retval = 1;
CTL_DEBUG_PRINT(("ctl_run_task\n"));
@@ -12383,29 +12137,25 @@ ctl_run_task(union ctl_io *io)
static void
ctl_handle_isc(union ctl_io *io)
{
- int free_io;
+ struct ctl_softc *softc = CTL_SOFTC(io);
struct ctl_lun *lun;
- struct ctl_softc *softc = control_softc;
+ const struct ctl_cmd_entry *entry;
uint32_t targ_lun;
targ_lun = io->io_hdr.nexus.targ_mapped_lun;
- lun = softc->ctl_luns[targ_lun];
-
switch (io->io_hdr.msg_type) {
case CTL_MSG_SERIALIZE:
- free_io = ctl_serialize_other_sc_cmd(&io->scsiio);
+ ctl_serialize_other_sc_cmd(&io->scsiio);
break;
- case CTL_MSG_R2R: {
- const struct ctl_cmd_entry *entry;
-
- /*
- * This is only used in SER_ONLY mode.
- */
- free_io = 0;
+ case CTL_MSG_R2R: /* Only used in SER_ONLY mode. */
entry = ctl_get_cmd_entry(&io->scsiio, NULL);
+ if (targ_lun >= CTL_MAX_LUNS ||
+ (lun = softc->ctl_luns[targ_lun]) == NULL) {
+ ctl_done(io);
+ break;
+ }
mtx_lock(&lun->lun_lock);
- if (ctl_scsiio_lun_check(lun,
- entry, (struct ctl_scsiio *)io) != 0) {
+ if (ctl_scsiio_lun_check(lun, entry, &io->scsiio) != 0) {
mtx_unlock(&lun->lun_lock);
ctl_done(io);
break;
@@ -12414,51 +12164,45 @@ ctl_handle_isc(union ctl_io *io)
mtx_unlock(&lun->lun_lock);
ctl_enqueue_rtr(io);
break;
- }
case CTL_MSG_FINISH_IO:
if (softc->ha_mode == CTL_HA_MODE_XFER) {
- free_io = 0;
ctl_done(io);
- } else {
- free_io = 1;
- mtx_lock(&lun->lun_lock);
- TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr,
- ooa_links);
- ctl_check_blocked(lun);
- mtx_unlock(&lun->lun_lock);
+ break;
+ }
+ if (targ_lun >= CTL_MAX_LUNS ||
+ (lun = softc->ctl_luns[targ_lun]) == NULL) {
+ ctl_free_io(io);
+ break;
}
+ mtx_lock(&lun->lun_lock);
+ TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links);
+ ctl_check_blocked(lun);
+ mtx_unlock(&lun->lun_lock);
+ ctl_free_io(io);
break;
case CTL_MSG_PERS_ACTION:
- ctl_hndl_per_res_out_on_other_sc(
- (union ctl_ha_msg *)&io->presio.pr_msg);
- free_io = 1;
+ ctl_hndl_per_res_out_on_other_sc(io);
+ ctl_free_io(io);
break;
case CTL_MSG_BAD_JUJU:
- free_io = 0;
ctl_done(io);
break;
- case CTL_MSG_DATAMOVE:
- /* Only used in XFER mode */
- free_io = 0;
+ case CTL_MSG_DATAMOVE: /* Only used in XFER mode */
ctl_datamove_remote(io);
break;
- case CTL_MSG_DATAMOVE_DONE:
- /* Only used in XFER mode */
- free_io = 0;
+ case CTL_MSG_DATAMOVE_DONE: /* Only used in XFER mode */
io->scsiio.be_move_done(io);
break;
case CTL_MSG_FAILOVER:
ctl_failover_lun(io);
- free_io = 1;
+ ctl_free_io(io);
break;
default:
- free_io = 1;
printf("%s: Invalid message type %d\n",
__func__, io->io_hdr.msg_type);
+ ctl_free_io(io);
break;
}
- if (free_io)
- ctl_free_io(io);
}
@@ -12610,14 +12354,15 @@ ctl_datamove_timer_wakeup(void *arg)
void
ctl_datamove(union ctl_io *io)
{
- struct ctl_lun *lun;
void (*fe_datamove)(union ctl_io *io);
- mtx_assert(&control_softc->ctl_lock, MA_NOTOWNED);
+ mtx_assert(&((struct ctl_softc *)CTL_SOFTC(io))->ctl_lock, MA_NOTOWNED);
CTL_DEBUG_PRINT(("ctl_datamove\n"));
- lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+ /* No data transferred yet. Frontend must update this when done. */
+ io->scsiio.kern_data_resid = io->scsiio.kern_data_len;
+
#ifdef CTL_TIME_IO
if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) {
char str[256];
@@ -12698,7 +12443,7 @@ ctl_datamove(union ctl_io *io)
return;
}
- fe_datamove = ctl_io_port(&io->io_hdr)->fe_datamove;
+ fe_datamove = CTL_PORT(io)->fe_datamove;
fe_datamove(io);
}
@@ -12716,15 +12461,14 @@ ctl_send_datamove_done(union ctl_io *io, int have_lock)
msg.hdr.serializing_sc = io->io_hdr.serializing_sc;
msg.hdr.nexus = io->io_hdr.nexus;
msg.hdr.status = io->io_hdr.status;
+ msg.scsi.kern_data_resid = io->scsiio.kern_data_resid;
msg.scsi.tag_num = io->scsiio.tag_num;
msg.scsi.tag_type = io->scsiio.tag_type;
msg.scsi.scsi_status = io->scsiio.scsi_status;
memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data,
io->scsiio.sense_len);
msg.scsi.sense_len = io->scsiio.sense_len;
- msg.scsi.sense_residual = io->scsiio.sense_residual;
- msg.scsi.fetd_status = io->io_hdr.port_status;
- msg.scsi.residual = io->scsiio.residual;
+ msg.scsi.port_status = io->io_hdr.port_status;
io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
if (io->io_hdr.flags & CTL_FLAG_FAILOVER) {
ctl_failover_io(io, /*have_lock*/ have_lock);
@@ -12816,7 +12560,7 @@ ctl_datamove_remote_write(union ctl_io *io)
*/
io->scsiio.be_move_done = ctl_datamove_remote_dm_write_cb;
- fe_datamove = ctl_io_port(&io->io_hdr)->fe_datamove;
+ fe_datamove = CTL_PORT(io)->fe_datamove;
fe_datamove(io);
}
@@ -12891,7 +12635,7 @@ ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq)
/* XXX KDM add checks like the ones in ctl_datamove? */
- fe_datamove = ctl_io_port(&io->io_hdr)->fe_datamove;
+ fe_datamove = CTL_PORT(io)->fe_datamove;
fe_datamove(io);
}
@@ -13107,7 +12851,7 @@ static void
ctl_datamove_remote(union ctl_io *io)
{
- mtx_assert(&control_softc->ctl_lock, MA_NOTOWNED);
+ mtx_assert(&((struct ctl_softc *)CTL_SOFTC(io))->ctl_lock, MA_NOTOWNED);
if (io->io_hdr.flags & CTL_FLAG_FAILOVER) {
ctl_failover_io(io, /*have_lock*/ 0);
@@ -13143,14 +12887,14 @@ ctl_datamove_remote(union ctl_io *io)
static void
ctl_process_done(union ctl_io *io)
{
- struct ctl_lun *lun;
- struct ctl_softc *softc = control_softc;
+ struct ctl_softc *softc = CTL_SOFTC(io);
+ struct ctl_port *port = CTL_PORT(io);
+ struct ctl_lun *lun = CTL_LUN(io);
void (*fe_done)(union ctl_io *io);
union ctl_ha_msg msg;
- uint32_t targ_port = io->io_hdr.nexus.targ_port;
CTL_DEBUG_PRINT(("ctl_process_done\n"));
- fe_done = softc->ctl_ports[targ_port]->fe_done;
+ fe_done = port->fe_done;
#ifdef CTL_TIME_IO
if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) {
@@ -13200,7 +12944,6 @@ ctl_process_done(union ctl_io *io)
__func__, io->io_hdr.io_type);
}
- lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
if (lun == NULL) {
CTL_DEBUG_PRINT(("NULL LUN for lun %d\n",
io->io_hdr.nexus.targ_mapped_lun));
@@ -13254,11 +12997,13 @@ ctl_process_done(union ctl_io *io)
*/
if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS &&
io->io_hdr.io_type == CTL_IO_SCSI) {
-#ifdef CTL_TIME_IO
- struct bintime cur_bt;
-#endif
int type;
+#ifdef CTL_TIME_IO
+ struct bintime bt;
+ getbinuptime(&bt);
+ bintime_sub(&bt, &io->io_hdr.start_bt);
+#endif
if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
CTL_FLAG_DATA_IN)
type = CTL_STATS_READ;
@@ -13268,18 +13013,38 @@ ctl_process_done(union ctl_io *io)
else
type = CTL_STATS_NO_IO;
- lun->stats.ports[targ_port].bytes[type] +=
+#ifdef CTL_LEGACY_STATS
+ uint32_t targ_port = port->targ_port;
+ lun->legacy_stats.ports[targ_port].bytes[type] +=
io->scsiio.kern_total_len;
- lun->stats.ports[targ_port].operations[type]++;
+ lun->legacy_stats.ports[targ_port].operations[type] ++;
+ lun->legacy_stats.ports[targ_port].num_dmas[type] +=
+ io->io_hdr.num_dmas;
#ifdef CTL_TIME_IO
- bintime_add(&lun->stats.ports[targ_port].dma_time[type],
+ bintime_add(&lun->legacy_stats.ports[targ_port].dma_time[type],
&io->io_hdr.dma_bt);
- getbinuptime(&cur_bt);
- bintime_sub(&cur_bt, &io->io_hdr.start_bt);
- bintime_add(&lun->stats.ports[targ_port].time[type], &cur_bt);
+ bintime_add(&lun->legacy_stats.ports[targ_port].time[type],
+ &bt);
#endif
- lun->stats.ports[targ_port].num_dmas[type] +=
- io->io_hdr.num_dmas;
+#endif /* CTL_LEGACY_STATS */
+
+ lun->stats.bytes[type] += io->scsiio.kern_total_len;
+ lun->stats.operations[type] ++;
+ lun->stats.dmas[type] += io->io_hdr.num_dmas;
+#ifdef CTL_TIME_IO
+ bintime_add(&lun->stats.dma_time[type], &io->io_hdr.dma_bt);
+ bintime_add(&lun->stats.time[type], &bt);
+#endif
+
+ mtx_lock(&port->port_lock);
+ port->stats.bytes[type] += io->scsiio.kern_total_len;
+ port->stats.operations[type] ++;
+ port->stats.dmas[type] += io->io_hdr.num_dmas;
+#ifdef CTL_TIME_IO
+ bintime_add(&port->stats.dma_time[type], &io->io_hdr.dma_bt);
+ bintime_add(&port->stats.time[type], &bt);
+#endif
+ mtx_unlock(&port->port_lock);
}
/*
@@ -13354,42 +13119,36 @@ bailout:
int
ctl_queue_sense(union ctl_io *io)
{
+ struct ctl_softc *softc = CTL_SOFTC(io);
+ struct ctl_port *port = CTL_PORT(io);
struct ctl_lun *lun;
- struct ctl_port *port;
- struct ctl_softc *softc;
uint32_t initidx, targ_lun;
- softc = control_softc;
-
CTL_DEBUG_PRINT(("ctl_queue_sense\n"));
+ targ_lun = ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun);
+
/*
* LUN lookup will likely move to the ctl_work_thread() once we
* have our new queueing infrastructure (that doesn't put things on
* a per-LUN queue initially). That is so that we can handle
* things like an INQUIRY to a LUN that we don't have enabled. We
* can't deal with that right now.
+ * If we don't have a LUN for this, just toss the sense information.
*/
mtx_lock(&softc->ctl_lock);
-
- /*
- * If we don't have a LUN for this, just toss the sense
- * information.
- */
- port = ctl_io_port(&ctsio->io_hdr);
- targ_lun = ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun);
- if ((targ_lun < CTL_MAX_LUNS)
- && (softc->ctl_luns[targ_lun] != NULL))
- lun = softc->ctl_luns[targ_lun];
- else
+ if (targ_lun >= CTL_MAX_LUNS ||
+ (lun = softc->ctl_luns[targ_lun]) == NULL) {
+ mtx_unlock(&softc->ctl_lock);
goto bailout;
-
- initidx = ctl_get_initindex(&io->io_hdr.nexus);
-
+ }
mtx_lock(&lun->lun_lock);
+ mtx_unlock(&softc->ctl_lock);
+
/*
* Already have CA set for this LUN...toss the sense information.
*/
+ initidx = ctl_get_initindex(&io->io_hdr.nexus);
if (ctl_is_set(lun->have_ca, initidx)) {
mtx_unlock(&lun->lun_lock);
goto bailout;
@@ -13402,10 +13161,7 @@ ctl_queue_sense(union ctl_io *io)
mtx_unlock(&lun->lun_lock);
bailout:
- mtx_unlock(&softc->ctl_lock);
-
ctl_free_io(io);
-
return (CTL_RETVAL_COMPLETE);
}
#endif
@@ -13417,7 +13173,7 @@ bailout:
int
ctl_queue(union ctl_io *io)
{
- struct ctl_port *port;
+ struct ctl_port *port = CTL_PORT(io);
CTL_DEBUG_PRINT(("ctl_queue cdb[0]=%02X\n", io->scsiio.cdb[0]));
@@ -13427,7 +13183,6 @@ ctl_queue(union ctl_io *io)
#endif /* CTL_TIME_IO */
/* Map FE-specific LUN ID into global one. */
- port = ctl_io_port(&io->io_hdr);
io->io_hdr.nexus.targ_mapped_lun =
ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun);
@@ -13460,9 +13215,8 @@ ctl_done_timer_wakeup(void *arg)
void
ctl_serseq_done(union ctl_io *io)
{
- struct ctl_lun *lun;
+ struct ctl_lun *lun = CTL_LUN(io);;
- lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
if (lun->be_lun == NULL ||
lun->be_lun->serseq == CTL_LUN_SERSEQ_OFF)
return;
@@ -13512,9 +13266,7 @@ ctl_done(union ctl_io *io)
if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) {
io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE;
} else {
- struct ctl_lun *lun;
-
- lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+ struct ctl_lun *lun = CTL_LUN(io);
if ((lun != NULL)
&& (lun->delay_info.done_delay > 0)) {
@@ -13544,7 +13296,7 @@ ctl_work_thread(void *arg)
CTL_DEBUG_PRINT(("ctl_work_thread starting\n"));
- for (;;) {
+ while (!softc->shutdown) {
/*
* We handle the queues in this order:
* - ISC
@@ -13594,6 +13346,8 @@ ctl_work_thread(void *arg)
/* Sleep until we have something to do. */
mtx_sleep(thr, &thr->queue_lock, PDROP | PRIBIO, "-", 0);
}
+ thr->thread = NULL;
+ kthread_exit();
}
static void
@@ -13604,7 +13358,7 @@ ctl_lun_thread(void *arg)
CTL_DEBUG_PRINT(("ctl_lun_thread starting\n"));
- for (;;) {
+ while (!softc->shutdown) {
mtx_lock(&softc->ctl_lock);
be_lun = STAILQ_FIRST(&softc->pending_lun_queue);
if (be_lun != NULL) {
@@ -13618,6 +13372,8 @@ ctl_lun_thread(void *arg)
mtx_sleep(&softc->pending_lun_queue, &softc->ctl_lock,
PDROP | PRIBIO, "-", 0);
}
+ softc->lun_thread = NULL;
+ kthread_exit();
}
static void
@@ -13633,7 +13389,7 @@ ctl_thresh_thread(void *arg)
CTL_DEBUG_PRINT(("ctl_thresh_thread starting\n"));
- for (;;) {
+ while (!softc->shutdown) {
mtx_lock(&softc->ctl_lock);
STAILQ_FOREACH(lun, &softc->lun_list, links) {
if ((lun->flags & CTL_LUN_DISABLED) ||
@@ -13718,15 +13474,17 @@ ctl_thresh_thread(void *arg)
mtx_lock(&softc->ctl_lock);
}
}
- mtx_unlock(&softc->ctl_lock);
- pause("-", CTL_LBP_PERIOD * hz);
+ mtx_sleep(&softc->thresh_thread, &softc->ctl_lock,
+ PDROP | PRIBIO, "-", CTL_LBP_PERIOD * hz);
}
+ softc->thresh_thread = NULL;
+ kthread_exit();
}
static void
ctl_enqueue_incoming(union ctl_io *io)
{
- struct ctl_softc *softc = control_softc;
+ struct ctl_softc *softc = CTL_SOFTC(io);
struct ctl_thread *thr;
u_int idx;
@@ -13742,7 +13500,7 @@ ctl_enqueue_incoming(union ctl_io *io)
static void
ctl_enqueue_rtr(union ctl_io *io)
{
- struct ctl_softc *softc = control_softc;
+ struct ctl_softc *softc = CTL_SOFTC(io);
struct ctl_thread *thr;
thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
@@ -13755,7 +13513,7 @@ ctl_enqueue_rtr(union ctl_io *io)
static void
ctl_enqueue_done(union ctl_io *io)
{
- struct ctl_softc *softc = control_softc;
+ struct ctl_softc *softc = CTL_SOFTC(io);
struct ctl_thread *thr;
thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
@@ -13768,7 +13526,7 @@ ctl_enqueue_done(union ctl_io *io)
static void
ctl_enqueue_isc(union ctl_io *io)
{
- struct ctl_softc *softc = control_softc;
+ struct ctl_softc *softc = CTL_SOFTC(io);
struct ctl_thread *thr;
thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
diff --git a/sys/cam/ctl/ctl.h b/sys/cam/ctl/ctl.h
index 7b4d06c..d9a4f5a 100644
--- a/sys/cam/ctl/ctl.h
+++ b/sys/cam/ctl/ctl.h
@@ -74,18 +74,12 @@ struct ctl_port_entry {
struct ctl_modepage_header {
uint8_t page_code;
uint8_t subpage;
- int32_t len_used;
- int32_t len_left;
-};
-
-struct ctl_modepage_aps {
- struct ctl_modepage_header header;
- uint8_t lock_active;
+ uint16_t len_used;
+ uint16_t len_left;
};
union ctl_modepage_info {
struct ctl_modepage_header header;
- struct ctl_modepage_aps aps;
};
/*
diff --git a/sys/cam/ctl/ctl_backend.c b/sys/cam/ctl/ctl_backend.c
index 86f7d3c..bac7e85 100644
--- a/sys/cam/ctl/ctl_backend.c
+++ b/sys/cam/ctl/ctl_backend.c
@@ -67,11 +67,10 @@ ctl_backend_register(struct ctl_backend_driver *be)
{
struct ctl_softc *softc = control_softc;
struct ctl_backend_driver *be_tmp;
+ int error;
+ /* Sanity check, make sure this isn't a duplicate registration. */
mtx_lock(&softc->ctl_lock);
- /*
- * Sanity check, make sure this isn't a duplicate registration.
- */
STAILQ_FOREACH(be_tmp, &softc->be_list, links) {
if (strcmp(be_tmp->name, be->name) == 0) {
mtx_unlock(&softc->ctl_lock);
@@ -79,39 +78,24 @@ ctl_backend_register(struct ctl_backend_driver *be)
}
}
mtx_unlock(&softc->ctl_lock);
-
- /*
- * Call the backend's initialization routine.
- */
- be->init();
-
- mtx_lock(&softc->ctl_lock);
-
- STAILQ_INSERT_TAIL(&softc->be_list, be, links);
-
- softc->num_backends++;
-
- /*
- * Don't want to increment the usage count for internal consumers,
- * we won't be able to unload otherwise.
- */
- /* XXX KDM find a substitute for this? */
-#if 0
- if ((be->flags & CTL_BE_FLAG_INTERNAL) == 0)
- MOD_INC_USE_COUNT;
-#endif
-
#ifdef CS_BE_CONFIG_MOVE_DONE_IS_NOT_USED
be->config_move_done = ctl_config_move_done;
#endif
- /* XXX KDM fix this! */
be->num_luns = 0;
-#if 0
- atomic_set(&be->num_luns, 0);
-#endif
- mtx_unlock(&softc->ctl_lock);
+ /* Call the backend's initialization routine. */
+ if (be->init != NULL) {
+ if ((error = be->init()) != 0) {
+ printf("%s backend init error: %d\n",
+ be->name, error);
+ return (error);
+ }
+ }
+ mtx_lock(&softc->ctl_lock);
+ STAILQ_INSERT_TAIL(&softc->be_list, be, links);
+ softc->num_backends++;
+ mtx_unlock(&softc->ctl_lock);
return (0);
}
@@ -119,30 +103,21 @@ int
ctl_backend_deregister(struct ctl_backend_driver *be)
{
struct ctl_softc *softc = control_softc;
-
- mtx_lock(&softc->ctl_lock);
-
-#if 0
- if (atomic_read(&be->num_luns) != 0) {
-#endif
- /* XXX KDM fix this! */
- if (be->num_luns != 0) {
- mtx_unlock(&softc->ctl_lock);
- return (-1);
+ int error;
+
+ /* Call the backend's shutdown routine. */
+ if (be->shutdown != NULL) {
+ if ((error = be->shutdown()) != 0) {
+ printf("%s backend shutdown error: %d\n",
+ be->name, error);
+ return (error);
+ }
}
+ mtx_lock(&softc->ctl_lock);
STAILQ_REMOVE(&softc->be_list, be, ctl_backend_driver, links);
-
softc->num_backends--;
-
- /* XXX KDM find a substitute for this? */
-#if 0
- if ((be->flags & CTL_BE_FLAG_INTERNAL) == 0)
- MOD_DEC_USE_COUNT;
-#endif
-
mtx_unlock(&softc->ctl_lock);
-
return (0);
}
diff --git a/sys/cam/ctl/ctl_backend.h b/sys/cam/ctl/ctl_backend.h
index 4177e2d..4202efc 100644
--- a/sys/cam/ctl/ctl_backend.h
+++ b/sys/cam/ctl/ctl_backend.h
@@ -1,6 +1,6 @@
/*-
* Copyright (c) 2003 Silicon Graphics International Corp.
- * Copyright (c) 2014-2015 Alexander Motin <mav@FreeBSD.org>
+ * Copyright (c) 2014-2017 Alexander Motin <mav@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -40,54 +40,7 @@
#ifndef _CTL_BACKEND_H_
#define _CTL_BACKEND_H_
-/*
- * XXX KDM move this to another header file?
- */
-#define CTL_BE_NAME_LEN 32
-
-/*
- * The ID_REQ flag is used to say that the caller has requested a
- * particular LUN ID in the req_lun_id field. If we cannot allocate that
- * LUN ID, the ctl_add_lun() call will fail.
- *
- * The STOPPED flag tells us that the LUN should default to the powered
- * off state. It will return 0x04,0x02 until it is powered up. ("Logical
- * unit not ready, initializing command required.")
- *
- * The NO_MEDIA flag tells us that the LUN has no media inserted.
- *
- * The PRIMARY flag tells us that this LUN is registered as a Primary LUN
- * which is accessible via the Master shelf controller in an HA. This flag
- * being set indicates a Primary LUN. This flag being reset represents a
- * Secondary LUN controlled by the Secondary controller in an HA
- * configuration. Flag is applicable at this time to T_DIRECT types.
- *
- * The SERIAL_NUM flag tells us that the serial_num field is filled in and
- * valid for use in SCSI INQUIRY VPD page 0x80.
- *
- * The DEVID flag tells us that the device_id field is filled in and
- * valid for use in SCSI INQUIRY VPD page 0x83.
- *
- * The DEV_TYPE flag tells us that the device_type field is filled in.
- *
- * The EJECTED flag tells us that the removable LUN has tray open.
- *
- * The UNMAP flag tells us that this LUN supports UNMAP.
- *
- * The OFFLINE flag tells us that this LUN can not access backing store.
- */
-typedef enum {
- CTL_LUN_FLAG_ID_REQ = 0x01,
- CTL_LUN_FLAG_STOPPED = 0x02,
- CTL_LUN_FLAG_NO_MEDIA = 0x04,
- CTL_LUN_FLAG_PRIMARY = 0x08,
- CTL_LUN_FLAG_SERIAL_NUM = 0x10,
- CTL_LUN_FLAG_DEVID = 0x20,
- CTL_LUN_FLAG_DEV_TYPE = 0x40,
- CTL_LUN_FLAG_UNMAP = 0x80,
- CTL_LUN_FLAG_EJECTED = 0x100,
- CTL_LUN_FLAG_READONLY = 0x200
-} ctl_backend_lun_flags;
+#include <cam/ctl/ctl_ioctl.h>
typedef enum {
CTL_LUN_SERSEQ_OFF,
@@ -102,12 +55,13 @@ typedef enum {
{ \
switch (type) { \
case MOD_LOAD: \
- ctl_backend_register( \
- (struct ctl_backend_driver *)data); \
+ return (ctl_backend_register( \
+ (struct ctl_backend_driver *)data)); \
break; \
case MOD_UNLOAD: \
- printf(#name " module unload - not possible for this module type\n"); \
- return EINVAL; \
+ return (ctl_backend_deregister( \
+ (struct ctl_backend_driver *)data)); \
+ break; \
default: \
return EOPNOTSUPP; \
} \
@@ -226,10 +180,10 @@ struct ctl_be_lun {
typedef enum {
CTL_BE_FLAG_NONE = 0x00, /* no flags */
CTL_BE_FLAG_HAS_CONFIG = 0x01, /* can do config reads, writes */
- CTL_BE_FLAG_INTERNAL = 0x02 /* don't inc mod refcount */
} ctl_backend_flags;
typedef int (*be_init_t)(void);
+typedef int (*be_shutdown_t)(void);
typedef int (*be_func_t)(union ctl_io *io);
typedef void (*be_vfunc_t)(union ctl_io *io);
typedef int (*be_ioctl_t)(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
@@ -241,6 +195,7 @@ struct ctl_backend_driver {
char name[CTL_BE_NAME_LEN]; /* passed to CTL */
ctl_backend_flags flags; /* passed to CTL */
be_init_t init; /* passed to CTL */
+ be_shutdown_t shutdown; /* passed to CTL */
be_func_t data_submit; /* passed to CTL */
be_func_t data_move_done; /* passed to CTL */
be_func_t config_read; /* passed to CTL */
diff --git a/sys/cam/ctl/ctl_backend_block.c b/sys/cam/ctl/ctl_backend_block.c
index a184360..4a6141f 100644
--- a/sys/cam/ctl/ctl_backend_block.c
+++ b/sys/cam/ctl/ctl_backend_block.c
@@ -183,6 +183,7 @@ struct ctl_be_block_lun {
*/
struct ctl_be_block_softc {
struct mtx lock;
+ uma_zone_t beio_zone;
int num_luns;
STAILQ_HEAD(, ctl_be_block_lun) lun_list;
};
@@ -273,13 +274,15 @@ static int ctl_be_block_config_write(union ctl_io *io);
static int ctl_be_block_config_read(union ctl_io *io);
static int ctl_be_block_lun_info(void *be_lun, struct sbuf *sb);
static uint64_t ctl_be_block_lun_attr(void *be_lun, const char *attrname);
-int ctl_be_block_init(void);
+static int ctl_be_block_init(void);
+static int ctl_be_block_shutdown(void);
static struct ctl_backend_driver ctl_be_block_driver =
{
.name = "block",
.flags = CTL_BE_FLAG_HAS_CONFIG,
.init = ctl_be_block_init,
+ .shutdown = ctl_be_block_shutdown,
.data_submit = ctl_be_block_submit,
.data_move_done = ctl_be_block_move_done,
.config_read = ctl_be_block_config_read,
@@ -292,14 +295,12 @@ static struct ctl_backend_driver ctl_be_block_driver =
MALLOC_DEFINE(M_CTLBLK, "ctlblk", "Memory used for CTL block backend");
CTL_BACKEND_DECLARE(cbb, ctl_be_block_driver);
-static uma_zone_t beio_zone;
-
static struct ctl_be_block_io *
ctl_alloc_beio(struct ctl_be_block_softc *softc)
{
struct ctl_be_block_io *beio;
- beio = uma_zalloc(beio_zone, M_WAITOK | M_ZERO);
+ beio = uma_zalloc(softc->beio_zone, M_WAITOK | M_ZERO);
beio->softc = softc;
return (beio);
}
@@ -332,7 +333,7 @@ ctl_free_beio(struct ctl_be_block_io *beio)
duplicate_free, beio->num_segs);
}
- uma_zfree(beio_zone, beio);
+ uma_zfree(beio->softc->beio_zone, beio);
}
static void
@@ -419,6 +420,16 @@ ctl_be_block_move_done(union ctl_io *io)
*/
if (io->io_hdr.flags & CTL_FLAG_ABORT) {
;
+ } else if ((io->io_hdr.port_status != 0) &&
+ ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
+ (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
+ ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1,
+ /*retry_count*/ io->io_hdr.port_status);
+ } else if (io->scsiio.kern_data_resid != 0 &&
+ (io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT &&
+ ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
+ (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
+ ctl_set_invalid_field_ciu(&io->scsiio);
} else if ((io->io_hdr.port_status == 0) &&
((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) {
lbalen = ARGS(beio->io);
@@ -428,21 +439,6 @@ ctl_be_block_move_done(union ctl_io *io)
/* We have two data blocks ready for comparison. */
ctl_be_block_compare(io);
}
- } else if ((io->io_hdr.port_status != 0) &&
- ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
- (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
- /*
- * For hardware error sense keys, the sense key
- * specific value is defined to be a retry count,
- * but we use it to pass back an internal FETD
- * error code. XXX KDM Hopefully the FETD is only
- * using 16 bits for an error code, since that's
- * all the space we have in the sks field.
- */
- ctl_set_internal_failure(&io->scsiio,
- /*sks_valid*/ 1,
- /*retry_count*/
- io->io_hdr.port_status);
}
/*
@@ -1634,7 +1630,6 @@ ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun,
else
io->scsiio.kern_data_ptr = (uint8_t *)beio->sg_segs;
io->scsiio.kern_data_len = beio->io_len;
- io->scsiio.kern_data_resid = 0;
io->scsiio.kern_sg_entries = beio->num_segs;
io->io_hdr.flags |= CTL_FLAG_ALLOCATED;
@@ -1751,8 +1746,7 @@ ctl_be_block_submit(union ctl_io *io)
DPRINTF("entered\n");
- cbe_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
- CTL_PRIV_BACKEND_LUN].ptr;
+ cbe_lun = CTL_BACKEND_LUN(io);
be_lun = (struct ctl_be_block_lun *)cbe_lun->be_lun;
/*
@@ -2717,8 +2711,7 @@ ctl_be_block_config_write(union ctl_io *io)
DPRINTF("entered\n");
- cbe_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
- CTL_PRIV_BACKEND_LUN].ptr;
+ cbe_lun = CTL_BACKEND_LUN(io);
be_lun = (struct ctl_be_block_lun *)cbe_lun->be_lun;
retval = 0;
@@ -2803,8 +2796,7 @@ ctl_be_block_config_read(union ctl_io *io)
DPRINTF("entered\n");
- cbe_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
- CTL_PRIV_BACKEND_LUN].ptr;
+ cbe_lun = CTL_BACKEND_LUN(io);
be_lun = (struct ctl_be_block_lun *)cbe_lun->be_lun;
switch (io->scsiio.cdb[0]) {
@@ -2868,19 +2860,40 @@ ctl_be_block_lun_attr(void *be_lun, const char *attrname)
return (lun->getattr(lun, attrname));
}
-int
+static int
ctl_be_block_init(void)
{
- struct ctl_be_block_softc *softc;
- int retval;
-
- softc = &backend_block_softc;
- retval = 0;
+ struct ctl_be_block_softc *softc = &backend_block_softc;
mtx_init(&softc->lock, "ctlblock", NULL, MTX_DEF);
- beio_zone = uma_zcreate("beio", sizeof(struct ctl_be_block_io),
+ softc->beio_zone = uma_zcreate("beio", sizeof(struct ctl_be_block_io),
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
STAILQ_INIT(&softc->lun_list);
+ return (0);
+}
- return (retval);
+
+static int
+ctl_be_block_shutdown(void)
+{
+ struct ctl_be_block_softc *softc = &backend_block_softc;
+ struct ctl_be_block_lun *lun, *next_lun;
+
+ mtx_lock(&softc->lock);
+ STAILQ_FOREACH_SAFE(lun, &softc->lun_list, links, next_lun) {
+ /*
+ * Drop our lock here. Since ctl_invalidate_lun() can call
+ * back into us, this could potentially lead to a recursive
+ * lock of the same mutex, which would cause a hang.
+ */
+ mtx_unlock(&softc->lock);
+ ctl_disable_lun(&lun->cbe_lun);
+ ctl_invalidate_lun(&lun->cbe_lun);
+ mtx_lock(&softc->lock);
+ }
+ mtx_unlock(&softc->lock);
+
+ uma_zdestroy(softc->beio_zone);
+ mtx_destroy(&softc->lock);
+ return (0);
}
diff --git a/sys/cam/ctl/ctl_backend_ramdisk.c b/sys/cam/ctl/ctl_backend_ramdisk.c
index d170446..b743bc6 100644
--- a/sys/cam/ctl/ctl_backend_ramdisk.c
+++ b/sys/cam/ctl/ctl_backend_ramdisk.c
@@ -1,7 +1,7 @@
/*-
* Copyright (c) 2003, 2008 Silicon Graphics International Corp.
* Copyright (c) 2012 The FreeBSD Foundation
- * Copyright (c) 2014-2015 Alexander Motin <mav@FreeBSD.org>
+ * Copyright (c) 2014-2017 Alexander Motin <mav@FreeBSD.org>
* All rights reserved.
*
* Portions of this software were developed by Edward Tomasz Napierala
@@ -35,7 +35,7 @@
* $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_ramdisk.c#3 $
*/
/*
- * CAM Target Layer backend for a "fake" ramdisk.
+ * CAM Target Layer black hole and RAM disk backend.
*
* Author: Ken Merry <ken@FreeBSD.org>
*/
@@ -48,9 +48,11 @@ __FBSDID("$FreeBSD$");
#include <sys/kernel.h>
#include <sys/condvar.h>
#include <sys/types.h>
+#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
+#include <sys/sx.h>
#include <sys/taskqueue.h>
#include <sys/time.h>
#include <sys/queue.h>
@@ -71,6 +73,29 @@ __FBSDID("$FreeBSD$");
#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_error.h>
+#define PRIV(io) \
+ ((struct ctl_ptr_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_BACKEND])
+#define ARGS(io) \
+ ((struct ctl_lba_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_LBA_LEN])
+
+#define PPP (PAGE_SIZE / sizeof(uint8_t **))
+#ifdef __LP64__
+#define PPPS (PAGE_SHIFT - 3)
+#else
+#define PPPS (PAGE_SHIFT - 2)
+#endif
+#define SGPP (PAGE_SIZE / sizeof(struct ctl_sg_entry))
+
+#define P_UNMAPPED NULL /* Page is unmapped. */
+#define P_ANCHORED ((void *)(uintptr_t)1) /* Page is anchored. */
+
+typedef enum {
+ GP_READ, /* Return data page or zero page. */
+ GP_WRITE, /* Return data page, try allocate if none. */
+ GP_ANCHOR, /* Return data page, try anchor if none. */
+ GP_OTHER, /* Return what present, do not allocate/anchor. */
+} getpage_op_t;
+
typedef enum {
CTL_BE_RAMDISK_LUN_UNCONFIGURED = 0x01,
CTL_BE_RAMDISK_LUN_CONFIG_ERR = 0x02,
@@ -79,28 +104,29 @@ typedef enum {
struct ctl_be_ramdisk_lun {
struct ctl_lun_create_params params;
- char lunname[32];
- uint64_t size_bytes;
- uint64_t size_blocks;
+ char lunname[32];
+ int indir;
+ uint8_t **pages;
+ uint8_t *zero_page;
+ struct sx page_lock;
+ u_int pblocksize;
+ u_int pblockmul;
+ uint64_t size_bytes;
+ uint64_t size_blocks;
+ uint64_t cap_bytes;
+ uint64_t cap_used;
struct ctl_be_ramdisk_softc *softc;
ctl_be_ramdisk_lun_flags flags;
STAILQ_ENTRY(ctl_be_ramdisk_lun) links;
- struct ctl_be_lun cbe_lun;
- struct taskqueue *io_taskqueue;
- struct task io_task;
+ struct ctl_be_lun cbe_lun;
+ struct taskqueue *io_taskqueue;
+ struct task io_task;
STAILQ_HEAD(, ctl_io_hdr) cont_queue;
- struct mtx_padalign queue_lock;
+ struct mtx_padalign queue_lock;
};
struct ctl_be_ramdisk_softc {
struct mtx lock;
- int rd_size;
-#ifdef CTL_RAMDISK_PAGES
- uint8_t **ramdisk_pages;
- int num_pages;
-#else
- uint8_t *ramdisk_buffer;
-#endif
int num_luns;
STAILQ_HEAD(, ctl_be_ramdisk_lun) lun_list;
};
@@ -108,11 +134,16 @@ struct ctl_be_ramdisk_softc {
static struct ctl_be_ramdisk_softc rd_softc;
extern struct ctl_softc *control_softc;
-int ctl_backend_ramdisk_init(void);
-void ctl_backend_ramdisk_shutdown(void);
+static int ctl_backend_ramdisk_init(void);
+static int ctl_backend_ramdisk_shutdown(void);
static int ctl_backend_ramdisk_move_done(union ctl_io *io);
+static void ctl_backend_ramdisk_compare(union ctl_io *io);
+static void ctl_backend_ramdisk_rw(union ctl_io *io);
static int ctl_backend_ramdisk_submit(union ctl_io *io);
-static void ctl_backend_ramdisk_continue(union ctl_io *io);
+static void ctl_backend_ramdisk_worker(void *context, int pending);
+static int ctl_backend_ramdisk_config_read(union ctl_io *io);
+static int ctl_backend_ramdisk_config_write(union ctl_io *io);
+static uint64_t ctl_backend_ramdisk_lun_attr(void *be_lun, const char *attrname);
static int ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd,
caddr_t addr, int flag, struct thread *td);
static int ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
@@ -121,63 +152,43 @@ static int ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
struct ctl_lun_req *req);
static int ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc,
struct ctl_lun_req *req);
-static void ctl_backend_ramdisk_worker(void *context, int pending);
static void ctl_backend_ramdisk_lun_shutdown(void *be_lun);
static void ctl_backend_ramdisk_lun_config_status(void *be_lun,
ctl_lun_config_status status);
-static int ctl_backend_ramdisk_config_write(union ctl_io *io);
-static int ctl_backend_ramdisk_config_read(union ctl_io *io);
static struct ctl_backend_driver ctl_be_ramdisk_driver =
{
.name = "ramdisk",
.flags = CTL_BE_FLAG_HAS_CONFIG,
.init = ctl_backend_ramdisk_init,
+ .shutdown = ctl_backend_ramdisk_shutdown,
.data_submit = ctl_backend_ramdisk_submit,
.data_move_done = ctl_backend_ramdisk_move_done,
.config_read = ctl_backend_ramdisk_config_read,
.config_write = ctl_backend_ramdisk_config_write,
- .ioctl = ctl_backend_ramdisk_ioctl
+ .ioctl = ctl_backend_ramdisk_ioctl,
+ .lun_attr = ctl_backend_ramdisk_lun_attr,
};
MALLOC_DEFINE(M_RAMDISK, "ramdisk", "Memory used for CTL RAMdisk");
CTL_BACKEND_DECLARE(cbr, ctl_be_ramdisk_driver);
-int
+static int
ctl_backend_ramdisk_init(void)
{
struct ctl_be_ramdisk_softc *softc = &rd_softc;
-#ifdef CTL_RAMDISK_PAGES
- int i;
-#endif
memset(softc, 0, sizeof(*softc));
mtx_init(&softc->lock, "ctlramdisk", NULL, MTX_DEF);
STAILQ_INIT(&softc->lun_list);
- softc->rd_size = 1024 * 1024;
-#ifdef CTL_RAMDISK_PAGES
- softc->num_pages = softc->rd_size / PAGE_SIZE;
- softc->ramdisk_pages = (uint8_t **)malloc(sizeof(uint8_t *) *
- softc->num_pages, M_RAMDISK,
- M_WAITOK);
- for (i = 0; i < softc->num_pages; i++)
- softc->ramdisk_pages[i] = malloc(PAGE_SIZE, M_RAMDISK,M_WAITOK);
-#else
- softc->ramdisk_buffer = (uint8_t *)malloc(softc->rd_size, M_RAMDISK,
- M_WAITOK);
-#endif
-
return (0);
}
-void
+static int
ctl_backend_ramdisk_shutdown(void)
{
struct ctl_be_ramdisk_softc *softc = &rd_softc;
struct ctl_be_ramdisk_lun *lun, *next_lun;
-#ifdef CTL_RAMDISK_PAGES
- int i;
-#endif
mtx_lock(&softc->lock);
STAILQ_FOREACH_SAFE(lun, &softc->lun_list, links, next_lun) {
@@ -192,35 +203,210 @@ ctl_backend_ramdisk_shutdown(void)
mtx_lock(&softc->lock);
}
mtx_unlock(&softc->lock);
-
-#ifdef CTL_RAMDISK_PAGES
- for (i = 0; i < softc->num_pages; i++)
- free(softc->ramdisk_pages[i], M_RAMDISK);
+ mtx_destroy(&softc->lock);
+ return (0);
+}
- free(softc->ramdisk_pages, M_RAMDISK);
-#else
- free(softc->ramdisk_buffer, M_RAMDISK);
-#endif
+static uint8_t *
+ctl_backend_ramdisk_getpage(struct ctl_be_ramdisk_lun *be_lun, off_t pn,
+ getpage_op_t op)
+{
+ uint8_t **p, ***pp;
+ off_t i;
+ int s;
+
+ if (be_lun->cap_bytes == 0) {
+ switch (op) {
+ case GP_READ:
+ return (be_lun->zero_page);
+ case GP_WRITE:
+ return ((uint8_t *)be_lun->pages);
+ case GP_ANCHOR:
+ return (P_ANCHORED);
+ default:
+ return (P_UNMAPPED);
+ }
+ }
+ if (op == GP_WRITE || op == GP_ANCHOR) {
+ sx_xlock(&be_lun->page_lock);
+ pp = &be_lun->pages;
+ for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) {
+ if (*pp == NULL) {
+ *pp = malloc(PAGE_SIZE, M_RAMDISK,
+ M_WAITOK|M_ZERO);
+ }
+ i = pn >> s;
+ pp = (uint8_t ***)&(*pp)[i];
+ pn -= i << s;
+ }
+ if (*pp == P_UNMAPPED && be_lun->cap_used < be_lun->cap_bytes) {
+ if (op == GP_WRITE) {
+ *pp = malloc(be_lun->pblocksize, M_RAMDISK,
+ M_WAITOK|M_ZERO);
+ } else
+ *pp = P_ANCHORED;
+ be_lun->cap_used += be_lun->pblocksize;
+ } else if (*pp == P_ANCHORED && op == GP_WRITE) {
+ *pp = malloc(be_lun->pblocksize, M_RAMDISK,
+ M_WAITOK|M_ZERO);
+ }
+ sx_xunlock(&be_lun->page_lock);
+ return ((uint8_t *)*pp);
+ } else {
+ sx_slock(&be_lun->page_lock);
+ p = be_lun->pages;
+ for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) {
+ if (p == NULL)
+ break;
+ i = pn >> s;
+ p = (uint8_t **)p[i];
+ pn -= i << s;
+ }
+ sx_sunlock(&be_lun->page_lock);
+ if ((p == P_UNMAPPED || p == P_ANCHORED) && op == GP_READ)
+ return (be_lun->zero_page);
+ return ((uint8_t *)p);
+ }
+};
- if (ctl_backend_deregister(&ctl_be_ramdisk_driver) != 0) {
- printf("ctl_backend_ramdisk_shutdown: "
- "ctl_backend_deregister() failed!\n");
+static void
+ctl_backend_ramdisk_unmappage(struct ctl_be_ramdisk_lun *be_lun, off_t pn)
+{
+ uint8_t ***pp;
+ off_t i;
+ int s;
+
+ if (be_lun->cap_bytes == 0)
+ return;
+ sx_xlock(&be_lun->page_lock);
+ pp = &be_lun->pages;
+ for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) {
+ if (*pp == NULL)
+ goto noindir;
+ i = pn >> s;
+ pp = (uint8_t ***)&(*pp)[i];
+ pn -= i << s;
}
+ if (*pp == P_ANCHORED) {
+ be_lun->cap_used -= be_lun->pblocksize;
+ *pp = P_UNMAPPED;
+ } else if (*pp != P_UNMAPPED) {
+ free(*pp, M_RAMDISK);
+ be_lun->cap_used -= be_lun->pblocksize;
+ *pp = P_UNMAPPED;
+ }
+noindir:
+ sx_xunlock(&be_lun->page_lock);
+};
+
+static void
+ctl_backend_ramdisk_anchorpage(struct ctl_be_ramdisk_lun *be_lun, off_t pn)
+{
+ uint8_t ***pp;
+ off_t i;
+ int s;
+
+ if (be_lun->cap_bytes == 0)
+ return;
+ sx_xlock(&be_lun->page_lock);
+ pp = &be_lun->pages;
+ for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) {
+ if (*pp == NULL)
+ goto noindir;
+ i = pn >> s;
+ pp = (uint8_t ***)&(*pp)[i];
+ pn -= i << s;
+ }
+ if (*pp == P_UNMAPPED && be_lun->cap_used < be_lun->cap_bytes) {
+ be_lun->cap_used += be_lun->pblocksize;
+ *pp = P_ANCHORED;
+ } else if (*pp != P_ANCHORED) {
+ free(*pp, M_RAMDISK);
+ *pp = P_ANCHORED;
+ }
+noindir:
+ sx_xunlock(&be_lun->page_lock);
+};
+
+static void
+ctl_backend_ramdisk_freeallpages(uint8_t **p, int indir)
+{
+ int i;
+
+ if (p == NULL)
+ return;
+ if (indir == 0) {
+ free(p, M_RAMDISK);
+ return;
+ }
+ for (i = 0; i < PPP; i++) {
+ if (p[i] == NULL)
+ continue;
+ ctl_backend_ramdisk_freeallpages((uint8_t **)p[i], indir - 1);
+ }
+ free(p, M_RAMDISK);
+};
+
+static size_t
+cmp(uint8_t *a, uint8_t *b, size_t size)
+{
+ size_t i;
+
+ for (i = 0; i < size; i++) {
+ if (a[i] != b[i])
+ break;
+ }
+ return (i);
+}
+
+static int
+ctl_backend_ramdisk_cmp(union ctl_io *io)
+{
+ struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
+ struct ctl_be_ramdisk_lun *be_lun = cbe_lun->be_lun;
+ uint8_t *page;
+ uint8_t info[8];
+ uint64_t lba;
+ u_int lbaoff, lbas, res, off;
+
+ lbas = io->scsiio.kern_data_len / cbe_lun->blocksize;
+ lba = ARGS(io)->lba + PRIV(io)->len - lbas;
+ off = 0;
+ for (; lbas > 0; lbas--, lba++) {
+ page = ctl_backend_ramdisk_getpage(be_lun,
+ lba >> cbe_lun->pblockexp, GP_READ);
+ lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp);
+ page += lbaoff * cbe_lun->blocksize;
+ res = cmp(io->scsiio.kern_data_ptr + off, page,
+ cbe_lun->blocksize);
+ off += res;
+ if (res < cbe_lun->blocksize)
+ break;
+ }
+ if (lbas > 0) {
+ off += io->scsiio.kern_rel_offset - io->scsiio.kern_data_len;
+ scsi_u64to8b(off, info);
+ ctl_set_sense(&io->scsiio, /*current_error*/ 1,
+ /*sense_key*/ SSD_KEY_MISCOMPARE,
+ /*asc*/ 0x1D, /*ascq*/ 0x00,
+ /*type*/ SSD_ELEM_INFO,
+ /*size*/ sizeof(info), /*data*/ &info,
+ /*type*/ SSD_ELEM_NONE);
+ return (1);
+ }
+ return (0);
}
static int
ctl_backend_ramdisk_move_done(union ctl_io *io)
{
- struct ctl_be_lun *cbe_lun;
- struct ctl_be_ramdisk_lun *be_lun;
+ struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
+ struct ctl_be_ramdisk_lun *be_lun = cbe_lun->be_lun;
#ifdef CTL_TIME_IO
struct bintime cur_bt;
#endif
CTL_DEBUG_PRINT(("ctl_backend_ramdisk_move_done\n"));
- cbe_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
- CTL_PRIV_BACKEND_LUN].ptr;
- be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun->be_lun;
#ifdef CTL_TIME_IO
getbinuptime(&cur_bt);
bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt);
@@ -232,9 +418,24 @@ ctl_backend_ramdisk_move_done(union ctl_io *io)
io->scsiio.kern_rel_offset += io->scsiio.kern_data_len;
if (io->io_hdr.flags & CTL_FLAG_ABORT) {
;
+ } else if (io->io_hdr.port_status != 0 &&
+ ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
+ (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
+ ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1,
+ /*retry_count*/ io->io_hdr.port_status);
+ } else if (io->scsiio.kern_data_resid != 0 &&
+ (io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT &&
+ ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
+ (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
+ ctl_set_invalid_field_ciu(&io->scsiio);
} else if ((io->io_hdr.port_status == 0) &&
((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) {
- if (io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer > 0) {
+ if (ARGS(io)->flags & CTL_LLF_COMPARE) {
+ /* We have data block ready for comparison. */
+ if (ctl_backend_ramdisk_cmp(io))
+ goto done;
+ }
+ if (ARGS(io)->len > PRIV(io)->len) {
mtx_lock(&be_lun->queue_lock);
STAILQ_INSERT_TAIL(&be_lun->cont_queue,
&io->io_hdr, links);
@@ -244,93 +445,110 @@ ctl_backend_ramdisk_move_done(union ctl_io *io)
return (0);
}
ctl_set_success(&io->scsiio);
- } else if ((io->io_hdr.port_status != 0) &&
- ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
- (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
- /*
- * For hardware error sense keys, the sense key
- * specific value is defined to be a retry count,
- * but we use it to pass back an internal FETD
- * error code. XXX KDM Hopefully the FETD is only
- * using 16 bits for an error code, since that's
- * all the space we have in the sks field.
- */
- ctl_set_internal_failure(&io->scsiio,
- /*sks_valid*/ 1,
- /*retry_count*/
- io->io_hdr.port_status);
}
+done:
ctl_data_submit_done(io);
return(0);
}
-static int
-ctl_backend_ramdisk_submit(union ctl_io *io)
+static void
+ctl_backend_ramdisk_compare(union ctl_io *io)
{
- struct ctl_be_lun *cbe_lun;
- struct ctl_lba_len_flags *lbalen;
+ struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
+ u_int lbas, len;
- cbe_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
- CTL_PRIV_BACKEND_LUN].ptr;
- lbalen = (struct ctl_lba_len_flags *)&io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
- if (lbalen->flags & CTL_LLF_VERIFY) {
- ctl_set_success(&io->scsiio);
- ctl_data_submit_done(io);
- return (CTL_RETVAL_COMPLETE);
- }
- io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer =
- lbalen->len * cbe_lun->blocksize;
- ctl_backend_ramdisk_continue(io);
- return (CTL_RETVAL_COMPLETE);
+ lbas = ARGS(io)->len - PRIV(io)->len;
+ lbas = MIN(lbas, 131072 / cbe_lun->blocksize);
+ len = lbas * cbe_lun->blocksize;
+
+ io->scsiio.be_move_done = ctl_backend_ramdisk_move_done;
+ io->scsiio.kern_data_ptr = malloc(len, M_RAMDISK, M_WAITOK);
+ io->scsiio.kern_data_len = len;
+ io->scsiio.kern_sg_entries = 0;
+ io->io_hdr.flags |= CTL_FLAG_ALLOCATED;
+ PRIV(io)->len += lbas;
+#ifdef CTL_TIME_IO
+ getbinuptime(&io->io_hdr.dma_start_bt);
+#endif
+ ctl_datamove(io);
}
static void
-ctl_backend_ramdisk_continue(union ctl_io *io)
+ctl_backend_ramdisk_rw(union ctl_io *io)
{
- struct ctl_be_ramdisk_softc *softc;
- int len, len_filled, sg_filled;
-#ifdef CTL_RAMDISK_PAGES
+ struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
+ struct ctl_be_ramdisk_lun *be_lun = cbe_lun->be_lun;
struct ctl_sg_entry *sg_entries;
- int i;
-#endif
-
- softc = &rd_softc;
- len = io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer;
-#ifdef CTL_RAMDISK_PAGES
- sg_filled = min(btoc(len), softc->num_pages);
- if (sg_filled > 1) {
+ uint8_t *page;
+ uint64_t lba;
+ u_int i, len, lbaoff, lbas, sgs, off;
+ getpage_op_t op;
+
+ lba = ARGS(io)->lba + PRIV(io)->len;
+ lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp);
+ lbas = ARGS(io)->len - PRIV(io)->len;
+ lbas = MIN(lbas, (SGPP << cbe_lun->pblockexp) - lbaoff);
+ sgs = (lbas + lbaoff + be_lun->pblockmul - 1) >> cbe_lun->pblockexp;
+ off = lbaoff * cbe_lun->blocksize;
+ op = (ARGS(io)->flags & CTL_LLF_WRITE) ? GP_WRITE : GP_READ;
+ if (sgs > 1) {
io->scsiio.kern_data_ptr = malloc(sizeof(struct ctl_sg_entry) *
- sg_filled, M_RAMDISK,
- M_WAITOK);
+ sgs, M_RAMDISK, M_WAITOK);
sg_entries = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
- for (i = 0, len_filled = 0; i < sg_filled; i++) {
- sg_entries[i].addr = softc->ramdisk_pages[i];
- sg_entries[i].len = MIN(PAGE_SIZE, len - len_filled);
- len_filled += sg_entries[i].len;
+ len = lbas * cbe_lun->blocksize;
+ for (i = 0; i < sgs; i++) {
+ page = ctl_backend_ramdisk_getpage(be_lun,
+ (lba >> cbe_lun->pblockexp) + i, op);
+ if (page == P_UNMAPPED || page == P_ANCHORED) {
+ free(io->scsiio.kern_data_ptr, M_RAMDISK);
+nospc:
+ ctl_set_space_alloc_fail(&io->scsiio);
+ ctl_data_submit_done(io);
+ return;
+ }
+ sg_entries[i].addr = page + off;
+ sg_entries[i].len = MIN(len, be_lun->pblocksize - off);
+ len -= sg_entries[i].len;
+ off = 0;
}
} else {
- sg_filled = 0;
- len_filled = len;
- io->scsiio.kern_data_ptr = softc->ramdisk_pages[0];
+ page = ctl_backend_ramdisk_getpage(be_lun,
+ lba >> cbe_lun->pblockexp, op);
+ if (page == P_UNMAPPED || page == P_ANCHORED)
+ goto nospc;
+ sgs = 0;
+ io->scsiio.kern_data_ptr = page + off;
}
-#else
- sg_filled = 0;
- len_filled = min(len, softc->rd_size);
- io->scsiio.kern_data_ptr = softc->ramdisk_buffer;
-#endif /* CTL_RAMDISK_PAGES */
io->scsiio.be_move_done = ctl_backend_ramdisk_move_done;
- io->scsiio.kern_data_resid = 0;
- io->scsiio.kern_data_len = len_filled;
- io->scsiio.kern_sg_entries = sg_filled;
+ io->scsiio.kern_data_len = lbas * cbe_lun->blocksize;
+ io->scsiio.kern_sg_entries = sgs;
io->io_hdr.flags |= CTL_FLAG_ALLOCATED;
- io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer -= len_filled;
+ PRIV(io)->len += lbas;
#ifdef CTL_TIME_IO
getbinuptime(&io->io_hdr.dma_start_bt);
#endif
ctl_datamove(io);
}
+static int
+ctl_backend_ramdisk_submit(union ctl_io *io)
+{
+ struct ctl_lba_len_flags *lbalen = ARGS(io);
+
+ if (lbalen->flags & CTL_LLF_VERIFY) {
+ ctl_set_success(&io->scsiio);
+ ctl_data_submit_done(io);
+ return (CTL_RETVAL_COMPLETE);
+ }
+ PRIV(io)->len = 0;
+ if (lbalen->flags & CTL_LLF_COMPARE)
+ ctl_backend_ramdisk_compare(io);
+ else
+ ctl_backend_ramdisk_rw(io);
+ return (CTL_RETVAL_COMPLETE);
+}
+
static void
ctl_backend_ramdisk_worker(void *context, int pending)
{
@@ -338,7 +556,6 @@ ctl_backend_ramdisk_worker(void *context, int pending)
union ctl_io *io;
be_lun = (struct ctl_be_ramdisk_lun *)context;
-
mtx_lock(&be_lun->queue_lock);
for (;;) {
io = (union ctl_io *)STAILQ_FIRST(&be_lun->cont_queue);
@@ -346,7 +563,10 @@ ctl_backend_ramdisk_worker(void *context, int pending)
STAILQ_REMOVE(&be_lun->cont_queue, &io->io_hdr,
ctl_io_hdr, links);
mtx_unlock(&be_lun->queue_lock);
- ctl_backend_ramdisk_continue(io);
+ if (ARGS(io)->flags & CTL_LLF_COMPARE)
+ ctl_backend_ramdisk_compare(io);
+ else
+ ctl_backend_ramdisk_rw(io);
mtx_lock(&be_lun->queue_lock);
continue;
}
@@ -361,6 +581,259 @@ ctl_backend_ramdisk_worker(void *context, int pending)
}
static int
+ctl_backend_ramdisk_gls(union ctl_io *io)
+{
+ struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
+ struct ctl_be_ramdisk_lun *be_lun = cbe_lun->be_lun;
+ struct scsi_get_lba_status_data *data;
+ uint8_t *page;
+ u_int lbaoff;
+
+ data = (struct scsi_get_lba_status_data *)io->scsiio.kern_data_ptr;
+ scsi_u64to8b(ARGS(io)->lba, data->descr[0].addr);
+ lbaoff = ARGS(io)->lba & ~(UINT_MAX << cbe_lun->pblockexp);
+ scsi_ulto4b(be_lun->pblockmul - lbaoff, data->descr[0].length);
+ page = ctl_backend_ramdisk_getpage(be_lun,
+ ARGS(io)->lba >> cbe_lun->pblockexp, GP_OTHER);
+ if (page == P_UNMAPPED)
+ data->descr[0].status = 1;
+ else if (page == P_ANCHORED)
+ data->descr[0].status = 2;
+ else
+ data->descr[0].status = 0;
+ ctl_config_read_done(io);
+ return (CTL_RETVAL_COMPLETE);
+}
+
+static int
+ctl_backend_ramdisk_config_read(union ctl_io *io)
+{
+ int retval = 0;
+
+ switch (io->scsiio.cdb[0]) {
+ case SERVICE_ACTION_IN:
+ if (io->scsiio.cdb[1] == SGLS_SERVICE_ACTION) {
+ retval = ctl_backend_ramdisk_gls(io);
+ break;
+ }
+ ctl_set_invalid_field(&io->scsiio,
+ /*sks_valid*/ 1,
+ /*command*/ 1,
+ /*field*/ 1,
+ /*bit_valid*/ 1,
+ /*bit*/ 4);
+ ctl_config_read_done(io);
+ retval = CTL_RETVAL_COMPLETE;
+ break;
+ default:
+ ctl_set_invalid_opcode(&io->scsiio);
+ ctl_config_read_done(io);
+ retval = CTL_RETVAL_COMPLETE;
+ break;
+ }
+ return (retval);
+}
+
+static void
+ctl_backend_ramdisk_delete(struct ctl_be_lun *cbe_lun, off_t lba, off_t len,
+ int anchor)
+{
+ struct ctl_be_ramdisk_lun *be_lun = cbe_lun->be_lun;
+ uint8_t *page;
+ uint64_t p, lp;
+ u_int lbaoff;
+ getpage_op_t op = anchor ? GP_ANCHOR : GP_OTHER;
+
+ /* Partially zero first partial page. */
+ p = lba >> cbe_lun->pblockexp;
+ lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp);
+ if (lbaoff != 0) {
+ page = ctl_backend_ramdisk_getpage(be_lun, p, op);
+ if (page != P_UNMAPPED && page != P_ANCHORED) {
+ memset(page + lbaoff * cbe_lun->blocksize, 0,
+ min(len, be_lun->pblockmul - lbaoff) *
+ cbe_lun->blocksize);
+ }
+ p++;
+ }
+
+ /* Partially zero last partial page. */
+ lp = (lba + len) >> cbe_lun->pblockexp;
+ lbaoff = (lba + len) & ~(UINT_MAX << cbe_lun->pblockexp);
+ if (p <= lp && lbaoff != 0) {
+ page = ctl_backend_ramdisk_getpage(be_lun, lp, op);
+ if (page != P_UNMAPPED && page != P_ANCHORED)
+ memset(page, 0, lbaoff * cbe_lun->blocksize);
+ }
+
+ /* Delete remaining full pages. */
+ if (anchor) {
+ for (; p < lp; p++)
+ ctl_backend_ramdisk_anchorpage(be_lun, p);
+ } else {
+ for (; p < lp; p++)
+ ctl_backend_ramdisk_unmappage(be_lun, p);
+ }
+}
+
+static void
+ctl_backend_ramdisk_ws(union ctl_io *io)
+{
+ struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
+ struct ctl_be_ramdisk_lun *be_lun = cbe_lun->be_lun;
+ struct ctl_lba_len_flags *lbalen = ARGS(io);
+ uint8_t *page;
+ uint64_t lba;
+ u_int lbaoff, lbas;
+
+ if (lbalen->flags & ~(SWS_LBDATA | SWS_UNMAP | SWS_ANCHOR | SWS_NDOB)) {
+ ctl_set_invalid_field(&io->scsiio,
+ /*sks_valid*/ 1,
+ /*command*/ 1,
+ /*field*/ 1,
+ /*bit_valid*/ 0,
+ /*bit*/ 0);
+ ctl_config_write_done(io);
+ return;
+ }
+ if (lbalen->flags & SWS_UNMAP) {
+ ctl_backend_ramdisk_delete(cbe_lun, lbalen->lba, lbalen->len,
+ (lbalen->flags & SWS_ANCHOR) != 0);
+ ctl_set_success(&io->scsiio);
+ ctl_config_write_done(io);
+ return;
+ }
+
+ for (lba = lbalen->lba, lbas = lbalen->len; lbas > 0; lba++, lbas--) {
+ page = ctl_backend_ramdisk_getpage(be_lun,
+ lba >> cbe_lun->pblockexp, GP_WRITE);
+ if (page == P_UNMAPPED || page == P_ANCHORED) {
+ ctl_set_space_alloc_fail(&io->scsiio);
+ ctl_data_submit_done(io);
+ return;
+ }
+ lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp);
+ page += lbaoff * cbe_lun->blocksize;
+ if (lbalen->flags & SWS_NDOB) {
+ memset(page, 0, cbe_lun->blocksize);
+ } else {
+ memcpy(page, io->scsiio.kern_data_ptr,
+ cbe_lun->blocksize);
+ }
+ if (lbalen->flags & SWS_LBDATA)
+ scsi_ulto4b(lba, page);
+ }
+ ctl_set_success(&io->scsiio);
+ ctl_config_write_done(io);
+}
+
+static void
+ctl_backend_ramdisk_unmap(union ctl_io *io)
+{
+ struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
+ struct ctl_ptr_len_flags *ptrlen = (struct ctl_ptr_len_flags *)ARGS(io);
+ struct scsi_unmap_desc *buf, *end;
+
+ if ((ptrlen->flags & ~SU_ANCHOR) != 0) {
+ ctl_set_invalid_field(&io->scsiio,
+ /*sks_valid*/ 0,
+ /*command*/ 0,
+ /*field*/ 0,
+ /*bit_valid*/ 0,
+ /*bit*/ 0);
+ ctl_config_write_done(io);
+ return;
+ }
+
+ buf = (struct scsi_unmap_desc *)ptrlen->ptr;
+ end = buf + ptrlen->len / sizeof(*buf);
+ for (; buf < end; buf++) {
+ ctl_backend_ramdisk_delete(cbe_lun,
+ scsi_8btou64(buf->lba), scsi_4btoul(buf->length),
+ (ptrlen->flags & SU_ANCHOR) != 0);
+ }
+
+ ctl_set_success(&io->scsiio);
+ ctl_config_write_done(io);
+}
+
+static int
+ctl_backend_ramdisk_config_write(union ctl_io *io)
+{
+ struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
+ int retval = 0;
+
+ switch (io->scsiio.cdb[0]) {
+ case SYNCHRONIZE_CACHE:
+ case SYNCHRONIZE_CACHE_16:
+ /* We have no cache to flush. */
+ ctl_set_success(&io->scsiio);
+ ctl_config_write_done(io);
+ break;
+ case START_STOP_UNIT: {
+ struct scsi_start_stop_unit *cdb;
+
+ cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb;
+ if ((cdb->how & SSS_PC_MASK) != 0) {
+ ctl_set_success(&io->scsiio);
+ ctl_config_write_done(io);
+ break;
+ }
+ if (cdb->how & SSS_START) {
+ if (cdb->how & SSS_LOEJ)
+ ctl_lun_has_media(cbe_lun);
+ ctl_start_lun(cbe_lun);
+ } else {
+ ctl_stop_lun(cbe_lun);
+ if (cdb->how & SSS_LOEJ)
+ ctl_lun_ejected(cbe_lun);
+ }
+ ctl_set_success(&io->scsiio);
+ ctl_config_write_done(io);
+ break;
+ }
+ case PREVENT_ALLOW:
+ ctl_set_success(&io->scsiio);
+ ctl_config_write_done(io);
+ break;
+ case WRITE_SAME_10:
+ case WRITE_SAME_16:
+ ctl_backend_ramdisk_ws(io);
+ break;
+ case UNMAP:
+ ctl_backend_ramdisk_unmap(io);
+ break;
+ default:
+ ctl_set_invalid_opcode(&io->scsiio);
+ ctl_config_write_done(io);
+ retval = CTL_RETVAL_COMPLETE;
+ break;
+ }
+
+ return (retval);
+}
+
+static uint64_t
+ctl_backend_ramdisk_lun_attr(void *arg, const char *attrname)
+{
+ struct ctl_be_ramdisk_lun *be_lun = arg;
+ uint64_t val;
+
+ val = UINT64_MAX;
+ if (be_lun->cap_bytes == 0)
+ return (val);
+ sx_slock(&be_lun->page_lock);
+ if (strcmp(attrname, "blocksused") == 0) {
+ val = be_lun->cap_used / be_lun->cbe_lun.blocksize;
+ } else if (strcmp(attrname, "blocksavail") == 0) {
+ val = (be_lun->cap_bytes - be_lun->cap_used) /
+ be_lun->cbe_lun.blocksize;
+ }
+ sx_sunlock(&be_lun->page_lock);
+ return (val);
+}
+
+static int
ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
int flag, struct thread *td)
{
@@ -477,6 +950,9 @@ ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
taskqueue_drain_all(be_lun->io_taskqueue);
taskqueue_free(be_lun->io_taskqueue);
ctl_free_opts(&be_lun->cbe_lun.options);
+ free(be_lun->zero_page, M_RAMDISK);
+ ctl_backend_ramdisk_freeallpages(be_lun->pages, be_lun->indir);
+ sx_destroy(&be_lun->page_lock);
mtx_destroy(&be_lun->queue_lock);
free(be_lun, M_RAMDISK);
}
@@ -498,6 +974,7 @@ ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
struct ctl_lun_create_params *params;
char *value;
char tmpstr[32];
+ uint64_t t;
int retval;
retval = 0;
@@ -524,6 +1001,19 @@ ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
} else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF)
cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
+ be_lun->pblocksize = PAGE_SIZE;
+ value = ctl_get_opt(&cbe_lun->options, "pblocksize");
+ if (value != NULL) {
+ ctl_expand_number(value, &t);
+ be_lun->pblocksize = t;
+ }
+ if (be_lun->pblocksize < 512 || be_lun->pblocksize > 131072) {
+ snprintf(req->error_str, sizeof(req->error_str),
+ "%s: unsupported pblocksize %u", __func__,
+ be_lun->pblocksize);
+ goto bailout_error;
+ }
+
if (cbe_lun->lun_type == T_DIRECT ||
cbe_lun->lun_type == T_CDROM) {
if (params->blocksize_bytes != 0)
@@ -532,6 +1022,14 @@ ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
cbe_lun->blocksize = 2048;
else
cbe_lun->blocksize = 512;
+ be_lun->pblockmul = be_lun->pblocksize / cbe_lun->blocksize;
+ if (be_lun->pblockmul < 1 || !powerof2(be_lun->pblockmul)) {
+ snprintf(req->error_str, sizeof(req->error_str),
+ "%s: pblocksize %u not exp2 of blocksize %u",
+ __func__,
+ be_lun->pblocksize, cbe_lun->blocksize);
+ goto bailout_error;
+ }
if (params->lun_size_bytes < cbe_lun->blocksize) {
snprintf(req->error_str, sizeof(req->error_str),
"%s: LUN size %ju < blocksize %u", __func__,
@@ -540,9 +1038,25 @@ ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
}
be_lun->size_blocks = params->lun_size_bytes / cbe_lun->blocksize;
be_lun->size_bytes = be_lun->size_blocks * cbe_lun->blocksize;
+ be_lun->indir = 0;
+ t = be_lun->size_bytes / be_lun->pblocksize;
+ while (t > 1) {
+ t /= PPP;
+ be_lun->indir++;
+ }
cbe_lun->maxlba = be_lun->size_blocks - 1;
- cbe_lun->atomicblock = UINT32_MAX;
- cbe_lun->opttxferlen = softc->rd_size / cbe_lun->blocksize;
+ cbe_lun->pblockexp = fls(be_lun->pblockmul) - 1;
+ cbe_lun->pblockoff = 0;
+ cbe_lun->ublockexp = cbe_lun->pblockexp;
+ cbe_lun->ublockoff = 0;
+ cbe_lun->atomicblock = be_lun->pblocksize;
+ cbe_lun->opttxferlen = SGPP * be_lun->pblocksize;
+ value = ctl_get_opt(&cbe_lun->options, "capacity");
+ if (value != NULL)
+ ctl_expand_number(value, &be_lun->cap_bytes);
+ } else {
+ be_lun->pblockmul = 1;
+ cbe_lun->pblockexp = 0;
}
/* Tell the user the blocksize we ended up using */
@@ -550,7 +1064,7 @@ ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
params->lun_size_bytes = be_lun->size_bytes;
value = ctl_get_opt(&cbe_lun->options, "unmap");
- if (value != NULL && strcmp(value, "on") == 0)
+ if (value == NULL || strcmp(value, "off") != 0)
cbe_lun->flags |= CTL_LUN_FLAG_UNMAP;
value = ctl_get_opt(&cbe_lun->options, "readonly");
if (value != NULL) {
@@ -605,6 +1119,11 @@ ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
}
STAILQ_INIT(&be_lun->cont_queue);
+ sx_init(&be_lun->page_lock, "cram page lock");
+ if (be_lun->cap_bytes == 0)
+ be_lun->pages = malloc(be_lun->pblocksize, M_RAMDISK, M_WAITOK);
+ be_lun->zero_page = malloc(be_lun->pblocksize, M_RAMDISK,
+ M_WAITOK|M_ZERO);
mtx_init(&be_lun->queue_lock, "cram queue lock", NULL, MTX_DEF);
TASK_INIT(&be_lun->io_task, /*priority*/0, ctl_backend_ramdisk_worker,
be_lun);
@@ -679,10 +1198,12 @@ ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
bailout_error:
req->status = CTL_LUN_ERROR;
if (be_lun != NULL) {
- if (be_lun->io_taskqueue != NULL) {
+ if (be_lun->io_taskqueue != NULL)
taskqueue_free(be_lun->io_taskqueue);
- }
ctl_free_opts(&cbe_lun->options);
+ free(be_lun->zero_page, M_RAMDISK);
+ ctl_backend_ramdisk_freeallpages(be_lun->pages, be_lun->indir);
+ sx_destroy(&be_lun->page_lock);
mtx_destroy(&be_lun->queue_lock);
free(be_lun, M_RAMDISK);
}
@@ -838,104 +1359,3 @@ ctl_backend_ramdisk_lun_config_status(void *be_lun,
}
mtx_unlock(&softc->lock);
}
-
-static int
-ctl_backend_ramdisk_config_write(union ctl_io *io)
-{
- struct ctl_be_lun *cbe_lun;
- int retval;
-
- cbe_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
- CTL_PRIV_BACKEND_LUN].ptr;
- retval = 0;
- switch (io->scsiio.cdb[0]) {
- case SYNCHRONIZE_CACHE:
- case SYNCHRONIZE_CACHE_16:
- /*
- * The upper level CTL code will filter out any CDBs with
- * the immediate bit set and return the proper error. It
- * will also not allow a sync cache command to go to a LUN
- * that is powered down.
- *
- * We don't really need to worry about what LBA range the
- * user asked to be synced out. When they issue a sync
- * cache command, we'll sync out the whole thing.
- *
- * This is obviously just a stubbed out implementation.
- * The real implementation will be in the RAIDCore/CTL
- * interface, and can only really happen when RAIDCore
- * implements a per-array cache sync.
- */
- ctl_set_success(&io->scsiio);
- ctl_config_write_done(io);
- break;
- case START_STOP_UNIT: {
- struct scsi_start_stop_unit *cdb;
-
- cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb;
- if ((cdb->how & SSS_PC_MASK) != 0) {
- ctl_set_success(&io->scsiio);
- ctl_config_write_done(io);
- break;
- }
- if (cdb->how & SSS_START) {
- if (cdb->how & SSS_LOEJ)
- ctl_lun_has_media(cbe_lun);
- ctl_start_lun(cbe_lun);
- } else {
- ctl_stop_lun(cbe_lun);
- if (cdb->how & SSS_LOEJ)
- ctl_lun_ejected(cbe_lun);
- }
- ctl_set_success(&io->scsiio);
- ctl_config_write_done(io);
- break;
- }
- case PREVENT_ALLOW:
- case WRITE_SAME_10:
- case WRITE_SAME_16:
- case UNMAP:
- ctl_set_success(&io->scsiio);
- ctl_config_write_done(io);
- break;
- default:
- ctl_set_invalid_opcode(&io->scsiio);
- ctl_config_write_done(io);
- retval = CTL_RETVAL_COMPLETE;
- break;
- }
-
- return (retval);
-}
-
-static int
-ctl_backend_ramdisk_config_read(union ctl_io *io)
-{
- int retval = 0;
-
- switch (io->scsiio.cdb[0]) {
- case SERVICE_ACTION_IN:
- if (io->scsiio.cdb[1] == SGLS_SERVICE_ACTION) {
- /* We have nothing to tell, leave default data. */
- ctl_config_read_done(io);
- retval = CTL_RETVAL_COMPLETE;
- break;
- }
- ctl_set_invalid_field(&io->scsiio,
- /*sks_valid*/ 1,
- /*command*/ 1,
- /*field*/ 1,
- /*bit_valid*/ 1,
- /*bit*/ 4);
- ctl_config_read_done(io);
- retval = CTL_RETVAL_COMPLETE;
- break;
- default:
- ctl_set_invalid_opcode(&io->scsiio);
- ctl_config_read_done(io);
- retval = CTL_RETVAL_COMPLETE;
- break;
- }
-
- return (retval);
-}
diff --git a/sys/cam/ctl/ctl_cmd_table.c b/sys/cam/ctl/ctl_cmd_table.c
index eaedea6..e42df74 100644
--- a/sys/cam/ctl/ctl_cmd_table.c
+++ b/sys/cam/ctl/ctl_cmd_table.c
@@ -990,7 +990,7 @@ const struct ctl_cmd_entry ctl_cmd_table[256] =
CTL_CMD_FLAG_OK_ON_NO_MEDIA |
CTL_CMD_FLAG_OK_ON_STANDBY |
CTL_FLAG_DATA_OUT,
- CTL_LUN_PAT_NONE, 6, {0x11, 0, 0, 0xff, 0x07}},
+ CTL_LUN_PAT_NONE, 6, {0x13, 0, 0, 0xff, 0x07}},
/* 16 RESERVE(6) */
{ctl_scsi_reserve, CTL_SERIDX_RES, CTL_CMD_FLAG_ALLOW_ON_RESV |
@@ -1260,7 +1260,7 @@ const struct ctl_cmd_entry ctl_cmd_table[256] =
CTL_CMD_FLAG_OK_ON_NO_MEDIA |
CTL_CMD_FLAG_OK_ON_STANDBY |
CTL_FLAG_DATA_OUT,
- CTL_LUN_PAT_NONE, 10, {0x11, 0, 0, 0, 0, 0, 0xff, 0xff, 0x07} },
+ CTL_LUN_PAT_NONE, 10, {0x13, 0, 0, 0, 0, 0, 0xff, 0xff, 0x07} },
/* 56 RESERVE(10) */
{ctl_scsi_reserve, CTL_SERIDX_RES, CTL_CMD_FLAG_ALLOW_ON_RESV |
diff --git a/sys/cam/ctl/ctl_error.c b/sys/cam/ctl/ctl_error.c
index 02330a1..9c222fa 100644
--- a/sys/cam/ctl/ctl_error.c
+++ b/sys/cam/ctl/ctl_error.c
@@ -129,7 +129,7 @@ ctl_set_sense(struct ctl_scsiio *ctsio, int current_error, int sense_key,
* completed. Therefore we can safely access the LUN structure and
* flags without the lock.
*/
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+ lun = CTL_LUN(ctsio);
va_start(ap, ascq);
sense_len = 0;
@@ -641,6 +641,18 @@ ctl_set_invalid_field(struct ctl_scsiio *ctsio, int sks_valid, int command,
/*data*/ sks,
SSD_ELEM_NONE);
}
+void
+ctl_set_invalid_field_ciu(struct ctl_scsiio *ctsio)
+{
+
+ /* "Invalid field in command information unit" */
+ ctl_set_sense(ctsio,
+ /*current_error*/ 1,
+ /*sense_key*/ SSD_KEY_ABORTED_COMMAND,
+ /*ascq*/ 0x0E,
+ /*ascq*/ 0x03,
+ SSD_ELEM_NONE);
+}
void
ctl_set_invalid_opcode(struct ctl_scsiio *ctsio)
diff --git a/sys/cam/ctl/ctl_error.h b/sys/cam/ctl/ctl_error.h
index d4cdbb3..75c948c 100644
--- a/sys/cam/ctl/ctl_error.h
+++ b/sys/cam/ctl/ctl_error.h
@@ -66,6 +66,7 @@ void ctl_set_overlapped_cmd(struct ctl_scsiio *ctsio);
void ctl_set_overlapped_tag(struct ctl_scsiio *ctsio, uint8_t tag);
void ctl_set_invalid_field(struct ctl_scsiio *ctsio, int sks_valid, int command,
int field, int bit_valid, int bit);
+void ctl_set_invalid_field_ciu(struct ctl_scsiio *ctsio);
void ctl_set_invalid_opcode(struct ctl_scsiio *ctsio);
void ctl_set_param_len_error(struct ctl_scsiio *ctsio);
void ctl_set_already_locked(struct ctl_scsiio *ctsio);
diff --git a/sys/cam/ctl/ctl_frontend.c b/sys/cam/ctl/ctl_frontend.c
index 75837b5..765a31d 100644
--- a/sys/cam/ctl/ctl_frontend.c
+++ b/sys/cam/ctl/ctl_frontend.c
@@ -1,5 +1,6 @@
/*-
* Copyright (c) 2003 Silicon Graphics International Corp.
+ * Copyright (c) 2014-2017 Alexander Motin <mav@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -69,12 +70,11 @@ ctl_frontend_register(struct ctl_frontend *fe)
{
struct ctl_softc *softc = control_softc;
struct ctl_frontend *fe_tmp;
+ int error;
KASSERT(softc != NULL, ("CTL is not initialized"));
- /*
- * Sanity check, make sure this isn't a duplicate registration.
- */
+ /* Sanity check, make sure this isn't a duplicate registration. */
mtx_lock(&softc->ctl_lock);
STAILQ_FOREACH(fe_tmp, &softc->fe_list, links) {
if (strcmp(fe_tmp->name, fe->name) == 0) {
@@ -85,11 +85,14 @@ ctl_frontend_register(struct ctl_frontend *fe)
mtx_unlock(&softc->ctl_lock);
STAILQ_INIT(&fe->port_list);
- /*
- * Call the frontend's initialization routine.
- */
- if (fe->init != NULL)
- fe->init();
+ /* Call the frontend's initialization routine. */
+ if (fe->init != NULL) {
+ if ((error = fe->init()) != 0) {
+ printf("%s frontend init error: %d\n",
+ fe->name, error);
+ return (error);
+ }
+ }
mtx_lock(&softc->ctl_lock);
softc->num_frontends++;
@@ -102,20 +105,21 @@ int
ctl_frontend_deregister(struct ctl_frontend *fe)
{
struct ctl_softc *softc = control_softc;
-
- if (!STAILQ_EMPTY(&fe->port_list))
- return (-1);
+ int error;
+
+ /* Call the frontend's shutdown routine.*/
+ if (fe->shutdown != NULL) {
+ if ((error = fe->shutdown()) != 0) {
+ printf("%s frontend shutdown error: %d\n",
+ fe->name, error);
+ return (error);
+ }
+ }
mtx_lock(&softc->ctl_lock);
STAILQ_REMOVE(&softc->fe_list, fe, ctl_frontend, links);
softc->num_frontends--;
mtx_unlock(&softc->ctl_lock);
-
- /*
- * Call the frontend's shutdown routine.
- */
- if (fe->shutdown != NULL)
- fe->shutdown();
return (0);
}
@@ -192,13 +196,14 @@ error:
mtx_unlock(&softc->ctl_lock);
return (retval);
}
+ port->targ_port = port_num;
port->ctl_pool_ref = pool;
-
if (port->options.stqh_first == NULL)
STAILQ_INIT(&port->options);
+ port->stats.item = port_num;
+ mtx_init(&port->port_lock, "CTL port", NULL, MTX_DEF);
mtx_lock(&softc->ctl_lock);
- port->targ_port = port_num;
STAILQ_INSERT_TAIL(&port->frontend->port_list, port, fe_links);
for (tport = NULL, nport = STAILQ_FIRST(&softc->port_list);
nport != NULL && nport->targ_port < port_num;
@@ -218,17 +223,11 @@ int
ctl_port_deregister(struct ctl_port *port)
{
struct ctl_softc *softc = port->ctl_softc;
- struct ctl_io_pool *pool;
- int retval, i;
-
- retval = 0;
+ struct ctl_io_pool *pool = (struct ctl_io_pool *)port->ctl_pool_ref;
+ int i;
- pool = (struct ctl_io_pool *)port->ctl_pool_ref;
-
- if (port->targ_port == -1) {
- retval = 1;
- goto bailout;
- }
+ if (port->targ_port == -1)
+ return (1);
mtx_lock(&softc->ctl_lock);
STAILQ_REMOVE(&softc->port_list, port, ctl_port, links);
@@ -251,9 +250,9 @@ ctl_port_deregister(struct ctl_port *port)
for (i = 0; i < port->max_initiators; i++)
free(port->wwpn_iid[i].name, M_CTL);
free(port->wwpn_iid, M_CTL);
+ mtx_destroy(&port->port_lock);
-bailout:
- return (retval);
+ return (0);
}
void
@@ -315,9 +314,9 @@ ctl_port_online(struct ctl_port *port)
if (port->lun_enable != NULL) {
if (port->lun_map) {
- for (l = 0; l < CTL_MAX_LUNS; l++) {
- if (ctl_lun_map_from_port(port, l) >=
- CTL_MAX_LUNS)
+ for (l = 0; l < port->lun_map_size; l++) {
+ if (ctl_lun_map_from_port(port, l) ==
+ UINT32_MAX)
continue;
port->lun_enable(port->targ_lun_arg, l);
}
@@ -338,7 +337,7 @@ ctl_port_online(struct ctl_port *port)
}
port->status |= CTL_PORT_STATUS_ONLINE;
STAILQ_FOREACH(lun, &softc->lun_list, links) {
- if (ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS)
+ if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX)
continue;
mtx_lock(&lun->lun_lock);
ctl_est_ua_all(lun, -1, CTL_UA_INQ_CHANGE);
@@ -359,9 +358,9 @@ ctl_port_offline(struct ctl_port *port)
port->port_offline(port->onoff_arg);
if (port->lun_disable != NULL) {
if (port->lun_map) {
- for (l = 0; l < CTL_MAX_LUNS; l++) {
- if (ctl_lun_map_from_port(port, l) >=
- CTL_MAX_LUNS)
+ for (l = 0; l < port->lun_map_size; l++) {
+ if (ctl_lun_map_from_port(port, l) ==
+ UINT32_MAX)
continue;
port->lun_disable(port->targ_lun_arg, l);
}
@@ -373,7 +372,7 @@ ctl_port_offline(struct ctl_port *port)
mtx_lock(&softc->ctl_lock);
port->status &= ~CTL_PORT_STATUS_ONLINE;
STAILQ_FOREACH(lun, &softc->lun_list, links) {
- if (ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS)
+ if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX)
continue;
mtx_lock(&lun->lun_lock);
ctl_est_ua_all(lun, -1, CTL_UA_INQ_CHANGE);
diff --git a/sys/cam/ctl/ctl_frontend.h b/sys/cam/ctl/ctl_frontend.h
index 1dd970a..38eb863 100644
--- a/sys/cam/ctl/ctl_frontend.h
+++ b/sys/cam/ctl/ctl_frontend.h
@@ -1,5 +1,6 @@
/*-
* Copyright (c) 2003 Silicon Graphics International Corp.
+ * Copyright (c) 2014-2017 Alexander Motin <mav@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -39,6 +40,8 @@
#ifndef _CTL_FRONTEND_H_
#define _CTL_FRONTEND_H_
+#include <cam/ctl/ctl_ioctl.h>
+
typedef enum {
CTL_PORT_STATUS_NONE = 0x00,
CTL_PORT_STATUS_ONLINE = 0x01,
@@ -46,7 +49,7 @@ typedef enum {
} ctl_port_status;
typedef int (*fe_init_t)(void);
-typedef void (*fe_shutdown_t)(void);
+typedef int (*fe_shutdown_t)(void);
typedef void (*port_func_t)(void *onoff_arg);
typedef int (*port_info_func_t)(void *onoff_arg, struct sbuf *sb);
typedef int (*lun_func_t)(void *arg, int lun_id);
@@ -58,12 +61,13 @@ typedef int (*fe_ioctl_t)(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
{ \
switch (type) { \
case MOD_LOAD: \
- ctl_frontend_register( \
- (struct ctl_frontend *)data); \
+ return (ctl_frontend_register( \
+ (struct ctl_frontend *)data)); \
break; \
case MOD_UNLOAD: \
- printf(#name " module unload - not possible for this module type\n"); \
- return EINVAL; \
+ return (ctl_frontend_deregister( \
+ (struct ctl_frontend *)data)); \
+ break; \
default: \
return EOPNOTSUPP; \
} \
@@ -225,6 +229,7 @@ struct ctl_port {
void *onoff_arg; /* passed to CTL */
lun_func_t lun_enable; /* passed to CTL */
lun_func_t lun_disable; /* passed to CTL */
+ int lun_map_size; /* passed to CTL */
uint32_t *lun_map; /* passed to CTL */
void *targ_lun_arg; /* passed to CTL */
void (*fe_datamove)(union ctl_io *io); /* passed to CTL */
@@ -242,6 +247,8 @@ struct ctl_port {
struct ctl_devid *port_devid; /* passed to CTL */
struct ctl_devid *target_devid; /* passed to CTL */
struct ctl_devid *init_devid; /* passed to CTL */
+ struct ctl_io_stats stats; /* used by CTL */
+ struct mtx port_lock; /* used by CTL */
STAILQ_ENTRY(ctl_port) fe_links; /* used by CTL */
STAILQ_ENTRY(ctl_port) links; /* used by CTL */
};
diff --git a/sys/cam/ctl/ctl_frontend_cam_sim.c b/sys/cam/ctl/ctl_frontend_cam_sim.c
index 2166f45..9a0ca9a 100644
--- a/sys/cam/ctl/ctl_frontend_cam_sim.c
+++ b/sys/cam/ctl/ctl_frontend_cam_sim.c
@@ -94,15 +94,14 @@ struct cfcs_softc {
CAM_SNS_BUF_PHYS | CAM_CDB_PHYS | CAM_SENSE_PTR | \
CAM_SENSE_PHYS)
-int cfcs_init(void);
+static int cfcs_init(void);
+static int cfcs_shutdown(void);
static void cfcs_poll(struct cam_sim *sim);
static void cfcs_online(void *arg);
static void cfcs_offline(void *arg);
static void cfcs_datamove(union ctl_io *io);
static void cfcs_done(union ctl_io *io);
void cfcs_action(struct cam_sim *sim, union ccb *ccb);
-static void cfcs_async(void *callback_arg, uint32_t code,
- struct cam_path *path, void *arg);
struct cfcs_softc cfcs_softc;
/*
@@ -121,14 +120,14 @@ static struct ctl_frontend cfcs_frontend =
{
.name = "camsim",
.init = cfcs_init,
+ .shutdown = cfcs_shutdown,
};
CTL_FRONTEND_DECLARE(ctlcfcs, cfcs_frontend);
-int
+static int
cfcs_init(void)
{
struct cfcs_softc *softc;
- struct ccb_setasync csa;
struct ctl_port *port;
int retval;
@@ -214,13 +213,6 @@ cfcs_init(void)
goto bailout;
}
- xpt_setup_ccb(&csa.ccb_h, softc->path, CAM_PRIORITY_NONE);
- csa.ccb_h.func_code = XPT_SASYNC_CB;
- csa.event_enable = AC_LOST_DEVICE;
- csa.callback = cfcs_async;
- csa.callback_arg = softc->sim;
- xpt_action((union ccb *)&csa);
-
mtx_unlock(&softc->lock);
return (retval);
@@ -236,6 +228,27 @@ bailout:
return (retval);
}
+static int
+cfcs_shutdown(void)
+{
+ struct cfcs_softc *softc = &cfcs_softc;
+ struct ctl_port *port = &softc->port;
+ int error;
+
+ ctl_port_offline(port);
+
+ mtx_lock(&softc->lock);
+ xpt_free_path(softc->path);
+ xpt_bus_deregister(cam_sim_path(softc->sim));
+ cam_sim_free(softc->sim, /*free_devq*/ TRUE);
+ mtx_unlock(&softc->lock);
+ mtx_destroy(&softc->lock);
+
+ if ((error = ctl_port_deregister(port)) != 0)
+ printf("%s: cam_sim port deregistration failed\n", __func__);
+ return (error);
+}
+
static void
cfcs_poll(struct cam_sim *sim)
{
@@ -300,14 +313,10 @@ cfcs_datamove(union ctl_io *io)
struct ctl_sg_entry ctl_sg_entry, *ctl_sglist;
int cam_sg_count, ctl_sg_count, cam_sg_start;
int cam_sg_offset;
- int len_to_copy, len_copied;
+ int len_to_copy;
int ctl_watermark, cam_watermark;
int i, j;
-
- cam_sg_offset = 0;
- cam_sg_start = 0;
-
ccb = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
/*
@@ -330,6 +339,8 @@ cfcs_datamove(union ctl_io *io)
cam_sglist = (bus_dma_segment_t *)ccb->csio.data_ptr;
cam_sg_count = ccb->csio.sglist_cnt;
+ cam_sg_start = cam_sg_count;
+ cam_sg_offset = 0;
for (i = 0, len_seen = 0; i < cam_sg_count; i++) {
if ((len_seen + cam_sglist[i].ds_len) >=
@@ -367,7 +378,6 @@ cfcs_datamove(union ctl_io *io)
ctl_watermark = 0;
cam_watermark = cam_sg_offset;
- len_copied = 0;
for (i = cam_sg_start, j = 0;
i < cam_sg_count && j < ctl_sg_count;) {
uint8_t *cam_ptr, *ctl_ptr;
@@ -389,9 +399,6 @@ cfcs_datamove(union ctl_io *io)
ctl_ptr = (uint8_t *)ctl_sglist[j].addr;
ctl_ptr = ctl_ptr + ctl_watermark;
- ctl_watermark += len_to_copy;
- cam_watermark += len_to_copy;
-
if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
CTL_FLAG_DATA_IN) {
CTL_DEBUG_PRINT(("%s: copying %d bytes to CAM\n",
@@ -407,24 +414,27 @@ cfcs_datamove(union ctl_io *io)
bcopy(cam_ptr, ctl_ptr, len_to_copy);
}
- len_copied += len_to_copy;
+ io->scsiio.ext_data_filled += len_to_copy;
+ io->scsiio.kern_data_resid -= len_to_copy;
+ cam_watermark += len_to_copy;
if (cam_sglist[i].ds_len == cam_watermark) {
i++;
cam_watermark = 0;
}
+ ctl_watermark += len_to_copy;
if (ctl_sglist[j].len == ctl_watermark) {
j++;
ctl_watermark = 0;
}
}
- io->scsiio.ext_data_filled += len_copied;
-
if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) {
io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = NULL;
io->io_hdr.flags |= CTL_FLAG_STATUS_SENT;
+ ccb->csio.resid = ccb->csio.dxfer_len -
+ io->scsiio.ext_data_filled;
ccb->ccb_h.status &= ~CAM_STATUS_MASK;
ccb->ccb_h.status |= CAM_REQ_CMP;
xpt_done(ccb);
@@ -453,6 +463,10 @@ cfcs_done(union ctl_io *io)
/*
* Translate CTL status to CAM status.
*/
+ if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
+ ccb->csio.resid = ccb->csio.dxfer_len -
+ io->scsiio.ext_data_filled;
+ }
ccb->ccb_h.status &= ~CAM_STATUS_MASK;
switch (io->io_hdr.status & CTL_STATUS_MASK) {
case CTL_SUCCESS:
@@ -587,8 +601,7 @@ cfcs_action(struct cam_sim *sim, union ccb *ccb)
__func__, csio->cdb_len, sizeof(io->scsiio.cdb));
}
io->scsiio.cdb_len = min(csio->cdb_len, sizeof(io->scsiio.cdb));
- bcopy(csio->cdb_io.cdb_bytes, io->scsiio.cdb,
- io->scsiio.cdb_len);
+ bcopy(scsiio_cdb_ptr(csio), io->scsiio.cdb, io->scsiio.cdb_len);
ccb->ccb_h.status |= CAM_SIM_QUEUED;
err = ctl_queue(io);
@@ -801,9 +814,3 @@ cfcs_action(struct cam_sim *sim, union ccb *ccb)
break;
}
}
-
-static void
-cfcs_async(void *callback_arg, uint32_t code, struct cam_path *path, void *arg)
-{
-
-}
diff --git a/sys/cam/ctl/ctl_frontend_ioctl.c b/sys/cam/ctl/ctl_frontend_ioctl.c
index 97f29f1..4063c97 100644
--- a/sys/cam/ctl/ctl_frontend_ioctl.c
+++ b/sys/cam/ctl/ctl_frontend_ioctl.c
@@ -76,7 +76,7 @@ struct cfi_softc {
static struct cfi_softc cfi_softc;
static int cfi_init(void);
-static void cfi_shutdown(void);
+static int cfi_shutdown(void);
static void cfi_datamove(union ctl_io *io);
static void cfi_done(union ctl_io *io);
@@ -93,6 +93,7 @@ cfi_init(void)
{
struct cfi_softc *isoftc = &cfi_softc;
struct ctl_port *port;
+ int error = 0;
memset(isoftc, 0, sizeof(*isoftc));
@@ -108,24 +109,25 @@ cfi_init(void)
port->targ_port = -1;
port->max_initiators = 1;
- if (ctl_port_register(port) != 0) {
+ if ((error = ctl_port_register(port)) != 0) {
printf("%s: ioctl port registration failed\n", __func__);
- return (0);
+ return (error);
}
ctl_port_online(port);
return (0);
}
-void
+static int
cfi_shutdown(void)
{
struct cfi_softc *isoftc = &cfi_softc;
- struct ctl_port *port;
+ struct ctl_port *port = &isoftc->port;
+ int error = 0;
- port = &isoftc->port;
ctl_port_offline(port);
- if (ctl_port_deregister(&isoftc->port) != 0)
- printf("%s: ctl_frontend_deregister() failed\n", __func__);
+ if ((error = ctl_port_deregister(port)) != 0)
+ printf("%s: ioctl port deregistration failed\n", __func__);
+ return (error);
}
/*
@@ -138,22 +140,20 @@ ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio)
struct ctl_sg_entry ext_entry, kern_entry;
int ext_sglen, ext_sg_entries, kern_sg_entries;
int ext_sg_start, ext_offset;
- int len_to_copy, len_copied;
+ int len_to_copy;
int kern_watermark, ext_watermark;
int ext_sglist_malloced;
int i, j;
- ext_sglist_malloced = 0;
- ext_sg_start = 0;
- ext_offset = 0;
-
CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove\n"));
/*
* If this flag is set, fake the data transfer.
*/
if (ctsio->io_hdr.flags & CTL_FLAG_NO_DATAMOVE) {
- ctsio->ext_data_filled = ctsio->ext_data_len;
+ ext_sglist_malloced = 0;
+ ctsio->ext_data_filled += ctsio->kern_data_len;
+ ctsio->kern_data_resid = 0;
goto bailout;
}
@@ -165,7 +165,6 @@ ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio)
int len_seen;
ext_sglen = ctsio->ext_sg_entries * sizeof(*ext_sglist);
-
ext_sglist = (struct ctl_sg_entry *)malloc(ext_sglen, M_CTL,
M_WAITOK);
ext_sglist_malloced = 1;
@@ -174,6 +173,8 @@ ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio)
goto bailout;
}
ext_sg_entries = ctsio->ext_sg_entries;
+ ext_sg_start = ext_sg_entries;
+ ext_offset = 0;
len_seen = 0;
for (i = 0; i < ext_sg_entries; i++) {
if ((len_seen + ext_sglist[i].len) >=
@@ -186,6 +187,7 @@ ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio)
}
} else {
ext_sglist = &ext_entry;
+ ext_sglist_malloced = 0;
ext_sglist->addr = ctsio->ext_data_ptr;
ext_sglist->len = ctsio->ext_data_len;
ext_sg_entries = 1;
@@ -203,10 +205,8 @@ ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio)
kern_sg_entries = 1;
}
-
kern_watermark = 0;
ext_watermark = ext_offset;
- len_copied = 0;
for (i = ext_sg_start, j = 0;
i < ext_sg_entries && j < kern_sg_entries;) {
uint8_t *ext_ptr, *kern_ptr;
@@ -228,9 +228,6 @@ ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio)
kern_ptr = (uint8_t *)kern_sglist[j].addr;
kern_ptr = kern_ptr + kern_watermark;
- kern_watermark += len_to_copy;
- ext_watermark += len_to_copy;
-
if ((ctsio->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
CTL_FLAG_DATA_IN) {
CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: copying %d "
@@ -252,21 +249,22 @@ ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio)
}
}
- len_copied += len_to_copy;
+ ctsio->ext_data_filled += len_to_copy;
+ ctsio->kern_data_resid -= len_to_copy;
+ ext_watermark += len_to_copy;
if (ext_sglist[i].len == ext_watermark) {
i++;
ext_watermark = 0;
}
+ kern_watermark += len_to_copy;
if (kern_sglist[j].len == kern_watermark) {
j++;
kern_watermark = 0;
}
}
- ctsio->ext_data_filled += len_copied;
-
CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: ext_sg_entries: %d, "
"kern_sg_entries: %d\n", ext_sg_entries,
kern_sg_entries));
@@ -274,10 +272,7 @@ ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio)
"kern_data_len = %d\n", ctsio->ext_data_len,
ctsio->kern_data_len));
-
- /* XXX KDM set residual?? */
bailout:
-
if (ext_sglist_malloced != 0)
free(ext_sglist, M_CTL);
@@ -397,7 +392,7 @@ ctl_ioctl_io(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
struct thread *td)
{
union ctl_io *io;
- void *pool_tmp;
+ void *pool_tmp, *sc_tmp;
int retval = 0;
/*
@@ -414,8 +409,10 @@ ctl_ioctl_io(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
* spammed by the user's ctl_io.
*/
pool_tmp = io->io_hdr.pool;
+ sc_tmp = CTL_SOFTC(io);
memcpy(io, (void *)addr, sizeof(*io));
io->io_hdr.pool = pool_tmp;
+ CTL_SOFTC(io) = sc_tmp;
/*
* No status yet, so make sure the status is set properly.
diff --git a/sys/cam/ctl/ctl_frontend_iscsi.c b/sys/cam/ctl/ctl_frontend_iscsi.c
index 70de5fa..72580e9 100644
--- a/sys/cam/ctl/ctl_frontend_iscsi.c
+++ b/sys/cam/ctl/ctl_frontend_iscsi.c
@@ -144,7 +144,8 @@ SYSCTL_INT(_kern_cam_ctl_iscsi, OID_AUTO, maxcmdsn_delta, CTLFLAG_RWTUN,
#define PDU_TOTAL_TRANSFER_LEN(X) (X)->ip_prv1
#define PDU_R2TSN(X) (X)->ip_prv2
-int cfiscsi_init(void);
+static int cfiscsi_init(void);
+static int cfiscsi_shutdown(void);
static void cfiscsi_online(void *arg);
static void cfiscsi_offline(void *arg);
static int cfiscsi_info(void *arg, struct sbuf *sb);
@@ -182,6 +183,7 @@ static struct ctl_frontend cfiscsi_frontend =
.name = "iscsi",
.init = cfiscsi_init,
.ioctl = cfiscsi_ioctl,
+ .shutdown = cfiscsi_shutdown,
};
CTL_FRONTEND_DECLARE(ctlcfiscsi, cfiscsi_frontend);
MODULE_DEPEND(ctlcfiscsi, icl, 1, 1, 1);
@@ -769,6 +771,7 @@ cfiscsi_handle_data_segment(struct icl_pdu *request, struct cfiscsi_data_wait *c
cdw->cdw_sg_len -= copy_len;
off += copy_len;
io->scsiio.ext_data_filled += copy_len;
+ io->scsiio.kern_data_resid -= copy_len;
if (cdw->cdw_sg_len == 0) {
/*
@@ -1320,7 +1323,7 @@ cfiscsi_session_delete(struct cfiscsi_session *cs)
free(cs, M_CFISCSI);
}
-int
+static int
cfiscsi_init(void)
{
struct cfiscsi_softc *softc;
@@ -1343,6 +1346,23 @@ cfiscsi_init(void)
return (0);
}
+static int
+cfiscsi_shutdown(void)
+{
+ struct cfiscsi_softc *softc = &cfiscsi_softc;
+
+ if (!TAILQ_EMPTY(&softc->sessions) || !TAILQ_EMPTY(&softc->targets))
+ return (EBUSY);
+
+ uma_zdestroy(cfiscsi_data_wait_zone);
+#ifdef ICL_KERNEL_PROXY
+ cv_destroy(&softc->accept_cv);
+#endif
+ cv_destroy(&softc->sessions_cv);
+ mtx_destroy(&softc->lock);
+ return (0);
+}
+
#ifdef ICL_KERNEL_PROXY
static void
cfiscsi_accept(struct socket *so, struct sockaddr *sa, int portal_id)
@@ -2073,7 +2093,8 @@ cfiscsi_ioctl_port_create(struct ctl_req *req)
if (ct->ct_state == CFISCSI_TARGET_STATE_ACTIVE) {
req->status = CTL_LUN_ERROR;
snprintf(req->error_str, sizeof(req->error_str),
- "target \"%s\" already exists", target);
+ "target \"%s\" for portal group tag %u already exists",
+ target, tag);
cfiscsi_target_release(ct);
ctl_free_opts(&opts);
return;
@@ -2504,6 +2525,7 @@ cfiscsi_datamove_in(union ctl_io *io)
}
sg_addr += len;
sg_len -= len;
+ io->scsiio.kern_data_resid -= len;
KASSERT(buffer_offset + response->ip_data_len <= expected_len,
("buffer_offset %zd + ip_data_len %zd > expected_len %zd",
@@ -2589,7 +2611,7 @@ cfiscsi_datamove_out(union ctl_io *io)
struct iscsi_bhs_r2t *bhsr2t;
struct cfiscsi_data_wait *cdw;
struct ctl_sg_entry ctl_sg_entry, *ctl_sglist;
- uint32_t expected_len, r2t_off, r2t_len;
+ uint32_t expected_len, datamove_len, r2t_off, r2t_len;
uint32_t target_transfer_tag;
bool done;
@@ -2608,16 +2630,15 @@ cfiscsi_datamove_out(union ctl_io *io)
PDU_TOTAL_TRANSFER_LEN(request) = io->scsiio.kern_total_len;
/*
- * Report write underflow as error since CTL and backends don't
- * really support it, and SCSI does not tell how to do it right.
+ * Complete write underflow. Not a single byte to read. Return.
*/
expected_len = ntohl(bhssc->bhssc_expected_data_transfer_length);
- if (io->scsiio.kern_rel_offset + io->scsiio.kern_data_len >
- expected_len) {
- io->scsiio.io_hdr.port_status = 43;
+ if (io->scsiio.kern_rel_offset >= expected_len) {
io->scsiio.be_move_done(io);
return;
}
+ datamove_len = MIN(io->scsiio.kern_data_len,
+ expected_len - io->scsiio.kern_rel_offset);
target_transfer_tag =
atomic_fetchadd_32(&cs->cs_target_transfer_tag, 1);
@@ -2640,7 +2661,7 @@ cfiscsi_datamove_out(union ctl_io *io)
cdw->cdw_ctl_io = io;
cdw->cdw_target_transfer_tag = target_transfer_tag;
cdw->cdw_initiator_task_tag = bhssc->bhssc_initiator_task_tag;
- cdw->cdw_r2t_end = io->scsiio.kern_data_len;
+ cdw->cdw_r2t_end = datamove_len;
cdw->cdw_datasn = 0;
/* Set initial data pointer for the CDW respecting ext_data_filled. */
@@ -2649,7 +2670,7 @@ cfiscsi_datamove_out(union ctl_io *io)
} else {
ctl_sglist = &ctl_sg_entry;
ctl_sglist->addr = io->scsiio.kern_data_ptr;
- ctl_sglist->len = io->scsiio.kern_data_len;
+ ctl_sglist->len = datamove_len;
}
cdw->cdw_sg_index = 0;
cdw->cdw_sg_addr = ctl_sglist[cdw->cdw_sg_index].addr;
@@ -2680,7 +2701,7 @@ cfiscsi_datamove_out(union ctl_io *io)
}
r2t_off = io->scsiio.kern_rel_offset + io->scsiio.ext_data_filled;
- r2t_len = MIN(io->scsiio.kern_data_len - io->scsiio.ext_data_filled,
+ r2t_len = MIN(datamove_len - io->scsiio.ext_data_filled,
cs->cs_max_burst_length);
cdw->cdw_r2t_end = io->scsiio.ext_data_filled + r2t_len;
diff --git a/sys/cam/ctl/ctl_ha.c b/sys/cam/ctl/ctl_ha.c
index d7f21c7..3402f04 100644
--- a/sys/cam/ctl/ctl_ha.c
+++ b/sys/cam/ctl/ctl_ha.c
@@ -1001,7 +1001,7 @@ ctl_ha_msg_shutdown(struct ctl_softc *ctl_softc)
softc->ha_shutdown = 1;
softc->ha_wakeup = 1;
wakeup(&softc->ha_wakeup);
- while (softc->ha_shutdown < 2) {
+ while (softc->ha_shutdown < 2 && !SCHEDULER_STOPPED()) {
msleep(&softc->ha_wakeup, &softc->ha_lock, 0,
"shutdown", hz);
}
diff --git a/sys/cam/ctl/ctl_io.h b/sys/cam/ctl/ctl_io.h
index bad030f..9c472f5 100644
--- a/sys/cam/ctl/ctl_io.h
+++ b/sys/cam/ctl/ctl_io.h
@@ -145,7 +145,9 @@ struct ctl_ptr_len_flags {
union ctl_priv {
uint8_t bytes[sizeof(uint64_t) * 2];
uint64_t integer;
+ uint64_t integers[2];
void *ptr;
+ void *ptrs[2];
};
/*
@@ -164,6 +166,12 @@ union ctl_priv {
#define CTL_PRIV_FRONTEND 4 /* Frontend storage */
#define CTL_PRIV_FRONTEND2 5 /* Another frontend storage */
+#define CTL_LUN(io) ((io)->io_hdr.ctl_private[CTL_PRIV_LUN].ptrs[0])
+#define CTL_SOFTC(io) ((io)->io_hdr.ctl_private[CTL_PRIV_LUN].ptrs[1])
+#define CTL_BACKEND_LUN(io) ((io)->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptrs[0])
+#define CTL_PORT(io) (((struct ctl_softc *)CTL_SOFTC(io))-> \
+ ctl_ports[(io)->io_hdr.nexus.targ_port])
+
#define CTL_INVALID_PORTNAME 0xFF
#define CTL_UNMAPPED_IID 0xFF
@@ -312,7 +320,7 @@ struct ctl_scsiio {
uint8_t sense_len; /* Returned sense length */
uint8_t scsi_status; /* SCSI status byte */
uint8_t sense_residual; /* Unused. */
- uint32_t residual; /* data residual length */
+ uint32_t residual; /* Unused */
uint32_t tag_num; /* tag number */
ctl_tag_type tag_type; /* simple, ordered, head of queue,etc.*/
uint8_t cdb_len; /* CDB length */
@@ -365,7 +373,7 @@ struct ctl_taskio {
/*
* HA link messages.
*/
-#define CTL_HA_VERSION 1
+#define CTL_HA_VERSION 3
/*
* Used for CTL_MSG_LOGIN.
@@ -461,7 +469,8 @@ struct ctl_ha_msg_dt {
};
/*
- * Used for CTL_MSG_SERIALIZE, CTL_MSG_FINISH_IO, CTL_MSG_BAD_JUJU.
+ * Used for CTL_MSG_SERIALIZE, CTL_MSG_FINISH_IO, CTL_MSG_BAD_JUJU,
+ * and CTL_MSG_DATAMOVE_DONE.
*/
struct ctl_ha_msg_scsi {
struct ctl_ha_msg_hdr hdr;
@@ -471,10 +480,9 @@ struct ctl_ha_msg_scsi {
uint8_t cdb_len; /* CDB length */
uint8_t scsi_status; /* SCSI status byte */
uint8_t sense_len; /* Returned sense length */
- uint8_t sense_residual; /* sense residual length */
- uint32_t residual; /* data residual length */
- uint32_t fetd_status; /* trans status, set by FETD,
+ uint32_t port_status; /* trans status, set by FETD,
0 = good*/
+ uint32_t kern_data_resid; /* for DATAMOVE_DONE */
struct scsi_sense_data sense_data; /* sense data */
};
diff --git a/sys/cam/ctl/ctl_ioctl.h b/sys/cam/ctl/ctl_ioctl.h
index 7d1128d..cd20581 100644
--- a/sys/cam/ctl/ctl_ioctl.h
+++ b/sys/cam/ctl/ctl_ioctl.h
@@ -1,6 +1,7 @@
/*-
* Copyright (c) 2003 Silicon Graphics International Corp.
* Copyright (c) 2011 Spectra Logic Corporation
+ * Copyright (c) 2014-2017 Alexander Motin <mav@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -80,6 +81,9 @@
/* Hopefully this won't conflict with new misc devices that pop up */
#define CTL_MINOR 225
+/* Legacy statistics accumulated for every port for every LU. */
+#define CTL_LEGACY_STATS 1
+
typedef enum {
CTL_DELAY_TYPE_NONE,
CTL_DELAY_TYPE_CONT,
@@ -117,6 +121,18 @@ typedef enum {
#define CTL_STATS_NUM_TYPES 3
typedef enum {
+ CTL_SS_OK,
+ CTL_SS_NEED_MORE_SPACE,
+ CTL_SS_ERROR
+} ctl_stats_status;
+
+typedef enum {
+ CTL_STATS_FLAG_NONE = 0x00,
+ CTL_STATS_FLAG_TIME_VALID = 0x01
+} ctl_stats_flags;
+
+#ifdef CTL_LEGACY_STATS
+typedef enum {
CTL_LUN_STATS_NO_BLOCKSIZE = 0x01
} ctl_lun_stats_flags;
@@ -137,17 +153,6 @@ struct ctl_lun_io_stats {
struct ctl_lun_io_port_stats ports[CTL_MAX_PORTS];
};
-typedef enum {
- CTL_SS_OK,
- CTL_SS_NEED_MORE_SPACE,
- CTL_SS_ERROR
-} ctl_stats_status;
-
-typedef enum {
- CTL_STATS_FLAG_NONE = 0x00,
- CTL_STATS_FLAG_TIME_VALID = 0x01
-} ctl_stats_flags;
-
struct ctl_stats {
int alloc_len; /* passed to kernel */
struct ctl_lun_io_stats *lun_stats; /* passed to/from kernel */
@@ -157,6 +162,27 @@ struct ctl_stats {
ctl_stats_flags flags; /* passed to userland */
struct timespec timestamp; /* passed to userland */
};
+#endif /* CTL_LEGACY_STATS */
+
+struct ctl_io_stats {
+ uint32_t item;
+ uint64_t bytes[CTL_STATS_NUM_TYPES];
+ uint64_t operations[CTL_STATS_NUM_TYPES];
+ uint64_t dmas[CTL_STATS_NUM_TYPES];
+ struct bintime time[CTL_STATS_NUM_TYPES];
+ struct bintime dma_time[CTL_STATS_NUM_TYPES];
+};
+
+struct ctl_get_io_stats {
+ struct ctl_io_stats *stats; /* passed to/from kernel */
+ size_t alloc_len; /* passed to kernel */
+ size_t fill_len; /* passed to userland */
+ int first_item; /* passed to kernel */
+ int num_items; /* passed to userland */
+ ctl_stats_status status; /* passed to userland */
+ ctl_stats_flags flags; /* passed to userland */
+ struct timespec timestamp; /* passed to userland */
+};
/*
* The types of errors that can be injected:
@@ -342,12 +368,54 @@ typedef enum {
CTL_LUNREQ_MODIFY,
} ctl_lunreq_type;
+/*
+ * The ID_REQ flag is used to say that the caller has requested a
+ * particular LUN ID in the req_lun_id field. If we cannot allocate that
+ * LUN ID, the ctl_add_lun() call will fail.
+ *
+ * The STOPPED flag tells us that the LUN should default to the powered
+ * off state. It will return 0x04,0x02 until it is powered up. ("Logical
+ * unit not ready, initializing command required.")
+ *
+ * The NO_MEDIA flag tells us that the LUN has no media inserted.
+ *
+ * The PRIMARY flag tells us that this LUN is registered as a Primary LUN
+ * which is accessible via the Master shelf controller in an HA. This flag
+ * being set indicates a Primary LUN. This flag being reset represents a
+ * Secondary LUN controlled by the Secondary controller in an HA
+ * configuration. Flag is applicable at this time to T_DIRECT types.
+ *
+ * The SERIAL_NUM flag tells us that the serial_num field is filled in and
+ * valid for use in SCSI INQUIRY VPD page 0x80.
+ *
+ * The DEVID flag tells us that the device_id field is filled in and
+ * valid for use in SCSI INQUIRY VPD page 0x83.
+ *
+ * The DEV_TYPE flag tells us that the device_type field is filled in.
+ *
+ * The EJECTED flag tells us that the removable LUN has tray open.
+ *
+ * The UNMAP flag tells us that this LUN supports UNMAP.
+ *
+ * The OFFLINE flag tells us that this LUN can not access backing store.
+ */
+typedef enum {
+ CTL_LUN_FLAG_ID_REQ = 0x01,
+ CTL_LUN_FLAG_STOPPED = 0x02,
+ CTL_LUN_FLAG_NO_MEDIA = 0x04,
+ CTL_LUN_FLAG_PRIMARY = 0x08,
+ CTL_LUN_FLAG_SERIAL_NUM = 0x10,
+ CTL_LUN_FLAG_DEVID = 0x20,
+ CTL_LUN_FLAG_DEV_TYPE = 0x40,
+ CTL_LUN_FLAG_UNMAP = 0x80,
+ CTL_LUN_FLAG_EJECTED = 0x100,
+ CTL_LUN_FLAG_READONLY = 0x200
+} ctl_backend_lun_flags;
/*
* LUN creation parameters:
*
- * flags: Various LUN flags, see ctl_backend.h for a
- * description of the flag values and meanings.
+ * flags: Various LUN flags, see above.
*
* device_type: The SCSI device type. e.g. 0 for Direct Access,
* 3 for Processor, etc. Only certain backends may
@@ -465,6 +533,7 @@ union ctl_lunreq_data {
* kern_be_args: For kernel use only.
*/
struct ctl_lun_req {
+#define CTL_BE_NAME_LEN 32
char backend[CTL_BE_NAME_LEN];
ctl_lunreq_type reqtype;
union ctl_lunreq_data reqdata;
@@ -773,6 +842,8 @@ struct ctl_lun_map {
#define CTL_PORT_REQ _IOWR(CTL_MINOR, 0x26, struct ctl_req)
#define CTL_PORT_LIST _IOWR(CTL_MINOR, 0x27, struct ctl_lun_list)
#define CTL_LUN_MAP _IOW(CTL_MINOR, 0x28, struct ctl_lun_map)
+#define CTL_GET_LUN_STATS _IOWR(CTL_MINOR, 0x29, struct ctl_get_io_stats)
+#define CTL_GET_PORT_STATS _IOWR(CTL_MINOR, 0x2a, struct ctl_get_io_stats)
#endif /* _CTL_IOCTL_H_ */
diff --git a/sys/cam/ctl/ctl_private.h b/sys/cam/ctl/ctl_private.h
index e118343..40f0e61 100644
--- a/sys/cam/ctl/ctl_private.h
+++ b/sys/cam/ctl/ctl_private.h
@@ -1,6 +1,6 @@
/*-
* Copyright (c) 2003, 2004, 2005, 2008 Silicon Graphics International Corp.
- * Copyright (c) 2014-2015 Alexander Motin <mav@FreeBSD.org>
+ * Copyright (c) 2014-2017 Alexander Motin <mav@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -404,7 +404,10 @@ struct ctl_lun {
struct callout ie_callout; /* INTERVAL TIMER */
struct ctl_mode_pages mode_pages;
struct ctl_log_pages log_pages;
- struct ctl_lun_io_stats stats;
+#ifdef CTL_LEGACY_STATS
+ struct ctl_lun_io_stats legacy_stats;
+#endif /* CTL_LEGACY_STATS */
+ struct ctl_io_stats stats;
uint32_t res_idx;
uint32_t pr_generation;
uint64_t *pr_keys[CTL_MAX_PORTS];
@@ -412,7 +415,7 @@ struct ctl_lun {
uint32_t pr_res_idx;
uint8_t pr_res_type;
int prevent_count;
- uint32_t prevent[(CTL_MAX_INITIATORS+31)/32];
+ uint32_t *prevent;
uint8_t *write_buffer;
struct ctl_devid *lun_devid;
TAILQ_HEAD(tpc_lists, tpc_list) tpc_lists;
@@ -467,7 +470,10 @@ struct ctl_softc {
STAILQ_HEAD(, ctl_backend_driver) be_list;
struct uma_zone *io_zone;
uint32_t cur_pool_id;
+ int shutdown;
struct ctl_thread threads[CTL_MAX_THREADS];
+ struct thread *lun_thread;
+ struct thread *thresh_thread;
TAILQ_HEAD(tpc_tokens, tpc_token) tpc_tokens;
struct callout tpc_timeout;
struct mtx tpc_lock;
diff --git a/sys/cam/ctl/ctl_tpc.c b/sys/cam/ctl/ctl_tpc.c
index 484293b..c8d60ca 100644
--- a/sys/cam/ctl/ctl_tpc.c
+++ b/sys/cam/ctl/ctl_tpc.c
@@ -251,6 +251,7 @@ ctl_tpc_lun_shutdown(struct ctl_lun *lun)
int
ctl_inquiry_evpd_tpc(struct ctl_scsiio *ctsio, int alloc_len)
{
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_vpd_tpc *tpc_ptr;
struct scsi_vpd_tpc_descriptor *d_ptr;
struct scsi_vpd_tpc_descriptor_bdrl *bdrl_ptr;
@@ -264,11 +265,8 @@ ctl_inquiry_evpd_tpc(struct ctl_scsiio *ctsio, int alloc_len)
struct scsi_vpd_tpc_descriptor_srt *srt_ptr;
struct scsi_vpd_tpc_descriptor_srtd *srtd_ptr;
struct scsi_vpd_tpc_descriptor_gco *gco_ptr;
- struct ctl_lun *lun;
int data_len;
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
data_len = sizeof(struct scsi_vpd_tpc) +
sizeof(struct scsi_vpd_tpc_descriptor_bdrl) +
roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sc) +
@@ -284,20 +282,10 @@ ctl_inquiry_evpd_tpc(struct ctl_scsiio *ctsio, int alloc_len)
ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
tpc_ptr = (struct scsi_vpd_tpc *)ctsio->kern_data_ptr;
- ctsio->kern_sg_entries = 0;
-
- if (data_len < alloc_len) {
- ctsio->residual = alloc_len - data_len;
- ctsio->kern_data_len = data_len;
- ctsio->kern_total_len = data_len;
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
+ ctsio->kern_data_len = min(data_len, alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
/*
* The control device is always connected. The disk device, on the
@@ -460,20 +448,10 @@ ctl_receive_copy_operating_parameters(struct ctl_scsiio *ctsio)
alloc_len = scsi_4btoul(cdb->length);
ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
-
ctsio->kern_sg_entries = 0;
-
- if (total_len < alloc_len) {
- ctsio->residual = alloc_len - total_len;
- ctsio->kern_data_len = total_len;
- ctsio->kern_total_len = total_len;
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
+ ctsio->kern_data_len = min(total_len, alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
data = (struct scsi_receive_copy_operating_parameters_data *)ctsio->kern_data_ptr;
scsi_ulto4b(sizeof(*data) - 4 + 4, data->length);
@@ -521,7 +499,7 @@ tpc_find_list(struct ctl_lun *lun, uint32_t list_id, uint32_t init_idx)
int
ctl_receive_copy_status_lid1(struct ctl_scsiio *ctsio)
{
- struct ctl_lun *lun;
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_receive_copy_status_lid1 *cdb;
struct scsi_receive_copy_status_lid1_data *data;
struct tpc_list *list;
@@ -533,8 +511,6 @@ ctl_receive_copy_status_lid1(struct ctl_scsiio *ctsio)
CTL_DEBUG_PRINT(("ctl_receive_copy_status_lid1\n"));
cdb = (struct scsi_receive_copy_status_lid1 *)ctsio->cdb;
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
retval = CTL_RETVAL_COMPLETE;
list_id = cdb->list_identifier;
@@ -560,20 +536,10 @@ ctl_receive_copy_status_lid1(struct ctl_scsiio *ctsio)
alloc_len = scsi_4btoul(cdb->length);
ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
-
ctsio->kern_sg_entries = 0;
-
- if (total_len < alloc_len) {
- ctsio->residual = alloc_len - total_len;
- ctsio->kern_data_len = total_len;
- ctsio->kern_total_len = total_len;
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
+ ctsio->kern_data_len = min(total_len, alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
data = (struct scsi_receive_copy_status_lid1_data *)ctsio->kern_data_ptr;
scsi_ulto4b(sizeof(*data) - 4, data->available_data);
@@ -603,7 +569,7 @@ ctl_receive_copy_status_lid1(struct ctl_scsiio *ctsio)
int
ctl_receive_copy_failure_details(struct ctl_scsiio *ctsio)
{
- struct ctl_lun *lun;
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_receive_copy_failure_details *cdb;
struct scsi_receive_copy_failure_details_data *data;
struct tpc_list *list;
@@ -615,8 +581,6 @@ ctl_receive_copy_failure_details(struct ctl_scsiio *ctsio)
CTL_DEBUG_PRINT(("ctl_receive_copy_failure_details\n"));
cdb = (struct scsi_receive_copy_failure_details *)ctsio->cdb;
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
retval = CTL_RETVAL_COMPLETE;
list_id = cdb->list_identifier;
@@ -640,20 +604,10 @@ ctl_receive_copy_failure_details(struct ctl_scsiio *ctsio)
alloc_len = scsi_4btoul(cdb->length);
ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
-
ctsio->kern_sg_entries = 0;
-
- if (total_len < alloc_len) {
- ctsio->residual = alloc_len - total_len;
- ctsio->kern_data_len = total_len;
- ctsio->kern_total_len = total_len;
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
+ ctsio->kern_data_len = min(total_len, alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
data = (struct scsi_receive_copy_failure_details_data *)ctsio->kern_data_ptr;
if (list_copy.completed && (list_copy.error || list_copy.abort)) {
@@ -675,7 +629,7 @@ ctl_receive_copy_failure_details(struct ctl_scsiio *ctsio)
int
ctl_receive_copy_status_lid4(struct ctl_scsiio *ctsio)
{
- struct ctl_lun *lun;
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_receive_copy_status_lid4 *cdb;
struct scsi_receive_copy_status_lid4_data *data;
struct tpc_list *list;
@@ -687,8 +641,6 @@ ctl_receive_copy_status_lid4(struct ctl_scsiio *ctsio)
CTL_DEBUG_PRINT(("ctl_receive_copy_status_lid4\n"));
cdb = (struct scsi_receive_copy_status_lid4 *)ctsio->cdb;
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
retval = CTL_RETVAL_COMPLETE;
list_id = scsi_4btoul(cdb->list_identifier);
@@ -714,20 +666,10 @@ ctl_receive_copy_status_lid4(struct ctl_scsiio *ctsio)
alloc_len = scsi_4btoul(cdb->length);
ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
-
ctsio->kern_sg_entries = 0;
-
- if (total_len < alloc_len) {
- ctsio->residual = alloc_len - total_len;
- ctsio->kern_data_len = total_len;
- ctsio->kern_total_len = total_len;
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
+ ctsio->kern_data_len = min(total_len, alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
data = (struct scsi_receive_copy_status_lid4_data *)ctsio->kern_data_ptr;
scsi_ulto4b(sizeof(*data) - 4 + list_copy.sense_len,
@@ -761,7 +703,7 @@ ctl_receive_copy_status_lid4(struct ctl_scsiio *ctsio)
int
ctl_copy_operation_abort(struct ctl_scsiio *ctsio)
{
- struct ctl_lun *lun;
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_copy_operation_abort *cdb;
struct tpc_list *list;
int retval;
@@ -770,8 +712,6 @@ ctl_copy_operation_abort(struct ctl_scsiio *ctsio)
CTL_DEBUG_PRINT(("ctl_copy_operation_abort\n"));
cdb = (struct scsi_copy_operation_abort *)ctsio->cdb;
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
retval = CTL_RETVAL_COMPLETE;
list_id = scsi_4btoul(cdb->list_identifier);
@@ -894,7 +834,7 @@ tpc_process_b2b(struct tpc_list *list)
dcscd = scsi_2btoul(seg->dst_cscd);
sl = tpc_resolve(list, scscd, &srcblock, NULL, NULL);
dl = tpc_resolve(list, dcscd, &dstblock, &pb, &pbo);
- if (sl >= CTL_MAX_LUNS || dl >= CTL_MAX_LUNS) {
+ if (sl == UINT64_MAX || dl == UINT64_MAX) {
ctl_set_sense(list->ctsio, /*current_error*/ 1,
/*sense_key*/ SSD_KEY_COPY_ABORTED,
/*asc*/ 0x08, /*ascq*/ 0x04,
@@ -1042,7 +982,7 @@ tpc_process_verify(struct tpc_list *list)
seg = (struct scsi_ec_segment_verify *)list->seg[list->curseg];
cscd = scsi_2btoul(seg->src_cscd);
sl = tpc_resolve(list, cscd, NULL, NULL, NULL);
- if (sl >= CTL_MAX_LUNS) {
+ if (sl == UINT64_MAX) {
ctl_set_sense(list->ctsio, /*current_error*/ 1,
/*sense_key*/ SSD_KEY_COPY_ABORTED,
/*asc*/ 0x08, /*ascq*/ 0x04,
@@ -1106,7 +1046,7 @@ tpc_process_register_key(struct tpc_list *list)
seg = (struct scsi_ec_segment_register_key *)list->seg[list->curseg];
cscd = scsi_2btoul(seg->dst_cscd);
dl = tpc_resolve(list, cscd, NULL, NULL, NULL);
- if (dl >= CTL_MAX_LUNS) {
+ if (dl == UINT64_MAX) {
ctl_set_sense(list->ctsio, /*current_error*/ 1,
/*sense_key*/ SSD_KEY_COPY_ABORTED,
/*asc*/ 0x08, /*ascq*/ 0x04,
@@ -1705,11 +1645,11 @@ tpc_done(union ctl_io *io)
int
ctl_extended_copy_lid1(struct ctl_scsiio *ctsio)
{
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_extended_copy *cdb;
struct scsi_extended_copy_lid1_data *data;
struct scsi_ec_cscd *cscd;
struct scsi_ec_segment *seg;
- struct ctl_lun *lun;
struct tpc_list *list, *tlist;
uint8_t *ptr;
char *value;
@@ -1717,7 +1657,6 @@ ctl_extended_copy_lid1(struct ctl_scsiio *ctsio)
CTL_DEBUG_PRINT(("ctl_extended_copy_lid1\n"));
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
cdb = (struct scsi_extended_copy *)ctsio->cdb;
len = scsi_4btoul(cdb->length);
@@ -1741,7 +1680,6 @@ ctl_extended_copy_lid1(struct ctl_scsiio *ctsio)
ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
ctsio->kern_data_len = len;
ctsio->kern_total_len = len;
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
@@ -1861,11 +1799,11 @@ done:
int
ctl_extended_copy_lid4(struct ctl_scsiio *ctsio)
{
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_extended_copy *cdb;
struct scsi_extended_copy_lid4_data *data;
struct scsi_ec_cscd *cscd;
struct scsi_ec_segment *seg;
- struct ctl_lun *lun;
struct tpc_list *list, *tlist;
uint8_t *ptr;
char *value;
@@ -1873,7 +1811,6 @@ ctl_extended_copy_lid4(struct ctl_scsiio *ctsio)
CTL_DEBUG_PRINT(("ctl_extended_copy_lid4\n"));
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
cdb = (struct scsi_extended_copy *)ctsio->cdb;
len = scsi_4btoul(cdb->length);
@@ -1897,7 +1834,6 @@ ctl_extended_copy_lid4(struct ctl_scsiio *ctsio)
ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
ctsio->kern_data_len = len;
ctsio->kern_total_len = len;
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
@@ -2064,11 +2000,11 @@ tpc_create_token(struct ctl_lun *lun, struct ctl_port *port, off_t len,
int
ctl_populate_token(struct ctl_scsiio *ctsio)
{
+ struct ctl_softc *softc = CTL_SOFTC(ctsio);
+ struct ctl_port *port = CTL_PORT(ctsio);
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_populate_token *cdb;
struct scsi_populate_token_data *data;
- struct ctl_softc *softc;
- struct ctl_lun *lun;
- struct ctl_port *port;
struct tpc_list *list, *tlist;
struct tpc_token *token;
uint64_t lba;
@@ -2076,9 +2012,6 @@ ctl_populate_token(struct ctl_scsiio *ctsio)
CTL_DEBUG_PRINT(("ctl_populate_token\n"));
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
- softc = lun->ctl_softc;
- port = softc->ctl_ports[ctsio->io_hdr.nexus.targ_port];
cdb = (struct scsi_populate_token *)ctsio->cdb;
len = scsi_4btoul(cdb->length);
@@ -2098,7 +2031,6 @@ ctl_populate_token(struct ctl_scsiio *ctsio)
ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
ctsio->kern_data_len = len;
ctsio->kern_total_len = len;
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
@@ -2232,10 +2164,10 @@ done:
int
ctl_write_using_token(struct ctl_scsiio *ctsio)
{
+ struct ctl_softc *softc = CTL_SOFTC(ctsio);
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_write_using_token *cdb;
struct scsi_write_using_token_data *data;
- struct ctl_softc *softc;
- struct ctl_lun *lun;
struct tpc_list *list, *tlist;
struct tpc_token *token;
uint64_t lba;
@@ -2243,8 +2175,6 @@ ctl_write_using_token(struct ctl_scsiio *ctsio)
CTL_DEBUG_PRINT(("ctl_write_using_token\n"));
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
- softc = lun->ctl_softc;
cdb = (struct scsi_write_using_token *)ctsio->cdb;
len = scsi_4btoul(cdb->length);
@@ -2264,7 +2194,6 @@ ctl_write_using_token(struct ctl_scsiio *ctsio)
ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
ctsio->kern_data_len = len;
ctsio->kern_total_len = len;
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
@@ -2389,7 +2318,7 @@ done:
int
ctl_receive_rod_token_information(struct ctl_scsiio *ctsio)
{
- struct ctl_lun *lun;
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_receive_rod_token_information *cdb;
struct scsi_receive_copy_status_lid4_data *data;
struct tpc_list *list;
@@ -2402,8 +2331,6 @@ ctl_receive_rod_token_information(struct ctl_scsiio *ctsio)
CTL_DEBUG_PRINT(("ctl_receive_rod_token_information\n"));
cdb = (struct scsi_receive_rod_token_information *)ctsio->cdb;
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
retval = CTL_RETVAL_COMPLETE;
list_id = scsi_4btoul(cdb->list_identifier);
@@ -2430,20 +2357,10 @@ ctl_receive_rod_token_information(struct ctl_scsiio *ctsio)
alloc_len = scsi_4btoul(cdb->length);
ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
-
ctsio->kern_sg_entries = 0;
-
- if (total_len < alloc_len) {
- ctsio->residual = alloc_len - total_len;
- ctsio->kern_data_len = total_len;
- ctsio->kern_total_len = total_len;
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
+ ctsio->kern_data_len = min(total_len, alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
data = (struct scsi_receive_copy_status_lid4_data *)ctsio->kern_data_ptr;
scsi_ulto4b(sizeof(*data) - 4 + list_copy.sense_len +
@@ -2487,8 +2404,7 @@ ctl_receive_rod_token_information(struct ctl_scsiio *ctsio)
int
ctl_report_all_rod_tokens(struct ctl_scsiio *ctsio)
{
- struct ctl_softc *softc;
- struct ctl_lun *lun;
+ struct ctl_softc *softc = CTL_SOFTC(ctsio);
struct scsi_report_all_rod_tokens *cdb;
struct scsi_report_all_rod_tokens_data *data;
struct tpc_token *token;
@@ -2498,9 +2414,6 @@ ctl_report_all_rod_tokens(struct ctl_scsiio *ctsio)
CTL_DEBUG_PRINT(("ctl_receive_rod_token_information\n"));
cdb = (struct scsi_report_all_rod_tokens *)ctsio->cdb;
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
- softc = lun->ctl_softc;
-
retval = CTL_RETVAL_COMPLETE;
tokens = 0;
@@ -2515,20 +2428,10 @@ ctl_report_all_rod_tokens(struct ctl_scsiio *ctsio)
alloc_len = scsi_4btoul(cdb->length);
ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
-
ctsio->kern_sg_entries = 0;
-
- if (total_len < alloc_len) {
- ctsio->residual = alloc_len - total_len;
- ctsio->kern_data_len = total_len;
- ctsio->kern_total_len = total_len;
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
+ ctsio->kern_data_len = min(total_len, alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
data = (struct scsi_report_all_rod_tokens_data *)ctsio->kern_data_ptr;
i = 0;
diff --git a/sys/cam/ctl/ctl_tpc_local.c b/sys/cam/ctl/ctl_tpc_local.c
index 4f368f9..e5e77b4 100644
--- a/sys/cam/ctl/ctl_tpc_local.c
+++ b/sys/cam/ctl/ctl_tpc_local.c
@@ -65,7 +65,7 @@ struct tpcl_softc {
static struct tpcl_softc tpcl_softc;
static int tpcl_init(void);
-static void tpcl_shutdown(void);
+static int tpcl_shutdown(void);
static void tpcl_datamove(union ctl_io *io);
static void tpcl_done(union ctl_io *io);
@@ -84,7 +84,7 @@ tpcl_init(void)
struct tpcl_softc *tsoftc = &tpcl_softc;
struct ctl_port *port;
struct scsi_transportid_spi *tid;
- int len;
+ int error, len;
memset(tsoftc, 0, sizeof(*tsoftc));
@@ -100,9 +100,9 @@ tpcl_init(void)
port->targ_port = -1;
port->max_initiators = 1;
- if (ctl_port_register(port) != 0) {
- printf("%s: ctl_port_register() failed with error\n", __func__);
- return (0);
+ if ((error = ctl_port_register(port)) != 0) {
+ printf("%s: tpc port registration failed\n", __func__);
+ return (error);
}
len = sizeof(struct scsi_transportid_spi);
@@ -118,16 +118,17 @@ tpcl_init(void)
return (0);
}
-void
+static int
tpcl_shutdown(void)
{
struct tpcl_softc *tsoftc = &tpcl_softc;
- struct ctl_port *port;
+ struct ctl_port *port = &tsoftc->port;
+ int error;
- port = &tsoftc->port;
ctl_port_offline(port);
- if (ctl_port_deregister(&tsoftc->port) != 0)
- printf("%s: ctl_frontend_deregister() failed\n", __func__);
+ if ((error = ctl_port_deregister(port)) != 0)
+ printf("%s: tpc port deregistration failed\n", __func__);
+ return (error);
}
static void
@@ -137,7 +138,7 @@ tpcl_datamove(union ctl_io *io)
struct ctl_sg_entry ext_entry, kern_entry;
int ext_sg_entries, kern_sg_entries;
int ext_sg_start, ext_offset;
- int len_to_copy, len_copied;
+ int len_to_copy;
int kern_watermark, ext_watermark;
struct ctl_scsiio *ctsio;
int i, j;
@@ -196,7 +197,6 @@ tpcl_datamove(union ctl_io *io)
kern_watermark = 0;
ext_watermark = ext_offset;
- len_copied = 0;
for (i = ext_sg_start, j = 0;
i < ext_sg_entries && j < kern_sg_entries;) {
uint8_t *ext_ptr, *kern_ptr;
@@ -218,9 +218,6 @@ tpcl_datamove(union ctl_io *io)
kern_ptr = (uint8_t *)kern_sglist[j].addr;
kern_ptr = kern_ptr + kern_watermark;
- kern_watermark += len_to_copy;
- ext_watermark += len_to_copy;
-
if ((ctsio->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
CTL_FLAG_DATA_IN) {
CTL_DEBUG_PRINT(("%s: copying %d bytes to user\n",
@@ -236,27 +233,27 @@ tpcl_datamove(union ctl_io *io)
memcpy(kern_ptr, ext_ptr, len_to_copy);
}
- len_copied += len_to_copy;
+ ctsio->ext_data_filled += len_to_copy;
+ ctsio->kern_data_resid -= len_to_copy;
+ ext_watermark += len_to_copy;
if (ext_sglist[i].len == ext_watermark) {
i++;
ext_watermark = 0;
}
+ kern_watermark += len_to_copy;
if (kern_sglist[j].len == kern_watermark) {
j++;
kern_watermark = 0;
}
}
- ctsio->ext_data_filled += len_copied;
-
CTL_DEBUG_PRINT(("%s: ext_sg_entries: %d, kern_sg_entries: %d\n",
__func__, ext_sg_entries, kern_sg_entries));
CTL_DEBUG_PRINT(("%s: ext_data_len = %d, kern_data_len = %d\n",
__func__, ctsio->ext_data_len, ctsio->kern_data_len));
- /* XXX KDM set residual?? */
bailout:
io->scsiio.be_move_done(io);
}
@@ -290,7 +287,7 @@ tpcl_resolve(struct ctl_softc *softc, int init_port,
port = NULL;
STAILQ_FOREACH(lun, &softc->lun_list, links) {
if (port != NULL &&
- ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS)
+ ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX)
continue;
if (lun->lun_devid == NULL)
continue;
diff --git a/sys/cam/ctl/ctl_util.c b/sys/cam/ctl/ctl_util.c
index 6fcec03..33f0899 100644
--- a/sys/cam/ctl/ctl_util.c
+++ b/sys/cam/ctl/ctl_util.c
@@ -697,7 +697,6 @@ ctl_scsi_free_io(union ctl_io *io)
free(io);
}
-#endif /* !_KERNEL */
void
ctl_scsi_zero_io(union ctl_io *io)
{
@@ -707,11 +706,10 @@ ctl_scsi_zero_io(union ctl_io *io)
return;
pool_ref = io->io_hdr.pool;
-
memset(io, 0, sizeof(*io));
-
io->io_hdr.pool = pool_ref;
}
+#endif /* !_KERNEL */
const char *
ctl_scsi_task_string(struct ctl_taskio *taskio)
diff --git a/sys/cam/ctl/ctl_util.h b/sys/cam/ctl/ctl_util.h
index 2966b49..67c0915 100644
--- a/sys/cam/ctl/ctl_util.h
+++ b/sys/cam/ctl/ctl_util.h
@@ -96,8 +96,10 @@ void ctl_scsi_maintenance_in(union ctl_io *io, uint8_t *data_ptr,
#ifndef _KERNEL
union ctl_io *ctl_scsi_alloc_io(uint32_t initid);
void ctl_scsi_free_io(union ctl_io *io);
-#endif /* !_KERNEL */
void ctl_scsi_zero_io(union ctl_io *io);
+#else
+#define ctl_scsi_zero_io(io) ctl_zero_io(io)
+#endif /* !_KERNEL */
const char *ctl_scsi_task_string(struct ctl_taskio *taskio);
void ctl_io_sbuf(union ctl_io *io, struct sbuf *sb);
void ctl_io_error_sbuf(union ctl_io *io,
diff --git a/sys/cam/ctl/scsi_ctl.c b/sys/cam/ctl/scsi_ctl.c
index f403391..1d3e048 100644
--- a/sys/cam/ctl/scsi_ctl.c
+++ b/sys/cam/ctl/scsi_ctl.c
@@ -185,8 +185,11 @@ MALLOC_DEFINE(M_CTLFE, "CAM CTL FE", "CAM CTL FE interface");
/* This is only used in the CTIO */
#define ccb_atio ppriv_ptr1
-int ctlfeinitialize(void);
-void ctlfeshutdown(void);
+#define PRIV_CCB(io) ((io)->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptrs[0])
+#define PRIV_INFO(io) ((io)->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptrs[1])
+
+static int ctlfeinitialize(void);
+static int ctlfeshutdown(void);
static periph_init_t ctlfeperiphinit;
static void ctlfeasync(void *callback_arg, uint32_t code,
struct cam_path *path, void *arg);
@@ -224,13 +227,15 @@ static struct ctl_frontend ctlfe_frontend =
};
CTL_FRONTEND_DECLARE(ctlfe, ctlfe_frontend);
-void
+static int
ctlfeshutdown(void)
{
- return;
+
+ /* CAM does not support periph driver unregister now. */
+ return (EBUSY);
}
-int
+static int
ctlfeinitialize(void)
{
@@ -240,7 +245,7 @@ ctlfeinitialize(void)
return (0);
}
-void
+static void
ctlfeperiphinit(void)
{
cam_status status;
@@ -554,7 +559,7 @@ ctlferegister(struct cam_periph *periph, void *arg)
status = CAM_RESRC_UNAVAIL;
break;
}
- new_io->io_hdr.ctl_private[CTL_PRIV_FRONTEND2].ptr = cmd_info;
+ PRIV_INFO(new_io) = cmd_info;
softc->atios_alloced++;
new_ccb->ccb_h.io_ptr = new_io;
@@ -702,7 +707,7 @@ ctlfedata(struct ctlfe_lun_softc *softc, union ctl_io *io,
size_t off;
int i, idx;
- cmd_info = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND2].ptr;
+ cmd_info = PRIV_INFO(io);
bus_softc = softc->parent_softc;
/*
@@ -718,15 +723,18 @@ ctlfedata(struct ctlfe_lun_softc *softc, union ctl_io *io,
idx = cmd_info->cur_transfer_index;
off = cmd_info->cur_transfer_off;
cmd_info->flags &= ~CTLFE_CMD_PIECEWISE;
- if (io->scsiio.kern_sg_entries == 0) {
- /* No S/G list. */
+ if (io->scsiio.kern_sg_entries == 0) { /* No S/G list. */
+
+ /* One time shift for SRR offset. */
+ off += io->scsiio.ext_data_filled;
+ io->scsiio.ext_data_filled = 0;
+
*data_ptr = io->scsiio.kern_data_ptr + off;
if (io->scsiio.kern_data_len - off <= bus_softc->maxio) {
*dxfer_len = io->scsiio.kern_data_len - off;
} else {
*dxfer_len = bus_softc->maxio;
- cmd_info->cur_transfer_index = -1;
- cmd_info->cur_transfer_off = bus_softc->maxio;
+ cmd_info->cur_transfer_off += bus_softc->maxio;
cmd_info->flags |= CTLFE_CMD_PIECEWISE;
}
*sglist_cnt = 0;
@@ -735,9 +743,18 @@ ctlfedata(struct ctlfe_lun_softc *softc, union ctl_io *io,
*flags |= CAM_DATA_PADDR;
else
*flags |= CAM_DATA_VADDR;
- } else {
- /* S/G list with physical or virtual pointers. */
+ } else { /* S/G list with physical or virtual pointers. */
ctl_sglist = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
+
+ /* One time shift for SRR offset. */
+ while (io->scsiio.ext_data_filled >= ctl_sglist[idx].len - off) {
+ io->scsiio.ext_data_filled -= ctl_sglist[idx].len - off;
+ idx++;
+ off = 0;
+ }
+ off += io->scsiio.ext_data_filled;
+ io->scsiio.ext_data_filled = 0;
+
cam_sglist = cmd_info->cam_sglist;
*dxfer_len = 0;
for (i = 0; i < io->scsiio.kern_sg_entries - idx; i++) {
@@ -806,7 +823,7 @@ ctlfestart(struct cam_periph *periph, union ccb *start_ccb)
flags = atio->ccb_h.flags &
(CAM_DIS_DISCONNECT|CAM_TAG_ACTION_VALID|CAM_DIR_MASK);
- cmd_info = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND2].ptr;
+ cmd_info = PRIV_INFO(io);
cmd_info->cur_transfer_index = 0;
cmd_info->cur_transfer_off = 0;
cmd_info->flags = 0;
@@ -815,18 +832,8 @@ ctlfestart(struct cam_periph *periph, union ccb *start_ccb)
/*
* Datamove call, we need to setup the S/G list.
*/
- scsi_status = 0;
- csio->cdb_len = atio->cdb_len;
ctlfedata(softc, io, &flags, &data_ptr, &dxfer_len,
&csio->sglist_cnt);
- io->scsiio.ext_data_filled += dxfer_len;
- if (io->scsiio.ext_data_filled > io->scsiio.kern_total_len) {
- xpt_print(periph->path, "%s: tag 0x%04x "
- "fill len %u > total %u\n",
- __func__, io->scsiio.tag_num,
- io->scsiio.ext_data_filled,
- io->scsiio.kern_total_len);
- }
} else {
/*
* We're done, send status back.
@@ -888,8 +895,8 @@ ctlfestart(struct cam_periph *periph, union ccb *start_ccb)
data_ptr = NULL;
dxfer_len = 0;
csio->sglist_cnt = 0;
- scsi_status = 0;
}
+ scsi_status = 0;
if ((io->io_hdr.flags & CTL_FLAG_STATUS_QUEUED) &&
(cmd_info->flags & CTLFE_CMD_PIECEWISE) == 0 &&
((io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) == 0 ||
@@ -938,7 +945,7 @@ ctlfestart(struct cam_periph *periph, union ccb *start_ccb)
&& (csio->sglist_cnt != 0))) {
printf("%s: tag %04x cdb %02x flags %#x dxfer_len "
"%d sg %u\n", __func__, atio->tag_id,
- atio->cdb_io.cdb_bytes[0], flags, dxfer_len,
+ atio_cdb_ptr(atio)[0], flags, dxfer_len,
csio->sglist_cnt);
printf("%s: tag %04x io status %#x\n", __func__,
atio->tag_id, io->io_hdr.status);
@@ -987,7 +994,7 @@ ctlfe_free_ccb(struct cam_periph *periph, union ccb *ccb)
switch (ccb->ccb_h.func_code) {
case XPT_ACCEPT_TARGET_IO:
softc->atios_freed++;
- cmd_info = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND2].ptr;
+ cmd_info = PRIV_INFO(io);
free(cmd_info, M_CTLFE);
break;
case XPT_IMMEDIATE_NOTIFY:
@@ -1024,8 +1031,7 @@ ctlfe_adjust_cdb(struct ccb_accept_tio *atio, uint32_t offset)
{
uint64_t lba;
uint32_t num_blocks, nbc;
- uint8_t *cmdbyt = (atio->ccb_h.flags & CAM_CDB_POINTER)?
- atio->cdb_io.cdb_ptr : atio->cdb_io.cdb_bytes;
+ uint8_t *cmdbyt = atio_cdb_ptr(atio);
nbc = offset >> 9; /* ASSUMING 512 BYTE BLOCKS */
@@ -1154,12 +1160,12 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
*/
mtx_unlock(mtx);
io = done_ccb->ccb_h.io_ptr;
- cmd_info = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND2].ptr;
+ cmd_info = PRIV_INFO(io);
ctl_zero_io(io);
/* Save pointers on both sides */
- io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = done_ccb;
- io->io_hdr.ctl_private[CTL_PRIV_FRONTEND2].ptr = cmd_info;
+ PRIV_CCB(io) = done_ccb;
+ PRIV_INFO(io) = cmd_info;
done_ccb->ccb_h.io_ptr = io;
/*
@@ -1203,8 +1209,7 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
__func__, atio->cdb_len, sizeof(io->scsiio.cdb));
}
io->scsiio.cdb_len = min(atio->cdb_len, sizeof(io->scsiio.cdb));
- bcopy(atio->cdb_io.cdb_bytes, io->scsiio.cdb,
- io->scsiio.cdb_len);
+ bcopy(atio_cdb_ptr(atio), io->scsiio.cdb, io->scsiio.cdb_len);
#ifdef CTLFEDEBUG
printf("%s: %u:%u:%u: tag %04x CDB %02x\n", __func__,
@@ -1245,13 +1250,36 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
| (done_ccb->csio.msg_ptr[6]);
}
+ /*
+ * If we have an SRR and we're still sending data, we
+ * should be able to adjust offsets and cycle again.
+ * It is possible only if offset is from this datamove.
+ */
+ if (srr && (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) &&
+ srr_off >= io->scsiio.kern_rel_offset &&
+ srr_off < io->scsiio.kern_rel_offset +
+ io->scsiio.kern_data_len) {
+ io->scsiio.kern_data_resid =
+ io->scsiio.kern_rel_offset +
+ io->scsiio.kern_data_len - srr_off;
+ io->scsiio.ext_data_filled = srr_off;
+ io->scsiio.io_hdr.status = CTL_STATUS_NONE;
+ io->io_hdr.flags |= CTL_FLAG_DMA_QUEUED;
+ softc->ccbs_freed++;
+ xpt_release_ccb(done_ccb);
+ TAILQ_INSERT_HEAD(&softc->work_queue, &atio->ccb_h,
+ periph_links.tqe);
+ xpt_schedule(periph, /*priority*/ 1);
+ break;
+ }
+
+ /*
+ * If status was being sent, the back end data is now history.
+ * Hack it up and resubmit a new command with the CDB adjusted.
+ * If the SIM does the right thing, all of the resid math
+ * should work.
+ */
if (srr && (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) == 0) {
- /*
- * If status was being sent, the back end data is now
- * history. Hack it up and resubmit a new command with
- * the CDB adjusted. If the SIM does the right thing,
- * all of the resid math should work.
- */
softc->ccbs_freed++;
xpt_release_ccb(done_ccb);
if (ctlfe_adjust_cdb(atio, srr_off) == 0) {
@@ -1261,22 +1289,6 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
/*
* Fall through to doom....
*/
- } else if (srr) {
- /*
- * If we have an srr and we're still sending data, we
- * should be able to adjust offsets and cycle again.
- */
- io->scsiio.kern_rel_offset =
- io->scsiio.ext_data_filled = srr_off;
- io->scsiio.ext_data_len = io->scsiio.kern_total_len -
- io->scsiio.kern_rel_offset;
- softc->ccbs_freed++;
- io->scsiio.io_hdr.status = CTL_STATUS_NONE;
- xpt_release_ccb(done_ccb);
- TAILQ_INSERT_HEAD(&softc->work_queue, &atio->ccb_h,
- periph_links.tqe);
- xpt_schedule(periph, /*priority*/ 1);
- break;
}
if ((done_ccb->ccb_h.flags & CAM_SEND_STATUS) &&
@@ -1315,20 +1327,10 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
struct ccb_scsiio *csio;
csio = &done_ccb->csio;
- cmd_info = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND2].ptr;
+ cmd_info = PRIV_INFO(io);
io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG;
- io->scsiio.ext_data_len += csio->dxfer_len;
- if (io->scsiio.ext_data_len >
- io->scsiio.kern_total_len) {
- xpt_print(periph->path, "%s: tag 0x%04x "
- "done len %u > total %u sent %u\n",
- __func__, io->scsiio.tag_num,
- io->scsiio.ext_data_len,
- io->scsiio.kern_total_len,
- io->scsiio.ext_data_filled);
- }
/*
* Translate CAM status to CTL status. Success
* does not change the overall, ctl_io status. In
@@ -1338,6 +1340,7 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
*/
switch (done_ccb->ccb_h.status & CAM_STATUS_MASK) {
case CAM_REQ_CMP:
+ io->scsiio.kern_data_resid -= csio->dxfer_len;
io->io_hdr.port_status = 0;
break;
default:
@@ -1367,7 +1370,6 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
if ((cmd_info->flags & CTLFE_CMD_PIECEWISE)
&& (io->io_hdr.port_status == 0)) {
ccb_flags flags;
- uint8_t scsi_status;
uint8_t *data_ptr;
uint32_t dxfer_len;
@@ -1378,14 +1380,12 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
ctlfedata(softc, io, &flags, &data_ptr,
&dxfer_len, &csio->sglist_cnt);
- scsi_status = 0;
-
if (((flags & CAM_SEND_STATUS) == 0)
&& (dxfer_len == 0)) {
printf("%s: tag %04x no status or "
"len cdb = %02x\n", __func__,
atio->tag_id,
- atio->cdb_io.cdb_bytes[0]);
+ atio_cdb_ptr(atio)[0]);
printf("%s: tag %04x io status %#x\n",
__func__, atio->tag_id,
io->io_hdr.status);
@@ -1399,7 +1399,7 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
MSG_SIMPLE_Q_TAG : 0,
atio->tag_id,
atio->init_id,
- scsi_status,
+ 0,
/*data_ptr*/ data_ptr,
/*dxfer_len*/ dxfer_len,
/*timeout*/ 5 * 1000);
@@ -1444,7 +1444,7 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
send_ctl_io = 1;
io->io_hdr.io_type = CTL_IO_TASK;
- io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr =done_ccb;
+ PRIV_CCB(io) = done_ccb;
inot->ccb_h.io_ptr = io;
io->io_hdr.nexus.initid = inot->initiator_id;
io->io_hdr.nexus.targ_port = bus_softc->port.targ_port;
@@ -2002,7 +2002,8 @@ ctlfe_datamove(union ctl_io *io)
KASSERT(io->io_hdr.io_type == CTL_IO_SCSI,
("Unexpected io_type (%d) in ctlfe_datamove", io->io_hdr.io_type));
- ccb = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
+ io->scsiio.ext_data_filled = 0;
+ ccb = PRIV_CCB(io);
periph = xpt_path_periph(ccb->ccb_h.path);
cam_periph_lock(periph);
softc = (struct ctlfe_lun_softc *)periph->softc;
@@ -2022,7 +2023,7 @@ ctlfe_done(union ctl_io *io)
struct cam_periph *periph;
struct ctlfe_lun_softc *softc;
- ccb = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
+ ccb = PRIV_CCB(io);
periph = xpt_path_periph(ccb->ccb_h.path);
cam_periph_lock(periph);
softc = (struct ctlfe_lun_softc *)periph->softc;
diff --git a/sys/cam/nvme/nvme_da.c b/sys/cam/nvme/nvme_da.c
index 9628530..13ecf43 100644
--- a/sys/cam/nvme/nvme_da.c
+++ b/sys/cam/nvme/nvme_da.c
@@ -761,7 +761,7 @@ ndaregister(struct cam_periph *periph, void *arg)
MIN(sizeof(softc->disk->d_descr), sizeof(cd->mn)));
strlcpy(softc->disk->d_ident, cd->sn,
MIN(sizeof(softc->disk->d_ident), sizeof(cd->sn)));
- disk->d_rotation_rate = 0; /* Spinning rust need not apply */
+ disk->d_rotation_rate = DISK_RR_NON_ROTATING;
disk->d_open = ndaopen;
disk->d_close = ndaclose;
disk->d_strategy = ndastrategy;
diff --git a/sys/cam/scsi/scsi_all.c b/sys/cam/scsi/scsi_all.c
index 29e5ac6..73e3121 100644
--- a/sys/cam/scsi/scsi_all.c
+++ b/sys/cam/scsi/scsi_all.c
@@ -1371,7 +1371,7 @@ static struct asc_table_entry asc_table[] = {
{ SST(0x0E, 0x02, SS_RDEF, /* XXX TBD */
"Information unit too long") },
/* DT P R MAEBK F */
- { SST(0x0E, 0x03, SS_RDEF, /* XXX TBD */
+ { SST(0x0E, 0x03, SS_FATAL | EINVAL,
"Invalid field in command information unit") },
/* D W O BK */
{ SST(0x10, 0x00, SS_RDEF,
@@ -3617,15 +3617,9 @@ scsi_command_string(struct cam_device *device, struct ccb_scsiio *csio,
#endif /* _KERNEL/!_KERNEL */
- if ((csio->ccb_h.flags & CAM_CDB_POINTER) != 0) {
- sbuf_printf(sb, "%s. CDB: ",
- scsi_op_desc(csio->cdb_io.cdb_ptr[0], inq_data));
- scsi_cdb_sbuf(csio->cdb_io.cdb_ptr, sb);
- } else {
- sbuf_printf(sb, "%s. CDB: ",
- scsi_op_desc(csio->cdb_io.cdb_bytes[0], inq_data));
- scsi_cdb_sbuf(csio->cdb_io.cdb_bytes, sb);
- }
+ sbuf_printf(sb, "%s. CDB: ",
+ scsi_op_desc(scsiio_cdb_ptr(csio)[0], inq_data));
+ scsi_cdb_sbuf(scsiio_cdb_ptr(csio), sb);
#ifdef _KERNEL
xpt_free_ccb((union ccb *)cgd);
@@ -5030,7 +5024,6 @@ scsi_sense_sbuf(struct cam_device *device, struct ccb_scsiio *csio,
struct ccb_getdev *cgd;
#endif /* _KERNEL */
char path_str[64];
- uint8_t *cdb;
#ifndef _KERNEL
if (device == NULL)
@@ -5128,14 +5121,9 @@ scsi_sense_sbuf(struct cam_device *device, struct ccb_scsiio *csio,
sense = &csio->sense_data;
}
- if (csio->ccb_h.flags & CAM_CDB_POINTER)
- cdb = csio->cdb_io.cdb_ptr;
- else
- cdb = csio->cdb_io.cdb_bytes;
-
scsi_sense_only_sbuf(sense, csio->sense_len - csio->sense_resid, sb,
- path_str, inq_data, cdb, csio->cdb_len);
-
+ path_str, inq_data, scsiio_cdb_ptr(csio), csio->cdb_len);
+
#ifdef _KERNEL
xpt_free_ccb((union ccb*)cgd);
#endif /* _KERNEL/!_KERNEL */
@@ -7622,24 +7610,34 @@ scsi_inquiry(struct ccb_scsiio *csio, u_int32_t retries,
}
void
-scsi_mode_sense(struct ccb_scsiio *csio, u_int32_t retries,
- void (*cbfcnp)(struct cam_periph *, union ccb *),
- u_int8_t tag_action, int dbd, u_int8_t page_code,
- u_int8_t page, u_int8_t *param_buf, u_int32_t param_len,
- u_int8_t sense_len, u_int32_t timeout)
+scsi_mode_sense(struct ccb_scsiio *csio, uint32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action,
+ int dbd, uint8_t pc, uint8_t page, uint8_t *param_buf, uint32_t param_len,
+ uint8_t sense_len, uint32_t timeout)
{
- scsi_mode_sense_len(csio, retries, cbfcnp, tag_action, dbd,
- page_code, page, param_buf, param_len, 0,
- sense_len, timeout);
+ scsi_mode_sense_subpage(csio, retries, cbfcnp, tag_action, dbd,
+ pc, page, 0, param_buf, param_len, 0, sense_len, timeout);
}
void
-scsi_mode_sense_len(struct ccb_scsiio *csio, u_int32_t retries,
- void (*cbfcnp)(struct cam_periph *, union ccb *),
- u_int8_t tag_action, int dbd, u_int8_t page_code,
- u_int8_t page, u_int8_t *param_buf, u_int32_t param_len,
- int minimum_cmd_size, u_int8_t sense_len, u_int32_t timeout)
+scsi_mode_sense_len(struct ccb_scsiio *csio, uint32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action,
+ int dbd, uint8_t pc, uint8_t page, uint8_t *param_buf, uint32_t param_len,
+ int minimum_cmd_size, uint8_t sense_len, uint32_t timeout)
+{
+
+ scsi_mode_sense_subpage(csio, retries, cbfcnp, tag_action, dbd,
+ pc, page, 0, param_buf, param_len, minimum_cmd_size,
+ sense_len, timeout);
+}
+
+void
+scsi_mode_sense_subpage(struct ccb_scsiio *csio, uint32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action,
+ int dbd, uint8_t pc, uint8_t page, uint8_t subpage, uint8_t *param_buf,
+ uint32_t param_len, int minimum_cmd_size, uint8_t sense_len,
+ uint32_t timeout)
{
u_int8_t cdb_len;
@@ -7658,7 +7656,8 @@ scsi_mode_sense_len(struct ccb_scsiio *csio, u_int32_t retries,
scsi_cmd->opcode = MODE_SENSE_6;
if (dbd != 0)
scsi_cmd->byte2 |= SMS_DBD;
- scsi_cmd->page = page_code | page;
+ scsi_cmd->page = pc | page;
+ scsi_cmd->subpage = subpage;
scsi_cmd->length = param_len;
cdb_len = sizeof(*scsi_cmd);
} else {
@@ -7672,7 +7671,8 @@ scsi_mode_sense_len(struct ccb_scsiio *csio, u_int32_t retries,
scsi_cmd->opcode = MODE_SENSE_10;
if (dbd != 0)
scsi_cmd->byte2 |= SMS_DBD;
- scsi_cmd->page = page_code | page;
+ scsi_cmd->page = pc | page;
+ scsi_cmd->subpage = subpage;
scsi_ulto2b(param_len, scsi_cmd->length);
cdb_len = sizeof(*scsi_cmd);
}
diff --git a/sys/cam/scsi/scsi_all.h b/sys/cam/scsi/scsi_all.h
index 365b9dd..64c45fb 100644
--- a/sys/cam/scsi/scsi_all.h
+++ b/sys/cam/scsi/scsi_all.h
@@ -228,6 +228,7 @@ struct scsi_mode_select_6
u_int8_t opcode;
u_int8_t byte2;
#define SMS_SP 0x01
+#define SMS_RTD 0x02
#define SMS_PF 0x10
u_int8_t unused[2];
u_int8_t length;
@@ -3976,21 +3977,24 @@ void scsi_inquiry(struct ccb_scsiio *csio, u_int32_t retries,
u_int8_t sense_len, u_int32_t timeout);
void scsi_mode_sense(struct ccb_scsiio *csio, u_int32_t retries,
- void (*cbfcnp)(struct cam_periph *,
- union ccb *),
- u_int8_t tag_action, int dbd,
- u_int8_t page_code, u_int8_t page,
- u_int8_t *param_buf, u_int32_t param_len,
- u_int8_t sense_len, u_int32_t timeout);
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ uint8_t tag_action, int dbd, uint8_t pc, uint8_t page,
+ uint8_t *param_buf, uint32_t param_len,
+ uint8_t sense_len, uint32_t timeout);
void scsi_mode_sense_len(struct ccb_scsiio *csio, u_int32_t retries,
- void (*cbfcnp)(struct cam_periph *,
- union ccb *),
- u_int8_t tag_action, int dbd,
- u_int8_t page_code, u_int8_t page,
- u_int8_t *param_buf, u_int32_t param_len,
- int minimum_cmd_size, u_int8_t sense_len,
- u_int32_t timeout);
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ uint8_t tag_action, int dbd, uint8_t pc, uint8_t page,
+ uint8_t *param_buf, uint32_t param_len,
+ int minimum_cmd_size, uint8_t sense_len, uint32_t timeout);
+
+void scsi_mode_sense_subpage(struct ccb_scsiio *csio,
+ uint32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ uint8_t tag_action, int dbd, uint8_t pc,
+ uint8_t page, uint8_t subpage,
+ uint8_t *param_buf, uint32_t param_len,
+ int minimum_cmd_size, uint8_t sense_len, uint32_t timeout);
void scsi_mode_select(struct ccb_scsiio *csio, u_int32_t retries,
void (*cbfcnp)(struct cam_periph *,
diff --git a/sys/cam/scsi/scsi_ch.c b/sys/cam/scsi/scsi_ch.c
index 59ec6aa..d3be675 100644
--- a/sys/cam/scsi/scsi_ch.c
+++ b/sys/cam/scsi/scsi_ch.c
@@ -586,7 +586,7 @@ chstart(struct cam_periph *periph, union ccb *start_ccb)
/* tag_action */ MSG_SIMPLE_Q_TAG,
/* dbd */ (softc->quirks & CH_Q_NO_DBD) ?
FALSE : TRUE,
- /* page_code */ SMS_PAGE_CTRL_CURRENT,
+ /* pc */ SMS_PAGE_CTRL_CURRENT,
/* page */ CH_ELEMENT_ADDR_ASSIGN_PAGE,
/* param_buf */ (u_int8_t *)mode_buffer,
/* param_len */ mode_buffer_len,
@@ -1587,7 +1587,7 @@ chgetparams(struct cam_periph *periph)
/* cbfcnp */ chdone,
/* tag_action */ MSG_SIMPLE_Q_TAG,
/* dbd */ dbd,
- /* page_code */ SMS_PAGE_CTRL_CURRENT,
+ /* pc */ SMS_PAGE_CTRL_CURRENT,
/* page */ CH_ELEMENT_ADDR_ASSIGN_PAGE,
/* param_buf */ (u_int8_t *)mode_buffer,
/* param_len */ mode_buffer_len,
@@ -1650,7 +1650,7 @@ chgetparams(struct cam_periph *periph)
/* cbfcnp */ chdone,
/* tag_action */ MSG_SIMPLE_Q_TAG,
/* dbd */ dbd,
- /* page_code */ SMS_PAGE_CTRL_CURRENT,
+ /* pc */ SMS_PAGE_CTRL_CURRENT,
/* page */ CH_DEVICE_CAP_PAGE,
/* param_buf */ (u_int8_t *)mode_buffer,
/* param_len */ mode_buffer_len,
diff --git a/sys/cddl/contrib/opensolaris/uts/intel/dtrace/fasttrap_isa.c b/sys/cddl/contrib/opensolaris/uts/intel/dtrace/fasttrap_isa.c
index 4f2619d..4797816 100644
--- a/sys/cddl/contrib/opensolaris/uts/intel/dtrace/fasttrap_isa.c
+++ b/sys/cddl/contrib/opensolaris/uts/intel/dtrace/fasttrap_isa.c
@@ -1627,7 +1627,7 @@ fasttrap_pid_probe(struct reg *rp)
* a signal we can reset the value of the scratch register.
*/
- ASSERT(tp->ftt_size < FASTTRAP_MAX_INSTR_SIZE);
+ ASSERT(tp->ftt_size <= FASTTRAP_MAX_INSTR_SIZE);
curthread->t_dtrace_scrpc = addr;
bcopy(tp->ftt_instr, &scratch[i], tp->ftt_size);
diff --git a/sys/compat/freebsd32/freebsd32_misc.c b/sys/compat/freebsd32/freebsd32_misc.c
index 838d1aa..b491a85 100644
--- a/sys/compat/freebsd32/freebsd32_misc.c
+++ b/sys/compat/freebsd32/freebsd32_misc.c
@@ -244,7 +244,8 @@ copy_statfs(struct statfs *in, struct statfs32 *out)
#ifdef COMPAT_FREEBSD4
int
-freebsd4_freebsd32_getfsstat(struct thread *td, struct freebsd4_freebsd32_getfsstat_args *uap)
+freebsd4_freebsd32_getfsstat(struct thread *td,
+ struct freebsd4_freebsd32_getfsstat_args *uap)
{
struct statfs *buf, *sp;
struct statfs32 stat32;
@@ -253,7 +254,7 @@ freebsd4_freebsd32_getfsstat(struct thread *td, struct freebsd4_freebsd32_getfss
count = uap->bufsize / sizeof(struct statfs32);
size = count * sizeof(struct statfs);
- error = kern_getfsstat(td, &buf, size, &count, UIO_SYSSPACE, uap->flags);
+ error = kern_getfsstat(td, &buf, size, &count, UIO_SYSSPACE, uap->mode);
if (size > 0) {
sp = buf;
copycount = count;
@@ -264,7 +265,7 @@ freebsd4_freebsd32_getfsstat(struct thread *td, struct freebsd4_freebsd32_getfss
uap->buf++;
copycount--;
}
- free(buf, M_TEMP);
+ free(buf, M_STATFS);
}
if (error == 0)
td->td_retval[0] = count;
@@ -1393,14 +1394,17 @@ int
freebsd4_freebsd32_statfs(struct thread *td, struct freebsd4_freebsd32_statfs_args *uap)
{
struct statfs32 s32;
- struct statfs s;
+ struct statfs *sp;
int error;
- error = kern_statfs(td, uap->path, UIO_USERSPACE, &s);
- if (error)
- return (error);
- copy_statfs(&s, &s32);
- return (copyout(&s32, uap->buf, sizeof(s32)));
+ sp = malloc(sizeof(struct statfs), M_STATFS, M_WAITOK);
+ error = kern_statfs(td, uap->path, UIO_USERSPACE, sp);
+ if (error == 0) {
+ copy_statfs(sp, &s32);
+ error = copyout(&s32, uap->buf, sizeof(s32));
+ }
+ free(sp, M_STATFS);
+ return (error);
}
#endif
@@ -1409,14 +1413,17 @@ int
freebsd4_freebsd32_fstatfs(struct thread *td, struct freebsd4_freebsd32_fstatfs_args *uap)
{
struct statfs32 s32;
- struct statfs s;
+ struct statfs *sp;
int error;
- error = kern_fstatfs(td, uap->fd, &s);
- if (error)
- return (error);
- copy_statfs(&s, &s32);
- return (copyout(&s32, uap->buf, sizeof(s32)));
+ sp = malloc(sizeof(struct statfs), M_STATFS, M_WAITOK);
+ error = kern_fstatfs(td, uap->fd, sp);
+ if (error == 0) {
+ copy_statfs(sp, &s32);
+ error = copyout(&s32, uap->buf, sizeof(s32));
+ }
+ free(sp, M_STATFS);
+ return (error);
}
#endif
@@ -1425,17 +1432,20 @@ int
freebsd4_freebsd32_fhstatfs(struct thread *td, struct freebsd4_freebsd32_fhstatfs_args *uap)
{
struct statfs32 s32;
- struct statfs s;
+ struct statfs *sp;
fhandle_t fh;
int error;
if ((error = copyin(uap->u_fhp, &fh, sizeof(fhandle_t))) != 0)
return (error);
- error = kern_fhstatfs(td, fh, &s);
- if (error)
- return (error);
- copy_statfs(&s, &s32);
- return (copyout(&s32, uap->buf, sizeof(s32)));
+ sp = malloc(sizeof(struct statfs), M_STATFS, M_WAITOK);
+ error = kern_fhstatfs(td, fh, sp);
+ if (error == 0) {
+ copy_statfs(sp, &s32);
+ error = copyout(&s32, uap->buf, sizeof(s32));
+ }
+ free(sp, M_STATFS);
+ return (error);
}
#endif
diff --git a/sys/compat/freebsd32/freebsd32_proto.h b/sys/compat/freebsd32/freebsd32_proto.h
index 5bc7a22..2ef4bbe 100644
--- a/sys/compat/freebsd32/freebsd32_proto.h
+++ b/sys/compat/freebsd32/freebsd32_proto.h
@@ -3,7 +3,7 @@
*
* DO NOT EDIT-- this file is automatically generated.
* $FreeBSD$
- * created from FreeBSD: stable/11/sys/compat/freebsd32/syscalls.master 306586 2016-10-02 16:13:18Z kib
+ * created from FreeBSD: stable/11/sys/compat/freebsd32/syscalls.master 313450 2017-02-08 18:32:35Z jhb
*/
#ifndef _FREEBSD32_SYSPROTO_H_
@@ -917,7 +917,7 @@ int ofreebsd32_getdirentries(struct thread *, struct ofreebsd32_getdirentries_ar
struct freebsd4_freebsd32_getfsstat_args {
char buf_l_[PADL_(struct statfs32 *)]; struct statfs32 * buf; char buf_r_[PADR_(struct statfs32 *)];
char bufsize_l_[PADL_(long)]; long bufsize; char bufsize_r_[PADR_(long)];
- char flags_l_[PADL_(int)]; int flags; char flags_r_[PADR_(int)];
+ char mode_l_[PADL_(int)]; int mode; char mode_r_[PADR_(int)];
};
struct freebsd4_freebsd32_statfs_args {
char path_l_[PADL_(char *)]; char * path; char path_r_[PADR_(char *)];
diff --git a/sys/compat/freebsd32/freebsd32_syscall.h b/sys/compat/freebsd32/freebsd32_syscall.h
index d367534..a80e1e4 100644
--- a/sys/compat/freebsd32/freebsd32_syscall.h
+++ b/sys/compat/freebsd32/freebsd32_syscall.h
@@ -3,7 +3,7 @@
*
* DO NOT EDIT-- this file is automatically generated.
* $FreeBSD$
- * created from FreeBSD: stable/11/sys/compat/freebsd32/syscalls.master 306586 2016-10-02 16:13:18Z kib
+ * created from FreeBSD: stable/11/sys/compat/freebsd32/syscalls.master 313450 2017-02-08 18:32:35Z jhb
*/
#define FREEBSD32_SYS_syscall 0
diff --git a/sys/compat/freebsd32/freebsd32_syscalls.c b/sys/compat/freebsd32/freebsd32_syscalls.c
index 3175f2b..6b3b4a0 100644
--- a/sys/compat/freebsd32/freebsd32_syscalls.c
+++ b/sys/compat/freebsd32/freebsd32_syscalls.c
@@ -3,7 +3,7 @@
*
* DO NOT EDIT-- this file is automatically generated.
* $FreeBSD$
- * created from FreeBSD: stable/11/sys/compat/freebsd32/syscalls.master 306586 2016-10-02 16:13:18Z kib
+ * created from FreeBSD: stable/11/sys/compat/freebsd32/syscalls.master 313450 2017-02-08 18:32:35Z jhb
*/
const char *freebsd32_syscallnames[] = {
diff --git a/sys/compat/freebsd32/freebsd32_sysent.c b/sys/compat/freebsd32/freebsd32_sysent.c
index ff7445c..9a5547f 100644
--- a/sys/compat/freebsd32/freebsd32_sysent.c
+++ b/sys/compat/freebsd32/freebsd32_sysent.c
@@ -3,7 +3,7 @@
*
* DO NOT EDIT-- this file is automatically generated.
* $FreeBSD$
- * created from FreeBSD: stable/11/sys/compat/freebsd32/syscalls.master 306586 2016-10-02 16:13:18Z kib
+ * created from FreeBSD: stable/11/sys/compat/freebsd32/syscalls.master 313450 2017-02-08 18:32:35Z jhb
*/
#include "opt_compat.h"
diff --git a/sys/compat/freebsd32/freebsd32_systrace_args.c b/sys/compat/freebsd32/freebsd32_systrace_args.c
index cc75370..ed4722a 100644
--- a/sys/compat/freebsd32/freebsd32_systrace_args.c
+++ b/sys/compat/freebsd32/freebsd32_systrace_args.c
@@ -1969,7 +1969,7 @@ systrace_args(int sysnum, void *params, uint64_t *uarg, int *n_args)
struct getfsstat_args *p = params;
uarg[0] = (intptr_t) p->buf; /* struct statfs * */
iarg[1] = p->bufsize; /* long */
- iarg[2] = p->flags; /* int */
+ iarg[2] = p->mode; /* int */
*n_args = 3;
break;
}
diff --git a/sys/compat/freebsd32/syscalls.master b/sys/compat/freebsd32/syscalls.master
index 5ba9793..276d79c 100644
--- a/sys/compat/freebsd32/syscalls.master
+++ b/sys/compat/freebsd32/syscalls.master
@@ -89,7 +89,7 @@
obreak_args int
18 AUE_GETFSSTAT COMPAT4 { int freebsd32_getfsstat( \
struct statfs32 *buf, long bufsize, \
- int flags); }
+ int mode); }
19 AUE_LSEEK COMPAT { int freebsd32_lseek(int fd, int offset, \
int whence); }
20 AUE_GETPID NOPROTO { pid_t getpid(void); }
@@ -712,7 +712,7 @@
off_t *sbytes, int flags); }
394 AUE_NULL UNIMPL mac_syscall
395 AUE_GETFSSTAT NOPROTO { int getfsstat(struct statfs *buf, \
- long bufsize, int flags); }
+ long bufsize, int mode); }
396 AUE_STATFS NOPROTO { int statfs(char *path, \
struct statfs *buf); }
397 AUE_FSTATFS NOPROTO { int fstatfs(int fd, struct statfs *buf); }
diff --git a/sys/compat/linux/linux_stats.c b/sys/compat/linux/linux_stats.c
index 3638c6b..f74fa803 100644
--- a/sys/compat/linux/linux_stats.c
+++ b/sys/compat/linux/linux_stats.c
@@ -415,7 +415,7 @@ int
linux_statfs(struct thread *td, struct linux_statfs_args *args)
{
struct l_statfs linux_statfs;
- struct statfs bsd_statfs;
+ struct statfs *bsd_statfs;
char *path;
int error;
@@ -425,12 +425,13 @@ linux_statfs(struct thread *td, struct linux_statfs_args *args)
if (ldebug(statfs))
printf(ARGS(statfs, "%s, *"), path);
#endif
- error = kern_statfs(td, path, UIO_SYSSPACE, &bsd_statfs);
+ bsd_statfs = malloc(sizeof(struct statfs), M_STATFS, M_WAITOK);
+ error = kern_statfs(td, path, UIO_SYSSPACE, bsd_statfs);
LFREEPATH(path);
- if (error)
- return (error);
- error = bsd_to_linux_statfs(&bsd_statfs, &linux_statfs);
- if (error)
+ if (error == 0)
+ error = bsd_to_linux_statfs(bsd_statfs, &linux_statfs);
+ free(bsd_statfs, M_STATFS);
+ if (error != 0)
return (error);
return (copyout(&linux_statfs, args->buf, sizeof(linux_statfs)));
}
@@ -456,7 +457,7 @@ int
linux_statfs64(struct thread *td, struct linux_statfs64_args *args)
{
struct l_statfs64 linux_statfs;
- struct statfs bsd_statfs;
+ struct statfs *bsd_statfs;
char *path;
int error;
@@ -469,11 +470,14 @@ linux_statfs64(struct thread *td, struct linux_statfs64_args *args)
if (ldebug(statfs64))
printf(ARGS(statfs64, "%s, *"), path);
#endif
- error = kern_statfs(td, path, UIO_SYSSPACE, &bsd_statfs);
+ bsd_statfs = malloc(sizeof(struct statfs), M_STATFS, M_WAITOK);
+ error = kern_statfs(td, path, UIO_SYSSPACE, bsd_statfs);
LFREEPATH(path);
- if (error)
+ if (error == 0)
+ bsd_to_linux_statfs64(bsd_statfs, &linux_statfs);
+ free(bsd_statfs, M_STATFS);
+ if (error != 0)
return (error);
- bsd_to_linux_statfs64(&bsd_statfs, &linux_statfs);
return (copyout(&linux_statfs, args->buf, sizeof(linux_statfs)));
}
@@ -481,7 +485,7 @@ int
linux_fstatfs64(struct thread *td, struct linux_fstatfs64_args *args)
{
struct l_statfs64 linux_statfs;
- struct statfs bsd_statfs;
+ struct statfs *bsd_statfs;
int error;
#ifdef DEBUG
@@ -491,10 +495,13 @@ linux_fstatfs64(struct thread *td, struct linux_fstatfs64_args *args)
if (args->bufsize != sizeof(struct l_statfs64))
return (EINVAL);
- error = kern_fstatfs(td, args->fd, &bsd_statfs);
- if (error)
- return error;
- bsd_to_linux_statfs64(&bsd_statfs, &linux_statfs);
+ bsd_statfs = malloc(sizeof(struct statfs), M_STATFS, M_WAITOK);
+ error = kern_fstatfs(td, args->fd, bsd_statfs);
+ if (error == 0)
+ bsd_to_linux_statfs64(bsd_statfs, &linux_statfs);
+ free(bsd_statfs, M_STATFS);
+ if (error != 0)
+ return (error);
return (copyout(&linux_statfs, args->buf, sizeof(linux_statfs)));
}
#endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */
@@ -503,18 +510,19 @@ int
linux_fstatfs(struct thread *td, struct linux_fstatfs_args *args)
{
struct l_statfs linux_statfs;
- struct statfs bsd_statfs;
+ struct statfs *bsd_statfs;
int error;
#ifdef DEBUG
if (ldebug(fstatfs))
printf(ARGS(fstatfs, "%d, *"), args->fd);
#endif
- error = kern_fstatfs(td, args->fd, &bsd_statfs);
- if (error)
- return (error);
- error = bsd_to_linux_statfs(&bsd_statfs, &linux_statfs);
- if (error)
+ bsd_statfs = malloc(sizeof(struct statfs), M_STATFS, M_WAITOK);
+ error = kern_fstatfs(td, args->fd, bsd_statfs);
+ if (error == 0)
+ error = bsd_to_linux_statfs(bsd_statfs, &linux_statfs);
+ free(bsd_statfs, M_STATFS);
+ if (error != 0)
return (error);
return (copyout(&linux_statfs, args->buf, sizeof(linux_statfs)));
}
diff --git a/sys/compat/linuxkpi/common/include/linux/cdev.h b/sys/compat/linuxkpi/common/include/linux/cdev.h
index 46bf8e6..856307c 100644
--- a/sys/compat/linuxkpi/common/include/linux/cdev.h
+++ b/sys/compat/linuxkpi/common/include/linux/cdev.h
@@ -95,7 +95,7 @@ cdev_add(struct linux_cdev *cdev, dev_t dev, unsigned count)
args.mda_gid = 0;
args.mda_mode = 0700;
args.mda_si_drv1 = cdev;
- args.mda_unit = MINOR(dev);
+ args.mda_unit = dev;
error = make_dev_s(&args, &cdev->cdev, "%s",
kobject_name(&cdev->kobj));
@@ -121,7 +121,7 @@ cdev_add_ext(struct linux_cdev *cdev, dev_t dev, uid_t uid, gid_t gid, int mode)
args.mda_gid = gid;
args.mda_mode = mode;
args.mda_si_drv1 = cdev;
- args.mda_unit = MINOR(dev);
+ args.mda_unit = dev;
error = make_dev_s(&args, &cdev->cdev, "%s/%d",
kobject_name(&cdev->kobj), MINOR(dev));
@@ -142,6 +142,8 @@ cdev_del(struct linux_cdev *cdev)
kobject_put(&cdev->kobj);
}
+struct linux_cdev *linux_find_cdev(const char *name, unsigned major, unsigned minor);
+
#define cdev linux_cdev
#endif /* _LINUX_CDEV_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/device.h b/sys/compat/linuxkpi/common/include/linux/device.h
index 3bb1e03..19e9c6e 100644
--- a/sys/compat/linuxkpi/common/include/linux/device.h
+++ b/sys/compat/linuxkpi/common/include/linux/device.h
@@ -63,6 +63,14 @@ struct device {
struct device *parent;
struct list_head irqents;
device_t bsddev;
+ /*
+ * The following flag is used to determine if the LinuxKPI is
+ * responsible for detaching the BSD device or not. If the
+ * LinuxKPI got the BSD device using devclass_get_device(), it
+ * must not try to detach or delete it, because it's already
+ * done somewhere else.
+ */
+ bool bsddev_attached_here;
dev_t devt;
struct class *class;
void (*release)(struct device *dev);
@@ -208,23 +216,36 @@ static inline struct device *kobj_to_dev(struct kobject *kobj)
static inline void
device_initialize(struct device *dev)
{
- device_t bsddev;
+ device_t bsddev = NULL;
+ int unit = -1;
- bsddev = NULL;
if (dev->devt) {
- int unit = MINOR(dev->devt);
+ unit = MINOR(dev->devt);
bsddev = devclass_get_device(dev->class->bsdclass, unit);
+ dev->bsddev_attached_here = false;
+ } else if (dev->parent == NULL) {
+ bsddev = devclass_get_device(dev->class->bsdclass, 0);
+ dev->bsddev_attached_here = false;
+ } else {
+ dev->bsddev_attached_here = true;
+ }
+
+ if (bsddev == NULL && dev->parent != NULL) {
+ bsddev = device_add_child(dev->parent->bsddev,
+ dev->class->kobj.name, unit);
}
+
if (bsddev != NULL)
device_set_softc(bsddev, dev);
dev->bsddev = bsddev;
+ MPASS(dev->bsddev != NULL);
kobject_init(&dev->kobj, &linux_dev_ktype);
}
static inline int
device_add(struct device *dev)
-{
+{
if (dev->bsddev != NULL) {
if (dev->devt == 0)
dev->devt = makedev(0, device_get_unit(dev->bsddev));
@@ -256,13 +277,13 @@ device_create_groups_vargs(struct class *class, struct device *parent,
goto error;
}
- device_initialize(dev);
dev->devt = devt;
dev->class = class;
dev->parent = parent;
dev->groups = groups;
dev->release = device_create_release;
- dev->bsddev = devclass_get_device(dev->class->bsdclass, MINOR(devt));
+ /* device_initialize() needs the class and parent to be set */
+ device_initialize(dev);
dev_set_drvdata(dev, drvdata);
retval = kobject_set_name_vargs(&dev->kobj, fmt, args);
@@ -298,17 +319,21 @@ device_create_with_groups(struct class *class,
static inline int
device_register(struct device *dev)
{
- device_t bsddev;
- int unit;
+ device_t bsddev = NULL;
+ int unit = -1;
- bsddev = NULL;
- unit = -1;
+ if (dev->bsddev != NULL)
+ goto done;
if (dev->devt) {
unit = MINOR(dev->devt);
bsddev = devclass_get_device(dev->class->bsdclass, unit);
+ dev->bsddev_attached_here = false;
} else if (dev->parent == NULL) {
bsddev = devclass_get_device(dev->class->bsdclass, 0);
+ dev->bsddev_attached_here = false;
+ } else {
+ dev->bsddev_attached_here = true;
}
if (bsddev == NULL && dev->parent != NULL) {
bsddev = device_add_child(dev->parent->bsddev,
@@ -320,6 +345,7 @@ device_register(struct device *dev)
device_set_softc(bsddev, dev);
}
dev->bsddev = bsddev;
+done:
kobject_init(&dev->kobj, &linux_dev_ktype);
kobject_add(&dev->kobj, &dev->class->kobj, dev_name(dev));
@@ -334,7 +360,7 @@ device_unregister(struct device *dev)
bsddev = dev->bsddev;
dev->bsddev = NULL;
- if (bsddev != NULL) {
+ if (bsddev != NULL && dev->bsddev_attached_here) {
mtx_lock(&Giant);
device_delete_child(device_get_parent(bsddev), bsddev);
mtx_unlock(&Giant);
@@ -350,7 +376,7 @@ device_del(struct device *dev)
bsddev = dev->bsddev;
dev->bsddev = NULL;
- if (bsddev != NULL) {
+ if (bsddev != NULL && dev->bsddev_attached_here) {
mtx_lock(&Giant);
device_delete_child(device_get_parent(bsddev), bsddev);
mtx_unlock(&Giant);
diff --git a/sys/compat/linuxkpi/common/include/linux/fs.h b/sys/compat/linuxkpi/common/include/linux/fs.h
index 797b816..fd59fcf 100644
--- a/sys/compat/linuxkpi/common/include/linux/fs.h
+++ b/sys/compat/linuxkpi/common/include/linux/fs.h
@@ -2,7 +2,7 @@
* Copyright (c) 2010 Isilon Systems, Inc.
* Copyright (c) 2010 iX Systems, Inc.
* Copyright (c) 2010 Panasas, Inc.
- * Copyright (c) 2013 Mellanox Technologies, Ltd.
+ * Copyright (c) 2013-2016 Mellanox Technologies, Ltd.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -151,6 +151,39 @@ struct file_operations {
#define FMODE_WRITE FWRITE
#define FMODE_EXEC FEXEC
+int __register_chrdev(unsigned int major, unsigned int baseminor,
+ unsigned int count, const char *name,
+ const struct file_operations *fops);
+int __register_chrdev_p(unsigned int major, unsigned int baseminor,
+ unsigned int count, const char *name,
+ const struct file_operations *fops, uid_t uid,
+ gid_t gid, int mode);
+void __unregister_chrdev(unsigned int major, unsigned int baseminor,
+ unsigned int count, const char *name);
+
+static inline void
+unregister_chrdev(unsigned int major, const char *name)
+{
+
+ __unregister_chrdev(major, 0, 256, name);
+}
+
+static inline int
+register_chrdev(unsigned int major, const char *name,
+ const struct file_operations *fops)
+{
+
+ return (__register_chrdev(major, 0, 256, name, fops));
+}
+
+static inline int
+register_chrdev_p(unsigned int major, const char *name,
+ const struct file_operations *fops, uid_t uid, gid_t gid, int mode)
+{
+
+ return (__register_chrdev_p(major, 0, 256, name, fops, uid, gid, mode));
+}
+
static inline int
register_chrdev_region(dev_t dev, unsigned range, const char *name)
{
@@ -184,7 +217,7 @@ static inline dev_t
iminor(struct inode *inode)
{
- return dev2unit(inode->v_rdev);
+ return (minor(dev2unit(inode->v_rdev)));
}
static inline struct inode *
diff --git a/sys/compat/linuxkpi/common/include/linux/gfp.h b/sys/compat/linuxkpi/common/include/linux/gfp.h
index e1f14b0..1c73022 100644
--- a/sys/compat/linuxkpi/common/include/linux/gfp.h
+++ b/sys/compat/linuxkpi/common/include/linux/gfp.h
@@ -136,8 +136,8 @@ alloc_pages(gfp_t gfp_mask, unsigned int order)
size_t size;
size = PAGE_SIZE << order;
- page = kmem_alloc_contig(kmem_arena, size, gfp_mask, 0, -1,
- size, 0, VM_MEMATTR_DEFAULT);
+ page = kmem_alloc_contig(kmem_arena, size, gfp_mask,
+ 0, ~(vm_paddr_t)0, size, 0, VM_MEMATTR_DEFAULT);
if (page == 0)
return (NULL);
return (virt_to_page(page));
diff --git a/sys/compat/linuxkpi/common/include/linux/list.h b/sys/compat/linuxkpi/common/include/linux/list.h
index fff5243..d73cbcb 100644
--- a/sys/compat/linuxkpi/common/include/linux/list.h
+++ b/sys/compat/linuxkpi/common/include/linux/list.h
@@ -72,10 +72,18 @@
#define prefetch(x)
+#define LINUX_LIST_HEAD_INIT(name) { &(name), &(name) }
+
+#define LINUX_LIST_HEAD(name) \
+ struct list_head name = LINUX_LIST_HEAD_INIT(name)
+
+#ifndef LIST_HEAD_DEF
+#define LIST_HEAD_DEF
struct list_head {
struct list_head *next;
struct list_head *prev;
};
+#endif
static inline void
INIT_LIST_HEAD(struct list_head *list)
@@ -91,12 +99,26 @@ list_empty(const struct list_head *head)
return (head->next == head);
}
+static inline int
+list_empty_careful(const struct list_head *head)
+{
+ struct list_head *next = head->next;
+
+ return ((next == head) && (next == head->prev));
+}
+
+static inline void
+__list_del(struct list_head *prev, struct list_head *next)
+{
+ next->prev = prev;
+ WRITE_ONCE(prev->next, next);
+}
+
static inline void
list_del(struct list_head *entry)
{
- entry->next->prev = entry->prev;
- entry->prev->next = entry->next;
+ __list_del(entry->prev, entry->next);
}
static inline void
@@ -183,6 +205,11 @@ list_del_init(struct list_head *entry)
for (p = list_entry((h)->prev, typeof(*p), field); &(p)->field != (h); \
p = list_entry((p)->field.prev, typeof(*p), field))
+#define list_for_each_entry_safe_reverse(p, n, h, field) \
+ for (p = list_entry((h)->prev, typeof(*p), field), \
+ n = list_entry((p)->field.prev, typeof(*p), field); &(p)->field != (h); \
+ p = n, n = list_entry(n->field.prev, typeof(*n), field))
+
#define list_for_each_entry_continue_reverse(p, h, field) \
for (p = list_entry((p)->field.prev, typeof(*p), field); &(p)->field != (h); \
p = list_entry((p)->field.prev, typeof(*p), field))
diff --git a/sys/compat/linuxkpi/common/src/linux_compat.c b/sys/compat/linuxkpi/common/src/linux_compat.c
index d1011f6..54bd33b 100644
--- a/sys/compat/linuxkpi/common/src/linux_compat.c
+++ b/sys/compat/linuxkpi/common/src/linux_compat.c
@@ -1418,6 +1418,82 @@ linux_irq_handler(void *ent)
irqe->handler(irqe->irq, irqe->arg);
}
+struct linux_cdev *
+linux_find_cdev(const char *name, unsigned major, unsigned minor)
+{
+ int unit = MKDEV(major, minor);
+ struct cdev *cdev;
+
+ dev_lock();
+ LIST_FOREACH(cdev, &linuxcdevsw.d_devs, si_list) {
+ struct linux_cdev *ldev = cdev->si_drv1;
+ if (dev2unit(cdev) == unit &&
+ strcmp(kobject_name(&ldev->kobj), name) == 0) {
+ break;
+ }
+ }
+ dev_unlock();
+
+ return (cdev != NULL ? cdev->si_drv1 : NULL);
+}
+
+int
+__register_chrdev(unsigned int major, unsigned int baseminor,
+ unsigned int count, const char *name,
+ const struct file_operations *fops)
+{
+ struct linux_cdev *cdev;
+ int ret = 0;
+ int i;
+
+ for (i = baseminor; i < baseminor + count; i++) {
+ cdev = cdev_alloc();
+ cdev_init(cdev, fops);
+ kobject_set_name(&cdev->kobj, name);
+
+ ret = cdev_add(cdev, makedev(major, i), 1);
+ if (ret != 0)
+ break;
+ }
+ return (ret);
+}
+
+int
+__register_chrdev_p(unsigned int major, unsigned int baseminor,
+ unsigned int count, const char *name,
+ const struct file_operations *fops, uid_t uid,
+ gid_t gid, int mode)
+{
+ struct linux_cdev *cdev;
+ int ret = 0;
+ int i;
+
+ for (i = baseminor; i < baseminor + count; i++) {
+ cdev = cdev_alloc();
+ cdev_init(cdev, fops);
+ kobject_set_name(&cdev->kobj, name);
+
+ ret = cdev_add_ext(cdev, makedev(major, i), uid, gid, mode);
+ if (ret != 0)
+ break;
+ }
+ return (ret);
+}
+
+void
+__unregister_chrdev(unsigned int major, unsigned int baseminor,
+ unsigned int count, const char *name)
+{
+ struct linux_cdev *cdevp;
+ int i;
+
+ for (i = baseminor; i < baseminor + count; i++) {
+ cdevp = linux_find_cdev(name, major, i);
+ if (cdevp != NULL)
+ cdev_del(cdevp);
+ }
+}
+
#if defined(__i386__) || defined(__amd64__)
bool linux_cpu_has_clflush;
#endif
diff --git a/sys/compat/svr4/svr4_misc.c b/sys/compat/svr4/svr4_misc.c
index aaed81f..46be42c 100644
--- a/sys/compat/svr4/svr4_misc.c
+++ b/sys/compat/svr4/svr4_misc.c
@@ -1430,17 +1430,20 @@ svr4_sys_statvfs(td, uap)
struct svr4_sys_statvfs_args *uap;
{
struct svr4_statvfs sfs;
- struct statfs bfs;
+ struct statfs *bfs;
char *path;
int error;
CHECKALTEXIST(td, uap->path, &path);
- error = kern_statfs(td, path, UIO_SYSSPACE, &bfs);
+ bfs = malloc(sizeof(struct statfs), M_STATFS, M_WAITOK);
+ error = kern_statfs(td, path, UIO_SYSSPACE, bfs);
free(path, M_TEMP);
- if (error)
+ if (error == 0)
+ bsd_statfs_to_svr4_statvfs(bfs, &sfs);
+ free(bfs, M_STATFS);
+ if (error != 0)
return (error);
- bsd_statfs_to_svr4_statvfs(&bfs, &sfs);
return copyout(&sfs, uap->fs, sizeof(sfs));
}
@@ -1451,13 +1454,16 @@ svr4_sys_fstatvfs(td, uap)
struct svr4_sys_fstatvfs_args *uap;
{
struct svr4_statvfs sfs;
- struct statfs bfs;
+ struct statfs *bfs;
int error;
- error = kern_fstatfs(td, uap->fd, &bfs);
- if (error)
+ bfs = malloc(sizeof(struct statfs), M_STATFS, M_WAITOK);
+ error = kern_fstatfs(td, uap->fd, bfs);
+ if (error == 0)
+ bsd_statfs_to_svr4_statvfs(bfs, &sfs);
+ free(bfs, M_STATFS);
+ if (error != 0)
return (error);
- bsd_statfs_to_svr4_statvfs(&bfs, &sfs);
return copyout(&sfs, uap->fs, sizeof(sfs));
}
@@ -1468,17 +1474,20 @@ svr4_sys_statvfs64(td, uap)
struct svr4_sys_statvfs64_args *uap;
{
struct svr4_statvfs64 sfs;
- struct statfs bfs;
+ struct statfs *bfs;
char *path;
int error;
CHECKALTEXIST(td, uap->path, &path);
- error = kern_statfs(td, path, UIO_SYSSPACE, &bfs);
+ bfs = malloc(sizeof(struct statfs), M_STATFS, M_WAITOK);
+ error = kern_statfs(td, path, UIO_SYSSPACE, bfs);
free(path, M_TEMP);
- if (error)
+ if (error == 0)
+ bsd_statfs_to_svr4_statvfs64(bfs, &sfs);
+ free(bfs, M_STATFS);
+ if (error != 0)
return (error);
- bsd_statfs_to_svr4_statvfs64(&bfs, &sfs);
return copyout(&sfs, uap->fs, sizeof(sfs));
}
@@ -1489,13 +1498,16 @@ svr4_sys_fstatvfs64(td, uap)
struct svr4_sys_fstatvfs64_args *uap;
{
struct svr4_statvfs64 sfs;
- struct statfs bfs;
+ struct statfs *bfs;
int error;
- error = kern_fstatfs(td, uap->fd, &bfs);
- if (error)
+ bfs = malloc(sizeof(struct statfs), M_STATFS, M_WAITOK);
+ error = kern_fstatfs(td, uap->fd, bfs);
+ if (error == 0)
+ bsd_statfs_to_svr4_statvfs64(bfs, &sfs);
+ free(bfs, M_STATFS);
+ if (error != 0)
return (error);
- bsd_statfs_to_svr4_statvfs64(&bfs, &sfs);
return copyout(&sfs, uap->fs, sizeof(sfs));
}
diff --git a/sys/conf/NOTES b/sys/conf/NOTES
index 5421449..6d15ae4 100644
--- a/sys/conf/NOTES
+++ b/sys/conf/NOTES
@@ -769,8 +769,7 @@ options NETGRAPH_IPFW
options NETGRAPH_KSOCKET
options NETGRAPH_L2TP
options NETGRAPH_LMI
-# MPPC compression requires proprietary files (not included)
-#options NETGRAPH_MPPC_COMPRESSION
+options NETGRAPH_MPPC_COMPRESSION
options NETGRAPH_MPPC_ENCRYPTION
options NETGRAPH_NETFLOW
options NETGRAPH_NAT
@@ -1950,8 +1949,9 @@ device xmphy # XaQti XMAC II
# cm: Arcnet SMC COM90c26 / SMC COM90c56
# (and SMC COM90c66 in '56 compatibility mode) adapters.
# cxgb: Chelsio T3 based 1GbE/10GbE PCIe Ethernet adapters.
-# cxgbe:Chelsio T4 and T5 based 1GbE/10GbE/40GbE PCIe Ethernet adapters.
-# cxgbev: Chelsio T4 and T5 based PCIe Virtual Functions.
+# cxgbe:Chelsio T4, T5, and T6-based 1/10/25/40/100GbE PCIe Ethernet
+# adapters.
+# cxgbev: Chelsio T4, T5, and T6-based PCIe Virtual Functions.
# dc: Support for PCI fast ethernet adapters based on the DEC/Intel 21143
# and various workalikes including:
# the ADMtek AL981 Comet and AN985 Centaur, the ASIX Electronics
@@ -2102,9 +2102,6 @@ device bce # Broadcom BCM5706/BCM5708 Gigabit Ethernet
device bfe # Broadcom BCM440x 10/100 Ethernet
device bge # Broadcom BCM570xx Gigabit Ethernet
device cas # Sun Cassini/Cassini+ and NS DP83065 Saturn
-device cxgb # Chelsio T3 10 Gigabit Ethernet
-device cxgb_t3fw # Chelsio T3 10 Gigabit Ethernet firmware
-device cxgbe # Chelsio T4 and T5 1GbE/10GbE/40GbE
device dc # DEC/Intel 21143 and various workalikes
device et # Agere ET1310 10/100/Gigabit Ethernet
device fxp # Intel EtherExpress PRO/100B (82557, 82558)
@@ -2135,7 +2132,10 @@ device wb # Winbond W89C840F
device xl # 3Com 3c90x (``Boomerang'', ``Cyclone'')
# PCI Ethernet NICs.
-device cxgbev # Chelsio T4 and T5 1GbE/10GbE/40GbE VF
+device cxgb # Chelsio T3 10 Gigabit Ethernet
+device cxgb_t3fw # Chelsio T3 10 Gigabit Ethernet firmware
+device cxgbe # Chelsio T4-T6 1/10/25/40/100 Gigabit Ethernet
+device cxgbev # Chelsio T4-T6 Virtual Functions
device de # DEC/Intel DC21x4x (``Tulip'')
device em # Intel Pro/1000 Gigabit Ethernet
device igb # Intel Pro/1000 PCIE Gigabit Ethernet
diff --git a/sys/conf/files b/sys/conf/files
index d67cbc9..ae4f87d 100644
--- a/sys/conf/files
+++ b/sys/conf/files
@@ -1614,6 +1614,7 @@ dev/hptiop/hptiop.c optional hptiop scbus
dev/hwpmc/hwpmc_logging.c optional hwpmc
dev/hwpmc/hwpmc_mod.c optional hwpmc
dev/hwpmc/hwpmc_soft.c optional hwpmc
+dev/ichiic/ig4_acpi.c optional ig4 acpi iicbus
dev/ichiic/ig4_iic.c optional ig4 iicbus
dev/ichiic/ig4_pci.c optional ig4 pci iicbus
dev/ichsmb/ichsmb.c optional ichsmb
@@ -2556,6 +2557,7 @@ dev/scd/scd.c optional scd isa
dev/scd/scd_isa.c optional scd isa
dev/sdhci/sdhci.c optional sdhci
dev/sdhci/sdhci_if.m optional sdhci
+dev/sdhci/sdhci_acpi.c optional sdhci acpi
dev/sdhci/sdhci_pci.c optional sdhci pci
dev/sf/if_sf.c optional sf pci
dev/sge/if_sge.c optional sge pci
diff --git a/sys/conf/kern.opts.mk b/sys/conf/kern.opts.mk
index 343b4f8..bb4270e 100644
--- a/sys/conf/kern.opts.mk
+++ b/sys/conf/kern.opts.mk
@@ -47,7 +47,8 @@ __DEFAULT_NO_OPTIONS = \
EISA \
EXTRA_TCP_STACKS \
NAND \
- OFED
+ OFED \
+ REPRODUCIBLE_BUILD
# Some options are totally broken on some architectures. We disable
# them. If you need to enable them on an experimental basis, you
diff --git a/sys/conf/kern.post.mk b/sys/conf/kern.post.mk
index 128e47d..a16cb03 100644
--- a/sys/conf/kern.post.mk
+++ b/sys/conf/kern.post.mk
@@ -357,8 +357,11 @@ config.o env.o hints.o vers.o vnode_if.o:
config.ln env.ln hints.ln vers.ln vnode_if.ln:
${NORMAL_LINT}
+.if ${MK_REPRODUCIBLE_BUILD} != "no"
+REPRO_FLAG="-r"
+.endif
vers.c: $S/conf/newvers.sh $S/sys/param.h ${SYSTEM_DEP}
- MAKE=${MAKE} sh $S/conf/newvers.sh ${KERN_IDENT}
+ MAKE=${MAKE} sh $S/conf/newvers.sh ${REPRO_FLAG} ${KERN_IDENT}
vnode_if.c: $S/tools/vnode_if.awk $S/kern/vnode_if.src
${AWK} -f $S/tools/vnode_if.awk $S/kern/vnode_if.src -c
diff --git a/sys/conf/newvers.sh b/sys/conf/newvers.sh
index 0d5f795..fa05d7d 100644
--- a/sys/conf/newvers.sh
+++ b/sys/conf/newvers.sh
@@ -30,6 +30,18 @@
# @(#)newvers.sh 8.1 (Berkeley) 4/20/94
# $FreeBSD$
+# Command line options:
+#
+# -r Reproducible build. Do not embed directory names, user
+# names, time stamps or other dynamic information into
+# the output file. This is intended to allow two builds
+# done at different times and even by different people on
+# different hosts to produce identical output.
+#
+# -R Reproducible build if the tree represents an unmodified
+# checkout from a version control system. Metadata is
+# included if the tree is modified.
+
TYPE="FreeBSD"
REVISION="11.0"
BRANCH="STABLE"
@@ -163,8 +175,16 @@ fi
if [ -n "$svnversion" ] ; then
svn=`cd ${SYSDIR} && $svnversion 2>/dev/null`
case "$svn" in
- [0-9]*) svn=" r${svn}" ;;
- *) unset svn ;;
+ [0-9]*[MSP]|*:*)
+ svn=" r${svn}"
+ modified=true
+ ;;
+ [0-9]*)
+ svn=" r${svn}"
+ ;;
+ *)
+ unset svn
+ ;;
esac
fi
@@ -196,6 +216,7 @@ if [ -n "$git_cmd" ] ; then
if $git_cmd --work-tree=${SYSDIR}/.. diff-index \
--name-only HEAD | read dummy; then
git="${git}-dirty"
+ modified=true
fi
fi
@@ -208,7 +229,10 @@ if [ -n "$p4_cmd" ] ; then
p4opened=`cd ${SYSDIR} && $p4_cmd opened ./... 2>&1`
case "$p4opened" in
File*) ;;
- //*) p4version="${p4version}+edit" ;;
+ //*)
+ p4version="${p4version}+edit"
+ modified=true
+ ;;
esac
;;
*) unset p4version ;;
@@ -227,10 +251,32 @@ if [ -n "$hg_cmd" ] ; then
fi
fi
+include_metadata=true
+while getopts rR opt; do
+ case "$opt" in
+ r)
+ include_metadata=
+ ;;
+ R)
+ if [ -z "${modified}" ]; then
+ include_metadata=
+ fi
+ esac
+done
+shift $((OPTIND - 1))
+
+if [ -z "${include_metadata}" ]; then
+ VERINFO="${VERSION} ${svn}${git}${hg}${p4version}"
+ VERSTR="${VERINFO}\\n"
+else
+ VERINFO="${VERSION} #${v}${svn}${git}${hg}${p4version}: ${t}"
+ VERSTR="${VERINFO}\\n ${u}@${h}:${d}\\n"
+fi
+
cat << EOF > vers.c
$COPYRIGHT
-#define SCCSSTR "@(#)${VERSION} #${v}${svn}${git}${hg}${p4version}: ${t}"
-#define VERSTR "${VERSION} #${v}${svn}${git}${hg}${p4version}: ${t}\\n ${u}@${h}:${d}\\n"
+#define SCCSSTR "@(#)${VERINFO}"
+#define VERSTR "${VERSTR}"
#define RELSTR "${RELEASE}"
char sccs[sizeof(SCCSSTR) > 128 ? sizeof(SCCSSTR) : 128] = SCCSSTR;
diff --git a/sys/conf/options b/sys/conf/options
index 612de3d..59f7d95 100644
--- a/sys/conf/options
+++ b/sys/conf/options
@@ -510,7 +510,6 @@ NETGRAPH_IPFW opt_netgraph.h
NETGRAPH_KSOCKET opt_netgraph.h
NETGRAPH_L2TP opt_netgraph.h
NETGRAPH_LMI opt_netgraph.h
-# MPPC compression requires proprietary files (not included)
NETGRAPH_MPPC_COMPRESSION opt_netgraph.h
NETGRAPH_MPPC_ENCRYPTION opt_netgraph.h
NETGRAPH_NAT opt_netgraph.h
diff --git a/sys/contrib/ipfilter/netinet/ip_fil.h b/sys/contrib/ipfilter/netinet/ip_fil.h
index 076433c..646f5d6 100644
--- a/sys/contrib/ipfilter/netinet/ip_fil.h
+++ b/sys/contrib/ipfilter/netinet/ip_fil.h
@@ -11,6 +11,10 @@
#ifndef __IP_FIL_H__
#define __IP_FIL_H__
+#if !defined(linux) || !defined(_KERNEL)
+# include <netinet/in.h>
+#endif
+
#include "netinet/ip_compat.h"
#include "netinet/ipf_rb.h"
#if NETBSD_GE_REV(104040000)
@@ -24,10 +28,6 @@
# endif
#endif
-#if !defined(linux) || !defined(_KERNEL)
-# include <netinet/in.h>
-#endif
-
#ifndef SOLARIS
# if defined(sun) && (defined(__svr4__) || defined(__SVR4))
# define SOLARIS 1
diff --git a/sys/crypto/skein/amd64/skein_block_asm.s b/sys/crypto/skein/amd64/skein_block_asm.s
index b2d0a83..45d16db 100644
--- a/sys/crypto/skein/amd64/skein_block_asm.s
+++ b/sys/crypto/skein/amd64/skein_block_asm.s
@@ -1325,4 +1325,6 @@ _SP_OFFS_ = _SP_OFFS_-8
ret
.endif
#----------------------------------------------------------------
+ .section .note.GNU-stack,"",@progbits
+
.end
diff --git a/sys/dev/ahci/ahci.c b/sys/dev/ahci/ahci.c
index cf1d48a..9bd77b3 100644
--- a/sys/dev/ahci/ahci.c
+++ b/sys/dev/ahci/ahci.c
@@ -758,7 +758,7 @@ ahci_ch_attach(device_t dev)
/* Construct SIM entry */
ch->sim = cam_sim_alloc(ahciaction, ahcipoll, "ahcich", ch,
device_get_unit(dev), (struct mtx *)&ch->mtx,
- min(2, ch->numslots),
+ (ch->quirks & AHCI_Q_NOCCS) ? 1 : min(2, ch->numslots),
(ch->caps & AHCI_CAP_SNCQ) ? ch->numslots : 0,
devq);
if (ch->sim == NULL) {
@@ -1169,8 +1169,6 @@ ahci_ch_intr(void *arg)
/* Read interrupt statuses. */
istatus = ATA_INL(ch->r_mem, AHCI_P_IS);
- if (istatus == 0)
- return;
mtx_lock(&ch->mtx);
ahci_ch_intr_main(ch, istatus);
@@ -1187,8 +1185,6 @@ ahci_ch_intr_direct(void *arg)
/* Read interrupt statuses. */
istatus = ATA_INL(ch->r_mem, AHCI_P_IS);
- if (istatus == 0)
- return;
mtx_lock(&ch->mtx);
ch->batch = 1;
@@ -1275,8 +1271,19 @@ ahci_ch_intr_main(struct ahci_channel *ch, uint32_t istatus)
/* Process command errors */
if (istatus & (AHCI_P_IX_OF | AHCI_P_IX_IF |
AHCI_P_IX_HBD | AHCI_P_IX_HBF | AHCI_P_IX_TFE)) {
- ccs = (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_CCS_MASK)
- >> AHCI_P_CMD_CCS_SHIFT;
+ if (ch->quirks & AHCI_Q_NOCCS) {
+ /*
+ * ASMedia chips sometimes report failed commands as
+ * completed. Count all running commands as failed.
+ */
+ cstatus |= ch->rslots;
+
+ /* They also report wrong CCS, so try to guess one. */
+ ccs = powerof2(cstatus) ? ffs(cstatus) - 1 : -1;
+ } else {
+ ccs = (ATA_INL(ch->r_mem, AHCI_P_CMD) &
+ AHCI_P_CMD_CCS_MASK) >> AHCI_P_CMD_CCS_SHIFT;
+ }
//device_printf(dev, "%s ERROR is %08x cs %08x ss %08x rs %08x tfd %02x serr %08x fbs %08x ccs %d\n",
// __func__, istatus, cstatus, sstatus, ch->rslots, ATA_INL(ch->r_mem, AHCI_P_TFD),
// serr, ATA_INL(ch->r_mem, AHCI_P_FBS), ccs);
diff --git a/sys/dev/ahci/ahci.h b/sys/dev/ahci/ahci.h
index 1dc75ca..bb7b9fe 100644
--- a/sys/dev/ahci/ahci.h
+++ b/sys/dev/ahci/ahci.h
@@ -598,6 +598,7 @@ enum ahci_err_type {
#define AHCI_Q_FORCE_PI 0x00040000
#define AHCI_Q_RESTORE_CAP 0x00080000
#define AHCI_Q_NOMSIX 0x00100000
+#define AHCI_Q_NOCCS 0x00400000
#define AHCI_Q_BIT_STRING \
"\020" \
@@ -621,7 +622,8 @@ enum ahci_err_type {
"\0221MSI" \
"\023FORCE_PI" \
"\024RESTORE_CAP" \
- "\025NOMSIX"
+ "\025NOMSIX" \
+ "\027NOCCS"
int ahci_attach(device_t dev);
int ahci_detach(device_t dev);
diff --git a/sys/dev/ahci/ahci_pci.c b/sys/dev/ahci/ahci_pci.c
index 4a44484..d681e36 100644
--- a/sys/dev/ahci/ahci_pci.c
+++ b/sys/dev/ahci/ahci_pci.c
@@ -73,8 +73,15 @@ static const struct {
{0x78021022, 0x00, "AMD Hudson-2", 0},
{0x78031022, 0x00, "AMD Hudson-2", 0},
{0x78041022, 0x00, "AMD Hudson-2", 0},
- {0x06111b21, 0x00, "ASMedia ASM2106", 0},
- {0x06121b21, 0x00, "ASMedia ASM1061", 0},
+ {0x06011b21, 0x00, "ASMedia ASM1060", AHCI_Q_NOCCS},
+ {0x06021b21, 0x00, "ASMedia ASM1060", AHCI_Q_NOCCS},
+ {0x06111b21, 0x00, "ASMedia ASM1061", AHCI_Q_NOCCS},
+ {0x06121b21, 0x00, "ASMedia ASM1062", AHCI_Q_NOCCS},
+ {0x06201b21, 0x00, "ASMedia ASM106x", AHCI_Q_NOCCS},
+ {0x06211b21, 0x00, "ASMedia ASM106x", AHCI_Q_NOCCS},
+ {0x06221b21, 0x00, "ASMedia ASM106x", AHCI_Q_NOCCS},
+ {0x06241b21, 0x00, "ASMedia ASM106x", AHCI_Q_NOCCS},
+ {0x06251b21, 0x00, "ASMedia ASM106x", AHCI_Q_NOCCS},
{0x26528086, 0x00, "Intel ICH6", AHCI_Q_NOFORCE},
{0x26538086, 0x00, "Intel ICH6M", AHCI_Q_NOFORCE},
{0x26818086, 0x00, "Intel ESB2", 0},
diff --git a/sys/dev/alc/if_alc.c b/sys/dev/alc/if_alc.c
index 5bfd60c..a943a40 100644
--- a/sys/dev/alc/if_alc.c
+++ b/sys/dev/alc/if_alc.c
@@ -121,6 +121,8 @@ static struct alc_ident alc_ident_table[] = {
"Atheros AR8172 PCIe Fast Ethernet" },
{ VENDORID_ATHEROS, DEVICEID_ATHEROS_E2200, 9 * 1024,
"Killer E2200 Gigabit Ethernet" },
+ { VENDORID_ATHEROS, DEVICEID_ATHEROS_E2400, 9 * 1024,
+ "Killer E2400 Gigabit Ethernet" },
{ 0, 0, 0, NULL}
};
@@ -255,7 +257,7 @@ static struct resource_spec alc_irq_spec_msix[] = {
{ -1, 0, 0 }
};
-static uint32_t alc_dma_burst[] = { 128, 256, 512, 1024, 2048, 4096, 0 };
+static uint32_t alc_dma_burst[] = { 128, 256, 512, 1024, 2048, 4096, 0, 0 };
static int
alc_miibus_readreg(device_t dev, int phy, int reg)
@@ -1080,6 +1082,7 @@ alc_phy_down(struct alc_softc *sc)
switch (sc->alc_ident->deviceid) {
case DEVICEID_ATHEROS_AR8161:
case DEVICEID_ATHEROS_E2200:
+ case DEVICEID_ATHEROS_E2400:
case DEVICEID_ATHEROS_AR8162:
case DEVICEID_ATHEROS_AR8171:
case DEVICEID_ATHEROS_AR8172:
@@ -1397,12 +1400,15 @@ alc_attach(device_t dev)
* shows the same PHY model/revision number of AR8131.
*/
switch (sc->alc_ident->deviceid) {
+ case DEVICEID_ATHEROS_E2200:
+ case DEVICEID_ATHEROS_E2400:
+ sc->alc_flags |= ALC_FLAG_E2X00;
+ /* FALLTHROUGH */
case DEVICEID_ATHEROS_AR8161:
if (pci_get_subvendor(dev) == VENDORID_ATHEROS &&
pci_get_subdevice(dev) == 0x0091 && sc->alc_rev == 0)
sc->alc_flags |= ALC_FLAG_LINK_WAR;
/* FALLTHROUGH */
- case DEVICEID_ATHEROS_E2200:
case DEVICEID_ATHEROS_AR8171:
sc->alc_flags |= ALC_FLAG_AR816X_FAMILY;
break;
@@ -1473,6 +1479,12 @@ alc_attach(device_t dev)
sc->alc_dma_rd_burst = 3;
if (alc_dma_burst[sc->alc_dma_wr_burst] > 1024)
sc->alc_dma_wr_burst = 3;
+ /*
+ * Force maximum payload size to 128 bytes for E2200/E2400.
+ * Otherwise it triggers DMA write error.
+ */
+ if ((sc->alc_flags & ALC_FLAG_E2X00) != 0)
+ sc->alc_dma_wr_burst = 0;
alc_init_pcie(sc);
}
@@ -4184,13 +4196,17 @@ alc_init_locked(struct alc_softc *sc)
reg = (RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) &
RXQ_CFG_RD_BURST_MASK;
reg |= RXQ_CFG_RSS_MODE_DIS;
- if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
+ if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
reg |= (RXQ_CFG_816X_IDT_TBL_SIZE_DEFAULT <<
RXQ_CFG_816X_IDT_TBL_SIZE_SHIFT) &
RXQ_CFG_816X_IDT_TBL_SIZE_MASK;
- if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0 &&
- sc->alc_ident->deviceid != DEVICEID_ATHEROS_AR8151_V2)
- reg |= RXQ_CFG_ASPM_THROUGHPUT_LIMIT_1M;
+ if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0)
+ reg |= RXQ_CFG_ASPM_THROUGHPUT_LIMIT_100M;
+ } else {
+ if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0 &&
+ sc->alc_ident->deviceid != DEVICEID_ATHEROS_AR8151_V2)
+ reg |= RXQ_CFG_ASPM_THROUGHPUT_LIMIT_100M;
+ }
CSR_WRITE_4(sc, ALC_RXQ_CFG, reg);
/* Configure DMA parameters. */
@@ -4214,12 +4230,12 @@ alc_init_locked(struct alc_softc *sc)
switch (AR816X_REV(sc->alc_rev)) {
case AR816X_REV_A0:
case AR816X_REV_A1:
- reg |= DMA_CFG_RD_CHNL_SEL_1;
+ reg |= DMA_CFG_RD_CHNL_SEL_2;
break;
case AR816X_REV_B0:
/* FALLTHROUGH */
default:
- reg |= DMA_CFG_RD_CHNL_SEL_3;
+ reg |= DMA_CFG_RD_CHNL_SEL_4;
break;
}
}
diff --git a/sys/dev/alc/if_alcreg.h b/sys/dev/alc/if_alcreg.h
index 1ad75a3..ae63084 100644
--- a/sys/dev/alc/if_alcreg.h
+++ b/sys/dev/alc/if_alcreg.h
@@ -45,10 +45,11 @@
#define DEVICEID_ATHEROS_AR8152_B 0x2060 /* L2C V1.1 */
#define DEVICEID_ATHEROS_AR8152_B2 0x2062 /* L2C V2.0 */
#define DEVICEID_ATHEROS_AR8161 0x1091
-#define DEVICEID_ATHEROS_E2200 0xE091
#define DEVICEID_ATHEROS_AR8162 0x1090
#define DEVICEID_ATHEROS_AR8171 0x10A1
#define DEVICEID_ATHEROS_AR8172 0x10A0
+#define DEVICEID_ATHEROS_E2200 0xE091
+#define DEVICEID_ATHEROS_E2400 0xE0A1
#define ATHEROS_AR8152_B_V10 0xC0
#define ATHEROS_AR8152_B_V11 0xC1
diff --git a/sys/dev/alc/if_alcvar.h b/sys/dev/alc/if_alcvar.h
index 9a73ef4..a1c3382 100644
--- a/sys/dev/alc/if_alcvar.h
+++ b/sys/dev/alc/if_alcvar.h
@@ -235,7 +235,8 @@ struct alc_softc {
#define ALC_FLAG_APS 0x1000
#define ALC_FLAG_AR816X_FAMILY 0x2000
#define ALC_FLAG_LINK_WAR 0x4000
-#define ALC_FLAG_LINK 0x8000
+#define ALC_FLAG_E2X00 0x8000
+#define ALC_FLAG_LINK 0x10000
struct callout alc_tick_ch;
struct alc_hw_stats alc_stats;
diff --git a/sys/dev/cxgbe/t4_iov.c b/sys/dev/cxgbe/t4_iov.c
index 3014b75..0f7deb0 100644
--- a/sys/dev/cxgbe/t4_iov.c
+++ b/sys/dev/cxgbe/t4_iov.c
@@ -41,6 +41,7 @@ __FBSDID("$FreeBSD$");
#include <dev/pci/pci_iov.h>
#endif
+#include "common/common.h"
#include "t4_if.h"
struct t4iov_softc {
@@ -106,6 +107,9 @@ t4iov_probe(device_t dev)
uint16_t d;
size_t i;
+ if (pci_get_vendor(dev) != PCI_VENDOR_ID_CHELSIO)
+ return (ENXIO);
+
d = pci_get_device(dev);
for (i = 0; i < nitems(t4iov_pciids); i++) {
if (d == t4iov_pciids[i].device) {
@@ -123,6 +127,9 @@ t5iov_probe(device_t dev)
uint16_t d;
size_t i;
+ if (pci_get_vendor(dev) != PCI_VENDOR_ID_CHELSIO)
+ return (ENXIO);
+
d = pci_get_device(dev);
for (i = 0; i < nitems(t5iov_pciids); i++) {
if (d == t5iov_pciids[i].device) {
@@ -140,6 +147,9 @@ t6iov_probe(device_t dev)
uint16_t d;
size_t i;
+ if (pci_get_vendor(dev) != PCI_VENDOR_ID_CHELSIO)
+ return (ENXIO);
+
d = pci_get_device(dev);
for (i = 0; i < nitems(t6iov_pciids); i++) {
if (d == t6iov_pciids[i].device) {
@@ -161,6 +171,8 @@ t4iov_attach(device_t dev)
sc->sc_main = pci_find_dbsf(pci_get_domain(dev), pci_get_bus(dev),
pci_get_slot(dev), 4);
+ if (sc->sc_main == NULL)
+ return (ENXIO);
if (T4_IS_MAIN_READY(sc->sc_main) == 0)
return (t4iov_attach_child(dev));
return (0);
diff --git a/sys/dev/cxgbe/t4_main.c b/sys/dev/cxgbe/t4_main.c
index 2c06a50..73fabfd 100644
--- a/sys/dev/cxgbe/t4_main.c
+++ b/sys/dev/cxgbe/t4_main.c
@@ -623,6 +623,7 @@ struct {
#endif
}, t6_pciids[] = {
{0xc006, "Chelsio Terminator 6 FPGA"}, /* T6 PE10K6 FPGA (PF0) */
+ {0x6400, "Chelsio T6225-DBG"}, /* 2 x 10/25G, debug */
{0x6401, "Chelsio T6225-CR"}, /* 2 x 10/25G */
{0x6402, "Chelsio T6225-SO-CR"}, /* 2 x 10/25G, nomem */
{0x6407, "Chelsio T62100-LP-CR"}, /* 2 x 40/50/100G */
diff --git a/sys/dev/cxgbe/t4_sge.c b/sys/dev/cxgbe/t4_sge.c
index 72af4e9..104df8d 100644
--- a/sys/dev/cxgbe/t4_sge.c
+++ b/sys/dev/cxgbe/t4_sge.c
@@ -2289,7 +2289,7 @@ slowpath:
w = &eq->desc[eq->pidx];
IDXINCR(eq->pidx, ndesc, eq->sidx);
- if (__predict_false(eq->pidx < ndesc - 1)) {
+ if (__predict_false(cookie->pidx + ndesc > eq->sidx)) {
w = &wrq->ss[0];
wrq->ss_pidx = cookie->pidx;
wrq->ss_len = len16 * 16;
@@ -3296,12 +3296,13 @@ ctrl_eq_alloc(struct adapter *sc, struct sge_eq *eq)
c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_CMPLIQID(eq->iqid));
c.physeqid_pkd = htobe32(0);
c.fetchszm_to_iqid =
- htobe32(V_FW_EQ_CTRL_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |
+ htobe32(V_FW_EQ_CTRL_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) |
V_FW_EQ_CTRL_CMD_PCIECHN(eq->tx_chan) |
F_FW_EQ_CTRL_CMD_FETCHRO | V_FW_EQ_CTRL_CMD_IQID(eq->iqid));
c.dcaen_to_eqsize =
htobe32(V_FW_EQ_CTRL_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
V_FW_EQ_CTRL_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
+ V_FW_EQ_CTRL_CMD_CIDXFTHRESH(X_CIDXFLUSHTHRESH_32) |
V_FW_EQ_CTRL_CMD_EQSIZE(qsize));
c.eqaddr = htobe64(eq->ba);
diff --git a/sys/dev/cxgbe/tom/t4_connect.c b/sys/dev/cxgbe/tom/t4_connect.c
index 7ad1fb6..ebd5766 100644
--- a/sys/dev/cxgbe/tom/t4_connect.c
+++ b/sys/dev/cxgbe/tom/t4_connect.c
@@ -107,7 +107,7 @@ free_atid(struct adapter *sc, int atid)
}
/*
- * Active open failed.
+ * Active open succeeded.
*/
static int
do_act_establish(struct sge_iq *iq, const struct rss_header *rss,
@@ -126,9 +126,10 @@ do_act_establish(struct sge_iq *iq, const struct rss_header *rss,
CTR3(KTR_CXGBE, "%s: atid %u, tid %u", __func__, atid, tid);
free_atid(sc, atid);
+ CURVNET_SET(toep->vnet);
INP_WLOCK(inp);
toep->tid = tid;
- insert_tid(sc, tid, toep);
+ insert_tid(sc, tid, toep, inp->inp_vflag & INP_IPV6 ? 2 : 1);
if (inp->inp_flags & INP_DROPPED) {
/* socket closed by the kernel before hw told us it connected */
@@ -141,6 +142,7 @@ do_act_establish(struct sge_iq *iq, const struct rss_header *rss,
make_established(toep, cpl->snd_isn, cpl->rcv_isn, cpl->tcp_opt);
done:
INP_WUNLOCK(inp);
+ CURVNET_RESTORE();
return (0);
}
@@ -178,6 +180,7 @@ act_open_failure_cleanup(struct adapter *sc, u_int atid, u_int status)
free_atid(sc, atid);
toep->tid = -1;
+ CURVNET_SET(toep->vnet);
if (status != EAGAIN)
INP_INFO_RLOCK(&V_tcbinfo);
INP_WLOCK(inp);
@@ -185,8 +188,12 @@ act_open_failure_cleanup(struct adapter *sc, u_int atid, u_int status)
final_cpl_received(toep); /* unlocks inp */
if (status != EAGAIN)
INP_INFO_RUNLOCK(&V_tcbinfo);
+ CURVNET_RESTORE();
}
+/*
+ * Active open failed.
+ */
static int
do_act_open_rpl(struct sge_iq *iq, const struct rss_header *rss,
struct mbuf *m)
@@ -268,6 +275,14 @@ t4_init_connect_cpl_handlers(void)
t4_register_cpl_handler(CPL_ACT_OPEN_RPL, do_act_open_rpl);
}
+void
+t4_uninit_connect_cpl_handlers(void)
+{
+
+ t4_register_cpl_handler(CPL_ACT_ESTABLISH, NULL);
+ t4_register_cpl_handler(CPL_ACT_OPEN_RPL, NULL);
+}
+
#define DONT_OFFLOAD_ACTIVE_OPEN(x) do { \
reason = __LINE__; \
rc = (x); \
@@ -357,6 +372,7 @@ t4_connect(struct toedev *tod, struct socket *so, struct rtentry *rt,
if (wr == NULL)
DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM);
+ toep->vnet = so->so_vnet;
if (sc->tt.ddp && (so->so_options & SO_NO_DDP) == 0)
set_tcpddp_ulp_mode(toep);
else
diff --git a/sys/dev/cxgbe/tom/t4_cpl_io.c b/sys/dev/cxgbe/tom/t4_cpl_io.c
index c7e0661..007d438 100644
--- a/sys/dev/cxgbe/tom/t4_cpl_io.c
+++ b/sys/dev/cxgbe/tom/t4_cpl_io.c
@@ -306,7 +306,6 @@ make_established(struct toepcb *toep, uint32_t snd_isn, uint32_t rcv_isn,
uint16_t tcpopt = be16toh(opt);
struct flowc_tx_params ftxp;
- CURVNET_SET(so->so_vnet);
INP_WLOCK_ASSERT(inp);
KASSERT(tp->t_state == TCPS_SYN_SENT ||
tp->t_state == TCPS_SYN_RECEIVED,
@@ -357,7 +356,6 @@ make_established(struct toepcb *toep, uint32_t snd_isn, uint32_t rcv_isn,
send_flowc_wr(toep, &ftxp);
soisconnected(so);
- CURVNET_RESTORE();
}
static int
@@ -1146,6 +1144,7 @@ do_peer_close(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));
+ CURVNET_SET(toep->vnet);
INP_INFO_RLOCK(&V_tcbinfo);
INP_WLOCK(inp);
tp = intotcpcb(inp);
@@ -1191,6 +1190,7 @@ do_peer_close(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
tcp_twstart(tp);
INP_UNLOCK_ASSERT(inp); /* safe, we have a ref on the inp */
INP_INFO_RUNLOCK(&V_tcbinfo);
+ CURVNET_RESTORE();
INP_WLOCK(inp);
final_cpl_received(toep);
@@ -1203,6 +1203,7 @@ do_peer_close(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
done:
INP_WUNLOCK(inp);
INP_INFO_RUNLOCK(&V_tcbinfo);
+ CURVNET_RESTORE();
return (0);
}
@@ -1229,6 +1230,7 @@ do_close_con_rpl(struct sge_iq *iq, const struct rss_header *rss,
KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));
+ CURVNET_SET(toep->vnet);
INP_INFO_RLOCK(&V_tcbinfo);
INP_WLOCK(inp);
tp = intotcpcb(inp);
@@ -1248,6 +1250,7 @@ do_close_con_rpl(struct sge_iq *iq, const struct rss_header *rss,
release:
INP_UNLOCK_ASSERT(inp); /* safe, we have a ref on the inp */
INP_INFO_RUNLOCK(&V_tcbinfo);
+ CURVNET_RESTORE();
INP_WLOCK(inp);
final_cpl_received(toep); /* no more CPLs expected */
@@ -1272,6 +1275,7 @@ release:
done:
INP_WUNLOCK(inp);
INP_INFO_RUNLOCK(&V_tcbinfo);
+ CURVNET_RESTORE();
return (0);
}
@@ -1345,6 +1349,7 @@ do_abort_req(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
}
inp = toep->inp;
+ CURVNET_SET(toep->vnet);
INP_INFO_RLOCK(&V_tcbinfo); /* for tcp_close */
INP_WLOCK(inp);
@@ -1380,6 +1385,7 @@ do_abort_req(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
final_cpl_received(toep);
done:
INP_INFO_RUNLOCK(&V_tcbinfo);
+ CURVNET_RESTORE();
send_abort_rpl(sc, ofld_txq, tid, CPL_ABORT_NO_RST);
return (0);
}
@@ -1501,18 +1507,21 @@ do_rx_data(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
DDP_UNLOCK(toep);
INP_WUNLOCK(inp);
+ CURVNET_SET(toep->vnet);
INP_INFO_RLOCK(&V_tcbinfo);
INP_WLOCK(inp);
tp = tcp_drop(tp, ECONNRESET);
if (tp)
INP_WUNLOCK(inp);
INP_INFO_RUNLOCK(&V_tcbinfo);
+ CURVNET_RESTORE();
return (0);
}
/* receive buffer autosize */
- CURVNET_SET(so->so_vnet);
+ MPASS(toep->vnet == so->so_vnet);
+ CURVNET_SET(toep->vnet);
if (sb->sb_flags & SB_AUTOSIZE &&
V_tcp_do_autorcvbuf &&
sb->sb_hiwat < V_tcp_autorcvbuf_max &&
@@ -1713,10 +1722,12 @@ do_fw4_ack(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
tid);
#endif
toep->flags &= ~TPF_TX_SUSPENDED;
+ CURVNET_SET(toep->vnet);
if (toep->ulp_mode == ULP_MODE_ISCSI)
t4_push_pdus(sc, toep, plen);
else
t4_push_frames(sc, toep, plen);
+ CURVNET_RESTORE();
} else if (plen > 0) {
struct sockbuf *sb = &so->so_snd;
int sbu;
@@ -1837,12 +1848,12 @@ void
t4_uninit_cpl_io_handlers(void)
{
- t4_register_cpl_handler(CPL_PEER_CLOSE, do_peer_close);
- t4_register_cpl_handler(CPL_CLOSE_CON_RPL, do_close_con_rpl);
- t4_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req);
- t4_register_cpl_handler(CPL_ABORT_RPL_RSS, do_abort_rpl);
- t4_register_cpl_handler(CPL_RX_DATA, do_rx_data);
- t4_register_cpl_handler(CPL_FW4_ACK, do_fw4_ack);
+ t4_register_cpl_handler(CPL_PEER_CLOSE, NULL);
+ t4_register_cpl_handler(CPL_CLOSE_CON_RPL, NULL);
+ t4_register_cpl_handler(CPL_ABORT_REQ_RSS, NULL);
+ t4_register_cpl_handler(CPL_ABORT_RPL_RSS, NULL);
+ t4_register_cpl_handler(CPL_RX_DATA, NULL);
+ t4_register_cpl_handler(CPL_FW4_ACK, NULL);
}
/*
@@ -2143,7 +2154,7 @@ t4_aiotx_task(void *context, int pending)
struct socket *so = inp->inp_socket;
struct kaiocb *job;
- CURVNET_SET(so->so_vnet);
+ CURVNET_SET(toep->vnet);
SOCKBUF_LOCK(&so->so_snd);
while (!TAILQ_EMPTY(&toep->aiotx_jobq) && sowriteable(so)) {
job = TAILQ_FIRST(&toep->aiotx_jobq);
diff --git a/sys/dev/cxgbe/tom/t4_ddp.c b/sys/dev/cxgbe/tom/t4_ddp.c
index 1a4cb71..762eb2e 100644
--- a/sys/dev/cxgbe/tom/t4_ddp.c
+++ b/sys/dev/cxgbe/tom/t4_ddp.c
@@ -546,7 +546,8 @@ handle_ddp_data(struct toepcb *toep, __be32 ddp_report, __be32 rcv_nxt, int len)
#endif
/* receive buffer autosize */
- CURVNET_SET(so->so_vnet);
+ MPASS(toep->vnet == so->so_vnet);
+ CURVNET_SET(toep->vnet);
SOCKBUF_LOCK(sb);
if (sb->sb_flags & SB_AUTOSIZE &&
V_tcp_do_autorcvbuf &&
diff --git a/sys/dev/cxgbe/tom/t4_listen.c b/sys/dev/cxgbe/tom/t4_listen.c
index 472c9a5..f59d9a0 100644
--- a/sys/dev/cxgbe/tom/t4_listen.c
+++ b/sys/dev/cxgbe/tom/t4_listen.c
@@ -222,6 +222,7 @@ alloc_lctx(struct adapter *sc, struct inpcb *inp, struct vi_info *vi)
TAILQ_INIT(&lctx->synq);
lctx->inp = inp;
+ lctx->vnet = inp->inp_socket->so_vnet;
in_pcbref(inp);
return (lctx);
@@ -824,14 +825,16 @@ done_with_synqe(struct adapter *sc, struct synq_entry *synqe)
struct inpcb *inp = lctx->inp;
struct vi_info *vi = synqe->syn->m_pkthdr.rcvif->if_softc;
struct l2t_entry *e = &sc->l2t->l2tab[synqe->l2e_idx];
+ int ntids;
INP_WLOCK_ASSERT(inp);
+ ntids = inp->inp_vflag & INP_IPV6 ? 2 : 1;
TAILQ_REMOVE(&lctx->synq, synqe, link);
inp = release_lctx(sc, lctx);
if (inp)
INP_WUNLOCK(inp);
- remove_tid(sc, synqe->tid);
+ remove_tid(sc, synqe->tid, ntids);
release_tid(sc, synqe->tid, &sc->sge.ctrlq[vi->pi->port_id]);
t4_l2t_release(e);
release_synqe(synqe); /* removed from synq list */
@@ -1180,7 +1183,7 @@ do_pass_accept_req(struct sge_iq *iq, const struct rss_header *rss,
struct l2t_entry *e = NULL;
int rscale, mtu_idx, rx_credits, rxqid, ulp_mode;
struct synq_entry *synqe = NULL;
- int reject_reason, v;
+ int reject_reason, v, ntids;
uint16_t vid;
#ifdef INVARIANTS
unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl)));
@@ -1198,6 +1201,8 @@ do_pass_accept_req(struct sge_iq *iq, const struct rss_header *rss,
pi = sc->port[G_SYN_INTF(be16toh(cpl->l2info))];
+ CURVNET_SET(lctx->vnet);
+
/*
* Use the MAC index to lookup the associated VI. If this SYN
* didn't match a perfect MAC filter, punt.
@@ -1254,6 +1259,8 @@ found:
*/
if (!in6_ifhasaddr(ifp, &inc.inc6_laddr))
REJECT_PASS_ACCEPT();
+
+ ntids = 2;
} else {
/* Don't offload if the ifcap isn't enabled */
@@ -1266,8 +1273,17 @@ found:
*/
if (!in_ifhasaddr(ifp, inc.inc_laddr))
REJECT_PASS_ACCEPT();
+
+ ntids = 1;
}
+ /*
+ * Don't offload if the ifnet that the SYN came in on is not in the same
+ * vnet as the listening socket.
+ */
+ if (lctx->vnet != ifp->if_vnet)
+ REJECT_PASS_ACCEPT();
+
e = get_l2te_for_nexthop(pi, ifp, &inc);
if (e == NULL)
REJECT_PASS_ACCEPT();
@@ -1307,7 +1323,6 @@ found:
REJECT_PASS_ACCEPT();
}
so = inp->inp_socket;
- CURVNET_SET(so->so_vnet);
mtu_idx = find_best_mtu_idx(sc, &inc, be16toh(cpl->tcpopt.mss));
rscale = cpl->tcpopt.wsf && V_tcp_do_rfc1323 ? select_rcv_wscale() : 0;
@@ -1343,7 +1358,7 @@ found:
synqe->rcv_bufsize = rx_credits;
atomic_store_rel_ptr(&synqe->wr, (uintptr_t)wr);
- insert_tid(sc, tid, synqe);
+ insert_tid(sc, tid, synqe, ntids);
TAILQ_INSERT_TAIL(&lctx->synq, synqe, link);
hold_synqe(synqe); /* hold for the duration it's in the synq */
hold_lctx(lctx); /* A synqe on the list has a ref on its lctx */
@@ -1354,7 +1369,6 @@ found:
*/
toe_syncache_add(&inc, &to, &th, inp, tod, synqe);
INP_UNLOCK_ASSERT(inp); /* ok to assert, we have a ref on the inp */
- CURVNET_RESTORE();
/*
* If we replied during syncache_add (synqe->wr has been consumed),
@@ -1372,7 +1386,7 @@ found:
if (m)
m->m_pkthdr.rcvif = hw_ifp;
- remove_tid(sc, synqe->tid);
+ remove_tid(sc, synqe->tid, ntids);
free(wr, M_CXGBE);
/* Yank the synqe out of the lctx synq. */
@@ -1404,15 +1418,18 @@ found:
if (!(synqe->flags & TPF_SYNQE_EXPANDED))
send_reset_synqe(tod, synqe);
INP_WUNLOCK(inp);
+ CURVNET_RESTORE();
release_synqe(synqe); /* extra hold */
return (__LINE__);
}
INP_WUNLOCK(inp);
+ CURVNET_RESTORE();
release_synqe(synqe); /* extra hold */
return (0);
reject:
+ CURVNET_RESTORE();
CTR4(KTR_CXGBE, "%s: stid %u, tid %u, REJECT (%d)", __func__, stid, tid,
reject_reason);
@@ -1484,6 +1501,7 @@ do_pass_establish(struct sge_iq *iq, const struct rss_header *rss,
KASSERT(synqe->flags & TPF_SYNQE,
("%s: tid %u (ctx %p) not a synqe", __func__, tid, synqe));
+ CURVNET_SET(lctx->vnet);
INP_INFO_RLOCK(&V_tcbinfo); /* for syncache_expand */
INP_WLOCK(inp);
@@ -1501,6 +1519,7 @@ do_pass_establish(struct sge_iq *iq, const struct rss_header *rss,
INP_WUNLOCK(inp);
INP_INFO_RUNLOCK(&V_tcbinfo);
+ CURVNET_RESTORE();
return (0);
}
@@ -1526,6 +1545,7 @@ reset:
send_reset_synqe(TOEDEV(ifp), synqe);
INP_WUNLOCK(inp);
INP_INFO_RUNLOCK(&V_tcbinfo);
+ CURVNET_RESTORE();
return (0);
}
toep->tid = tid;
@@ -1562,6 +1582,8 @@ reset:
/* New connection inpcb is already locked by syncache_expand(). */
new_inp = sotoinpcb(so);
INP_WLOCK_ASSERT(new_inp);
+ MPASS(so->so_vnet == lctx->vnet);
+ toep->vnet = lctx->vnet;
/*
* This is for the unlikely case where the syncache entry that we added
@@ -1585,6 +1607,7 @@ reset:
if (inp != NULL)
INP_WUNLOCK(inp);
INP_INFO_RUNLOCK(&V_tcbinfo);
+ CURVNET_RESTORE();
release_synqe(synqe);
return (0);
@@ -1599,4 +1622,14 @@ t4_init_listen_cpl_handlers(void)
t4_register_cpl_handler(CPL_PASS_ACCEPT_REQ, do_pass_accept_req);
t4_register_cpl_handler(CPL_PASS_ESTABLISH, do_pass_establish);
}
+
+void
+t4_uninit_listen_cpl_handlers(void)
+{
+
+ t4_register_cpl_handler(CPL_PASS_OPEN_RPL, NULL);
+ t4_register_cpl_handler(CPL_CLOSE_LISTSRV_RPL, NULL);
+ t4_register_cpl_handler(CPL_PASS_ACCEPT_REQ, NULL);
+ t4_register_cpl_handler(CPL_PASS_ESTABLISH, NULL);
+}
#endif
diff --git a/sys/dev/cxgbe/tom/t4_tom.c b/sys/dev/cxgbe/tom/t4_tom.c
index 3df2313..11558c2 100644
--- a/sys/dev/cxgbe/tom/t4_tom.c
+++ b/sys/dev/cxgbe/tom/t4_tom.c
@@ -273,8 +273,6 @@ undo_offload_socket(struct socket *so)
mtx_lock(&td->toep_list_lock);
TAILQ_REMOVE(&td->toep_list, toep, link);
mtx_unlock(&td->toep_list_lock);
-
- free_toepcb(toep);
}
static void
@@ -307,7 +305,7 @@ release_offload_resources(struct toepcb *toep)
t4_l2t_release(toep->l2te);
if (tid >= 0) {
- remove_tid(sc, tid);
+ remove_tid(sc, tid, toep->ce ? 2 : 1);
release_tid(sc, tid, toep->ctrlq);
}
@@ -420,12 +418,12 @@ final_cpl_received(struct toepcb *toep)
}
void
-insert_tid(struct adapter *sc, int tid, void *ctx)
+insert_tid(struct adapter *sc, int tid, void *ctx, int ntids)
{
struct tid_info *t = &sc->tids;
t->tid_tab[tid] = ctx;
- atomic_add_int(&t->tids_in_use, 1);
+ atomic_add_int(&t->tids_in_use, ntids);
}
void *
@@ -445,12 +443,12 @@ update_tid(struct adapter *sc, int tid, void *ctx)
}
void
-remove_tid(struct adapter *sc, int tid)
+remove_tid(struct adapter *sc, int tid, int ntids)
{
struct tid_info *t = &sc->tids;
t->tid_tab[tid] = NULL;
- atomic_subtract_int(&t->tids_in_use, 1);
+ atomic_subtract_int(&t->tids_in_use, ntids);
}
void
@@ -799,74 +797,96 @@ update_clip_table(struct adapter *sc, struct tom_data *td)
struct in6_addr *lip, tlip;
struct clip_head stale;
struct clip_entry *ce, *ce_temp;
- int rc, gen = atomic_load_acq_int(&in6_ifaddr_gen);
+ struct vi_info *vi;
+ int rc, gen, i, j;
+ uintptr_t last_vnet;
ASSERT_SYNCHRONIZED_OP(sc);
IN6_IFADDR_RLOCK(&in6_ifa_tracker);
mtx_lock(&td->clip_table_lock);
+ gen = atomic_load_acq_int(&in6_ifaddr_gen);
if (gen == td->clip_gen)
goto done;
TAILQ_INIT(&stale);
TAILQ_CONCAT(&stale, &td->clip_table, link);
- TAILQ_FOREACH(ia, &V_in6_ifaddrhead, ia_link) {
- lip = &ia->ia_addr.sin6_addr;
+ /*
+ * last_vnet optimizes the common cases where all if_vnet = NULL (no
+ * VIMAGE) or all if_vnet = vnet0.
+ */
+ last_vnet = (uintptr_t)(-1);
+ for_each_port(sc, i)
+ for_each_vi(sc->port[i], j, vi) {
+ if (last_vnet == (uintptr_t)vi->ifp->if_vnet)
+ continue;
- KASSERT(!IN6_IS_ADDR_MULTICAST(lip),
- ("%s: mcast address in in6_ifaddr list", __func__));
+ /* XXX: races with if_vmove */
+ CURVNET_SET(vi->ifp->if_vnet);
+ TAILQ_FOREACH(ia, &V_in6_ifaddrhead, ia_link) {
+ lip = &ia->ia_addr.sin6_addr;
+
+ KASSERT(!IN6_IS_ADDR_MULTICAST(lip),
+ ("%s: mcast address in in6_ifaddr list", __func__));
+
+ if (IN6_IS_ADDR_LOOPBACK(lip))
+ continue;
+ if (IN6_IS_SCOPE_EMBED(lip)) {
+ /* Remove the embedded scope */
+ tlip = *lip;
+ lip = &tlip;
+ in6_clearscope(lip);
+ }
+ /*
+ * XXX: how to weed out the link local address for the
+ * loopback interface? It's fe80::1 usually (always?).
+ */
+
+ /*
+ * If it's in the main list then we already know it's
+ * not stale.
+ */
+ TAILQ_FOREACH(ce, &td->clip_table, link) {
+ if (IN6_ARE_ADDR_EQUAL(&ce->lip, lip))
+ goto next;
+ }
- if (IN6_IS_ADDR_LOOPBACK(lip))
- continue;
- if (IN6_IS_SCOPE_EMBED(lip)) {
- /* Remove the embedded scope */
- tlip = *lip;
- lip = &tlip;
- in6_clearscope(lip);
- }
- /*
- * XXX: how to weed out the link local address for the loopback
- * interface? It's fe80::1 usually (always?).
- */
-
- /*
- * If it's in the main list then we already know it's not stale.
- */
- TAILQ_FOREACH(ce, &td->clip_table, link) {
- if (IN6_ARE_ADDR_EQUAL(&ce->lip, lip))
- goto next;
- }
+ /*
+ * If it's in the stale list we should move it to the
+ * main list.
+ */
+ TAILQ_FOREACH(ce, &stale, link) {
+ if (IN6_ARE_ADDR_EQUAL(&ce->lip, lip)) {
+ TAILQ_REMOVE(&stale, ce, link);
+ TAILQ_INSERT_TAIL(&td->clip_table, ce,
+ link);
+ goto next;
+ }
+ }
- /*
- * If it's in the stale list we should move it to the main list.
- */
- TAILQ_FOREACH(ce, &stale, link) {
- if (IN6_ARE_ADDR_EQUAL(&ce->lip, lip)) {
- TAILQ_REMOVE(&stale, ce, link);
+ /* A new IP6 address; add it to the CLIP table */
+ ce = malloc(sizeof(*ce), M_CXGBE, M_NOWAIT);
+ memcpy(&ce->lip, lip, sizeof(ce->lip));
+ ce->refcount = 0;
+ rc = add_lip(sc, lip);
+ if (rc == 0)
TAILQ_INSERT_TAIL(&td->clip_table, ce, link);
- goto next;
- }
- }
+ else {
+ char ip[INET6_ADDRSTRLEN];
- /* A new IP6 address; add it to the CLIP table */
- ce = malloc(sizeof(*ce), M_CXGBE, M_NOWAIT);
- memcpy(&ce->lip, lip, sizeof(ce->lip));
- ce->refcount = 0;
- rc = add_lip(sc, lip);
- if (rc == 0)
- TAILQ_INSERT_TAIL(&td->clip_table, ce, link);
- else {
- char ip[INET6_ADDRSTRLEN];
-
- inet_ntop(AF_INET6, &ce->lip, &ip[0], sizeof(ip));
- log(LOG_ERR, "%s: could not add %s (%d)\n",
- __func__, ip, rc);
- free(ce, M_CXGBE);
- }
+ inet_ntop(AF_INET6, &ce->lip, &ip[0],
+ sizeof(ip));
+ log(LOG_ERR, "%s: could not add %s (%d)\n",
+ __func__, ip, rc);
+ free(ce, M_CXGBE);
+ }
next:
- continue;
+ continue;
+ }
+ CURVNET_RESTORE();
+ last_vnet = (uintptr_t)vi->ifp->if_vnet;
}
/*
@@ -1207,6 +1227,10 @@ t4_tom_mod_unload(void)
t4_ddp_mod_unload();
+ t4_uninit_connect_cpl_handlers();
+ t4_uninit_listen_cpl_handlers();
+ t4_uninit_cpl_io_handlers();
+
return (0);
}
#endif /* TCP_OFFLOAD */
diff --git a/sys/dev/cxgbe/tom/t4_tom.h b/sys/dev/cxgbe/tom/t4_tom.h
index 48223b0..18bc1f2 100644
--- a/sys/dev/cxgbe/tom/t4_tom.h
+++ b/sys/dev/cxgbe/tom/t4_tom.h
@@ -141,6 +141,7 @@ struct toepcb {
int refcount;
struct tom_data *td;
struct inpcb *inp; /* backpointer to host stack's PCB */
+ struct vnet *vnet;
struct vi_info *vi; /* virtual interface */
struct sge_wrq *ofld_txq;
struct sge_ofld_rxq *ofld_rxq;
@@ -232,6 +233,7 @@ struct listen_ctx {
struct stid_region stid_region;
int flags;
struct inpcb *inp; /* listening socket's inp */
+ struct vnet *vnet;
struct sge_wrq *ctrlq;
struct sge_ofld_rxq *ofld_rxq;
struct clip_entry *ce;
@@ -306,10 +308,10 @@ void free_toepcb(struct toepcb *);
void offload_socket(struct socket *, struct toepcb *);
void undo_offload_socket(struct socket *);
void final_cpl_received(struct toepcb *);
-void insert_tid(struct adapter *, int, void *);
+void insert_tid(struct adapter *, int, void *, int);
void *lookup_tid(struct adapter *, int);
void update_tid(struct adapter *, int, void *);
-void remove_tid(struct adapter *, int);
+void remove_tid(struct adapter *, int, int);
void release_tid(struct adapter *, int, struct sge_wrq *);
int find_best_mtu_idx(struct adapter *, struct in_conninfo *, int);
u_long select_rcv_wnd(struct socket *);
@@ -324,12 +326,14 @@ void release_lip(struct tom_data *, struct clip_entry *);
/* t4_connect.c */
void t4_init_connect_cpl_handlers(void);
+void t4_uninit_connect_cpl_handlers(void);
int t4_connect(struct toedev *, struct socket *, struct rtentry *,
struct sockaddr *);
void act_open_failure_cleanup(struct adapter *, u_int, u_int);
/* t4_listen.c */
void t4_init_listen_cpl_handlers(void);
+void t4_uninit_listen_cpl_handlers(void);
int t4_listen_start(struct toedev *, struct tcpcb *);
int t4_listen_stop(struct toedev *, struct tcpcb *);
void t4_syncache_added(struct toedev *, void *);
diff --git a/sys/dev/drm2/i915/i915_gem.c b/sys/dev/drm2/i915/i915_gem.c
index 6e9a86c..0fa5249 100644
--- a/sys/dev/drm2/i915/i915_gem.c
+++ b/sys/dev/drm2/i915/i915_gem.c
@@ -1474,8 +1474,8 @@ i915_gem_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
int i915_intr_pf;
static int
-i915_gem_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot,
- vm_page_t *mres)
+i915_gem_pager_populate(vm_object_t vm_obj, vm_pindex_t pidx, int fault_type,
+ vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last)
{
struct drm_gem_object *gem_obj = vm_obj->handle;
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
@@ -1483,31 +1483,9 @@ i915_gem_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot,
drm_i915_private_t *dev_priv = dev->dev_private;
vm_page_t page;
int ret = 0;
-#ifdef FREEBSD_WIP
- bool write = (prot & VM_PROT_WRITE) != 0;
-#else
- bool write = true;
-#endif /* FREEBSD_WIP */
+ bool write = (max_prot & VM_PROT_WRITE) != 0;
bool pinned;
- vm_object_pip_add(vm_obj, 1);
-
- /*
- * Remove the placeholder page inserted by vm_fault() from the
- * object before dropping the object lock. If
- * i915_gem_release_mmap() is active in parallel on this gem
- * object, then it owns the drm device sx and might find the
- * placeholder already. Then, since the page is busy,
- * i915_gem_release_mmap() sleeps waiting for the busy state
- * of the page cleared. We will be unable to acquire drm
- * device lock until i915_gem_release_mmap() is able to make a
- * progress.
- */
- if (*mres != NULL) {
- vm_page_lock(*mres);
- vm_page_remove(*mres);
- vm_page_unlock(*mres);
- }
VM_OBJECT_WUNLOCK(vm_obj);
retry:
ret = 0;
@@ -1527,7 +1505,7 @@ retry:
* mapping for the page. Recheck.
*/
VM_OBJECT_WLOCK(vm_obj);
- page = vm_page_lookup(vm_obj, OFF_TO_IDX(offset));
+ page = vm_page_lookup(vm_obj, pidx);
if (page != NULL) {
if (vm_page_busied(page)) {
DRM_UNLOCK(dev);
@@ -1556,20 +1534,19 @@ retry:
obj->fault_mappable = true;
- VM_OBJECT_WLOCK(vm_obj);
- page = PHYS_TO_VM_PAGE(dev_priv->mm.gtt_base_addr + obj->gtt_offset + offset);
- KASSERT((page->flags & PG_FICTITIOUS) != 0,
- ("physical address %#jx not fictitious",
- (uintmax_t)(dev_priv->mm.gtt_base_addr + obj->gtt_offset + offset)));
+ page = PHYS_TO_VM_PAGE(dev_priv->mm.gtt_base_addr + obj->gtt_offset +
+ IDX_TO_OFF(pidx));
if (page == NULL) {
- VM_OBJECT_WUNLOCK(vm_obj);
ret = -EFAULT;
goto unpin;
}
KASSERT((page->flags & PG_FICTITIOUS) != 0,
- ("not fictitious %p", page));
+ ("physical address %#jx not fictitious, page %p",
+ (uintmax_t)(dev_priv->mm.gtt_base_addr + obj->gtt_offset +
+ IDX_TO_OFF(pidx)), page));
KASSERT(page->wire_count == 1, ("wire_count not 1 %p", page));
+ VM_OBJECT_WLOCK(vm_obj);
if (vm_page_busied(page)) {
i915_gem_object_unpin(obj);
DRM_UNLOCK(dev);
@@ -1578,7 +1555,7 @@ retry:
vm_page_busy_sleep(page, "915pbs", false);
goto retry;
}
- if (vm_page_insert(page, vm_obj, OFF_TO_IDX(offset))) {
+ if (vm_page_insert(page, vm_obj, pidx)) {
i915_gem_object_unpin(obj);
DRM_UNLOCK(dev);
VM_OBJECT_WUNLOCK(vm_obj);
@@ -1589,24 +1566,17 @@ retry:
have_page:
vm_page_xbusy(page);
- CTR4(KTR_DRM, "fault %p %jx %x phys %x", gem_obj, offset, prot,
+ CTR4(KTR_DRM, "fault %p %jx %x phys %x", gem_obj, pidx, fault_type,
page->phys_addr);
if (pinned) {
/*
* We may have not pinned the object if the page was
- * found by the call to vm_page_lookup()
+ * found by the call to vm_page_lookup().
*/
i915_gem_object_unpin(obj);
}
DRM_UNLOCK(dev);
- if (*mres != NULL) {
- KASSERT(*mres != page, ("losing %p %p", *mres, page));
- vm_page_lock(*mres);
- vm_page_free(*mres);
- vm_page_unlock(*mres);
- }
- *mres = page;
- vm_object_pip_wakeup(vm_obj);
+ *first = *last = pidx;
return (VM_PAGER_OK);
unpin:
@@ -1615,7 +1585,7 @@ unlock:
DRM_UNLOCK(dev);
out:
KASSERT(ret != 0, ("i915_gem_pager_fault: wrong return"));
- CTR4(KTR_DRM, "fault_fail %p %jx %x err %d", gem_obj, offset, prot,
+ CTR4(KTR_DRM, "fault_fail %p %jx %x err %d", gem_obj, pidx, fault_type,
-ret);
if (ret == -ERESTARTSYS) {
/*
@@ -1629,7 +1599,6 @@ out:
goto retry;
}
VM_OBJECT_WLOCK(vm_obj);
- vm_object_pip_wakeup(vm_obj);
return (VM_PAGER_ERROR);
}
@@ -1645,9 +1614,9 @@ i915_gem_pager_dtor(void *handle)
}
struct cdev_pager_ops i915_gem_pager_ops = {
- .cdev_pg_fault = i915_gem_pager_fault,
- .cdev_pg_ctor = i915_gem_pager_ctor,
- .cdev_pg_dtor = i915_gem_pager_dtor
+ .cdev_pg_populate = i915_gem_pager_populate,
+ .cdev_pg_ctor = i915_gem_pager_ctor,
+ .cdev_pg_dtor = i915_gem_pager_dtor,
};
/**
diff --git a/sys/dev/etherswitch/etherswitch.c b/sys/dev/etherswitch/etherswitch.c
index 0c35002..ee9b710 100644
--- a/sys/dev/etherswitch/etherswitch.c
+++ b/sys/dev/etherswitch/etherswitch.c
@@ -99,17 +99,24 @@ etherswitch_probe(device_t dev)
static int
etherswitch_attach(device_t dev)
{
- struct etherswitch_softc *sc = (struct etherswitch_softc *)device_get_softc(dev);
+ int err;
+ struct etherswitch_softc *sc;
+ struct make_dev_args devargs;
+ sc = device_get_softc(dev);
sc->sc_dev = dev;
- sc->sc_devnode = make_dev(&etherswitch_cdevsw, device_get_unit(dev),
- UID_ROOT, GID_WHEEL,
- 0600, "etherswitch%d", device_get_unit(dev));
- if (sc->sc_devnode == NULL) {
+ make_dev_args_init(&devargs);
+ devargs.mda_devsw = &etherswitch_cdevsw;
+ devargs.mda_uid = UID_ROOT;
+ devargs.mda_gid = GID_WHEEL;
+ devargs.mda_mode = 0600;
+ devargs.mda_si_drv1 = sc;
+ err = make_dev_s(&devargs, &sc->sc_devnode, "etherswitch%d",
+ device_get_unit(dev));
+ if (err != 0) {
device_printf(dev, "failed to create character device\n");
return (ENXIO);
}
- sc->sc_devnode->si_drv1 = sc;
return (0);
}
diff --git a/sys/dev/gpio/gpioc.c b/sys/dev/gpio/gpioc.c
index 828abc3..a5a9d81 100644
--- a/sys/dev/gpio/gpioc.c
+++ b/sys/dev/gpio/gpioc.c
@@ -78,18 +78,25 @@ gpioc_probe(device_t dev)
static int
gpioc_attach(device_t dev)
{
- struct gpioc_softc *sc = device_get_softc(dev);
+ int err;
+ struct gpioc_softc *sc;
+ struct make_dev_args devargs;
+ sc = device_get_softc(dev);
sc->sc_dev = dev;
sc->sc_pdev = device_get_parent(dev);
sc->sc_unit = device_get_unit(dev);
- sc->sc_ctl_dev = make_dev(&gpioc_cdevsw, sc->sc_unit,
- UID_ROOT, GID_WHEEL, 0600, "gpioc%d", sc->sc_unit);
- if (!sc->sc_ctl_dev) {
+ make_dev_args_init(&devargs);
+ devargs.mda_devsw = &gpioc_cdevsw;
+ devargs.mda_uid = UID_ROOT;
+ devargs.mda_gid = GID_WHEEL;
+ devargs.mda_mode = 0600;
+ devargs.mda_si_drv1 = sc;
+ err = make_dev_s(&devargs, &sc->sc_ctl_dev, "gpioc%d", sc->sc_unit);
+ if (err != 0) {
printf("Failed to create gpioc%d", sc->sc_unit);
return (ENXIO);
}
- sc->sc_ctl_dev->si_drv1 = sc;
return (0);
}
diff --git a/sys/dev/hwpmc/hwpmc_core.c b/sys/dev/hwpmc/hwpmc_core.c
index c734c5a..4986b29 100644
--- a/sys/dev/hwpmc/hwpmc_core.c
+++ b/sys/dev/hwpmc/hwpmc_core.c
@@ -2857,7 +2857,7 @@ pmc_core_initialize(struct pmc_mdep *md, int maxcpu, int version_override)
PMCDBG3(MDP,INI,1,"core-init cputype=%d ncpu=%d ipa-version=%d",
core_cputype, maxcpu, ipa_version);
- if (ipa_version < 1 || ipa_version > 3 ||
+ if (ipa_version < 1 || ipa_version > 4 ||
(core_cputype != PMC_CPU_INTEL_CORE && ipa_version == 1)) {
/* Unknown PMC architecture. */
printf("hwpc_core: unknown PMC architecture: %d\n",
diff --git a/sys/dev/ichiic/ig4_acpi.c b/sys/dev/ichiic/ig4_acpi.c
new file mode 100644
index 0000000..90d6214
--- /dev/null
+++ b/sys/dev/ichiic/ig4_acpi.c
@@ -0,0 +1,166 @@
+/*-
+ * Copyright (c) 2016 Oleksandr Tymoshenko <gonzo@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_acpi.h"
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/proc.h>
+#include <sys/rman.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+#include <contrib/dev/acpica/include/accommon.h>
+
+#include <dev/acpica/acpivar.h>
+#include <dev/iicbus/iiconf.h>
+
+#include <dev/ichiic/ig4_reg.h>
+#include <dev/ichiic/ig4_var.h>
+
+static int ig4iic_acpi_probe(device_t dev);
+static int ig4iic_acpi_attach(device_t dev);
+static int ig4iic_acpi_detach(device_t dev);
+
+static char *ig4iic_ids[] = {
+ "INT33C2",
+ "INT33C3",
+ "INT3432",
+ "INT3433",
+ "80860F41",
+ "808622C1",
+ "AMDI0510",
+ "APMC0D0F",
+ NULL
+};
+
+static int
+ig4iic_acpi_probe(device_t dev)
+{
+
+ if (acpi_disabled("ig4iic") ||
+ ACPI_ID_PROBE(device_get_parent(dev), dev, ig4iic_ids) == NULL)
+ return (ENXIO);
+
+ device_set_desc(dev, "Designware I2C Controller");
+ return (0);
+}
+
+static int
+ig4iic_acpi_attach(device_t dev)
+{
+ ig4iic_softc_t *sc;
+ int error;
+
+ sc = device_get_softc(dev);
+
+ sc->dev = dev;
+ sc->regs_rid = 0;
+ sc->regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
+ &sc->regs_rid, RF_ACTIVE);
+ if (sc->regs_res == NULL) {
+ device_printf(dev, "unable to map registers\n");
+ ig4iic_acpi_detach(dev);
+ return (ENXIO);
+ }
+ sc->intr_rid = 0;
+ sc->intr_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
+ &sc->intr_rid, RF_SHAREABLE | RF_ACTIVE);
+ if (sc->intr_res == NULL) {
+ device_printf(dev, "unable to map interrupt\n");
+ ig4iic_acpi_detach(dev);
+ return (ENXIO);
+ }
+ sc->platform_attached = 1;
+
+ error = ig4iic_attach(sc);
+ if (error)
+ ig4iic_acpi_detach(dev);
+
+ return (error);
+}
+
+static int
+ig4iic_acpi_detach(device_t dev)
+{
+ ig4iic_softc_t *sc = device_get_softc(dev);
+ int error;
+
+ if (sc->platform_attached) {
+ error = ig4iic_detach(sc);
+ if (error)
+ return (error);
+ sc->platform_attached = 0;
+ }
+
+ if (sc->intr_res) {
+ bus_release_resource(dev, SYS_RES_IRQ,
+ sc->intr_rid, sc->intr_res);
+ sc->intr_res = NULL;
+ }
+ if (sc->regs_res) {
+ bus_release_resource(dev, SYS_RES_MEMORY,
+ sc->regs_rid, sc->regs_res);
+ sc->regs_res = NULL;
+ }
+
+ return (0);
+}
+
+static device_method_t ig4iic_acpi_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, ig4iic_acpi_probe),
+ DEVMETHOD(device_attach, ig4iic_acpi_attach),
+ DEVMETHOD(device_detach, ig4iic_acpi_detach),
+
+ /* iicbus interface */
+ DEVMETHOD(iicbus_transfer, ig4iic_transfer),
+ DEVMETHOD(iicbus_reset, ig4iic_reset),
+ DEVMETHOD(iicbus_callback, iicbus_null_callback),
+
+ DEVMETHOD_END
+};
+
+static driver_t ig4iic_acpi_driver = {
+ "ig4iic_acpi",
+ ig4iic_acpi_methods,
+ sizeof(struct ig4iic_softc),
+};
+
+static devclass_t ig4iic_acpi_devclass;
+DRIVER_MODULE(ig4iic_acpi, acpi, ig4iic_acpi_driver, ig4iic_acpi_devclass, 0, 0);
+
+MODULE_DEPEND(ig4iic_acpi, acpi, 1, 1, 1);
+MODULE_DEPEND(ig4iic_acpi, pci, 1, 1, 1);
+MODULE_DEPEND(ig4iic_acpi, iicbus, IICBUS_MINVER, IICBUS_PREFVER, IICBUS_MAXVER);
+MODULE_VERSION(ig4iic_acpi, 1);
diff --git a/sys/dev/ichiic/ig4_iic.c b/sys/dev/ichiic/ig4_iic.c
index 65e93cd..b99ce3a 100644
--- a/sys/dev/ichiic/ig4_iic.c
+++ b/sys/dev/ichiic/ig4_iic.c
@@ -522,6 +522,9 @@ ig4iic_attach(ig4iic_softc_t *sc)
int error;
uint32_t v;
+ mtx_init(&sc->io_lock, "IG4 I/O lock", NULL, MTX_DEF);
+ sx_init(&sc->call_lock, "IG4 call lock");
+
v = reg_read(sc, IG4_REG_COMP_TYPE);
v = reg_read(sc, IG4_REG_COMP_PARAM1);
v = reg_read(sc, IG4_REG_GENERAL);
@@ -664,6 +667,10 @@ ig4iic_detach(ig4iic_softc_t *sc)
mtx_unlock(&sc->io_lock);
sx_xunlock(&sc->call_lock);
+
+ mtx_destroy(&sc->io_lock);
+ sx_destroy(&sc->call_lock);
+
return (0);
}
@@ -731,4 +738,5 @@ ig4iic_dump(ig4iic_softc_t *sc)
}
#undef REGDUMP
-DRIVER_MODULE(iicbus, ig4iic, iicbus_driver, iicbus_devclass, NULL, NULL);
+DRIVER_MODULE(iicbus, ig4iic_acpi, iicbus_driver, iicbus_devclass, NULL, NULL);
+DRIVER_MODULE(iicbus, ig4iic_pci, iicbus_driver, iicbus_devclass, NULL, NULL);
diff --git a/sys/dev/ichiic/ig4_pci.c b/sys/dev/ichiic/ig4_pci.c
index 0dd551f..04e31d6 100644
--- a/sys/dev/ichiic/ig4_pci.c
+++ b/sys/dev/ichiic/ig4_pci.c
@@ -68,6 +68,12 @@ static int ig4iic_pci_detach(device_t dev);
#define PCI_CHIP_LYNXPT_LP_I2C_1 0x9c618086
#define PCI_CHIP_LYNXPT_LP_I2C_2 0x9c628086
+#define PCI_CHIP_BRASWELL_I2C_1 0x22c18086
+#define PCI_CHIP_BRASWELL_I2C_2 0x22c28086
+#define PCI_CHIP_BRASWELL_I2C_3 0x22c38086
+#define PCI_CHIP_BRASWELL_I2C_5 0x22c58086
+#define PCI_CHIP_BRASWELL_I2C_6 0x22c68086
+#define PCI_CHIP_BRASWELL_I2C_7 0x22c78086
static int
ig4iic_pci_probe(device_t dev)
@@ -79,6 +85,24 @@ ig4iic_pci_probe(device_t dev)
case PCI_CHIP_LYNXPT_LP_I2C_2:
device_set_desc(dev, "Intel Lynx Point-LP I2C Controller-2");
break;
+ case PCI_CHIP_BRASWELL_I2C_1:
+ device_set_desc(dev, "Intel Braswell Serial I/O I2C Port 1");
+ break;
+ case PCI_CHIP_BRASWELL_I2C_2:
+ device_set_desc(dev, "Intel Braswell Serial I/O I2C Port 2");
+ break;
+ case PCI_CHIP_BRASWELL_I2C_3:
+ device_set_desc(dev, "Intel Braswell Serial I/O I2C Port 3");
+ break;
+ case PCI_CHIP_BRASWELL_I2C_5:
+ device_set_desc(dev, "Intel Braswell Serial I/O I2C Port 5");
+ break;
+ case PCI_CHIP_BRASWELL_I2C_6:
+ device_set_desc(dev, "Intel Braswell Serial I/O I2C Port 6");
+ break;
+ case PCI_CHIP_BRASWELL_I2C_7:
+ device_set_desc(dev, "Intel Braswell Serial I/O I2C Port 7");
+ break;
default:
return (ENXIO);
}
@@ -91,11 +115,6 @@ ig4iic_pci_attach(device_t dev)
ig4iic_softc_t *sc = device_get_softc(dev);
int error;
- bzero(sc, sizeof(*sc));
-
- mtx_init(&sc->io_lock, "IG4 I/O lock", NULL, MTX_DEF);
- sx_init(&sc->call_lock, "IG4 call lock");
-
sc->dev = dev;
sc->regs_rid = PCIR_BAR(0);
sc->regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
@@ -116,7 +135,7 @@ ig4iic_pci_attach(device_t dev)
ig4iic_pci_detach(dev);
return (ENXIO);
}
- sc->pci_attached = 1;
+ sc->platform_attached = 1;
error = ig4iic_attach(sc);
if (error)
@@ -131,11 +150,11 @@ ig4iic_pci_detach(device_t dev)
ig4iic_softc_t *sc = device_get_softc(dev);
int error;
- if (sc->pci_attached) {
+ if (sc->platform_attached) {
error = ig4iic_detach(sc);
if (error)
return (error);
- sc->pci_attached = 0;
+ sc->platform_attached = 0;
}
if (sc->intr_res) {
@@ -150,10 +169,6 @@ ig4iic_pci_detach(device_t dev)
sc->regs_rid, sc->regs_res);
sc->regs_res = NULL;
}
- if (mtx_initialized(&sc->io_lock)) {
- mtx_destroy(&sc->io_lock);
- sx_destroy(&sc->call_lock);
- }
return (0);
}
@@ -172,15 +187,15 @@ static device_method_t ig4iic_pci_methods[] = {
};
static driver_t ig4iic_pci_driver = {
- "ig4iic",
+ "ig4iic_pci",
ig4iic_pci_methods,
sizeof(struct ig4iic_softc)
};
static devclass_t ig4iic_pci_devclass;
-DRIVER_MODULE_ORDERED(ig4iic, pci, ig4iic_pci_driver, ig4iic_pci_devclass, 0, 0,
+DRIVER_MODULE_ORDERED(ig4iic_pci, pci, ig4iic_pci_driver, ig4iic_pci_devclass, 0, 0,
SI_ORDER_ANY);
-MODULE_DEPEND(ig4iic, pci, 1, 1, 1);
-MODULE_DEPEND(ig4iic, iicbus, IICBUS_MINVER, IICBUS_PREFVER, IICBUS_MAXVER);
-MODULE_VERSION(ig4iic, 1);
+MODULE_DEPEND(ig4iic_pci, pci, 1, 1, 1);
+MODULE_DEPEND(ig4iic_pci, iicbus, IICBUS_MINVER, IICBUS_PREFVER, IICBUS_MAXVER);
+MODULE_VERSION(ig4iic_pci, 1);
diff --git a/sys/dev/ichiic/ig4_var.h b/sys/dev/ichiic/ig4_var.h
index dcfdc04..efa2482 100644
--- a/sys/dev/ichiic/ig4_var.h
+++ b/sys/dev/ichiic/ig4_var.h
@@ -65,7 +65,7 @@ struct ig4iic_softc {
char rbuf[IG4_RBUFSIZE];
int error;
uint8_t last_slave;
- int pci_attached : 1;
+ int platform_attached : 1;
int use_10bit : 1;
int slave_valid : 1;
int read_started : 1;
diff --git a/sys/dev/isci/isci_task_request.c b/sys/dev/isci/isci_task_request.c
index 6c8be45..2ed0afe 100644
--- a/sys/dev/isci/isci_task_request.c
+++ b/sys/dev/isci/isci_task_request.c
@@ -210,8 +210,9 @@ isci_task_request_complete(SCI_CONTROLLER_HANDLE_T scif_controller,
retry_task = FALSE;
isci_log_message(0, "ISCI",
"task timeout - not retrying\n");
- scif_cb_domain_device_removed(isci_controller,
- isci_remote_device->domain, isci_remote_device);
+ scif_cb_domain_device_removed(scif_controller,
+ isci_remote_device->domain->sci_object,
+ remote_device);
} else {
retry_task = TRUE;
isci_log_message(0, "ISCI",
diff --git a/sys/dev/ixgbe/if_ix.c b/sys/dev/ixgbe/if_ix.c
index cf2231d..f54dbf7 100644
--- a/sys/dev/ixgbe/if_ix.c
+++ b/sys/dev/ixgbe/if_ix.c
@@ -3878,6 +3878,7 @@ ixgbe_handle_msf(void *context, int pending)
/* Adjust media types shown in ifconfig */
ifmedia_removeall(&adapter->media);
ixgbe_add_media_types(adapter);
+ ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
IXGBE_CORE_UNLOCK(adapter);
return;
}
diff --git a/sys/dev/kbd/kbd.c b/sys/dev/kbd/kbd.c
index 74c5a56..24dc9bb 100644
--- a/sys/dev/kbd/kbd.c
+++ b/sys/dev/kbd/kbd.c
@@ -884,7 +884,7 @@ genkbd_commonioctl(keyboard_t *kbd, u_long cmd, caddr_t arg)
omapp->key[i].spcl = mapp->key[i].spcl;
omapp->key[i].flgs = mapp->key[i].flgs;
}
- return (0);
+ break;
case PIO_KEYMAP: /* set keyboard translation table */
case OPIO_KEYMAP: /* set keyboard translation table (compat) */
#ifndef KBD_DISABLE_KEYMAP_LOAD
diff --git a/sys/dev/mlx5/mlx5_en/mlx5_en_main.c b/sys/dev/mlx5/mlx5_en/mlx5_en_main.c
index 23f664d..d61ada0 100644
--- a/sys/dev/mlx5/mlx5_en/mlx5_en_main.c
+++ b/sys/dev/mlx5/mlx5_en/mlx5_en_main.c
@@ -306,6 +306,12 @@ mlx5e_update_carrier_work(struct work_struct *work)
PRIV_UNLOCK(priv);
}
+/*
+ * This function reads the physical port counters from the firmware
+ * using a pre-defined layout defined by various MLX5E_PPORT_XXX()
+ * macros. The output is converted from big-endian 64-bit values into
+ * host endian ones and stored in the "priv->stats.pport" structure.
+ */
static void
mlx5e_update_pport_counters(struct mlx5e_priv *priv)
{
@@ -314,25 +320,32 @@ mlx5e_update_pport_counters(struct mlx5e_priv *priv)
struct mlx5e_port_stats_debug *s_debug = &priv->stats.port_stats_debug;
u32 *in;
u32 *out;
- u64 *ptr;
+ const u64 *ptr;
unsigned sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
unsigned x;
unsigned y;
+ /* allocate firmware request structures */
in = mlx5_vzalloc(sz);
out = mlx5_vzalloc(sz);
if (in == NULL || out == NULL)
goto free_out;
- ptr = (uint64_t *)MLX5_ADDR_OF(ppcnt_reg, out, counter_set);
+ /*
+ * Get pointer to the 64-bit counter set which is located at a
+ * fixed offset in the output firmware request structure:
+ */
+ ptr = (const uint64_t *)MLX5_ADDR_OF(ppcnt_reg, out, counter_set);
MLX5_SET(ppcnt_reg, in, local_port, 1);
+ /* read IEEE802_3 counter group using predefined counter layout */
MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
for (x = y = 0; x != MLX5E_PPORT_IEEE802_3_STATS_NUM; x++, y++)
s->arg[y] = be64toh(ptr[x]);
+ /* read RFC2819 counter group using predefined counter layout */
MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
for (x = 0; x != MLX5E_PPORT_RFC2819_STATS_NUM; x++, y++)
@@ -341,20 +354,29 @@ mlx5e_update_pport_counters(struct mlx5e_priv *priv)
MLX5E_PPORT_RFC2819_STATS_DEBUG_NUM; x++, y++)
s_debug->arg[y] = be64toh(ptr[x]);
+ /* read RFC2863 counter group using predefined counter layout */
MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
for (x = 0; x != MLX5E_PPORT_RFC2863_STATS_DEBUG_NUM; x++, y++)
s_debug->arg[y] = be64toh(ptr[x]);
+ /* read physical layer stats counter group using predefined counter layout */
MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
for (x = 0; x != MLX5E_PPORT_PHYSICAL_LAYER_STATS_DEBUG_NUM; x++, y++)
s_debug->arg[y] = be64toh(ptr[x]);
free_out:
+ /* free firmware request structures */
kvfree(in);
kvfree(out);
}
+/*
+ * This function is called regularly to collect all statistics
+ * counters from the firmware. The values can be viewed through the
+ * sysctl interface. Execution is serialized using the priv's global
+ * configuration lock.
+ */
static void
mlx5e_update_stats_work(struct work_struct *work)
{
diff --git a/sys/dev/mmc/mmc.c b/sys/dev/mmc/mmc.c
index 9f88079..d5e5256 100644
--- a/sys/dev/mmc/mmc.c
+++ b/sys/dev/mmc/mmc.c
@@ -401,7 +401,7 @@ mmc_wait_for_req(struct mmc_softc *sc, struct mmc_request *req)
msleep(req, &sc->sc_mtx, 0, "mmcreq", 0);
MMC_UNLOCK(sc);
if (mmc_debug > 2 || (mmc_debug > 0 && req->cmd->error != MMC_ERR_NONE))
- device_printf(sc->dev, "CMD%d RESULT: %d\n",
+ device_printf(sc->dev, "CMD%d RESULT: %d\n",
req->cmd->opcode, req->cmd->error);
return (0);
}
@@ -511,7 +511,7 @@ mmc_idle_cards(struct mmc_softc *sc)
{
device_t dev;
struct mmc_command cmd;
-
+
dev = sc->dev;
mmcbr_set_chip_select(dev, cs_high);
mmcbr_update_ios(dev);
@@ -795,7 +795,7 @@ mmc_test_bus_width(struct mmc_softc *sc)
data.len = 8;
data.flags = MMC_DATA_WRITE;
mmc_wait_for_cmd(sc, &cmd, 0);
-
+
memset(&cmd, 0, sizeof(cmd));
memset(&data, 0, sizeof(data));
cmd.opcode = MMC_BUSTEST_R;
@@ -808,7 +808,7 @@ mmc_test_bus_width(struct mmc_softc *sc)
data.flags = MMC_DATA_READ;
err = mmc_wait_for_cmd(sc, &cmd, 0);
sc->squelched--;
-
+
mmcbr_set_bus_width(sc->dev, bus_width_1);
mmcbr_update_ios(sc->dev);
@@ -832,7 +832,7 @@ mmc_test_bus_width(struct mmc_softc *sc)
data.len = 4;
data.flags = MMC_DATA_WRITE;
mmc_wait_for_cmd(sc, &cmd, 0);
-
+
memset(&cmd, 0, sizeof(cmd));
memset(&data, 0, sizeof(data));
cmd.opcode = MMC_BUSTEST_R;
@@ -1017,7 +1017,7 @@ mmc_decode_csd_sd(uint32_t *raw_csd, struct mmc_csd *csd)
csd->r2w_factor = 1 << mmc_get_bits(raw_csd, 128, 26, 3);
csd->write_bl_len = 1 << mmc_get_bits(raw_csd, 128, 22, 4);
csd->write_bl_partial = mmc_get_bits(raw_csd, 128, 21, 1);
- } else
+ } else
panic("unknown SD CSD version");
}
@@ -1349,9 +1349,9 @@ mmc_discover_cards(struct mmc_softc *sc)
if (ivar->csd.csd_structure > 0)
ivar->high_cap = 1;
ivar->tran_speed = ivar->csd.tran_speed;
- ivar->erase_sector = ivar->csd.erase_sector *
+ ivar->erase_sector = ivar->csd.erase_sector *
ivar->csd.write_bl_len / MMC_SECTOR_SIZE;
-
+
err = mmc_send_status(sc, ivar->rca, &status);
if (err != MMC_ERR_NONE) {
device_printf(sc->dev,
@@ -1446,7 +1446,7 @@ mmc_discover_cards(struct mmc_softc *sc)
mmc_decode_csd_mmc(ivar->raw_csd, &ivar->csd);
ivar->sec_count = ivar->csd.capacity / MMC_SECTOR_SIZE;
ivar->tran_speed = ivar->csd.tran_speed;
- ivar->erase_sector = ivar->csd.erase_sector *
+ ivar->erase_sector = ivar->csd.erase_sector *
ivar->csd.write_bl_len / MMC_SECTOR_SIZE;
err = mmc_send_status(sc, ivar->rca, &status);
@@ -1655,7 +1655,7 @@ mmc_calculate_clock(struct mmc_softc *sc)
int nkid, i, f_max;
device_t *kids;
struct mmc_ivars *ivar;
-
+
f_max = mmcbr_get_f_max(sc->dev);
max_dtr = max_hs_dtr = f_max;
if ((mmcbr_get_caps(sc->dev) & MMC_CAP_HSPEED))
@@ -1770,7 +1770,7 @@ static void
mmc_delayed_attach(void *xsc)
{
struct mmc_softc *sc = xsc;
-
+
mmc_scan(sc);
config_intrhook_disestablish(&sc->config_intrhook);
}
diff --git a/sys/dev/mmc/mmcreg.h b/sys/dev/mmc/mmcreg.h
index f25c0f63..ba4ca93 100644
--- a/sys/dev/mmc/mmcreg.h
+++ b/sys/dev/mmc/mmcreg.h
@@ -355,8 +355,8 @@ struct mmc_request {
*/
#define MMC_OCR_VOLTAGE 0x3fffffffU /* Vdd Voltage mask */
#define MMC_OCR_LOW_VOLTAGE (1u << 7) /* Low Voltage Range -- tbd */
+#define MMC_OCR_MIN_VOLTAGE_SHIFT 7
#define MMC_OCR_200_210 (1U << 8) /* Vdd voltage 2.00 ~ 2.10 */
-#define MMC_OCR_MIN_VOLTAGE_SHIFT 8
#define MMC_OCR_210_220 (1U << 9) /* Vdd voltage 2.10 ~ 2.20 */
#define MMC_OCR_220_230 (1U << 10) /* Vdd voltage 2.20 ~ 2.30 */
#define MMC_OCR_230_240 (1U << 11) /* Vdd voltage 2.30 ~ 2.40 */
diff --git a/sys/dev/mmc/mmcsd.c b/sys/dev/mmc/mmcsd.c
index 7749123..ac697a3 100644
--- a/sys/dev/mmc/mmcsd.c
+++ b/sys/dev/mmc/mmcsd.c
@@ -170,6 +170,7 @@ mmcsd_attach(device_t dev)
d->d_delmaxsize = mmc_get_erase_sector(dev) * d->d_sectorsize;
strlcpy(d->d_ident, mmc_get_card_sn_string(dev), sizeof(d->d_ident));
strlcpy(d->d_descr, mmc_get_card_id_string(dev), sizeof(d->d_descr));
+ d->d_rotation_rate = DISK_RR_NON_ROTATING;
/*
* Display in most natural units. There's no cards < 1MB. The SD
@@ -545,6 +546,8 @@ mmcsd_task(void *arg)
bp->bio_error = EIO;
bp->bio_resid = (end - block) * sz;
bp->bio_flags |= BIO_ERROR;
+ } else {
+ bp->bio_resid = 0;
}
biodone(bp);
}
diff --git a/sys/dev/mpr/mpr_sas.c b/sys/dev/mpr/mpr_sas.c
index d44e502..c9d83d8 100644
--- a/sys/dev/mpr/mpr_sas.c
+++ b/sys/dev/mpr/mpr_sas.c
@@ -1846,8 +1846,12 @@ mprsas_action_scsiio(struct mprsas_softc *sassc, union ccb *ccb)
if (csio->ccb_h.flags & CAM_CDB_POINTER)
bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
- else
+ else {
+ KASSERT(csio->cdb_len <= IOCDBLEN,
+ ("cdb_len %d is greater than IOCDBLEN but CAM_CDB_POINTER is not set",
+ csio->cdb_len));
bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
+ }
req->IoFlags = htole16(csio->cdb_len);
/*
@@ -2429,6 +2433,7 @@ mprsas_scsiio_complete(struct mpr_softc *sc, struct mpr_command *cm)
* driver is being shutdown.
*/
if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
+ (csio->data_ptr != NULL) &&
((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
(sc->mapping_table[target_id].device_info &
MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
diff --git a/sys/dev/mpr/mpr_sas_lsi.c b/sys/dev/mpr/mpr_sas_lsi.c
index 640338a..aeb9864 100644
--- a/sys/dev/mpr/mpr_sas_lsi.c
+++ b/sys/dev/mpr/mpr_sas_lsi.c
@@ -1056,6 +1056,7 @@ out:
mpr_free_command(sc, cm);
else if (error == 0)
error = EWOULDBLOCK;
+ cm->cm_data = NULL;
free(buffer, M_MPR);
return (error);
}
@@ -1196,18 +1197,18 @@ mprsas_SSU_to_SATA_devices(struct mpr_softc *sc)
continue;
}
- ccb = xpt_alloc_ccb_nowait();
- if (ccb == NULL) {
- mpr_dprint(sc, MPR_FAULT, "Unable to alloc CCB to stop "
- "unit.\n");
- return;
- }
-
/*
* The stop_at_shutdown flag will be set if this device is
* a SATA direct-access end device.
*/
if (target->stop_at_shutdown) {
+ ccb = xpt_alloc_ccb_nowait();
+ if (ccb == NULL) {
+ mpr_dprint(sc, MPR_FAULT, "Unable to alloc CCB to stop "
+ "unit.\n");
+ return;
+ }
+
if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
pathid, targetid, CAM_LUN_WILDCARD) !=
CAM_REQ_CMP) {
diff --git a/sys/dev/nand/nand_geom.c b/sys/dev/nand/nand_geom.c
index 593b5f5..cd3f521 100644
--- a/sys/dev/nand/nand_geom.c
+++ b/sys/dev/nand/nand_geom.c
@@ -394,6 +394,7 @@ create_geom_disk(struct nand_chip *chip)
snprintf(ndisk->d_ident, sizeof(ndisk->d_ident),
"nand: Man:0x%02x Dev:0x%02x", chip->id.man_id, chip->id.dev_id);
+ ndisk->d_rotation_rate = DISK_RR_NON_ROTATING;
disk_create(ndisk, DISK_VERSION);
@@ -415,6 +416,7 @@ create_geom_disk(struct nand_chip *chip)
snprintf(rdisk->d_ident, sizeof(rdisk->d_ident),
"nand_raw: Man:0x%02x Dev:0x%02x", chip->id.man_id,
chip->id.dev_id);
+ rdisk->d_rotation_rate = DISK_RR_NON_ROTATING;
disk_create(rdisk, DISK_VERSION);
diff --git a/sys/dev/netmap/netmap_freebsd.c b/sys/dev/netmap/netmap_freebsd.c
index 8490ae8..d9b1a46 100644
--- a/sys/dev/netmap/netmap_freebsd.c
+++ b/sys/dev/netmap/netmap_freebsd.c
@@ -218,30 +218,16 @@ generic_xmit_frame(struct ifnet *ifp, struct mbuf *m,
{
int ret;
- /*
- * The mbuf should be a cluster from our special pool,
- * so we do not need to do an m_copyback but just copy
- * (and eventually, just reference the netmap buffer)
- */
+ /* Link the external storage to the netmap buffer, so that
+ * no copy is necessary. */
+ m->m_ext.ext_buf = m->m_data = addr;
+ m->m_ext.ext_size = len;
- if (GET_MBUF_REFCNT(m) != 1) {
- D("invalid refcnt %d for %p",
- GET_MBUF_REFCNT(m), m);
- panic("in generic_xmit_frame");
- }
- // XXX the ext_size check is unnecessary if we link the netmap buf
- if (m->m_ext.ext_size < len) {
- RD(5, "size %d < len %d", m->m_ext.ext_size, len);
- len = m->m_ext.ext_size;
- }
- if (0) { /* XXX seems to have negligible benefits */
- m->m_ext.ext_buf = m->m_data = addr;
- } else {
- bcopy(addr, m->m_data, len);
- }
m->m_len = m->m_pkthdr.len = len;
- // inc refcount. All ours, we could skip the atomic
- atomic_fetchadd_int(PNT_MBUF_REFCNT(m), 1);
+
+ /* mbuf refcnt is not contended, no need to use atomic
+ * (a memory barrier is enough). */
+ SET_MBUF_REFCNT(m, 2);
M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
m->m_pkthdr.flowid = ring_nr;
m->m_pkthdr.rcvif = ifp; /* used for tx notification */
diff --git a/sys/dev/netmap/netmap_generic.c b/sys/dev/netmap/netmap_generic.c
index 85a6a9f..c5060f1 100644
--- a/sys/dev/netmap/netmap_generic.c
+++ b/sys/dev/netmap/netmap_generic.c
@@ -90,53 +90,40 @@ __FBSDID("$FreeBSD$");
/*
* FreeBSD mbuf allocator/deallocator in emulation mode:
*
- * We allocate EXT_PACKET mbuf+clusters, but need to set M_NOFREE
- * so that the destructor, if invoked, will not free the packet.
- * In principle we should set the destructor only on demand,
- * but since there might be a race we better do it on allocation.
- * As a consequence, we also need to set the destructor or we
- * would leak buffers.
- */
-
-/*
- * mbuf wrappers
+ * We allocate mbufs with m_gethdr(), since the mbuf header is needed
+ * by the driver. We also attach a customly-provided external storage,
+ * which in this case is a netmap buffer. When calling m_extadd(), however
+ * we pass a NULL address, since the real address (and length) will be
+ * filled in by nm_os_generic_xmit_frame() right before calling
+ * if_transmit().
+ *
+ * The dtor function does nothing, however we need it since mb_free_ext()
+ * has a KASSERT(), checking that the mbuf dtor function is not NULL.
*/
-/* mbuf destructor, also need to change the type to EXT_EXTREF,
- * add an M_NOFREE flag, and then clear the flag and
- * chain into uma_zfree(zone_pack, mf)
- * (or reinstall the buffer ?)
- */
-#define SET_MBUF_DESTRUCTOR(m, fn) do { \
- (m)->m_ext.ext_free = (void *)fn; \
- (m)->m_ext.ext_type = EXT_EXTREF; \
-} while (0)
+static void void_mbuf_dtor(struct mbuf *m, void *arg1, void *arg2) { }
-static void
-netmap_default_mbuf_destructor(struct mbuf *m)
+static inline void
+SET_MBUF_DESTRUCTOR(struct mbuf *m, void *fn)
{
- /* restore original mbuf */
- m->m_ext.ext_buf = m->m_data = m->m_ext.ext_arg1;
- m->m_ext.ext_arg1 = NULL;
- m->m_ext.ext_type = EXT_PACKET;
- m->m_ext.ext_free = NULL;
- if (GET_MBUF_REFCNT(m) == 0)
- SET_MBUF_REFCNT(m, 1);
- uma_zfree(zone_pack, m);
+ m->m_ext.ext_free = fn ? fn : (void *)void_mbuf_dtor;
}
static inline struct mbuf *
netmap_get_mbuf(int len)
{
struct mbuf *m;
- m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
- if (m) {
- m->m_flags |= M_NOFREE; /* XXXNP: Almost certainly incorrect. */
- m->m_ext.ext_arg1 = m->m_ext.ext_buf; // XXX save
- m->m_ext.ext_free = (void *)netmap_default_mbuf_destructor;
- m->m_ext.ext_type = EXT_EXTREF;
- ND(5, "create m %p refcnt %d", m, GET_MBUF_REFCNT(m));
+
+ (void)len;
+
+ m = m_gethdr(M_NOWAIT, MT_DATA);
+ if (m == NULL) {
+ return m;
}
+
+ m_extadd(m, NULL /* buf */, 0 /* size */, void_mbuf_dtor,
+ NULL, NULL, 0, EXT_NET_DRV);
+
return m;
}
@@ -412,11 +399,6 @@ static void
generic_mbuf_destructor(struct mbuf *m)
{
netmap_generic_irq(MBUF_IFP(m), MBUF_TXQ(m), NULL);
-#ifdef __FreeBSD__
- if (netmap_verbose)
- RD(5, "Tx irq (%p) queue %d index %d" , m, MBUF_TXQ(m), (int)(uintptr_t)m->m_ext.ext_arg1);
- netmap_default_mbuf_destructor(m);
-#endif /* __FreeBSD__ */
IFRATE(rate_ctx.new.txirq++);
}
@@ -447,7 +429,7 @@ generic_netmap_tx_clean(struct netmap_kring *kring)
// XXX how do we proceed ? break ?
return -ENOMEM;
}
- } else if (GET_MBUF_REFCNT(m) != 1) {
+ } else if (MBUF_REFCNT(m) != 1) {
break; /* This mbuf is still busy: its refcnt is 2. */
}
n++;
@@ -476,62 +458,39 @@ generic_netmap_tx_clean(struct netmap_kring *kring)
return n;
}
-
-/*
- * We have pending packets in the driver between nr_hwtail +1 and hwcur.
- * Compute a position in the middle, to be used to generate
- * a notification.
- */
-static inline u_int
-generic_tx_event_middle(struct netmap_kring *kring, u_int hwcur)
-{
- u_int n = kring->nkr_num_slots;
- u_int ntc = nm_next(kring->nr_hwtail, n-1);
- u_int e;
-
- if (hwcur >= ntc) {
- e = (hwcur + ntc) / 2;
- } else { /* wrap around */
- e = (hwcur + n + ntc) / 2;
- if (e >= n) {
- e -= n;
- }
- }
-
- if (unlikely(e >= n)) {
- D("This cannot happen");
- e = 0;
- }
-
- return e;
-}
-
-/*
- * We have pending packets in the driver between nr_hwtail+1 and hwcur.
- * Schedule a notification approximately in the middle of the two.
- * There is a race but this is only called within txsync which does
- * a double check.
- */
static void
generic_set_tx_event(struct netmap_kring *kring, u_int hwcur)
{
+ u_int lim = kring->nkr_num_slots - 1;
struct mbuf *m;
u_int e;
+ u_int ntc = nm_next(kring->nr_hwtail, lim); /* next to clean */
- if (nm_next(kring->nr_hwtail, kring->nkr_num_slots -1) == hwcur) {
+ if (ntc == hwcur) {
return; /* all buffers are free */
}
- e = generic_tx_event_middle(kring, hwcur);
+
+ /*
+ * We have pending packets in the driver between hwtail+1
+ * and hwcur, and we have to choose one of these slots to
+ * generate a notification.
+ * There is a race but this is only called within txsync which
+ * does a double check.
+ */
+
+ /* Choose the first pending slot, to be safe against driver
+ * reordering mbuf transmissions. */
+ e = ntc;
m = kring->tx_pool[e];
- ND(5, "Request Event at %d mbuf %p refcnt %d", e, m, m ? GET_MBUF_REFCNT(m) : -2 );
+ ND(5, "Request Event at %d mbuf %p refcnt %d", e, m, m ? MBUF_REFCNT(m) : -2 );
if (m == NULL) {
/* This can happen if there is already an event on the netmap
slot 'e': There is nothing to do. */
return;
}
kring->tx_pool[e] = NULL;
- SET_MBUF_DESTRUCTOR(m, generic_mbuf_destructor);
+ SET_MBUF_DESTRUCTOR(m, (void *)generic_mbuf_destructor);
// XXX wmb() ?
/* Decrement the refcount an free it if we have the last one. */
diff --git a/sys/dev/netmap/netmap_kern.h b/sys/dev/netmap/netmap_kern.h
index d38b3b2..f2e6998 100644
--- a/sys/dev/netmap/netmap_kern.h
+++ b/sys/dev/netmap/netmap_kern.h
@@ -97,13 +97,11 @@ struct netmap_adapter *netmap_getna(if_t ifp);
#endif
#if __FreeBSD_version >= 1100027
-#define GET_MBUF_REFCNT(m) ((m)->m_ext.ext_cnt ? *((m)->m_ext.ext_cnt) : -1)
-#define SET_MBUF_REFCNT(m, x) *((m)->m_ext.ext_cnt) = x
-#define PNT_MBUF_REFCNT(m) ((m)->m_ext.ext_cnt)
+#define MBUF_REFCNT(m) ((m)->m_ext.ext_count)
+#define SET_MBUF_REFCNT(m, x) (m)->m_ext.ext_count = x
#else
-#define GET_MBUF_REFCNT(m) ((m)->m_ext.ref_cnt ? *((m)->m_ext.ref_cnt) : -1)
+#define MBUF_REFCNT(m) ((m)->m_ext.ref_cnt ? *((m)->m_ext.ref_cnt) : -1)
#define SET_MBUF_REFCNT(m, x) *((m)->m_ext.ref_cnt) = x
-#define PNT_MBUF_REFCNT(m) ((m)->m_ext.ref_cnt)
#endif
MALLOC_DECLARE(M_NETMAP);
diff --git a/sys/dev/ntb/if_ntb/if_ntb.c b/sys/dev/ntb/if_ntb/if_ntb.c
index c67ae0d..ace4861 100644
--- a/sys/dev/ntb/if_ntb/if_ntb.c
+++ b/sys/dev/ntb/if_ntb/if_ntb.c
@@ -237,6 +237,11 @@ ntb_ioctl(if_t ifp, u_long command, caddr_t data)
int error = 0;
switch (command) {
+ case SIOCSIFFLAGS:
+ case SIOCADDMULTI:
+ case SIOCDELMULTI:
+ break;
+
case SIOCSIFMTU:
{
if (ifr->ifr_mtu > sc->mtu - ETHER_HDR_LEN) {
diff --git a/sys/dev/nvd/nvd.c b/sys/dev/nvd/nvd.c
index 989ed92..11e4f58 100644
--- a/sys/dev/nvd/nvd.c
+++ b/sys/dev/nvd/nvd.c
@@ -338,13 +338,11 @@ nvd_new_disk(struct nvme_namespace *ns, void *ctrlr_arg)
*/
nvme_strvis(disk->d_ident, nvme_ns_get_serial_number(ns),
sizeof(disk->d_ident), NVME_SERIAL_NUMBER_LENGTH);
-
nvme_strvis(descr, nvme_ns_get_model_number(ns), sizeof(descr),
NVME_MODEL_NUMBER_LENGTH);
-
-#if __FreeBSD_version >= 900034
strlcpy(disk->d_descr, descr, sizeof(descr));
-#endif
+
+ disk->d_rotation_rate = DISK_RR_NON_ROTATING;
ndisk->ns = ns;
ndisk->disk = disk;
diff --git a/sys/dev/pci/pci.c b/sys/dev/pci/pci.c
index 42d7d7c..b35c057 100644
--- a/sys/dev/pci/pci.c
+++ b/sys/dev/pci/pci.c
@@ -281,12 +281,13 @@ static const struct pci_quirk pci_quirks[] = {
{ 0x43851002, PCI_QUIRK_UNMAP_REG, 0x14, 0 },
/*
- * Atheros AR8161/AR8162/E2200 Ethernet controllers have a bug that
- * MSI interrupt does not assert if PCIM_CMD_INTxDIS bit of the
- * command register is set.
+ * Atheros AR8161/AR8162/E2200/E2400 Ethernet controllers have a
+ * bug that MSI interrupt does not assert if PCIM_CMD_INTxDIS bit
+ * of the command register is set.
*/
{ 0x10911969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 },
{ 0xE0911969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 },
+ { 0xE0A11969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 },
{ 0x10901969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 },
/*
diff --git a/sys/dev/pci/pci_pci.c b/sys/dev/pci/pci_pci.c
index dedc55a..553b9cb 100644
--- a/sys/dev/pci/pci_pci.c
+++ b/sys/dev/pci/pci_pci.c
@@ -918,6 +918,7 @@ static void
pcib_probe_hotplug(struct pcib_softc *sc)
{
device_t dev;
+ uint32_t link_cap;
uint16_t link_sta, slot_sta;
if (!pci_enable_pcie_hp)
@@ -930,11 +931,13 @@ pcib_probe_hotplug(struct pcib_softc *sc)
if (!(pcie_read_config(dev, PCIER_FLAGS, 2) & PCIEM_FLAGS_SLOT))
return;
- sc->pcie_link_cap = pcie_read_config(dev, PCIER_LINK_CAP, 4);
sc->pcie_slot_cap = pcie_read_config(dev, PCIER_SLOT_CAP, 4);
if ((sc->pcie_slot_cap & PCIEM_SLOT_CAP_HPC) == 0)
return;
+ link_cap = pcie_read_config(dev, PCIER_LINK_CAP, 4);
+ if ((link_cap & PCIEM_LINK_CAP_DL_ACTIVE) == 0)
+ return;
/*
* Some devices report that they have an MRL when they actually
@@ -945,8 +948,7 @@ pcib_probe_hotplug(struct pcib_softc *sc)
* If there is an open MRL but the Data Link Layer is active,
* the MRL is not real.
*/
- if ((sc->pcie_slot_cap & PCIEM_SLOT_CAP_MRLSP) != 0 &&
- (sc->pcie_link_cap & PCIEM_LINK_CAP_DL_ACTIVE) != 0) {
+ if ((sc->pcie_slot_cap & PCIEM_SLOT_CAP_MRLSP) != 0) {
link_sta = pcie_read_config(dev, PCIER_LINK_STA, 2);
slot_sta = pcie_read_config(dev, PCIER_SLOT_STA, 2);
if ((slot_sta & PCIEM_SLOT_STA_MRLSS) != 0 &&
@@ -1059,10 +1061,8 @@ pcib_hotplug_present(struct pcib_softc *sc)
return (0);
/* Require the Data Link Layer to be active. */
- if (sc->pcie_link_cap & PCIEM_LINK_CAP_DL_ACTIVE) {
- if (!(sc->pcie_link_sta & PCIEM_LINK_STA_DL_ACTIVE))
- return (0);
- }
+ if (!(sc->pcie_link_sta & PCIEM_LINK_STA_DL_ACTIVE))
+ return (0);
return (-1);
}
@@ -1119,20 +1119,18 @@ pcib_pcie_hotplug_update(struct pcib_softc *sc, uint16_t val, uint16_t mask,
* changed on this interrupt. Stop any scheduled timer if
* the Data Link Layer is active.
*/
- if (sc->pcie_link_cap & PCIEM_LINK_CAP_DL_ACTIVE) {
- if (card_inserted &&
- !(sc->pcie_link_sta & PCIEM_LINK_STA_DL_ACTIVE) &&
- sc->pcie_slot_sta &
- (PCIEM_SLOT_STA_MRLSC | PCIEM_SLOT_STA_PDC)) {
- if (cold)
- device_printf(sc->dev,
- "Data Link Layer inactive\n");
- else
- callout_reset(&sc->pcie_dll_timer, hz,
- pcib_pcie_dll_timeout, sc);
- } else if (sc->pcie_link_sta & PCIEM_LINK_STA_DL_ACTIVE)
- callout_stop(&sc->pcie_dll_timer);
- }
+ if (card_inserted &&
+ !(sc->pcie_link_sta & PCIEM_LINK_STA_DL_ACTIVE) &&
+ sc->pcie_slot_sta &
+ (PCIEM_SLOT_STA_MRLSC | PCIEM_SLOT_STA_PDC)) {
+ if (cold)
+ device_printf(sc->dev,
+ "Data Link Layer inactive\n");
+ else
+ callout_reset(&sc->pcie_dll_timer, hz,
+ pcib_pcie_dll_timeout, sc);
+ } else if (sc->pcie_link_sta & PCIEM_LINK_STA_DL_ACTIVE)
+ callout_stop(&sc->pcie_dll_timer);
pcib_pcie_hotplug_command(sc, val, mask);
@@ -1382,7 +1380,7 @@ pcib_setup_hotplug(struct pcib_softc *sc)
mask = PCIEM_SLOT_CTL_DLLSCE | PCIEM_SLOT_CTL_HPIE |
PCIEM_SLOT_CTL_CCIE | PCIEM_SLOT_CTL_PDCE | PCIEM_SLOT_CTL_MRLSCE |
PCIEM_SLOT_CTL_PFDE | PCIEM_SLOT_CTL_ABPE;
- val = PCIEM_SLOT_CTL_PDCE | PCIEM_SLOT_CTL_HPIE;
+ val = PCIEM_SLOT_CTL_DLLSCE | PCIEM_SLOT_CTL_HPIE | PCIEM_SLOT_CTL_PDCE;
if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_APB)
val |= PCIEM_SLOT_CTL_ABPE;
if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_PCP)
@@ -1391,8 +1389,6 @@ pcib_setup_hotplug(struct pcib_softc *sc)
val |= PCIEM_SLOT_CTL_MRLSCE;
if (!(sc->pcie_slot_cap & PCIEM_SLOT_CAP_NCCS))
val |= PCIEM_SLOT_CTL_CCIE;
- if (sc->pcie_link_cap & PCIEM_LINK_CAP_DL_ACTIVE)
- val |= PCIEM_SLOT_CTL_DLLSCE;
/* Turn the attention indicator off. */
if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_AIP) {
diff --git a/sys/dev/pci/pcib_private.h b/sys/dev/pci/pcib_private.h
index 65aec8d..9014cff 100644
--- a/sys/dev/pci/pcib_private.h
+++ b/sys/dev/pci/pcib_private.h
@@ -132,7 +132,6 @@ struct pcib_softc
uint16_t bridgectl; /* bridge control register */
uint16_t pcie_link_sta;
uint16_t pcie_slot_sta;
- uint32_t pcie_link_cap;
uint32_t pcie_slot_cap;
struct resource *pcie_irq;
void *pcie_ihand;
diff --git a/sys/dev/qlxgbe/ql_def.h b/sys/dev/qlxgbe/ql_def.h
index 3299719..83774a7 100644
--- a/sys/dev/qlxgbe/ql_def.h
+++ b/sys/dev/qlxgbe/ql_def.h
@@ -112,6 +112,16 @@ typedef struct _qla_tx_ring {
uint64_t count;
} qla_tx_ring_t;
+typedef struct _qla_tx_fp {
+ struct mtx tx_mtx;
+ char tx_mtx_name[32];
+ struct buf_ring *tx_br;
+ struct task fp_task;
+ struct taskqueue *fp_taskqueue;
+ void *ha;
+ uint32_t txr_idx;
+} qla_tx_fp_t;
+
/*
* Adapter structure contains the hardware independent information of the
* pci function.
@@ -178,10 +188,9 @@ struct qla_host {
qla_tx_ring_t tx_ring[NUM_TX_RINGS];
bus_dma_tag_t tx_tag;
- struct task tx_task;
- struct taskqueue *tx_tq;
struct callout tx_callout;
- struct mtx tx_lock;
+
+ qla_tx_fp_t tx_fp[MAX_SDS_RINGS];
qla_rx_ring_t rx_ring[MAX_RDS_RINGS];
bus_dma_tag_t rx_tag;
diff --git a/sys/dev/qlxgbe/ql_glbl.h b/sys/dev/qlxgbe/ql_glbl.h
index 8f92b0e..beafb41 100644
--- a/sys/dev/qlxgbe/ql_glbl.h
+++ b/sys/dev/qlxgbe/ql_glbl.h
@@ -39,6 +39,7 @@
*/
extern void ql_mbx_isr(void *arg);
extern void ql_isr(void *arg);
+extern uint32_t ql_rcv_isr(qla_host_t *ha, uint32_t sds_idx, uint32_t count);
/*
* from ql_os.c
@@ -66,7 +67,7 @@ extern void qla_reset_promisc(qla_host_t *ha);
extern int ql_set_allmulti(qla_host_t *ha);
extern void qla_reset_allmulti(qla_host_t *ha);
extern void ql_update_link_state(qla_host_t *ha);
-extern void ql_hw_tx_done(qla_host_t *ha);
+extern void ql_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx);
extern int ql_set_max_mtu(qla_host_t *ha, uint32_t mtu, uint16_t cntxt_id);
extern void ql_hw_stop_rcv(qla_host_t *ha);
extern void ql_get_stats(qla_host_t *ha);
@@ -76,7 +77,7 @@ extern void qla_hw_async_event(qla_host_t *ha);
extern int qla_get_nic_partition(qla_host_t *ha, uint32_t *supports_9kb,
uint32_t *num_rcvq);
-extern int qla_iscsi_pdu(qla_host_t *ha, struct mbuf *mp);
+extern int ql_iscsi_pdu(qla_host_t *ha, struct mbuf *mp);
extern void ql_minidump(qla_host_t *ha);
extern int ql_minidump_init(qla_host_t *ha);
diff --git a/sys/dev/qlxgbe/ql_hw.c b/sys/dev/qlxgbe/ql_hw.c
index 5995815..c6f6f01 100644
--- a/sys/dev/qlxgbe/ql_hw.c
+++ b/sys/dev/qlxgbe/ql_hw.c
@@ -51,7 +51,6 @@ static void qla_del_rcv_cntxt(qla_host_t *ha);
static int qla_init_rcv_cntxt(qla_host_t *ha);
static void qla_del_xmt_cntxt(qla_host_t *ha);
static int qla_init_xmt_cntxt(qla_host_t *ha);
-static void qla_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx);
static int qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause);
static int qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx,
@@ -2047,7 +2046,7 @@ ql_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
ha->hw.iscsi_pkt_count++;
if (hw->tx_cntxt[txr_idx].txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) {
- qla_hw_tx_done_locked(ha, txr_idx);
+ ql_hw_tx_done_locked(ha, txr_idx);
if (hw->tx_cntxt[txr_idx].txr_free <=
(num_tx_cmds + QLA_TX_MIN_FREE)) {
QL_DPRINT8(ha, (dev, "%s: (hw->txr_free <= "
@@ -2552,15 +2551,8 @@ qla_init_rcv_cntxt(qla_host_t *ha)
qla_host_to_le64(hw->dma_buf.sds_ring[i].dma_addr);
rcntxt->sds[i].size =
qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
- if (ha->msix_count == 2) {
- rcntxt->sds[i].intr_id =
- qla_host_to_le16(hw->intr_id[0]);
- rcntxt->sds[i].intr_src_bit = qla_host_to_le16((i));
- } else {
- rcntxt->sds[i].intr_id =
- qla_host_to_le16(hw->intr_id[i]);
- rcntxt->sds[i].intr_src_bit = qla_host_to_le16(0);
- }
+ rcntxt->sds[i].intr_id = qla_host_to_le16(hw->intr_id[i]);
+ rcntxt->sds[i].intr_src_bit = qla_host_to_le16(0);
}
for (i = 0; i < rcntxt_rds_rings; i++) {
@@ -2672,17 +2664,11 @@ qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds)
add_rcv->sds[i].size =
qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
- if (ha->msix_count == 2) {
- add_rcv->sds[i].intr_id =
- qla_host_to_le16(hw->intr_id[0]);
- add_rcv->sds[i].intr_src_bit = qla_host_to_le16(j);
- } else {
- add_rcv->sds[i].intr_id =
- qla_host_to_le16(hw->intr_id[j]);
- add_rcv->sds[i].intr_src_bit = qla_host_to_le16(0);
- }
+ add_rcv->sds[i].intr_id = qla_host_to_le16(hw->intr_id[j]);
+ add_rcv->sds[i].intr_src_bit = qla_host_to_le16(0);
}
+
for (i = 0; (i < nsds); i++) {
j = i + sds_idx;
@@ -2803,6 +2789,7 @@ qla_init_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
q80_rsp_tx_cntxt_t *tcntxt_rsp;
uint32_t err;
qla_hw_tx_cntxt_t *hw_tx_cntxt;
+ uint32_t intr_idx;
hw_tx_cntxt = &hw->tx_cntxt[txr_idx];
@@ -2818,6 +2805,8 @@ qla_init_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
tcntxt->count_version = (sizeof (q80_rq_tx_cntxt_t) >> 2);
tcntxt->count_version |= Q8_MBX_CMD_VERSION;
+ intr_idx = txr_idx;
+
#ifdef QL_ENABLE_ISCSI_TLV
tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO |
@@ -2827,8 +2816,9 @@ qla_init_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
tcntxt->traffic_class = 1;
}
-#else
+ intr_idx = txr_idx % (ha->hw.num_tx_rings >> 1);
+#else
tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO;
#endif /* #ifdef QL_ENABLE_ISCSI_TLV */
@@ -2841,10 +2831,9 @@ qla_init_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
qla_host_to_le64(hw_tx_cntxt->tx_cons_paddr);
tcntxt->tx_ring[0].nentries = qla_host_to_le16(NUM_TX_DESCRIPTORS);
- tcntxt->tx_ring[0].intr_id = qla_host_to_le16(hw->intr_id[0]);
+ tcntxt->tx_ring[0].intr_id = qla_host_to_le16(hw->intr_id[intr_idx]);
tcntxt->tx_ring[0].intr_src_bit = qla_host_to_le16(0);
-
hw_tx_cntxt->txr_free = NUM_TX_DESCRIPTORS;
hw_tx_cntxt->txr_next = hw_tx_cntxt->txr_comp = 0;
@@ -3166,11 +3155,11 @@ ql_hw_set_multi(qla_host_t *ha, uint8_t *mcast_addr, uint32_t mcnt,
}
/*
- * Name: qla_hw_tx_done_locked
+ * Name: ql_hw_tx_done_locked
* Function: Handle Transmit Completions
*/
-static void
-qla_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx)
+void
+ql_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx)
{
qla_tx_buf_t *txb;
qla_hw_t *hw = &ha->hw;
@@ -3208,34 +3197,6 @@ qla_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx)
return;
}
-/*
- * Name: ql_hw_tx_done
- * Function: Handle Transmit Completions
- */
-void
-ql_hw_tx_done(qla_host_t *ha)
-{
- int i;
- uint32_t flag = 0;
-
- if (!mtx_trylock(&ha->tx_lock)) {
- QL_DPRINT8(ha, (ha->pci_dev,
- "%s: !mtx_trylock(&ha->tx_lock)\n", __func__));
- return;
- }
- for (i = 0; i < ha->hw.num_tx_rings; i++) {
- qla_hw_tx_done_locked(ha, i);
- if (ha->hw.tx_cntxt[i].txr_free <= (NUM_TX_DESCRIPTORS >> 1))
- flag = 1;
- }
-
- if (!flag)
- ha->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
-
- QLA_TX_UNLOCK(ha);
- return;
-}
-
void
ql_update_link_state(qla_host_t *ha)
{
@@ -3655,7 +3616,7 @@ qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits)
}
int
-qla_iscsi_pdu(qla_host_t *ha, struct mbuf *mp)
+ql_iscsi_pdu(qla_host_t *ha, struct mbuf *mp)
{
struct ether_vlan_header *eh;
uint16_t etype;
diff --git a/sys/dev/qlxgbe/ql_hw.h b/sys/dev/qlxgbe/ql_hw.h
index e50bc5e..37090ff 100644
--- a/sys/dev/qlxgbe/ql_hw.h
+++ b/sys/dev/qlxgbe/ql_hw.h
@@ -1543,7 +1543,6 @@ typedef struct _qla_hw_tx_cntxt {
uint32_t tx_prod_reg;
uint16_t tx_cntxt_id;
- uint8_t frame_hdr[QL_FRAME_HDR_SIZE];
} qla_hw_tx_cntxt_t;
diff --git a/sys/dev/qlxgbe/ql_isr.c b/sys/dev/qlxgbe/ql_isr.c
index 1bf6cca..23bbc61 100644
--- a/sys/dev/qlxgbe/ql_isr.c
+++ b/sys/dev/qlxgbe/ql_isr.c
@@ -159,7 +159,12 @@ qla_rx_intr(qla_host_t *ha, qla_sgl_rcv_t *sgc, uint32_t sds_idx)
if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
mpf->m_pkthdr.flowid = sgc->rss_hash;
+
+#if __FreeBSD_version >= 1100000
M_HASHTYPE_SET(mpf, M_HASHTYPE_OPAQUE_HASH);
+#else
+ M_HASHTYPE_SET(mpf, M_HASHTYPE_NONE);
+#endif /* #if __FreeBSD_version >= 1100000 */
(*ifp->if_input)(ifp, mpf);
@@ -449,11 +454,11 @@ qla_rcv_cont_sds(qla_host_t *ha, uint32_t sds_idx, uint32_t comp_idx,
}
/*
- * Name: qla_rcv_isr
+ * Name: ql_rcv_isr
* Function: Main Interrupt Service Routine
*/
-static uint32_t
-qla_rcv_isr(qla_host_t *ha, uint32_t sds_idx, uint32_t count)
+uint32_t
+ql_rcv_isr(qla_host_t *ha, uint32_t sds_idx, uint32_t count)
{
device_t dev;
qla_hw_t *hw;
@@ -703,7 +708,7 @@ qla_rcv_isr(qla_host_t *ha, uint32_t sds_idx, uint32_t count)
}
if (ha->flags.stop_rcv)
- goto qla_rcv_isr_exit;
+ goto ql_rcv_isr_exit;
if (hw->sds[sds_idx].sdsr_next != comp_idx) {
QL_UPDATE_SDS_CONSUMER_INDEX(ha, sds_idx, comp_idx);
@@ -726,7 +731,7 @@ qla_rcv_isr(qla_host_t *ha, uint32_t sds_idx, uint32_t count)
if (opcode)
ret = -1;
-qla_rcv_isr_exit:
+ql_rcv_isr_exit:
hw->sds[sds_idx].rcv_active = 0;
return (ret);
@@ -930,7 +935,7 @@ ql_isr(void *arg)
int idx;
qla_hw_t *hw;
struct ifnet *ifp;
- uint32_t ret = 0;
+ qla_tx_fp_t *fp;
ha = ivec->ha;
hw = &ha->hw;
@@ -939,17 +944,12 @@ ql_isr(void *arg)
if ((idx = ivec->sds_idx) >= ha->hw.num_sds_rings)
return;
- if (idx == 0)
- taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
-
- ret = qla_rcv_isr(ha, idx, -1);
- if (idx == 0)
- taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
+ fp = &ha->tx_fp[idx];
+
+ if (fp->fp_taskqueue != NULL)
+ taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
- if (!ha->flags.stop_rcv) {
- QL_ENABLE_INTERRUPTS(ha, idx);
- }
return;
}
diff --git a/sys/dev/qlxgbe/ql_os.c b/sys/dev/qlxgbe/ql_os.c
index 7592a7d..b7cda78 100644
--- a/sys/dev/qlxgbe/ql_os.c
+++ b/sys/dev/qlxgbe/ql_os.c
@@ -76,11 +76,11 @@ static void qla_release(qla_host_t *ha);
static void qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs,
int error);
static void qla_stop(qla_host_t *ha);
-static int qla_send(qla_host_t *ha, struct mbuf **m_headp);
-static void qla_tx_done(void *context, int pending);
static void qla_get_peer(qla_host_t *ha);
static void qla_error_recovery(void *context, int pending);
static void qla_async_event(void *context, int pending);
+static int qla_send(qla_host_t *ha, struct mbuf **m_headp, uint32_t txr_idx,
+ uint32_t iscsi_pdu);
/*
* Hooks to the Operating Systems
@@ -93,7 +93,14 @@ static void qla_init(void *arg);
static int qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static int qla_media_change(struct ifnet *ifp);
static void qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr);
-static void qla_start(struct ifnet *ifp);
+
+static int qla_transmit(struct ifnet *ifp, struct mbuf *mp);
+static void qla_qflush(struct ifnet *ifp);
+static int qla_alloc_tx_br(qla_host_t *ha, qla_tx_fp_t *tx_fp);
+static void qla_free_tx_br(qla_host_t *ha, qla_tx_fp_t *tx_fp);
+static int qla_create_fp_taskqueues(qla_host_t *ha);
+static void qla_destroy_fp_taskqueues(qla_host_t *ha);
+static void qla_drain_fp_taskqueues(qla_host_t *ha);
static device_method_t qla_pci_methods[] = {
/* Device interface */
@@ -225,7 +232,6 @@ qla_watchdog(void *arg)
qla_hw_t *hw;
struct ifnet *ifp;
uint32_t i;
- qla_hw_tx_cntxt_t *hw_tx_cntxt;
hw = &ha->hw;
ifp = ha->ifp;
@@ -254,19 +260,14 @@ qla_watchdog(void *arg)
&ha->async_event_task);
}
- for (i = 0; i < ha->hw.num_tx_rings; i++) {
- hw_tx_cntxt = &hw->tx_cntxt[i];
- if (qla_le32_to_host(*(hw_tx_cntxt->tx_cons)) !=
- hw_tx_cntxt->txr_comp) {
- taskqueue_enqueue(ha->tx_tq,
- &ha->tx_task);
- break;
- }
- }
+ for (i = 0; i < ha->hw.num_sds_rings; i++) {
+ qla_tx_fp_t *fp = &ha->tx_fp[i];
- if ((ifp->if_snd.ifq_head != NULL) && QL_RUNNING(ifp)) {
- taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
+ if (fp->fp_taskqueue != NULL)
+ taskqueue_enqueue(fp->fp_taskqueue,
+ &fp->fp_task);
}
+
ha->qla_watchdog_paused = 0;
} else {
ha->qla_watchdog_paused = 0;
@@ -322,9 +323,7 @@ qla_pci_attach(device_t dev)
rsrc_len = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
ha->reg_rid);
- mtx_init(&ha->hw_lock, "qla83xx_hw_lock", MTX_NETWORK_LOCK, MTX_SPIN);
-
- mtx_init(&ha->tx_lock, "qla83xx_tx_lock", MTX_NETWORK_LOCK, MTX_DEF);
+ mtx_init(&ha->hw_lock, "qla83xx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF);
qla_add_sysctls(ha);
ql_hw_add_sysctls(ha);
@@ -344,8 +343,9 @@ qla_pci_attach(device_t dev)
}
QL_DPRINT2(ha, (dev, "%s: ha %p pci_func 0x%x rsrc_count 0x%08x"
- " msix_count 0x%x pci_reg %p\n", __func__, ha,
- ha->pci_func, rsrc_len, ha->msix_count, ha->pci_reg));
+ " msix_count 0x%x pci_reg %p pci_reg1 %p\n", __func__, ha,
+ ha->pci_func, rsrc_len, ha->msix_count, ha->pci_reg,
+ ha->pci_reg1));
/* initialize hardware */
if (ql_init_hw(ha)) {
@@ -366,14 +366,15 @@ qla_pci_attach(device_t dev)
goto qla_pci_attach_err;
}
device_printf(dev, "%s: ha %p pci_func 0x%x rsrc_count 0x%08x"
- " msix_count 0x%x pci_reg %p num_rcvq = %d\n", __func__, ha,
- ha->pci_func, rsrc_len, ha->msix_count, ha->pci_reg, num_rcvq);
+ " msix_count 0x%x pci_reg %p pci_reg1 %p num_rcvq = %d\n",
+ __func__, ha, ha->pci_func, rsrc_len, ha->msix_count,
+ ha->pci_reg, ha->pci_reg1, num_rcvq);
#ifdef QL_ENABLE_ISCSI_TLV
if ((ha->msix_count < 64) || (num_rcvq != 32)) {
ha->hw.num_sds_rings = 15;
- ha->hw.num_tx_rings = 32;
+ ha->hw.num_tx_rings = ha->hw.num_sds_rings * 2;
}
#endif /* #ifdef QL_ENABLE_ISCSI_TLV */
ha->hw.num_rds_rings = ha->hw.num_sds_rings;
@@ -421,8 +422,20 @@ qla_pci_attach(device_t dev)
device_printf(dev, "could not setup interrupt\n");
goto qla_pci_attach_err;
}
+
+ ha->tx_fp[i].ha = ha;
+ ha->tx_fp[i].txr_idx = i;
+
+ if (qla_alloc_tx_br(ha, &ha->tx_fp[i])) {
+ device_printf(dev, "%s: could not allocate tx_br[%d]\n",
+ __func__, i);
+ goto qla_pci_attach_err;
+ }
}
+ if (qla_create_fp_taskqueues(ha) != 0)
+ goto qla_pci_attach_err;
+
printf("%s: mp__ncpus %d sds %d rds %d msi-x %d\n", __func__, mp_ncpus,
ha->hw.num_sds_rings, ha->hw.num_rds_rings, ha->msix_count);
@@ -452,13 +465,6 @@ qla_pci_attach(device_t dev)
ha->flags.qla_watchdog_active = 1;
ha->flags.qla_watchdog_pause = 0;
-
- TASK_INIT(&ha->tx_task, 0, qla_tx_done, ha);
- ha->tx_tq = taskqueue_create("qla_txq", M_NOWAIT,
- taskqueue_thread_enqueue, &ha->tx_tq);
- taskqueue_start_threads(&ha->tx_tq, 1, PI_NET, "%s txq",
- device_get_nameunit(ha->pci_dev));
-
callout_init(&ha->tx_callout, TRUE);
ha->flags.qla_callout_init = 1;
@@ -584,11 +590,6 @@ qla_release(qla_host_t *ha)
taskqueue_free(ha->err_tq);
}
- if (ha->tx_tq) {
- taskqueue_drain(ha->tx_tq, &ha->tx_task);
- taskqueue_free(ha->tx_tq);
- }
-
ql_del_cdev(ha);
if (ha->flags.qla_watchdog_active) {
@@ -626,13 +627,15 @@ qla_release(qla_host_t *ha)
ha->irq_vec[i].irq_rid,
ha->irq_vec[i].irq);
}
+
+ qla_free_tx_br(ha, &ha->tx_fp[i]);
}
+ qla_destroy_fp_taskqueues(ha);
if (ha->msix_count)
pci_release_msi(dev);
if (ha->flags.lock_init) {
- mtx_destroy(&ha->tx_lock);
mtx_destroy(&ha->hw_lock);
}
@@ -807,7 +810,9 @@ qla_init_ifnet(device_t dev, qla_host_t *ha)
ifp->if_softc = ha;
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
ifp->if_ioctl = qla_ioctl;
- ifp->if_start = qla_start;
+
+ ifp->if_transmit = qla_transmit;
+ ifp->if_qflush = qla_qflush;
IFQ_SET_MAXLEN(&ifp->if_snd, qla_get_ifq_snd_maxlen(ha));
ifp->if_snd.ifq_drv_maxlen = qla_get_ifq_snd_maxlen(ha);
@@ -817,12 +822,13 @@ qla_init_ifnet(device_t dev, qla_host_t *ha)
ether_ifattach(ifp, qla_get_mac_addr(ha));
- ifp->if_capabilities = IFCAP_HWCSUM |
+ ifp->if_capabilities |= IFCAP_HWCSUM |
IFCAP_TSO4 |
- IFCAP_JUMBO_MTU;
-
- ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
- ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
+ IFCAP_JUMBO_MTU |
+ IFCAP_VLAN_HWTAGGING |
+ IFCAP_VLAN_MTU |
+ IFCAP_VLAN_HWTSO |
+ IFCAP_LRO;
ifp->if_capenable = ifp->if_capabilities;
@@ -917,10 +923,13 @@ qla_set_multi(qla_host_t *ha, uint32_t add_multi)
if_maddr_runlock(ifp);
- if (QLA_LOCK(ha, __func__, 1) == 0) {
- ret = ql_hw_set_multi(ha, mta, mcnt, add_multi);
- QLA_UNLOCK(ha, __func__);
- }
+ //if (QLA_LOCK(ha, __func__, 1) == 0) {
+ // ret = ql_hw_set_multi(ha, mta, mcnt, add_multi);
+ // QLA_UNLOCK(ha, __func__);
+ //}
+ QLA_LOCK(ha, __func__, 1);
+ ret = ql_hw_set_multi(ha, mta, mcnt, add_multi);
+ QLA_UNLOCK(ha, __func__);
return (ret);
}
@@ -1125,64 +1134,10 @@ qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
return;
}
-static void
-qla_start(struct ifnet *ifp)
-{
- struct mbuf *m_head;
- qla_host_t *ha = (qla_host_t *)ifp->if_softc;
-
- QL_DPRINT8(ha, (ha->pci_dev, "%s: enter\n", __func__));
-
- if (!mtx_trylock(&ha->tx_lock)) {
- QL_DPRINT8(ha, (ha->pci_dev,
- "%s: mtx_trylock(&ha->tx_lock) failed\n", __func__));
- return;
- }
-
- if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
- IFF_DRV_RUNNING) {
- QL_DPRINT8(ha,
- (ha->pci_dev, "%s: !IFF_DRV_RUNNING\n", __func__));
- QLA_TX_UNLOCK(ha);
- return;
- }
-
- if (!ha->hw.link_up || !ha->watchdog_ticks)
- ql_update_link_state(ha);
-
- if (!ha->hw.link_up) {
- QL_DPRINT8(ha, (ha->pci_dev, "%s: link down\n", __func__));
- QLA_TX_UNLOCK(ha);
- return;
- }
-
- while (ifp->if_snd.ifq_head != NULL) {
- IF_DEQUEUE(&ifp->if_snd, m_head);
-
- if (m_head == NULL) {
- QL_DPRINT8(ha, (ha->pci_dev, "%s: m_head == NULL\n",
- __func__));
- break;
- }
-
- if (qla_send(ha, &m_head)) {
- if (m_head == NULL)
- break;
- QL_DPRINT8(ha, (ha->pci_dev, "%s: PREPEND\n", __func__));
- ifp->if_drv_flags |= IFF_DRV_OACTIVE;
- IF_PREPEND(&ifp->if_snd, m_head);
- break;
- }
- /* Send a copy of the frame to the BPF listener */
- ETHER_BPF_MTAP(ifp, m_head);
- }
- QLA_TX_UNLOCK(ha);
- QL_DPRINT8(ha, (ha->pci_dev, "%s: exit\n", __func__));
- return;
-}
static int
-qla_send(qla_host_t *ha, struct mbuf **m_headp)
+qla_send(qla_host_t *ha, struct mbuf **m_headp, uint32_t txr_idx,
+ uint32_t iscsi_pdu)
{
bus_dma_segment_t segs[QLA_MAX_SEGMENTS];
bus_dmamap_t map;
@@ -1190,29 +1145,9 @@ qla_send(qla_host_t *ha, struct mbuf **m_headp)
int ret = -1;
uint32_t tx_idx;
struct mbuf *m_head = *m_headp;
- uint32_t txr_idx = ha->txr_idx;
- uint32_t iscsi_pdu = 0;
QL_DPRINT8(ha, (ha->pci_dev, "%s: enter\n", __func__));
- /* check if flowid is set */
-
- if (M_HASHTYPE_GET(m_head) != M_HASHTYPE_NONE) {
-#ifdef QL_ENABLE_ISCSI_TLV
- if (qla_iscsi_pdu(ha, m_head) == 0) {
- iscsi_pdu = 1;
- txr_idx = m_head->m_pkthdr.flowid &
- ((ha->hw.num_tx_rings >> 1) - 1);
- } else {
- txr_idx = m_head->m_pkthdr.flowid &
- (ha->hw.num_tx_rings - 1);
- }
-#else
- txr_idx = m_head->m_pkthdr.flowid & (ha->hw.num_tx_rings - 1);
-#endif /* #ifdef QL_ENABLE_ISCSI_TLV */
- }
-
-
tx_idx = ha->hw.tx_cntxt[txr_idx].txr_next;
map = ha->tx_ring[txr_idx].tx_buf[tx_idx].map;
@@ -1290,16 +1225,302 @@ qla_send(qla_host_t *ha, struct mbuf **m_headp)
return (ret);
}
+static int
+qla_alloc_tx_br(qla_host_t *ha, qla_tx_fp_t *fp)
+{
+ snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
+ "qla%d_fp%d_tx_mq_lock", ha->pci_func, fp->txr_idx);
+
+ mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);
+
+ fp->tx_br = buf_ring_alloc(NUM_TX_DESCRIPTORS, M_DEVBUF,
+ M_NOWAIT, &fp->tx_mtx);
+ if (fp->tx_br == NULL) {
+ QL_DPRINT1(ha, (ha->pci_dev, "buf_ring_alloc failed for "
+ " fp[%d, %d]\n", ha->pci_func, fp->txr_idx));
+ return (-ENOMEM);
+ }
+ return 0;
+}
+
+static void
+qla_free_tx_br(qla_host_t *ha, qla_tx_fp_t *fp)
+{
+ struct mbuf *mp;
+ struct ifnet *ifp = ha->ifp;
+
+ if (mtx_initialized(&fp->tx_mtx)) {
+
+ if (fp->tx_br != NULL) {
+
+ mtx_lock(&fp->tx_mtx);
+
+ while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
+ m_freem(mp);
+ }
+
+ mtx_unlock(&fp->tx_mtx);
+
+ buf_ring_free(fp->tx_br, M_DEVBUF);
+ fp->tx_br = NULL;
+ }
+ mtx_destroy(&fp->tx_mtx);
+ }
+ return;
+}
+
+static void
+qla_fp_taskqueue(void *context, int pending)
+{
+ qla_tx_fp_t *fp;
+ qla_host_t *ha;
+ struct ifnet *ifp;
+ struct mbuf *mp;
+ int ret;
+ uint32_t txr_idx;
+ uint32_t iscsi_pdu = 0;
+ uint32_t rx_pkts_left;
+
+ fp = context;
+
+ if (fp == NULL)
+ return;
+
+ ha = (qla_host_t *)fp->ha;
+
+ ifp = ha->ifp;
+
+ txr_idx = fp->txr_idx;
+
+ mtx_lock(&fp->tx_mtx);
+
+ if (((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
+ IFF_DRV_RUNNING) || (!ha->hw.link_up)) {
+ mtx_unlock(&fp->tx_mtx);
+ goto qla_fp_taskqueue_exit;
+ }
+
+ rx_pkts_left = ql_rcv_isr(ha, fp->txr_idx, 64);
+
+#ifdef QL_ENABLE_ISCSI_TLV
+ ql_hw_tx_done_locked(ha, fp->txr_idx);
+ ql_hw_tx_done_locked(ha, (fp->txr_idx + (ha->hw.num_tx_rings >> 1)));
+ txr_idx = txr_idx + (ha->hw.num_tx_rings >> 1);
+#else
+ ql_hw_tx_done_locked(ha, fp->txr_idx);
+#endif /* #ifdef QL_ENABLE_ISCSI_TLV */
+
+ mp = drbr_peek(ifp, fp->tx_br);
+
+ while (mp != NULL) {
+
+ if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE) {
+#ifdef QL_ENABLE_ISCSI_TLV
+ if (ql_iscsi_pdu(ha, mp) == 0) {
+ iscsi_pdu = 1;
+ }
+#endif /* #ifdef QL_ENABLE_ISCSI_TLV */
+ }
+
+ ret = qla_send(ha, &mp, txr_idx, iscsi_pdu);
+
+ if (ret) {
+ if (mp != NULL)
+ drbr_putback(ifp, fp->tx_br, mp);
+ else {
+ drbr_advance(ifp, fp->tx_br);
+ }
+
+ mtx_unlock(&fp->tx_mtx);
+
+ goto qla_fp_taskqueue_exit0;
+ } else {
+ drbr_advance(ifp, fp->tx_br);
+ }
+
+ mp = drbr_peek(ifp, fp->tx_br);
+ }
+
+ mtx_unlock(&fp->tx_mtx);
+
+qla_fp_taskqueue_exit0:
+
+ if (rx_pkts_left || ((mp != NULL) && ret)) {
+ taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
+ } else {
+ if (!ha->flags.stop_rcv) {
+ QL_ENABLE_INTERRUPTS(ha, fp->txr_idx);
+ }
+ }
+
+qla_fp_taskqueue_exit:
+
+ QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = %d\n", __func__, ret));
+ return;
+}
+
+static int
+qla_create_fp_taskqueues(qla_host_t *ha)
+{
+ int i;
+ uint8_t tq_name[32];
+
+ for (i = 0; i < ha->hw.num_sds_rings; i++) {
+
+ qla_tx_fp_t *fp = &ha->tx_fp[i];
+
+ bzero(tq_name, sizeof (tq_name));
+ snprintf(tq_name, sizeof (tq_name), "ql_fp_tq_%d", i);
+
+ TASK_INIT(&fp->fp_task, 0, qla_fp_taskqueue, fp);
+
+ fp->fp_taskqueue = taskqueue_create_fast(tq_name, M_NOWAIT,
+ taskqueue_thread_enqueue,
+ &fp->fp_taskqueue);
+
+ if (fp->fp_taskqueue == NULL)
+ return (-1);
+
+ taskqueue_start_threads(&fp->fp_taskqueue, 1, PI_NET, "%s",
+ tq_name);
+
+ QL_DPRINT1(ha, (ha->pci_dev, "%s: %p\n", __func__,
+ fp->fp_taskqueue));
+ }
+
+ return (0);
+}
+
+static void
+qla_destroy_fp_taskqueues(qla_host_t *ha)
+{
+ int i;
+
+ for (i = 0; i < ha->hw.num_sds_rings; i++) {
+
+ qla_tx_fp_t *fp = &ha->tx_fp[i];
+
+ if (fp->fp_taskqueue != NULL) {
+ taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
+ taskqueue_free(fp->fp_taskqueue);
+ fp->fp_taskqueue = NULL;
+ }
+ }
+ return;
+}
+
+static void
+qla_drain_fp_taskqueues(qla_host_t *ha)
+{
+ int i;
+
+ for (i = 0; i < ha->hw.num_sds_rings; i++) {
+ qla_tx_fp_t *fp = &ha->tx_fp[i];
+
+ if (fp->fp_taskqueue != NULL) {
+ taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
+ }
+ }
+ return;
+}
+
+static int
+qla_transmit(struct ifnet *ifp, struct mbuf *mp)
+{
+ qla_host_t *ha = (qla_host_t *)ifp->if_softc;
+ qla_tx_fp_t *fp;
+ int rss_id = 0;
+ int ret = 0;
+
+ QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
+
+#if __FreeBSD_version >= 1100000
+ if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE)
+#else
+ if (mp->m_flags & M_FLOWID)
+#endif
+ rss_id = (mp->m_pkthdr.flowid & Q8_RSS_IND_TBL_MAX_IDX) %
+ ha->hw.num_sds_rings;
+ fp = &ha->tx_fp[rss_id];
+
+ if (fp->tx_br == NULL) {
+ ret = EINVAL;
+ goto qla_transmit_exit;
+ }
+
+ if (mp != NULL) {
+ ret = drbr_enqueue(ifp, fp->tx_br, mp);
+ }
+
+ if (fp->fp_taskqueue != NULL)
+ taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
+
+ ret = 0;
+
+qla_transmit_exit:
+
+ QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = %d\n", __func__, ret));
+ return ret;
+}
+
+static void
+qla_qflush(struct ifnet *ifp)
+{
+ int i;
+ qla_tx_fp_t *fp;
+ struct mbuf *mp;
+ qla_host_t *ha;
+
+ ha = (qla_host_t *)ifp->if_softc;
+
+ QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
+
+ for (i = 0; i < ha->hw.num_sds_rings; i++) {
+
+ fp = &ha->tx_fp[i];
+
+ if (fp == NULL)
+ continue;
+
+ if (fp->tx_br) {
+ mtx_lock(&fp->tx_mtx);
+
+ while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
+ m_freem(mp);
+ }
+ mtx_unlock(&fp->tx_mtx);
+ }
+ }
+ QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
+
+ return;
+}
+
+
static void
qla_stop(qla_host_t *ha)
{
struct ifnet *ifp = ha->ifp;
device_t dev;
+ int i = 0;
dev = ha->pci_dev;
ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);
- QLA_TX_LOCK(ha); QLA_TX_UNLOCK(ha);
+
+ for (i = 0; i < ha->hw.num_sds_rings; i++) {
+ qla_tx_fp_t *fp;
+
+ fp = &ha->tx_fp[i];
+
+ if (fp == NULL)
+ continue;
+
+ if (fp->tx_br != NULL) {
+ mtx_lock(&fp->tx_mtx);
+ mtx_unlock(&fp->tx_mtx);
+ }
+ }
ha->flags.qla_watchdog_pause = 1;
@@ -1308,6 +1529,8 @@ qla_stop(qla_host_t *ha)
ha->flags.qla_interface_up = 0;
+ qla_drain_fp_taskqueues(ha);
+
ql_hw_stop_rcv(ha);
ql_del_hw_if(ha);
@@ -1648,25 +1871,6 @@ exit_ql_get_mbuf:
return (ret);
}
-static void
-qla_tx_done(void *context, int pending)
-{
- qla_host_t *ha = context;
- struct ifnet *ifp;
-
- ifp = ha->ifp;
-
- if (!ifp)
- return;
-
- if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
- QL_DPRINT8(ha, (ha->pci_dev, "%s: !IFF_DRV_RUNNING\n", __func__));
- return;
- }
- ql_hw_tx_done(ha);
-
- qla_start(ha->ifp);
-}
static void
qla_get_peer(qla_host_t *ha)
@@ -1709,18 +1913,32 @@ qla_error_recovery(void *context, int pending)
qla_host_t *ha = context;
uint32_t msecs_100 = 100;
struct ifnet *ifp = ha->ifp;
+ int i = 0;
(void)QLA_LOCK(ha, __func__, 0);
if (ha->flags.qla_interface_up) {
- ha->hw.imd_compl = 1;
- qla_mdelay(__func__, 300);
+ ha->hw.imd_compl = 1;
+ qla_mdelay(__func__, 300);
- ql_hw_stop_rcv(ha);
+ ql_hw_stop_rcv(ha);
- ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);
- QLA_TX_LOCK(ha); QLA_TX_UNLOCK(ha);
+ ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);
+
+ for (i = 0; i < ha->hw.num_sds_rings; i++) {
+ qla_tx_fp_t *fp;
+
+ fp = &ha->tx_fp[i];
+
+ if (fp == NULL)
+ continue;
+
+ if (fp->tx_br != NULL) {
+ mtx_lock(&fp->tx_mtx);
+ mtx_unlock(&fp->tx_mtx);
+ }
+ }
}
QLA_UNLOCK(ha, __func__);
diff --git a/sys/dev/qlxgbe/ql_os.h b/sys/dev/qlxgbe/ql_os.h
index 39b6423c..62296bf 100644
--- a/sys/dev/qlxgbe/ql_os.h
+++ b/sys/dev/qlxgbe/ql_os.h
@@ -148,8 +148,8 @@ MALLOC_DECLARE(M_QLA83XXBUF);
/*
* Locks
*/
-#define QLA_LOCK(ha, str, no_delay) qla_lock(ha, str, no_delay)
-#define QLA_UNLOCK(ha, str) qla_unlock(ha, str)
+#define QLA_LOCK(ha, str, no_delay) mtx_lock(&ha->hw_lock)
+#define QLA_UNLOCK(ha, str) mtx_unlock(&ha->hw_lock)
#define QLA_TX_LOCK(ha) mtx_lock(&ha->tx_lock);
#define QLA_TX_UNLOCK(ha) mtx_unlock(&ha->tx_lock);
diff --git a/sys/dev/qlxgbe/ql_ver.h b/sys/dev/qlxgbe/ql_ver.h
index 182fa32..90d61d2 100644
--- a/sys/dev/qlxgbe/ql_ver.h
+++ b/sys/dev/qlxgbe/ql_ver.h
@@ -36,6 +36,6 @@
#define QLA_VERSION_MAJOR 3
#define QLA_VERSION_MINOR 10
-#define QLA_VERSION_BUILD 31
+#define QLA_VERSION_BUILD 33
#endif /* #ifndef _QL_VER_H_ */
diff --git a/sys/dev/sdhci/sdhci.c b/sys/dev/sdhci/sdhci.c
index e625d4f..94db7eb 100644
--- a/sys/dev/sdhci/sdhci.c
+++ b/sys/dev/sdhci/sdhci.c
@@ -73,6 +73,7 @@ static void sdhci_set_clock(struct sdhci_slot *slot, uint32_t clock);
static void sdhci_start(struct sdhci_slot *slot);
static void sdhci_start_data(struct sdhci_slot *slot, struct mmc_data *data);
+static void sdhci_card_poll(void *);
static void sdhci_card_task(void *, int);
/* helper routines */
@@ -89,6 +90,9 @@ static void sdhci_card_task(void *, int);
#define SDHCI_200_MAX_DIVIDER 256
#define SDHCI_300_MAX_DIVIDER 2046
+#define SDHCI_CARD_PRESENT_TICKS (hz / 5)
+#define SDHCI_INSERT_DELAY_TICKS (hz / 2)
+
/*
* Broadcom BCM577xx Controller Constants
*/
@@ -164,8 +168,7 @@ sdhci_reset(struct sdhci_slot *slot, uint8_t mask)
int timeout;
if (slot->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
- if (!(RD4(slot, SDHCI_PRESENT_STATE) &
- SDHCI_CARD_PRESENT))
+ if (!SDHCI_GET_CARD_PRESENT(slot->bus, slot))
return;
}
@@ -230,10 +233,15 @@ sdhci_init(struct sdhci_slot *slot)
slot->intmask = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_INDEX |
SDHCI_INT_END_BIT | SDHCI_INT_CRC | SDHCI_INT_TIMEOUT |
- SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT |
SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL |
SDHCI_INT_DMA_END | SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE |
SDHCI_INT_ACMD12ERR;
+
+ if (!(slot->quirks & SDHCI_QUIRK_POLL_CARD_PRESENT) &&
+ !(slot->opt & SDHCI_NON_REMOVABLE)) {
+ slot->intmask |= SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT;
+ }
+
WR4(slot, SDHCI_INT_ENABLE, slot->intmask);
WR4(slot, SDHCI_SIGNAL_ENABLE, slot->intmask);
}
@@ -368,6 +376,13 @@ sdhci_set_power(struct sdhci_slot *slot, u_char power)
/* Turn on the power. */
pwr |= SDHCI_POWER_ON;
WR1(slot, SDHCI_POWER_CONTROL, pwr);
+
+ if (slot->quirks & SDHCI_QUIRK_INTEL_POWER_UP_RESET) {
+ WR1(slot, SDHCI_POWER_CONTROL, pwr | 0x10);
+ DELAY(10);
+ WR1(slot, SDHCI_POWER_CONTROL, pwr);
+ DELAY(300);
+ }
}
static void
@@ -475,23 +490,17 @@ sdhci_transfer_pio(struct sdhci_slot *slot)
}
}
-static void
-sdhci_card_delay(void *arg)
-{
- struct sdhci_slot *slot = arg;
-
- taskqueue_enqueue(taskqueue_swi_giant, &slot->card_task);
-}
-
static void
sdhci_card_task(void *arg, int pending)
{
struct sdhci_slot *slot = arg;
SDHCI_LOCK(slot);
- if (RD4(slot, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT) {
+ if (SDHCI_GET_CARD_PRESENT(slot->bus, slot)) {
if (slot->dev == NULL) {
/* If card is present - attach mmc bus. */
+ if (bootverbose || sdhci_debug)
+ slot_printf(slot, "Card inserted\n");
slot->dev = device_add_child(slot->bus, "mmc", -1);
device_set_ivars(slot->dev, slot);
SDHCI_UNLOCK(slot);
@@ -501,6 +510,8 @@ sdhci_card_task(void *arg, int pending)
} else {
if (slot->dev != NULL) {
/* If no card present - detach mmc bus. */
+ if (bootverbose || sdhci_debug)
+ slot_printf(slot, "Card removed\n");
device_t d = slot->dev;
slot->dev = NULL;
SDHCI_UNLOCK(slot);
@@ -510,6 +521,51 @@ sdhci_card_task(void *arg, int pending)
}
}
+static void
+sdhci_handle_card_present_locked(struct sdhci_slot *slot, bool is_present)
+{
+ bool was_present;
+
+ /*
+ * If there was no card and now there is one, schedule the task to
+ * create the child device after a short delay. The delay is to
+ * debounce the card insert (sometimes the card detect pin stabilizes
+ * before the other pins have made good contact).
+ *
+ * If there was a card present and now it's gone, immediately schedule
+ * the task to delete the child device. No debouncing -- gone is gone,
+ * because once power is removed, a full card re-init is needed, and
+ * that happens by deleting and recreating the child device.
+ */
+ was_present = slot->dev != NULL;
+ if (!was_present && is_present) {
+ taskqueue_enqueue_timeout(taskqueue_swi_giant,
+ &slot->card_delayed_task, -SDHCI_INSERT_DELAY_TICKS);
+ } else if (was_present && !is_present) {
+ taskqueue_enqueue(taskqueue_swi_giant, &slot->card_task);
+ }
+}
+
+void
+sdhci_handle_card_present(struct sdhci_slot *slot, bool is_present)
+{
+
+ SDHCI_LOCK(slot);
+ sdhci_handle_card_present_locked(slot, is_present);
+ SDHCI_UNLOCK(slot);
+}
+
+static void
+sdhci_card_poll(void *arg)
+{
+ struct sdhci_slot *slot = arg;
+
+ sdhci_handle_card_present(slot,
+ SDHCI_GET_CARD_PRESENT(slot->bus, slot));
+ callout_reset(&slot->card_poll_callout, SDHCI_CARD_PRESENT_TICKS,
+ sdhci_card_poll, slot);
+}
+
int
sdhci_init_slot(device_t dev, struct sdhci_slot *slot, int num)
{
@@ -580,9 +636,11 @@ sdhci_init_slot(device_t dev, struct sdhci_slot *slot, int num)
device_printf(dev, "Hardware doesn't specify base clock "
"frequency, using %dMHz as default.\n", SDHCI_DEFAULT_MAX_FREQ);
}
- /* Calculate timeout clock frequency. */
+ /* Calculate/set timeout clock frequency. */
if (slot->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK) {
slot->timeout_clk = slot->max_clk / 1000;
+ } else if (slot->quirks & SDHCI_QUIRK_DATA_TIMEOUT_1MHZ) {
+ slot->timeout_clk = 1000;
} else {
slot->timeout_clk =
(caps & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT;
@@ -626,6 +684,8 @@ sdhci_init_slot(device_t dev, struct sdhci_slot *slot, int num)
slot->opt &= ~SDHCI_HAVE_DMA;
if (slot->quirks & SDHCI_QUIRK_FORCE_DMA)
slot->opt |= SDHCI_HAVE_DMA;
+ if (slot->quirks & SDHCI_QUIRK_ALL_SLOTS_NON_REMOVABLE)
+ slot->opt |= SDHCI_NON_REMOVABLE;
/*
* Use platform-provided transfer backend
@@ -638,8 +698,9 @@ sdhci_init_slot(device_t dev, struct sdhci_slot *slot, int num)
slot_printf(slot, "%uMHz%s %s%s%s%s %s\n",
slot->max_clk / 1000000,
(caps & SDHCI_CAN_DO_HISPD) ? " HS" : "",
- (caps & MMC_CAP_8_BIT_DATA) ? "8bits" :
- ((caps & MMC_CAP_4_BIT_DATA) ? "4bits" : "1bit"),
+ (slot->host.caps & MMC_CAP_8_BIT_DATA) ? "8bits" :
+ ((slot->host.caps & MMC_CAP_4_BIT_DATA) ? "4bits" :
+ "1bit"),
(caps & SDHCI_CAN_VDD_330) ? " 3.3V" : "",
(caps & SDHCI_CAN_VDD_300) ? " 3.0V" : "",
(caps & SDHCI_CAN_VDD_180) ? " 1.8V" : "",
@@ -653,9 +714,17 @@ sdhci_init_slot(device_t dev, struct sdhci_slot *slot, int num)
"timeout", CTLFLAG_RW, &slot->timeout, 0,
"Maximum timeout for SDHCI transfers (in secs)");
TASK_INIT(&slot->card_task, 0, sdhci_card_task, slot);
- callout_init(&slot->card_callout, 1);
+ TIMEOUT_TASK_INIT(taskqueue_swi_giant, &slot->card_delayed_task, 0,
+ sdhci_card_task, slot);
+ callout_init(&slot->card_poll_callout, 1);
callout_init_mtx(&slot->timeout_callout, &slot->mtx, 0);
+ if ((slot->quirks & SDHCI_QUIRK_POLL_CARD_PRESENT) &&
+ !(slot->opt & SDHCI_NON_REMOVABLE)) {
+ callout_reset(&slot->card_poll_callout,
+ SDHCI_CARD_PRESENT_TICKS, sdhci_card_poll, slot);
+ }
+
return (0);
}
@@ -671,8 +740,9 @@ sdhci_cleanup_slot(struct sdhci_slot *slot)
device_t d;
callout_drain(&slot->timeout_callout);
- callout_drain(&slot->card_callout);
+ callout_drain(&slot->card_poll_callout);
taskqueue_drain(taskqueue_swi_giant, &slot->card_task);
+ taskqueue_drain_timeout(taskqueue_swi_giant, &slot->card_delayed_task);
SDHCI_LOCK(slot);
d = slot->dev;
@@ -718,6 +788,16 @@ sdhci_generic_min_freq(device_t brdev, struct sdhci_slot *slot)
return (slot->max_clk / SDHCI_200_MAX_DIVIDER);
}
+bool
+sdhci_generic_get_card_present(device_t brdev, struct sdhci_slot *slot)
+{
+
+ if (slot->opt & SDHCI_NON_REMOVABLE)
+ return true;
+
+ return (RD4(slot, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
+}
+
int
sdhci_generic_update_ios(device_t brdev, device_t reqdev)
{
@@ -815,7 +895,7 @@ static void
sdhci_start_command(struct sdhci_slot *slot, struct mmc_command *cmd)
{
int flags, timeout;
- uint32_t mask, state;
+ uint32_t mask;
slot->curcmd = cmd;
slot->cmd_done = 0;
@@ -830,11 +910,9 @@ sdhci_start_command(struct sdhci_slot *slot, struct mmc_command *cmd)
return;
}
- /* Read controller present state. */
- state = RD4(slot, SDHCI_PRESENT_STATE);
/* Do not issue command if there is no card, clock or power.
* Controller will not detect timeout without clock active. */
- if ((state & SDHCI_CARD_PRESENT) == 0 ||
+ if (!SDHCI_GET_CARD_PRESENT(slot->bus, slot) ||
slot->power == 0 ||
slot->clock == 0) {
cmd->error = MMC_ERR_FAILED;
@@ -860,7 +938,7 @@ sdhci_start_command(struct sdhci_slot *slot, struct mmc_command *cmd)
* (It's usually more like 20-30ms in the real world.)
*/
timeout = 250;
- while (state & mask) {
+ while (mask & RD4(slot, SDHCI_PRESENT_STATE)) {
if (timeout == 0) {
slot_printf(slot, "Controller never released "
"inhibit bit(s).\n");
@@ -871,7 +949,6 @@ sdhci_start_command(struct sdhci_slot *slot, struct mmc_command *cmd)
}
timeout--;
DELAY(1000);
- state = RD4(slot, SDHCI_PRESENT_STATE);
}
/* Prepare command flags. */
@@ -1309,7 +1386,7 @@ sdhci_acmd_irq(struct sdhci_slot *slot)
void
sdhci_generic_intr(struct sdhci_slot *slot)
{
- uint32_t intmask;
+ uint32_t intmask, present;
SDHCI_LOCK(slot);
/* Read slot interrupt status. */
@@ -1323,22 +1400,16 @@ sdhci_generic_intr(struct sdhci_slot *slot)
/* Handle card presence interrupts. */
if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
+ present = (intmask & SDHCI_INT_CARD_INSERT) != 0;
+ slot->intmask &=
+ ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
+ slot->intmask |= present ? SDHCI_INT_CARD_REMOVE :
+ SDHCI_INT_CARD_INSERT;
+ WR4(slot, SDHCI_INT_ENABLE, slot->intmask);
+ WR4(slot, SDHCI_SIGNAL_ENABLE, slot->intmask);
WR4(slot, SDHCI_INT_STATUS, intmask &
(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE));
-
- if (intmask & SDHCI_INT_CARD_REMOVE) {
- if (bootverbose || sdhci_debug)
- slot_printf(slot, "Card removed\n");
- callout_stop(&slot->card_callout);
- taskqueue_enqueue(taskqueue_swi_giant,
- &slot->card_task);
- }
- if (intmask & SDHCI_INT_CARD_INSERT) {
- if (bootverbose || sdhci_debug)
- slot_printf(slot, "Card inserted\n");
- callout_reset(&slot->card_callout, hz / 2,
- sdhci_card_delay, slot);
- }
+ sdhci_handle_card_present_locked(slot, present);
intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
}
/* Handle command interrupts. */
diff --git a/sys/dev/sdhci/sdhci.h b/sys/dev/sdhci/sdhci.h
index 2638b4f..4626816 100644
--- a/sys/dev/sdhci/sdhci.h
+++ b/sys/dev/sdhci/sdhci.h
@@ -65,6 +65,14 @@
#define SDHCI_QUIRK_DONT_SET_HISPD_BIT (1<<15)
/* Alternate clock source is required when supplying a 400 KHz clock. */
#define SDHCI_QUIRK_BCM577XX_400KHZ_CLKSRC (1<<16)
+/* Card insert/remove interrupts don't work, polling required. */
+#define SDHCI_QUIRK_POLL_CARD_PRESENT (1<<17)
+/* All controller slots are non-removable. */
+#define SDHCI_QUIRK_ALL_SLOTS_NON_REMOVABLE (1<<18)
+/* Issue custom Intel controller reset sequence after power-up. */
+#define SDHCI_QUIRK_INTEL_POWER_UP_RESET (1<<19)
+/* Data timeout is invalid, use 1 MHz clock instead. */
+#define SDHCI_QUIRK_DATA_TIMEOUT_1MHZ (1<<20)
/*
* Controller registers
@@ -273,8 +281,9 @@ struct sdhci_slot {
device_t dev; /* Slot device */
u_char num; /* Slot number */
u_char opt; /* Slot options */
-#define SDHCI_HAVE_DMA 1
-#define SDHCI_PLATFORM_TRANSFER 2
+#define SDHCI_HAVE_DMA 0x01
+#define SDHCI_PLATFORM_TRANSFER 0x02
+#define SDHCI_NON_REMOVABLE 0x04
u_char version;
int timeout; /* Transfer timeout */
uint32_t max_clk; /* Max possible freq */
@@ -284,7 +293,9 @@ struct sdhci_slot {
u_char *dmamem;
bus_addr_t paddr; /* DMA buffer address */
struct task card_task; /* Card presence check task */
- struct callout card_callout; /* Card insert delay callout */
+ struct timeout_task
+ card_delayed_task;/* Card insert delayed task */
+ struct callout card_poll_callout;/* Card present polling callout */
struct callout timeout_callout;/* Card command/data response timeout */
struct mmc_host host; /* Host parameters */
struct mmc_request *req; /* Current request */
@@ -322,5 +333,7 @@ int sdhci_generic_acquire_host(device_t brdev, device_t reqdev);
int sdhci_generic_release_host(device_t brdev, device_t reqdev);
void sdhci_generic_intr(struct sdhci_slot *slot);
uint32_t sdhci_generic_min_freq(device_t brdev, struct sdhci_slot *slot);
+bool sdhci_generic_get_card_present(device_t brdev, struct sdhci_slot *slot);
+void sdhci_handle_card_present(struct sdhci_slot *slot, bool is_present);
#endif /* __SDHCI_H__ */
diff --git a/sys/dev/sdhci/sdhci_acpi.c b/sys/dev/sdhci/sdhci_acpi.c
new file mode 100644
index 0000000..a2a2cab
--- /dev/null
+++ b/sys/dev/sdhci/sdhci_acpi.c
@@ -0,0 +1,370 @@
+/*-
+ * Copyright (c) 2017 Oleksandr Tymoshenko <gonzo@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/resource.h>
+#include <sys/rman.h>
+#include <sys/sysctl.h>
+#include <sys/taskqueue.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <machine/stdarg.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+#include <dev/acpica/acpivar.h>
+
+#include <dev/mmc/bridge.h>
+#include <dev/mmc/mmcreg.h>
+#include <dev/mmc/mmcbrvar.h>
+
+#include "sdhci.h"
+#include "mmcbr_if.h"
+#include "sdhci_if.h"
+
+static const struct sdhci_acpi_device {
+ const char* hid;
+ int uid;
+ const char *desc;
+ u_int quirks;
+} sdhci_acpi_devices[] = {
+ { "80860F14", 1, "Intel Bay Trail eMMC 4.5 Controller",
+ SDHCI_QUIRK_ALL_SLOTS_NON_REMOVABLE |
+ SDHCI_QUIRK_INTEL_POWER_UP_RESET },
+ { "80860F16", 0, "Intel Bay Trail SD Host Controller",
+ 0 },
+ { NULL, 0, NULL, 0}
+};
+
+static char *sdhci_ids[] = {
+ "80860F14",
+ "80860F16",
+ NULL
+};
+
+struct sdhci_acpi_softc {
+ u_int quirks; /* Chip specific quirks */
+ struct resource *irq_res; /* IRQ resource */
+ void *intrhand; /* Interrupt handle */
+
+ struct sdhci_slot slot;
+ struct resource *mem_res; /* Memory resource */
+};
+
+static void sdhci_acpi_intr(void *arg);
+static int sdhci_acpi_detach(device_t dev);
+
+static uint8_t
+sdhci_acpi_read_1(device_t dev, struct sdhci_slot *slot, bus_size_t off)
+{
+ struct sdhci_acpi_softc *sc = device_get_softc(dev);
+
+ bus_barrier(sc->mem_res, 0, 0xFF,
+ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
+ return bus_read_1(sc->mem_res, off);
+}
+
+static void
+sdhci_acpi_write_1(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint8_t val)
+{
+ struct sdhci_acpi_softc *sc = device_get_softc(dev);
+
+ bus_barrier(sc->mem_res, 0, 0xFF,
+ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
+ bus_write_1(sc->mem_res, off, val);
+}
+
+static uint16_t
+sdhci_acpi_read_2(device_t dev, struct sdhci_slot *slot, bus_size_t off)
+{
+ struct sdhci_acpi_softc *sc = device_get_softc(dev);
+
+ bus_barrier(sc->mem_res, 0, 0xFF,
+ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
+ return bus_read_2(sc->mem_res, off);
+}
+
+static void
+sdhci_acpi_write_2(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint16_t val)
+{
+ struct sdhci_acpi_softc *sc = device_get_softc(dev);
+
+ bus_barrier(sc->mem_res, 0, 0xFF,
+ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
+ bus_write_2(sc->mem_res, off, val);
+}
+
+static uint32_t
+sdhci_acpi_read_4(device_t dev, struct sdhci_slot *slot, bus_size_t off)
+{
+ struct sdhci_acpi_softc *sc = device_get_softc(dev);
+
+ bus_barrier(sc->mem_res, 0, 0xFF,
+ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
+ return bus_read_4(sc->mem_res, off);
+}
+
+static void
+sdhci_acpi_write_4(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint32_t val)
+{
+ struct sdhci_acpi_softc *sc = device_get_softc(dev);
+
+ bus_barrier(sc->mem_res, 0, 0xFF,
+ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
+ bus_write_4(sc->mem_res, off, val);
+}
+
+static void
+sdhci_acpi_read_multi_4(device_t dev, struct sdhci_slot *slot,
+ bus_size_t off, uint32_t *data, bus_size_t count)
+{
+ struct sdhci_acpi_softc *sc = device_get_softc(dev);
+
+ bus_read_multi_stream_4(sc->mem_res, off, data, count);
+}
+
+static void
+sdhci_acpi_write_multi_4(device_t dev, struct sdhci_slot *slot,
+ bus_size_t off, uint32_t *data, bus_size_t count)
+{
+ struct sdhci_acpi_softc *sc = device_get_softc(dev);
+
+ bus_write_multi_stream_4(sc->mem_res, off, data, count);
+}
+
+static const struct sdhci_acpi_device *
+sdhci_acpi_find_device(device_t dev)
+{
+ const char *hid;
+ int i, uid;
+ ACPI_HANDLE handle;
+ ACPI_STATUS status;
+
+ hid = ACPI_ID_PROBE(device_get_parent(dev), dev, sdhci_ids);
+ if (hid == NULL)
+ return (NULL);
+
+ handle = acpi_get_handle(dev);
+ status = acpi_GetInteger(handle, "_UID", &uid);
+ if (ACPI_FAILURE(status))
+ uid = 0;
+
+ for (i = 0; sdhci_acpi_devices[i].hid != NULL; i++) {
+ if (strcmp(sdhci_acpi_devices[i].hid, hid) != 0)
+ continue;
+ if ((sdhci_acpi_devices[i].uid != 0) &&
+ (sdhci_acpi_devices[i].uid != uid))
+ continue;
+ return &sdhci_acpi_devices[i];
+ }
+
+ return (NULL);
+}
+
+static int
+sdhci_acpi_probe(device_t dev)
+{
+ const struct sdhci_acpi_device *acpi_dev;
+
+ acpi_dev = sdhci_acpi_find_device(dev);
+ if (acpi_dev == NULL)
+ return (ENXIO);
+
+ device_set_desc(dev, acpi_dev->desc);
+
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+sdhci_acpi_attach(device_t dev)
+{
+ struct sdhci_acpi_softc *sc = device_get_softc(dev);
+ int rid, err;
+ const struct sdhci_acpi_device *acpi_dev;
+
+ acpi_dev = sdhci_acpi_find_device(dev);
+ if (acpi_dev == NULL)
+ return (ENXIO);
+
+ sc->quirks = acpi_dev->quirks;
+
+ /* Allocate IRQ. */
+ rid = 0;
+ sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
+ RF_ACTIVE);
+ if (sc->irq_res == NULL) {
+ device_printf(dev, "can't allocate IRQ\n");
+ return (ENOMEM);
+ }
+
+ rid = 0;
+ sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
+ &rid, RF_ACTIVE);
+ if (sc->mem_res == NULL) {
+ device_printf(dev, "can't allocate memory resource for slot\n");
+ sdhci_acpi_detach(dev);
+ return (ENOMEM);
+ }
+
+ sc->slot.quirks = sc->quirks;
+
+ err = sdhci_init_slot(dev, &sc->slot, 0);
+ if (err) {
+ device_printf(dev, "failed to init slot\n");
+ sdhci_acpi_detach(dev);
+ return (err);
+ }
+
+ /* Activate the interrupt */
+ err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_MISC | INTR_MPSAFE,
+ NULL, sdhci_acpi_intr, sc, &sc->intrhand);
+ if (err) {
+ device_printf(dev, "can't setup IRQ\n");
+ sdhci_acpi_detach(dev);
+ return (err);
+ }
+
+ /* Process cards detection. */
+ sdhci_start_slot(&sc->slot);
+
+ return (0);
+}
+
+static int
+sdhci_acpi_detach(device_t dev)
+{
+ struct sdhci_acpi_softc *sc = device_get_softc(dev);
+
+ if (sc->intrhand)
+ bus_teardown_intr(dev, sc->irq_res, sc->intrhand);
+ if (sc->irq_res)
+ bus_release_resource(dev, SYS_RES_IRQ,
+ rman_get_rid(sc->irq_res), sc->irq_res);
+
+ if (sc->mem_res) {
+ sdhci_cleanup_slot(&sc->slot);
+ bus_release_resource(dev, SYS_RES_MEMORY,
+ rman_get_rid(sc->mem_res), sc->mem_res);
+ }
+
+ return (0);
+}
+
+static int
+sdhci_acpi_shutdown(device_t dev)
+{
+
+ return (0);
+}
+
+static int
+sdhci_acpi_suspend(device_t dev)
+{
+ struct sdhci_acpi_softc *sc = device_get_softc(dev);
+ int err;
+
+ err = bus_generic_suspend(dev);
+ if (err)
+ return (err);
+ sdhci_generic_suspend(&sc->slot);
+ return (0);
+}
+
+static int
+sdhci_acpi_resume(device_t dev)
+{
+ struct sdhci_acpi_softc *sc = device_get_softc(dev);
+ int err;
+
+ sdhci_generic_resume(&sc->slot);
+ err = bus_generic_resume(dev);
+ if (err)
+ return (err);
+ return (0);
+}
+
+static void
+sdhci_acpi_intr(void *arg)
+{
+ struct sdhci_acpi_softc *sc = (struct sdhci_acpi_softc *)arg;
+
+ sdhci_generic_intr(&sc->slot);
+}
+
+static device_method_t sdhci_methods[] = {
+ /* device_if */
+ DEVMETHOD(device_probe, sdhci_acpi_probe),
+ DEVMETHOD(device_attach, sdhci_acpi_attach),
+ DEVMETHOD(device_detach, sdhci_acpi_detach),
+ DEVMETHOD(device_shutdown, sdhci_acpi_shutdown),
+ DEVMETHOD(device_suspend, sdhci_acpi_suspend),
+ DEVMETHOD(device_resume, sdhci_acpi_resume),
+
+ /* Bus interface */
+ DEVMETHOD(bus_read_ivar, sdhci_generic_read_ivar),
+ DEVMETHOD(bus_write_ivar, sdhci_generic_write_ivar),
+
+ /* mmcbr_if */
+ DEVMETHOD(mmcbr_update_ios, sdhci_generic_update_ios),
+ DEVMETHOD(mmcbr_request, sdhci_generic_request),
+ DEVMETHOD(mmcbr_get_ro, sdhci_generic_get_ro),
+ DEVMETHOD(mmcbr_acquire_host, sdhci_generic_acquire_host),
+ DEVMETHOD(mmcbr_release_host, sdhci_generic_release_host),
+
+ /* SDHCI registers accessors */
+ DEVMETHOD(sdhci_read_1, sdhci_acpi_read_1),
+ DEVMETHOD(sdhci_read_2, sdhci_acpi_read_2),
+ DEVMETHOD(sdhci_read_4, sdhci_acpi_read_4),
+ DEVMETHOD(sdhci_read_multi_4, sdhci_acpi_read_multi_4),
+ DEVMETHOD(sdhci_write_1, sdhci_acpi_write_1),
+ DEVMETHOD(sdhci_write_2, sdhci_acpi_write_2),
+ DEVMETHOD(sdhci_write_4, sdhci_acpi_write_4),
+ DEVMETHOD(sdhci_write_multi_4, sdhci_acpi_write_multi_4),
+
+ DEVMETHOD_END
+};
+
+static driver_t sdhci_acpi_driver = {
+ "sdhci_acpi",
+ sdhci_methods,
+ sizeof(struct sdhci_acpi_softc),
+};
+static devclass_t sdhci_acpi_devclass;
+
+DRIVER_MODULE(sdhci_acpi, acpi, sdhci_acpi_driver, sdhci_acpi_devclass, NULL,
+ NULL);
+MODULE_DEPEND(sdhci_acpi, sdhci, 1, 1, 1);
+DRIVER_MODULE(mmc, sdhci_acpi, mmc_driver, mmc_devclass, NULL, NULL);
+MODULE_DEPEND(sdhci_acpi, mmc, 1, 1, 1);
diff --git a/sys/dev/sdhci/sdhci_if.m b/sys/dev/sdhci/sdhci_if.m
index b33cdcf..da02d31 100644
--- a/sys/dev/sdhci/sdhci_if.m
+++ b/sys/dev/sdhci/sdhci_if.m
@@ -152,3 +152,9 @@ METHOD uint32_t min_freq {
device_t brdev;
struct sdhci_slot *slot;
} DEFAULT sdhci_generic_min_freq;
+
+METHOD bool get_card_present {
+ device_t brdev;
+ struct sdhci_slot *slot;
+} DEFAULT sdhci_generic_get_card_present;
+
diff --git a/sys/dev/sdhci/sdhci_pci.c b/sys/dev/sdhci/sdhci_pci.c
index 1ce4e9d..aec4000 100644
--- a/sys/dev/sdhci/sdhci_pci.c
+++ b/sys/dev/sdhci/sdhci_pci.c
@@ -63,15 +63,15 @@ __FBSDID("$FreeBSD$");
#define PCI_SDHCI_IFVENDOR 0x02
#define PCI_SLOT_INFO 0x40 /* 8 bits */
-#define PCI_SLOT_INFO_SLOTS(x) (((x >> 4) & 7) + 1)
-#define PCI_SLOT_INFO_FIRST_BAR(x) ((x) & 7)
+#define PCI_SLOT_INFO_SLOTS(x) (((x >> 4) & 7) + 1)
+#define PCI_SLOT_INFO_FIRST_BAR(x) ((x) & 7)
/*
* RICOH specific PCI registers
*/
#define SDHC_PCI_MODE_KEY 0xf9
#define SDHC_PCI_MODE 0x150
-#define SDHC_PCI_MODE_SD20 0x10
+#define SDHC_PCI_MODE_SD20 0x10
#define SDHC_PCI_BASE_FREQ_KEY 0xfc
#define SDHC_PCI_BASE_FREQ 0xe1
@@ -107,6 +107,19 @@ static const struct sdhci_device {
SDHCI_QUIRK_RESET_AFTER_REQUEST },
{ 0x16bc14e4, 0xffff, "Broadcom BCM577xx SDXC/MMC Card Reader",
SDHCI_QUIRK_BCM577XX_400KHZ_CLKSRC },
+ { 0x0f148086, 0xffff, "Intel Bay Trail eMMC 4.5 Controller",
+ SDHCI_QUIRK_ALL_SLOTS_NON_REMOVABLE |
+ SDHCI_QUIRK_INTEL_POWER_UP_RESET },
+ { 0x0f508086, 0xffff, "Intel Bay Trail eMMC 4.5 Controller",
+ SDHCI_QUIRK_ALL_SLOTS_NON_REMOVABLE |
+ SDHCI_QUIRK_INTEL_POWER_UP_RESET },
+ { 0x22948086, 0xffff, "Intel Braswell eMMC 4.5.1 Controller",
+ SDHCI_QUIRK_ALL_SLOTS_NON_REMOVABLE |
+ SDHCI_QUIRK_DATA_TIMEOUT_1MHZ |
+ SDHCI_QUIRK_INTEL_POWER_UP_RESET },
+ { 0x5acc8086, 0xffff, "Intel Apollo Lake eMMC 5.0 Controller",
+ SDHCI_QUIRK_ALL_SLOTS_NON_REMOVABLE |
+ SDHCI_QUIRK_INTEL_POWER_UP_RESET },
{ 0, 0xffff, NULL,
0 }
};
@@ -119,8 +132,8 @@ struct sdhci_pci_softc {
int num_slots; /* Number of slots on this controller */
struct sdhci_slot slots[6];
struct resource *mem_res[6]; /* Memory resource */
- uint8_t cfg_freq; /* Saved mode */
- uint8_t cfg_mode; /* Saved frequency */
+ uint8_t cfg_freq; /* Saved frequency */
+ uint8_t cfg_mode; /* Saved mode */
};
static int sdhci_enable_msi = 1;
@@ -450,11 +463,11 @@ static device_method_t sdhci_methods[] = {
DEVMETHOD(bus_write_ivar, sdhci_generic_write_ivar),
/* mmcbr_if */
- DEVMETHOD(mmcbr_update_ios, sdhci_generic_update_ios),
- DEVMETHOD(mmcbr_request, sdhci_generic_request),
- DEVMETHOD(mmcbr_get_ro, sdhci_generic_get_ro),
- DEVMETHOD(mmcbr_acquire_host, sdhci_generic_acquire_host),
- DEVMETHOD(mmcbr_release_host, sdhci_generic_release_host),
+ DEVMETHOD(mmcbr_update_ios, sdhci_generic_update_ios),
+ DEVMETHOD(mmcbr_request, sdhci_generic_request),
+ DEVMETHOD(mmcbr_get_ro, sdhci_generic_get_ro),
+ DEVMETHOD(mmcbr_acquire_host, sdhci_generic_acquire_host),
+ DEVMETHOD(mmcbr_release_host, sdhci_generic_release_host),
/* SDHCI registers accessors */
DEVMETHOD(sdhci_read_1, sdhci_pci_read_1),
diff --git a/sys/dev/sfxge/common/efx_mcdi.c b/sys/dev/sfxge/common/efx_mcdi.c
index e4a918a..4e7ff53 100644
--- a/sys/dev/sfxge/common/efx_mcdi.c
+++ b/sys/dev/sfxge/common/efx_mcdi.c
@@ -1725,7 +1725,8 @@ static __checkReturn efx_rc_t
efx_mcdi_mac_stats(
__in efx_nic_t *enp,
__in_opt efsys_mem_t *esmp,
- __in efx_stats_action_t action)
+ __in efx_stats_action_t action,
+ __in uint16_t period_ms)
{
efx_mcdi_req_t req;
uint8_t payload[MAX(MC_CMD_MAC_STATS_IN_LEN,
@@ -1750,7 +1751,7 @@ efx_mcdi_mac_stats(
MAC_STATS_IN_PERIODIC_CHANGE, enable | events | disable,
MAC_STATS_IN_PERIODIC_ENABLE, enable | events,
MAC_STATS_IN_PERIODIC_NOEVENT, !events,
- MAC_STATS_IN_PERIOD_MS, (enable | events) ? 1000 : 0);
+ MAC_STATS_IN_PERIOD_MS, (enable | events) ? period_ms : 0);
if (esmp != NULL) {
int bytes = MC_CMD_MAC_NSTATS * sizeof (uint64_t);
@@ -1800,7 +1801,7 @@ efx_mcdi_mac_stats_clear(
{
efx_rc_t rc;
- if ((rc = efx_mcdi_mac_stats(enp, NULL, EFX_STATS_CLEAR)) != 0)
+ if ((rc = efx_mcdi_mac_stats(enp, NULL, EFX_STATS_CLEAR, 0)) != 0)
goto fail1;
return (0);
@@ -1823,7 +1824,7 @@ efx_mcdi_mac_stats_upload(
* avoid having to pull the statistics buffer into the cache to
* maintain cumulative statistics.
*/
- if ((rc = efx_mcdi_mac_stats(enp, esmp, EFX_STATS_UPLOAD)) != 0)
+ if ((rc = efx_mcdi_mac_stats(enp, esmp, EFX_STATS_UPLOAD, 0)) != 0)
goto fail1;
return (0);
@@ -1838,7 +1839,7 @@ fail1:
efx_mcdi_mac_stats_periodic(
__in efx_nic_t *enp,
__in efsys_mem_t *esmp,
- __in uint16_t period,
+ __in uint16_t period_ms,
__in boolean_t events)
{
efx_rc_t rc;
@@ -1847,14 +1848,17 @@ efx_mcdi_mac_stats_periodic(
* The MC DMAs aggregate statistics for our convenience, so we can
* avoid having to pull the statistics buffer into the cache to
* maintain cumulative statistics.
- * Huntington uses a fixed 1sec period, so use that on Siena too.
+ * Huntington uses a fixed 1sec period.
+ * Medford uses a fixed 1sec period before v6.2.1.1033 firmware.
*/
- if (period == 0)
- rc = efx_mcdi_mac_stats(enp, NULL, EFX_STATS_DISABLE);
+ if (period_ms == 0)
+ rc = efx_mcdi_mac_stats(enp, NULL, EFX_STATS_DISABLE, 0);
else if (events)
- rc = efx_mcdi_mac_stats(enp, esmp, EFX_STATS_ENABLE_EVENTS);
+ rc = efx_mcdi_mac_stats(enp, esmp, EFX_STATS_ENABLE_EVENTS,
+ period_ms);
else
- rc = efx_mcdi_mac_stats(enp, esmp, EFX_STATS_ENABLE_NOEVENTS);
+ rc = efx_mcdi_mac_stats(enp, esmp, EFX_STATS_ENABLE_NOEVENTS,
+ period_ms);
if (rc != 0)
goto fail1;
diff --git a/sys/dev/sfxge/common/efx_mcdi.h b/sys/dev/sfxge/common/efx_mcdi.h
index ffa50f1..ee11789 100644
--- a/sys/dev/sfxge/common/efx_mcdi.h
+++ b/sys/dev/sfxge/common/efx_mcdi.h
@@ -218,7 +218,7 @@ extern __checkReturn efx_rc_t
efx_mcdi_mac_stats_periodic(
__in efx_nic_t *enp,
__in efsys_mem_t *esmp,
- __in uint16_t period,
+ __in uint16_t period_ms,
__in boolean_t events);
diff --git a/sys/dev/sfxge/sfxge.h b/sys/dev/sfxge/sfxge.h
index 2000652..ed8b49d 100644
--- a/sys/dev/sfxge/sfxge.h
+++ b/sys/dev/sfxge/sfxge.h
@@ -159,6 +159,8 @@ enum sfxge_evq_state {
#define SFXGE_EV_BATCH 16384
+#define SFXGE_STATS_UPDATE_PERIOD_MS 1000
+
struct sfxge_evq {
/* Structure members below are sorted by usage order */
struct sfxge_softc *sc;
@@ -246,6 +248,7 @@ struct sfxge_port {
#endif
struct sfxge_hw_stats phy_stats;
struct sfxge_hw_stats mac_stats;
+ uint16_t stats_update_period_ms;
efx_link_mode_t link_mode;
uint8_t mcast_addrs[EFX_MAC_MULTICAST_LIST_MAX *
EFX_MAC_ADDR_LEN];
diff --git a/sys/dev/sfxge/sfxge_port.c b/sys/dev/sfxge/sfxge_port.c
index 58793ca..5723f31 100644
--- a/sys/dev/sfxge/sfxge_port.c
+++ b/sys/dev/sfxge/sfxge_port.c
@@ -43,6 +43,15 @@ __FBSDID("$FreeBSD$");
#include "sfxge.h"
+#define SFXGE_PARAM_STATS_UPDATE_PERIOD_MS \
+ SFXGE_PARAM(stats_update_period_ms)
+static int sfxge_stats_update_period_ms = SFXGE_STATS_UPDATE_PERIOD_MS;
+TUNABLE_INT(SFXGE_PARAM_STATS_UPDATE_PERIOD_MS,
+ &sfxge_stats_update_period_ms);
+SYSCTL_INT(_hw_sfxge, OID_AUTO, stats_update_period_ms, CTLFLAG_RDTUN,
+ &sfxge_stats_update_period_ms, 0,
+ "netstat interface statistics update period in milliseconds");
+
static int sfxge_phy_cap_mask(struct sfxge_softc *, int, uint32_t *);
static int
@@ -51,6 +60,7 @@ sfxge_mac_stat_update(struct sfxge_softc *sc)
struct sfxge_port *port = &sc->port;
efsys_mem_t *esmp = &(port->mac_stats.dma_buf);
clock_t now;
+ unsigned int min_ticks;
unsigned int count;
int rc;
@@ -61,8 +71,10 @@ sfxge_mac_stat_update(struct sfxge_softc *sc)
goto out;
}
+ min_ticks = (unsigned int)hz * port->stats_update_period_ms / 1000;
+
now = ticks;
- if ((unsigned int)(now - port->mac_stats.update_time) < (unsigned int)hz) {
+ if ((unsigned int)(now - port->mac_stats.update_time) < min_ticks) {
rc = 0;
goto out;
}
@@ -510,9 +522,10 @@ sfxge_port_start(struct sfxge_softc *sc)
sfxge_mac_filter_set_locked(sc);
- /* Update MAC stats by DMA every second */
+ /* Update MAC stats by DMA every period */
if ((rc = efx_mac_stats_periodic(enp, &port->mac_stats.dma_buf,
- 1000, B_FALSE)) != 0)
+ port->stats_update_period_ms,
+ B_FALSE)) != 0)
goto fail6;
if ((rc = efx_mac_drain(enp, B_FALSE)) != 0)
@@ -669,6 +682,68 @@ sfxge_port_fini(struct sfxge_softc *sc)
port->sc = NULL;
}
+static uint16_t
+sfxge_port_stats_update_period_ms(struct sfxge_softc *sc)
+{
+ int period_ms = sfxge_stats_update_period_ms;
+
+ if (period_ms < 0) {
+ device_printf(sc->dev,
+ "treat negative stats update period %d as 0 (disable)\n",
+ period_ms);
+ period_ms = 0;
+ } else if (period_ms > UINT16_MAX) {
+ device_printf(sc->dev,
+ "treat too big stats update period %d as %u\n",
+ period_ms, UINT16_MAX);
+ period_ms = UINT16_MAX;
+ }
+
+ return period_ms;
+}
+
+static int
+sfxge_port_stats_update_period_ms_handler(SYSCTL_HANDLER_ARGS)
+{
+ struct sfxge_softc *sc;
+ struct sfxge_port *port;
+ unsigned int period_ms;
+ int error;
+
+ sc = arg1;
+ port = &sc->port;
+
+ if (req->newptr != NULL) {
+ error = SYSCTL_IN(req, &period_ms, sizeof(period_ms));
+ if (error != 0)
+ return (error);
+
+ if (period_ms > UINT16_MAX)
+ return (EINVAL);
+
+ SFXGE_PORT_LOCK(port);
+
+ if (port->stats_update_period_ms != period_ms) {
+ if (port->init_state == SFXGE_PORT_STARTED)
+ error = efx_mac_stats_periodic(sc->enp,
+ &port->mac_stats.dma_buf,
+ period_ms, B_FALSE);
+ if (error == 0)
+ port->stats_update_period_ms = period_ms;
+ }
+
+ SFXGE_PORT_UNLOCK(port);
+ } else {
+ SFXGE_PORT_LOCK(port);
+ period_ms = port->stats_update_period_ms;
+ SFXGE_PORT_UNLOCK(port);
+
+ error = SYSCTL_OUT(req, &period_ms, sizeof(period_ms));
+ }
+
+ return (error);
+}
+
int
sfxge_port_init(struct sfxge_softc *sc)
{
@@ -717,8 +792,14 @@ sfxge_port_init(struct sfxge_softc *sc)
M_SFXGE, M_WAITOK | M_ZERO);
if ((rc = sfxge_dma_alloc(sc, EFX_MAC_STATS_SIZE, mac_stats_buf)) != 0)
goto fail2;
+ port->stats_update_period_ms = sfxge_port_stats_update_period_ms(sc);
sfxge_mac_stat_init(sc);
+ SYSCTL_ADD_PROC(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
+ "stats_update_period_ms", CTLTYPE_UINT|CTLFLAG_RW, sc, 0,
+ sfxge_port_stats_update_period_ms_handler, "IU",
+ "interface statistics refresh period");
+
port->init_state = SFXGE_PORT_INITIALIZED;
DBGPRINT(sc->dev, "success");
diff --git a/sys/dev/sfxge/sfxge_tx.c b/sys/dev/sfxge/sfxge_tx.c
index 4e31aec..dd78e6e 100644
--- a/sys/dev/sfxge/sfxge_tx.c
+++ b/sys/dev/sfxge/sfxge_tx.c
@@ -363,8 +363,22 @@ static int sfxge_tx_queue_mbuf(struct sfxge_txq *txq, struct mbuf *mbuf)
KASSERT(!txq->blocked, ("txq->blocked"));
+#if SFXGE_TX_PARSE_EARLY
+ /*
+ * If software TSO is used, we still need to copy packet header,
+ * even if we have already parsed it early before enqueue.
+ */
+ if ((mbuf->m_pkthdr.csum_flags & CSUM_TSO) &&
+ (txq->tso_fw_assisted == 0))
+ prefetch_read_many(mbuf->m_data);
+#else
+ /*
+ * Prefetch packet header since we need to parse it and extract
+ * IP ID, TCP sequence number and flags.
+ */
if (mbuf->m_pkthdr.csum_flags & CSUM_TSO)
prefetch_read_many(mbuf->m_data);
+#endif
if (__predict_false(txq->init_state != SFXGE_TXQ_STARTED)) {
rc = EINTR;
diff --git a/sys/dev/sound/pci/hda/hdaa_patches.c b/sys/dev/sound/pci/hda/hdaa_patches.c
index 548cb60..abcb0d1 100644
--- a/sys/dev/sound/pci/hda/hdaa_patches.c
+++ b/sys/dev/sound/pci/hda/hdaa_patches.c
@@ -739,6 +739,12 @@ hdaa_patch_direct(struct hdaa_devinfo *devinfo)
hda_command(dev, HDA_CMD_12BIT(0, devinfo->nid,
0xf88, 0xc0));
break;
+ case HDA_CODEC_ALC1150:
+ if (subid == 0xd9781462) {
+ /* Too low volume on MSI H170 GAMING M3. */
+ hdaa_write_coef(dev, 0x20, 0x07, 0x7cb);
+ }
+ break;
}
if (subid == APPLE_INTEL_MAC)
hda_command(dev, HDA_CMD_12BIT(0, devinfo->nid,
diff --git a/sys/dev/sound/pci/hda/hdac.h b/sys/dev/sound/pci/hda/hdac.h
index 788e5ad..2737f89 100644
--- a/sys/dev/sound/pci/hda/hdac.h
+++ b/sys/dev/sound/pci/hda/hdac.h
@@ -368,6 +368,7 @@
#define HDA_CODEC_ALC889 HDA_CODEC_CONSTRUCT(REALTEK, 0x0889)
#define HDA_CODEC_ALC892 HDA_CODEC_CONSTRUCT(REALTEK, 0x0892)
#define HDA_CODEC_ALC899 HDA_CODEC_CONSTRUCT(REALTEK, 0x0899)
+#define HDA_CODEC_ALC1150 HDA_CODEC_CONSTRUCT(REALTEK, 0x0900)
#define HDA_CODEC_ALCXXXX HDA_CODEC_CONSTRUCT(REALTEK, 0xffff)
/* Motorola */
diff --git a/sys/dev/sound/pci/hda/hdacc.c b/sys/dev/sound/pci/hda/hdacc.c
index c8d617e..d186a82 100644
--- a/sys/dev/sound/pci/hda/hdacc.c
+++ b/sys/dev/sound/pci/hda/hdacc.c
@@ -111,6 +111,7 @@ static const struct {
{ HDA_CODEC_ALC889, 0, "Realtek ALC889" },
{ HDA_CODEC_ALC892, 0, "Realtek ALC892" },
{ HDA_CODEC_ALC899, 0, "Realtek ALC899" },
+ { HDA_CODEC_ALC1150, 0, "Realtek ALC1150" },
{ HDA_CODEC_AD1882, 0, "Analog Devices AD1882" },
{ HDA_CODEC_AD1882A, 0, "Analog Devices AD1882A" },
{ HDA_CODEC_AD1883, 0, "Analog Devices AD1883" },
diff --git a/sys/dev/usb/usb_hub.c b/sys/dev/usb/usb_hub.c
index 90a85fc..d4a38a8 100644
--- a/sys/dev/usb/usb_hub.c
+++ b/sys/dev/usb/usb_hub.c
@@ -2261,6 +2261,11 @@ usb_needs_explore(struct usb_bus *bus, uint8_t do_probe)
DPRINTF("\n");
+ if (cold != 0) {
+ DPRINTF("Cold\n");
+ return;
+ }
+
if (bus == NULL) {
DPRINTF("No bus pointer!\n");
return;
@@ -2326,6 +2331,26 @@ usb_needs_explore_all(void)
}
/*------------------------------------------------------------------------*
+ * usb_needs_explore_init
+ *
+ * This function will ensure that the USB controllers are not enumerated
+ * until the "cold" variable is cleared.
+ *------------------------------------------------------------------------*/
+static void
+usb_needs_explore_init(void *arg)
+{
+ /*
+ * The cold variable should be cleared prior to this function
+ * being called:
+ */
+ if (cold == 0)
+ usb_needs_explore_all();
+ else
+ DPRINTFN(-1, "Cold variable is still set!\n");
+}
+SYSINIT(usb_needs_explore_init, SI_SUB_KICK_SCHEDULER, SI_ORDER_SECOND, usb_needs_explore_init, NULL);
+
+/*------------------------------------------------------------------------*
* usb_bus_power_update
*
* This function will ensure that all USB devices on the given bus are
diff --git a/sys/dev/usb/usb_process.c b/sys/dev/usb/usb_process.c
index 683c700..16d93b4 100644
--- a/sys/dev/usb/usb_process.c
+++ b/sys/dev/usb/usb_process.c
@@ -454,14 +454,15 @@ usb_proc_drain(struct usb_process *up)
up->up_csleep = 0;
cv_signal(&up->up_cv);
}
+#ifndef EARLY_AP_STARTUP
/* Check if we are still cold booted */
-
if (cold) {
USB_THREAD_SUSPEND(up->up_ptr);
printf("WARNING: A USB process has "
"been left suspended\n");
break;
}
+#endif
cv_wait(&up->up_cv, up->up_mtx);
}
/* Check if someone is waiting - should not happen */
diff --git a/sys/fs/nfs/nfs_commonsubs.c b/sys/fs/nfs/nfs_commonsubs.c
index 7b65a60..d41856e 100644
--- a/sys/fs/nfs/nfs_commonsubs.c
+++ b/sys/fs/nfs/nfs_commonsubs.c
@@ -2047,7 +2047,7 @@ nfsv4_fillattr(struct nfsrv_descript *nd, struct mount *mp, vnode_t vp,
nfsattrbit_t *retbitp = &retbits;
u_int32_t freenum, *retnump;
u_int64_t uquad;
- struct statfs fs;
+ struct statfs *fs;
struct nfsfsinfo fsinf;
struct timespec temptime;
NFSACL_T *aclp, *naclp = NULL;
@@ -2079,11 +2079,13 @@ nfsv4_fillattr(struct nfsrv_descript *nd, struct mount *mp, vnode_t vp,
/*
* Get the VFS_STATFS(), since some attributes need them.
*/
+ fs = malloc(sizeof(struct statfs), M_STATFS, M_WAITOK);
if (NFSISSETSTATFS_ATTRBIT(retbitp)) {
- error = VFS_STATFS(mp, &fs);
+ error = VFS_STATFS(mp, fs);
if (error != 0) {
if (reterr) {
nd->nd_repstat = NFSERR_ACCES;
+ free(fs, M_STATFS);
return (0);
}
NFSCLRSTATFS_ATTRBIT(retbitp);
@@ -2115,6 +2117,7 @@ nfsv4_fillattr(struct nfsrv_descript *nd, struct mount *mp, vnode_t vp,
if (error != 0) {
if (reterr) {
nd->nd_repstat = NFSERR_ACCES;
+ free(fs, M_STATFS);
return (0);
}
NFSCLRBIT_ATTRBIT(retbitp, NFSATTRBIT_ACL);
@@ -2256,7 +2259,7 @@ nfsv4_fillattr(struct nfsrv_descript *nd, struct mount *mp, vnode_t vp,
/*
* Check quota and use min(quota, f_ffree).
*/
- freenum = fs.f_ffree;
+ freenum = fs->f_ffree;
#ifdef QUOTA
/*
* ufs_quotactl() insists that the uid argument
@@ -2279,13 +2282,13 @@ nfsv4_fillattr(struct nfsrv_descript *nd, struct mount *mp, vnode_t vp,
case NFSATTRBIT_FILESFREE:
NFSM_BUILD(tl, u_int32_t *, NFSX_HYPER);
*tl++ = 0;
- *tl = txdr_unsigned(fs.f_ffree);
+ *tl = txdr_unsigned(fs->f_ffree);
retnum += NFSX_HYPER;
break;
case NFSATTRBIT_FILESTOTAL:
NFSM_BUILD(tl, u_int32_t *, NFSX_HYPER);
*tl++ = 0;
- *tl = txdr_unsigned(fs.f_files);
+ *tl = txdr_unsigned(fs->f_files);
retnum += NFSX_HYPER;
break;
case NFSATTRBIT_FSLOCATIONS:
@@ -2361,9 +2364,9 @@ nfsv4_fillattr(struct nfsrv_descript *nd, struct mount *mp, vnode_t vp,
break;
case NFSATTRBIT_QUOTAHARD:
if (priv_check_cred(cred, PRIV_VFS_EXCEEDQUOTA, 0))
- freenum = fs.f_bfree;
+ freenum = fs->f_bfree;
else
- freenum = fs.f_bavail;
+ freenum = fs->f_bavail;
#ifdef QUOTA
/*
* ufs_quotactl() insists that the uid argument
@@ -2379,15 +2382,15 @@ nfsv4_fillattr(struct nfsrv_descript *nd, struct mount *mp, vnode_t vp,
#endif /* QUOTA */
NFSM_BUILD(tl, u_int32_t *, NFSX_HYPER);
uquad = (u_int64_t)freenum;
- NFSQUOTABLKTOBYTE(uquad, fs.f_bsize);
+ NFSQUOTABLKTOBYTE(uquad, fs->f_bsize);
txdr_hyper(uquad, tl);
retnum += NFSX_HYPER;
break;
case NFSATTRBIT_QUOTASOFT:
if (priv_check_cred(cred, PRIV_VFS_EXCEEDQUOTA, 0))
- freenum = fs.f_bfree;
+ freenum = fs->f_bfree;
else
- freenum = fs.f_bavail;
+ freenum = fs->f_bavail;
#ifdef QUOTA
/*
* ufs_quotactl() insists that the uid argument
@@ -2403,7 +2406,7 @@ nfsv4_fillattr(struct nfsrv_descript *nd, struct mount *mp, vnode_t vp,
#endif /* QUOTA */
NFSM_BUILD(tl, u_int32_t *, NFSX_HYPER);
uquad = (u_int64_t)freenum;
- NFSQUOTABLKTOBYTE(uquad, fs.f_bsize);
+ NFSQUOTABLKTOBYTE(uquad, fs->f_bsize);
txdr_hyper(uquad, tl);
retnum += NFSX_HYPER;
break;
@@ -2424,7 +2427,7 @@ nfsv4_fillattr(struct nfsrv_descript *nd, struct mount *mp, vnode_t vp,
#endif /* QUOTA */
NFSM_BUILD(tl, u_int32_t *, NFSX_HYPER);
uquad = (u_int64_t)freenum;
- NFSQUOTABLKTOBYTE(uquad, fs.f_bsize);
+ NFSQUOTABLKTOBYTE(uquad, fs->f_bsize);
txdr_hyper(uquad, tl);
retnum += NFSX_HYPER;
break;
@@ -2437,24 +2440,24 @@ nfsv4_fillattr(struct nfsrv_descript *nd, struct mount *mp, vnode_t vp,
case NFSATTRBIT_SPACEAVAIL:
NFSM_BUILD(tl, u_int32_t *, NFSX_HYPER);
if (priv_check_cred(cred, PRIV_VFS_BLOCKRESERVE, 0))
- uquad = (u_int64_t)fs.f_bfree;
+ uquad = (u_int64_t)fs->f_bfree;
else
- uquad = (u_int64_t)fs.f_bavail;
- uquad *= fs.f_bsize;
+ uquad = (u_int64_t)fs->f_bavail;
+ uquad *= fs->f_bsize;
txdr_hyper(uquad, tl);
retnum += NFSX_HYPER;
break;
case NFSATTRBIT_SPACEFREE:
NFSM_BUILD(tl, u_int32_t *, NFSX_HYPER);
- uquad = (u_int64_t)fs.f_bfree;
- uquad *= fs.f_bsize;
+ uquad = (u_int64_t)fs->f_bfree;
+ uquad *= fs->f_bsize;
txdr_hyper(uquad, tl);
retnum += NFSX_HYPER;
break;
case NFSATTRBIT_SPACETOTAL:
NFSM_BUILD(tl, u_int32_t *, NFSX_HYPER);
- uquad = (u_int64_t)fs.f_blocks;
- uquad *= fs.f_bsize;
+ uquad = (u_int64_t)fs->f_blocks;
+ uquad *= fs->f_bsize;
txdr_hyper(uquad, tl);
retnum += NFSX_HYPER;
break;
@@ -2531,6 +2534,7 @@ nfsv4_fillattr(struct nfsrv_descript *nd, struct mount *mp, vnode_t vp,
}
if (naclp != NULL)
acl_free(naclp);
+ free(fs, M_STATFS);
*retnump = txdr_unsigned(retnum);
return (retnum + prefixnum);
}
diff --git a/sys/fs/nfsclient/nfs_clvnops.c b/sys/fs/nfsclient/nfs_clvnops.c
index 11de5c2..663c4f4 100644
--- a/sys/fs/nfsclient/nfs_clvnops.c
+++ b/sys/fs/nfsclient/nfs_clvnops.c
@@ -3135,8 +3135,8 @@ nfs_print(struct vop_print_args *ap)
struct vnode *vp = ap->a_vp;
struct nfsnode *np = VTONFS(vp);
- printf("\tfileid %ld fsid 0x%x", np->n_vattr.na_fileid,
- np->n_vattr.na_fsid);
+ printf("\tfileid %jd fsid 0x%jx", (uintmax_t)np->n_vattr.na_fileid,
+ (uintmax_t)np->n_vattr.na_fsid);
if (vp->v_type == VFIFO)
fifo_printinfo(vp);
printf("\n");
diff --git a/sys/fs/nfsserver/nfs_nfsdport.c b/sys/fs/nfsserver/nfs_nfsdport.c
index a53fb5a..0704808 100644
--- a/sys/fs/nfsserver/nfs_nfsdport.c
+++ b/sys/fs/nfsserver/nfs_nfsdport.c
@@ -2018,25 +2018,17 @@ again:
}
/*
- * Check to see if entries in this directory can be safely acquired
- * via VFS_VGET() or if a switch to VOP_LOOKUP() is required.
- * ZFS snapshot directories need VOP_LOOKUP(), so that any
- * automount of the snapshot directory that is required will
- * be done.
- * This needs to be done here for NFSv4, since NFSv4 never does
- * a VFS_VGET() for "." or "..".
+ * For now ZFS requires VOP_LOOKUP as a workaround. Until ino_t is changed
+ * to 64 bit type a ZFS filesystem with over 1 billion files in it
+ * will suffer from 64bit -> 32bit truncation.
*/
- if (is_zfs == 1) {
- r = VFS_VGET(mp, at.na_fileid, LK_SHARED, &nvp);
- if (r == EOPNOTSUPP) {
- usevget = 0;
- cn.cn_nameiop = LOOKUP;
- cn.cn_lkflags = LK_SHARED | LK_RETRY;
- cn.cn_cred = nd->nd_cred;
- cn.cn_thread = p;
- } else if (r == 0)
- vput(nvp);
- }
+ if (is_zfs == 1)
+ usevget = 0;
+
+ cn.cn_nameiop = LOOKUP;
+ cn.cn_lkflags = LK_SHARED | LK_RETRY;
+ cn.cn_cred = nd->nd_cred;
+ cn.cn_thread = p;
/*
* Save this position, in case there is an error before one entry
@@ -2105,16 +2097,7 @@ again:
else
r = EOPNOTSUPP;
if (r == EOPNOTSUPP) {
- if (usevget) {
- usevget = 0;
- cn.cn_nameiop = LOOKUP;
- cn.cn_lkflags =
- LK_SHARED |
- LK_RETRY;
- cn.cn_cred =
- nd->nd_cred;
- cn.cn_thread = p;
- }
+ usevget = 0;
cn.cn_nameptr = dp->d_name;
cn.cn_namelen = nlen;
cn.cn_flags = ISLASTCN |
diff --git a/sys/fs/nfsserver/nfs_nfsdserv.c b/sys/fs/nfsserver/nfs_nfsdserv.c
index 54fad67..8102c58 100644
--- a/sys/fs/nfsserver/nfs_nfsdserv.c
+++ b/sys/fs/nfsserver/nfs_nfsdserv.c
@@ -2035,14 +2035,14 @@ nfsrvd_statfs(struct nfsrv_descript *nd, __unused int isdgram,
u_int32_t *tl;
int getret = 1;
struct nfsvattr at;
- struct statfs sfs;
u_quad_t tval;
+ sf = NULL;
if (nd->nd_repstat) {
nfsrv_postopattr(nd, getret, &at);
goto out;
}
- sf = &sfs;
+ sf = malloc(sizeof(struct statfs), M_STATFS, M_WAITOK);
nd->nd_repstat = nfsvno_statfs(vp, sf);
getret = nfsvno_getattr(vp, &at, nd->nd_cred, p, 1);
vput(vp);
@@ -2078,6 +2078,7 @@ nfsrvd_statfs(struct nfsrv_descript *nd, __unused int isdgram,
}
out:
+ free(sf, M_STATFS);
NFSEXITCODE2(0, nd);
return (0);
}
@@ -3603,19 +3604,20 @@ nfsrvd_verify(struct nfsrv_descript *nd, int isdgram,
{
int error = 0, ret, fhsize = NFSX_MYFH;
struct nfsvattr nva;
- struct statfs sf;
+ struct statfs *sf;
struct nfsfsinfo fs;
fhandle_t fh;
+ sf = malloc(sizeof(struct statfs), M_STATFS, M_WAITOK);
nd->nd_repstat = nfsvno_getattr(vp, &nva, nd->nd_cred, p, 1);
if (!nd->nd_repstat)
- nd->nd_repstat = nfsvno_statfs(vp, &sf);
+ nd->nd_repstat = nfsvno_statfs(vp, sf);
if (!nd->nd_repstat)
nd->nd_repstat = nfsvno_getfh(vp, &fh, p);
if (!nd->nd_repstat) {
nfsvno_getfs(&fs, isdgram);
error = nfsv4_loadattr(nd, vp, &nva, NULL, &fh, fhsize, NULL,
- &sf, NULL, &fs, NULL, 1, &ret, NULL, NULL, p, nd->nd_cred);
+ sf, NULL, &fs, NULL, 1, &ret, NULL, NULL, p, nd->nd_cred);
if (!error) {
if (nd->nd_procnum == NFSV4OP_NVERIFY) {
if (ret == 0)
@@ -3627,6 +3629,7 @@ nfsrvd_verify(struct nfsrv_descript *nd, int isdgram,
}
}
vput(vp);
+ free(sf, M_STATFS);
NFSEXITCODE2(error, nd);
return (error);
}
diff --git a/sys/fs/nullfs/null_vfsops.c b/sys/fs/nullfs/null_vfsops.c
index de05e8b..87b89da 100644
--- a/sys/fs/nullfs/null_vfsops.c
+++ b/sys/fs/nullfs/null_vfsops.c
@@ -301,29 +301,33 @@ nullfs_statfs(mp, sbp)
struct statfs *sbp;
{
int error;
- struct statfs mstat;
+ struct statfs *mstat;
NULLFSDEBUG("nullfs_statfs(mp = %p, vp = %p->%p)\n", (void *)mp,
(void *)MOUNTTONULLMOUNT(mp)->nullm_rootvp,
(void *)NULLVPTOLOWERVP(MOUNTTONULLMOUNT(mp)->nullm_rootvp));
- bzero(&mstat, sizeof(mstat));
+ mstat = malloc(sizeof(struct statfs), M_STATFS, M_WAITOK | M_ZERO);
- error = VFS_STATFS(MOUNTTONULLMOUNT(mp)->nullm_vfs, &mstat);
- if (error)
+ error = VFS_STATFS(MOUNTTONULLMOUNT(mp)->nullm_vfs, mstat);
+ if (error) {
+ free(mstat, M_STATFS);
return (error);
+ }
/* now copy across the "interesting" information and fake the rest */
- sbp->f_type = mstat.f_type;
+ sbp->f_type = mstat->f_type;
sbp->f_flags = (sbp->f_flags & (MNT_RDONLY | MNT_NOEXEC | MNT_NOSUID |
- MNT_UNION | MNT_NOSYMFOLLOW)) | (mstat.f_flags & ~MNT_ROOTFS);
- sbp->f_bsize = mstat.f_bsize;
- sbp->f_iosize = mstat.f_iosize;
- sbp->f_blocks = mstat.f_blocks;
- sbp->f_bfree = mstat.f_bfree;
- sbp->f_bavail = mstat.f_bavail;
- sbp->f_files = mstat.f_files;
- sbp->f_ffree = mstat.f_ffree;
+ MNT_UNION | MNT_NOSYMFOLLOW)) | (mstat->f_flags & ~MNT_ROOTFS);
+ sbp->f_bsize = mstat->f_bsize;
+ sbp->f_iosize = mstat->f_iosize;
+ sbp->f_blocks = mstat->f_blocks;
+ sbp->f_bfree = mstat->f_bfree;
+ sbp->f_bavail = mstat->f_bavail;
+ sbp->f_files = mstat->f_files;
+ sbp->f_ffree = mstat->f_ffree;
+
+ free(mstat, M_STATFS);
return (0);
}
diff --git a/sys/fs/pseudofs/pseudofs_vncache.c b/sys/fs/pseudofs/pseudofs_vncache.c
index 1bec5a4..093d805 100644
--- a/sys/fs/pseudofs/pseudofs_vncache.c
+++ b/sys/fs/pseudofs/pseudofs_vncache.c
@@ -51,6 +51,7 @@ static struct mtx pfs_vncache_mutex;
static struct pfs_vdata *pfs_vncache;
static eventhandler_tag pfs_exit_tag;
static void pfs_exit(void *arg, struct proc *p);
+static void pfs_purge_locked(struct pfs_node *pn, bool force);
static SYSCTL_NODE(_vfs_pfs, OID_AUTO, vncache, CTLFLAG_RW, 0,
"pseudofs vnode cache");
@@ -97,6 +98,9 @@ pfs_vncache_unload(void)
{
EVENTHANDLER_DEREGISTER(process_exit, pfs_exit_tag);
+ mtx_lock(&pfs_vncache_mutex);
+ pfs_purge_locked(NULL, true);
+ mtx_unlock(&pfs_vncache_mutex);
KASSERT(pfs_vncache_entries == 0,
("%d vncache entries remaining", pfs_vncache_entries));
mtx_destroy(&pfs_vncache_mutex);
@@ -272,7 +276,7 @@ pfs_vncache_free(struct vnode *vp)
* used to implement the cache.
*/
static void
-pfs_purge_locked(struct pfs_node *pn)
+pfs_purge_locked(struct pfs_node *pn, bool force)
{
struct pfs_vdata *pvd;
struct vnode *vnp;
@@ -280,7 +284,8 @@ pfs_purge_locked(struct pfs_node *pn)
mtx_assert(&pfs_vncache_mutex, MA_OWNED);
pvd = pfs_vncache;
while (pvd != NULL) {
- if (pvd->pvd_dead || (pn != NULL && pvd->pvd_pn == pn)) {
+ if (force || pvd->pvd_dead ||
+ (pn != NULL && pvd->pvd_pn == pn)) {
vnp = pvd->pvd_vnode;
vhold(vnp);
mtx_unlock(&pfs_vncache_mutex);
@@ -301,7 +306,7 @@ pfs_purge(struct pfs_node *pn)
{
mtx_lock(&pfs_vncache_mutex);
- pfs_purge_locked(pn);
+ pfs_purge_locked(pn, false);
mtx_unlock(&pfs_vncache_mutex);
}
@@ -321,6 +326,6 @@ pfs_exit(void *arg, struct proc *p)
if (pvd->pvd_pid == p->p_pid)
dead = pvd->pvd_dead = 1;
if (dead)
- pfs_purge_locked(NULL);
+ pfs_purge_locked(NULL, false);
mtx_unlock(&pfs_vncache_mutex);
}
diff --git a/sys/fs/tmpfs/tmpfs.h b/sys/fs/tmpfs/tmpfs.h
index b077489..37e5bbb 100644
--- a/sys/fs/tmpfs/tmpfs.h
+++ b/sys/fs/tmpfs/tmpfs.h
@@ -80,8 +80,10 @@ struct tmpfs_dirent {
uint32_t td_hash;
u_int td_namelen;
- /* Pointer to the node this entry refers to. In case this field
- * is NULL, the node is a whiteout. */
+ /*
+ * Pointer to the node this entry refers to. In case this field
+ * is NULL, the node is a whiteout.
+ */
struct tmpfs_node * td_node;
union {
@@ -94,21 +96,24 @@ struct tmpfs_dirent {
} ud;
};
-/* A directory in tmpfs holds a list of directory entries, which in
- * turn point to other files (which can be directories themselves).
+/*
+ * A directory in tmpfs holds a collection of directory entries, which
+ * in turn point to other files (which can be directories themselves).
*
- * In tmpfs, this list is managed by a RB-Tree, whose head is defined by
- * the struct tmpfs_dir type.
+ * In tmpfs, this collection is managed by a RB-Tree, whose head is
+ * defined by the struct tmpfs_dir type.
*
* It is important to notice that directories do not have entries for . and
* .. as other file systems do. These can be generated when requested
* based on information available by other means, such as the pointer to
* the node itself in the former case or the pointer to the parent directory
* in the latter case. This is done to simplify tmpfs's code and, more
- * importantly, to remove redundancy. */
+ * importantly, to remove redundancy.
+ */
RB_HEAD(tmpfs_dir, tmpfs_dirent);
-/* Each entry in a directory has a cookie that identifies it. Cookies
+/*
+ * Each entry in a directory has a cookie that identifies it. Cookies
* supersede offsets within directories because, given how tmpfs stores
* directories in memory, there is no such thing as an offset.
*
@@ -139,51 +144,67 @@ RB_HEAD(tmpfs_dir, tmpfs_dirent);
* a particular type. The code must be careful to only access those
* attributes that are actually allowed by the node's type.
*
- *
* Below is the key of locks used to protected the fields in the following
* structures.
- *
+ * (v) vnode lock in exclusive mode
+ * (vi) vnode lock in exclusive mode, or vnode lock in shared vnode and
+ * tn_interlock
+ * (i) tn_interlock
+ * (m) tmpfs_mount tm_allnode_lock
+ * (c) stable after creation
*/
struct tmpfs_node {
- /* Doubly-linked list entry which links all existing nodes for a
- * single file system. This is provided to ease the removal of
- * all nodes during the unmount operation. */
- LIST_ENTRY(tmpfs_node) tn_entries;
+ /*
+ * Doubly-linked list entry which links all existing nodes for
+ * a single file system. This is provided to ease the removal
+ * of all nodes during the unmount operation, and to support
+ * the implementation of VOP_VNTOCNP(). tn_attached is false
+ * when the node is removed from list and unlocked.
+ */
+ LIST_ENTRY(tmpfs_node) tn_entries; /* (m) */
+ bool tn_attached; /* (m) */
- /* The node's type. Any of 'VBLK', 'VCHR', 'VDIR', 'VFIFO',
+ /*
+ * The node's type. Any of 'VBLK', 'VCHR', 'VDIR', 'VFIFO',
* 'VLNK', 'VREG' and 'VSOCK' is allowed. The usage of vnode
* types instead of a custom enumeration is to make things simpler
- * and faster, as we do not need to convert between two types. */
- enum vtype tn_type;
+ * and faster, as we do not need to convert between two types.
+ */
+ enum vtype tn_type; /* (c) */
/* Node identifier. */
- ino_t tn_id;
+ ino_t tn_id; /* (c) */
- /* Node's internal status. This is used by several file system
+ /*
+ * Node's internal status. This is used by several file system
* operations to do modifications to the node in a delayed
- * fashion. */
- int tn_status;
+ * fashion.
+ */
+ int tn_status; /* (vi) */
#define TMPFS_NODE_ACCESSED (1 << 1)
#define TMPFS_NODE_MODIFIED (1 << 2)
#define TMPFS_NODE_CHANGED (1 << 3)
- /* The node size. It does not necessarily match the real amount
- * of memory consumed by it. */
- off_t tn_size;
+ /*
+ * The node size. It does not necessarily match the real amount
+ * of memory consumed by it.
+ */
+ off_t tn_size; /* (v) */
/* Generic node attributes. */
- uid_t tn_uid;
- gid_t tn_gid;
- mode_t tn_mode;
- u_long tn_flags;
- nlink_t tn_links;
- struct timespec tn_atime;
- struct timespec tn_mtime;
- struct timespec tn_ctime;
- struct timespec tn_birthtime;
- unsigned long tn_gen;
-
- /* As there is a single vnode for each active file within the
+ uid_t tn_uid; /* (v) */
+ gid_t tn_gid; /* (v) */
+ mode_t tn_mode; /* (v) */
+ u_long tn_flags; /* (v) */
+ nlink_t tn_links; /* (v) */
+ struct timespec tn_atime; /* (vi) */
+ struct timespec tn_mtime; /* (vi) */
+ struct timespec tn_ctime; /* (vi) */
+ struct timespec tn_birthtime; /* (v) */
+ unsigned long tn_gen; /* (c) */
+
+ /*
+ * As there is a single vnode for each active file within the
* system, care has to be taken to avoid allocating more than one
* vnode per file. In order to do this, a bidirectional association
* is kept between vnodes and nodes.
@@ -196,74 +217,84 @@ struct tmpfs_node {
* tn_vnode.
*
* May be NULL when the node is unused (that is, no vnode has been
- * allocated for it or it has been reclaimed). */
- struct vnode * tn_vnode;
+ * allocated for it or it has been reclaimed).
+ */
+ struct vnode * tn_vnode; /* (i) */
- /* interlock to protect tn_vpstate */
+ /*
+ * Interlock to protect tn_vpstate, and tn_status under shared
+ * vnode lock.
+ */
struct mtx tn_interlock;
- /* Identify if current node has vnode assiocate with
+ /*
+ * Identify if current node has vnode assiocate with
* or allocating vnode.
*/
- int tn_vpstate;
+ int tn_vpstate; /* (i) */
+
+ /* Transient refcounter on this node. */
+ u_int tn_refcount; /* (m) + (i) */
/* misc data field for different tn_type node */
union {
/* Valid when tn_type == VBLK || tn_type == VCHR. */
- dev_t tn_rdev;
+ dev_t tn_rdev; /* (c) */
/* Valid when tn_type == VDIR. */
struct tn_dir {
- /* Pointer to the parent directory. The root
+ /*
+ * Pointer to the parent directory. The root
* directory has a pointer to itself in this field;
- * this property identifies the root node. */
+ * this property identifies the root node.
+ */
struct tmpfs_node * tn_parent;
- /* Head of a tree that links the contents of
- * the directory together. */
+ /*
+ * Head of a tree that links the contents of
+ * the directory together.
+ */
struct tmpfs_dir tn_dirhead;
- /* Head of a list the contains fake directory entries
+ /*
+ * Head of a list the contains fake directory entries
* heads, i.e. entries with TMPFS_DIRCOOKIE_DUPHEAD
- * flag. */
+ * flag.
+ */
struct tmpfs_dir_duphead tn_dupindex;
- /* Number and pointer of the first directory entry
+ /*
+ * Number and pointer of the first directory entry
* returned by the readdir operation if it were
* called again to continue reading data from the
* same directory as before. This is used to speed
* up reads of long directories, assuming that no
* more than one read is in progress at a given time.
- * Otherwise, these values are discarded. */
+ * Otherwise, these values are discarded.
+ */
off_t tn_readdir_lastn;
struct tmpfs_dirent * tn_readdir_lastp;
} tn_dir;
/* Valid when tn_type == VLNK. */
/* The link's target, allocated from a string pool. */
- char * tn_link;
+ char * tn_link; /* (c) */
/* Valid when tn_type == VREG. */
struct tn_reg {
- /* The contents of regular files stored in a tmpfs
- * file system are represented by a single anonymous
- * memory object (aobj, for short). The aobj provides
- * direct access to any position within the file,
- * because its contents are always mapped in a
- * contiguous region of virtual memory. It is a task
- * of the memory management subsystem (see uvm(9)) to
- * issue the required page ins or page outs whenever
- * a position within the file is accessed. */
- vm_object_t tn_aobj;
-
- }tn_reg;
-
- /* Valid when tn_type = VFIFO */
- struct tn_fifo {
- fo_rdwr_t *tn_fo_read;
- fo_rdwr_t *tn_fo_write;
- }tn_fifo;
- }tn_spec;
+ /*
+ * The contents of regular files stored in a
+ * tmpfs file system are represented by a
+ * single anonymous memory object (aobj, for
+ * short). The aobj provides direct access to
+ * any position within the file. It is a task
+ * of the memory management subsystem to issue
+ * the required page ins or page outs whenever
+ * a position within the file is accessed.
+ */
+ vm_object_t tn_aobj; /* (c) */
+ } tn_reg;
+ } tn_spec; /* (v) */
};
LIST_HEAD(tmpfs_node_list, tmpfs_node);
@@ -281,21 +312,12 @@ LIST_HEAD(tmpfs_node_list, tmpfs_node);
#ifdef INVARIANTS
#define TMPFS_ASSERT_LOCKED(node) do { \
- MPASS(node != NULL); \
- MPASS(node->tn_vnode != NULL); \
- if (!VOP_ISLOCKED(node->tn_vnode) && \
- !mtx_owned(TMPFS_NODE_MTX(node))) \
- panic("tmpfs: node is not locked: %p", node); \
- } while (0)
-#define TMPFS_ASSERT_ELOCKED(node) do { \
MPASS((node) != NULL); \
MPASS((node)->tn_vnode != NULL); \
- mtx_assert(TMPFS_NODE_MTX(node), MA_OWNED); \
- ASSERT_VOP_LOCKED((node)->tn_vnode, "tmpfs"); \
+ ASSERT_VOP_LOCKED((node)->tn_vnode, "tmpfs assert"); \
} while (0)
#else
#define TMPFS_ASSERT_LOCKED(node) (void)0
-#define TMPFS_ASSERT_ELOCKED(node) (void)0
#endif
#define TMPFS_VNODE_ALLOCATING 1
@@ -307,26 +329,32 @@ LIST_HEAD(tmpfs_node_list, tmpfs_node);
* Internal representation of a tmpfs mount point.
*/
struct tmpfs_mount {
- /* Maximum number of memory pages available for use by the file
+ /*
+ * Maximum number of memory pages available for use by the file
* system, set during mount time. This variable must never be
* used directly as it may be bigger than the current amount of
- * free memory; in the extreme case, it will hold the SIZE_MAX
- * value. */
- size_t tm_pages_max;
+ * free memory; in the extreme case, it will hold the ULONG_MAX
+ * value.
+ */
+ u_long tm_pages_max;
/* Number of pages in use by the file system. */
- size_t tm_pages_used;
+ u_long tm_pages_used;
- /* Pointer to the node representing the root directory of this
- * file system. */
+ /*
+ * Pointer to the node representing the root directory of this
+ * file system.
+ */
struct tmpfs_node * tm_root;
- /* Maximum number of possible nodes for this file system; set
+ /*
+ * Maximum number of possible nodes for this file system; set
* during mount time. We need a hard limit on the maximum number
* of nodes to avoid allocating too much of them; their objects
* cannot be released until the file system is unmounted.
* Otherwise, we could easily run out of memory by creating lots
- * of empty files and then simply removing them. */
+ * of empty files and then simply removing them.
+ */
ino_t tm_nodes_max;
/* unrhdr used to allocate inode numbers */
@@ -335,38 +363,33 @@ struct tmpfs_mount {
/* Number of nodes currently that are in use. */
ino_t tm_nodes_inuse;
+ /* Refcounter on this struct tmpfs_mount. */
+ uint64_t tm_refcount;
+
/* maximum representable file size */
u_int64_t tm_maxfilesize;
- /* Nodes are organized in two different lists. The used list
- * contains all nodes that are currently used by the file system;
- * i.e., they refer to existing files. The available list contains
- * all nodes that are currently available for use by new files.
- * Nodes must be kept in this list (instead of deleting them)
- * because we need to keep track of their generation number (tn_gen
- * field).
- *
- * Note that nodes are lazily allocated: if the available list is
- * empty and we have enough space to create more nodes, they will be
- * created and inserted in the used list. Once these are released,
- * they will go into the available list, remaining alive until the
- * file system is unmounted. */
+ /*
+ * The used list contains all nodes that are currently used by
+ * the file system; i.e., they refer to existing files.
+ */
struct tmpfs_node_list tm_nodes_used;
- /* All node lock to protect the node list and tmp_pages_used */
- struct mtx allnode_lock;
+ /* All node lock to protect the node list and tmp_pages_used. */
+ struct mtx tm_allnode_lock;
- /* Pools used to store file system meta data. These are not shared
- * across several instances of tmpfs for the reasons described in
- * tmpfs_pool.c. */
+ /* Zones used to store file system meta data, per tmpfs mount. */
uma_zone_t tm_dirent_pool;
uma_zone_t tm_node_pool;
/* Read-only status. */
- int tm_ronly;
+ bool tm_ronly;
+ /* Do not use namecache. */
+ bool tm_nonc;
};
-#define TMPFS_LOCK(tm) mtx_lock(&(tm)->allnode_lock)
-#define TMPFS_UNLOCK(tm) mtx_unlock(&(tm)->allnode_lock)
+#define TMPFS_LOCK(tm) mtx_lock(&(tm)->tm_allnode_lock)
+#define TMPFS_UNLOCK(tm) mtx_unlock(&(tm)->tm_allnode_lock)
+#define TMPFS_MP_ASSERT_LOCKED(tm) mtx_assert(&(tm)->tm_allnode_lock, MA_OWNED)
/*
* This structure maps a file identifier to a tmpfs node. Used by the
@@ -379,15 +402,24 @@ struct tmpfs_fid {
unsigned long tf_gen;
};
+struct tmpfs_dir_cursor {
+ struct tmpfs_dirent *tdc_current;
+ struct tmpfs_dirent *tdc_tree;
+};
+
#ifdef _KERNEL
/*
* Prototypes for tmpfs_subr.c.
*/
+void tmpfs_ref_node(struct tmpfs_node *node);
+void tmpfs_ref_node_locked(struct tmpfs_node *node);
int tmpfs_alloc_node(struct mount *mp, struct tmpfs_mount *, enum vtype,
uid_t uid, gid_t gid, mode_t mode, struct tmpfs_node *,
char *, dev_t, struct tmpfs_node **);
void tmpfs_free_node(struct tmpfs_mount *, struct tmpfs_node *);
+bool tmpfs_free_node_locked(struct tmpfs_mount *, struct tmpfs_node *, bool);
+void tmpfs_free_tmp(struct tmpfs_mount *);
int tmpfs_alloc_dirent(struct tmpfs_mount *, struct tmpfs_node *,
const char *, u_int, struct tmpfs_dirent **);
void tmpfs_free_dirent(struct tmpfs_mount *, struct tmpfs_dirent *);
@@ -420,8 +452,13 @@ int tmpfs_chtimes(struct vnode *, struct vattr *, struct ucred *cred,
void tmpfs_itimes(struct vnode *, const struct timespec *,
const struct timespec *);
+void tmpfs_set_status(struct tmpfs_node *node, int status);
void tmpfs_update(struct vnode *);
int tmpfs_truncate(struct vnode *, off_t);
+struct tmpfs_dirent *tmpfs_dir_first(struct tmpfs_node *dnode,
+ struct tmpfs_dir_cursor *dc);
+struct tmpfs_dirent *tmpfs_dir_next(struct tmpfs_node *dnode,
+ struct tmpfs_dir_cursor *dc);
/*
* Convenience macros to simplify some logical expressions.
@@ -447,10 +484,6 @@ int tmpfs_truncate(struct vnode *, off_t);
} while (0)
/*
- * Memory management stuff.
- */
-
-/*
* Amount of memory pages to reserve for the system (e.g., to not use by
* tmpfs).
*/
@@ -467,37 +500,41 @@ size_t tmpfs_pages_used(struct tmpfs_mount *tmp);
* specific ones.
*/
-static inline
-struct tmpfs_mount *
+static inline struct tmpfs_mount *
VFS_TO_TMPFS(struct mount *mp)
{
struct tmpfs_mount *tmp;
- MPASS((mp) != NULL && (mp)->mnt_data != NULL);
- tmp = (struct tmpfs_mount *)(mp)->mnt_data;
- return tmp;
+ MPASS(mp != NULL && mp->mnt_data != NULL);
+ tmp = (struct tmpfs_mount *)mp->mnt_data;
+ return (tmp);
}
-static inline
-struct tmpfs_node *
+static inline struct tmpfs_node *
VP_TO_TMPFS_NODE(struct vnode *vp)
{
struct tmpfs_node *node;
- MPASS((vp) != NULL && (vp)->v_data != NULL);
+ MPASS(vp != NULL && vp->v_data != NULL);
node = (struct tmpfs_node *)vp->v_data;
- return node;
+ return (node);
}
-static inline
-struct tmpfs_node *
+static inline struct tmpfs_node *
VP_TO_TMPFS_DIR(struct vnode *vp)
{
struct tmpfs_node *node;
node = VP_TO_TMPFS_NODE(vp);
TMPFS_VALIDATE_DIR(node);
- return node;
+ return (node);
+}
+
+static inline bool
+tmpfs_use_nc(struct vnode *vp)
+{
+
+ return (!(VFS_TO_TMPFS(vp->v_mount)->tm_nonc));
}
#endif /* _FS_TMPFS_TMPFS_H_ */
diff --git a/sys/fs/tmpfs/tmpfs_fifoops.c b/sys/fs/tmpfs/tmpfs_fifoops.c
index 89ebe85..f1743db 100644
--- a/sys/fs/tmpfs/tmpfs_fifoops.c
+++ b/sys/fs/tmpfs/tmpfs_fifoops.c
@@ -49,35 +49,14 @@
#include <fs/tmpfs/tmpfs_vnops.h>
static int
-tmpfs_fifo_kqfilter(struct vop_kqfilter_args *ap)
-{
- struct vnode *vp;
- struct tmpfs_node *node;
-
- vp = ap->a_vp;
- node = VP_TO_TMPFS_NODE(vp);
-
- switch (ap->a_kn->kn_filter){
- case EVFILT_READ:
- node->tn_status |= TMPFS_NODE_ACCESSED;
- break;
- case EVFILT_WRITE:
- node->tn_status |= TMPFS_NODE_MODIFIED;
- break;
- }
-
- return fifo_specops.vop_kqfilter(ap);
-}
-
-static int
tmpfs_fifo_close(struct vop_close_args *v)
{
struct tmpfs_node *node;
- node = VP_TO_TMPFS_NODE(v->a_vp);
- node->tn_status |= TMPFS_NODE_ACCESSED;
+ node = VP_TO_TMPFS_NODE(v->a_vp);
+ tmpfs_set_status(node, TMPFS_NODE_ACCESSED);
tmpfs_update(v->a_vp);
- return fifo_specops.vop_close(v);
+ return (fifo_specops.vop_close(v));
}
/*
@@ -90,6 +69,5 @@ struct vop_vector tmpfs_fifoop_entries = {
.vop_access = tmpfs_access,
.vop_getattr = tmpfs_getattr,
.vop_setattr = tmpfs_setattr,
- .vop_kqfilter = tmpfs_fifo_kqfilter,
};
diff --git a/sys/fs/tmpfs/tmpfs_subr.c b/sys/fs/tmpfs/tmpfs_subr.c
index 7ce4789..f507807 100644
--- a/sys/fs/tmpfs/tmpfs_subr.c
+++ b/sys/fs/tmpfs/tmpfs_subr.c
@@ -62,11 +62,6 @@ __FBSDID("$FreeBSD$");
#include <fs/tmpfs/tmpfs_fifoops.h>
#include <fs/tmpfs/tmpfs_vnops.h>
-struct tmpfs_dir_cursor {
- struct tmpfs_dirent *tdc_current;
- struct tmpfs_dirent *tdc_tree;
-};
-
SYSCTL_NODE(_vfs, OID_AUTO, tmpfs, CTLFLAG_RW, 0, "tmpfs file system");
static long tmpfs_pages_reserved = TMPFS_PAGES_MINRESERVED;
@@ -130,13 +125,33 @@ tmpfs_pages_check_avail(struct tmpfs_mount *tmp, size_t req_pages)
if (tmpfs_mem_avail() < req_pages)
return (0);
- if (tmp->tm_pages_max != SIZE_MAX &&
+ if (tmp->tm_pages_max != ULONG_MAX &&
tmp->tm_pages_max < req_pages + tmpfs_pages_used(tmp))
return (0);
return (1);
}
+void
+tmpfs_ref_node(struct tmpfs_node *node)
+{
+
+ TMPFS_NODE_LOCK(node);
+ tmpfs_ref_node_locked(node);
+ TMPFS_NODE_UNLOCK(node);
+}
+
+void
+tmpfs_ref_node_locked(struct tmpfs_node *node)
+{
+
+ TMPFS_NODE_ASSERT_LOCKED(node);
+ KASSERT(node->tn_refcount > 0, ("node %p zero refcount", node));
+ KASSERT(node->tn_refcount < UINT_MAX, ("node %p refcount %u", node,
+ node->tn_refcount));
+ node->tn_refcount++;
+}
+
/*
* Allocates a new node of type 'type' inside the 'tmp' mount point, with
* its owner set to 'uid', its group to 'gid' and its mode set to 'mode',
@@ -199,8 +214,8 @@ tmpfs_alloc_node(struct mount *mp, struct tmpfs_mount *tmp, enum vtype type,
return (EBUSY);
}
- nnode = (struct tmpfs_node *)uma_zalloc_arg(
- tmp->tm_node_pool, tmp, M_WAITOK);
+ nnode = (struct tmpfs_node *)uma_zalloc_arg(tmp->tm_node_pool, tmp,
+ M_WAITOK);
/* Generic initialization. */
nnode->tn_type = type;
@@ -211,6 +226,7 @@ tmpfs_alloc_node(struct mount *mp, struct tmpfs_mount *tmp, enum vtype type,
nnode->tn_gid = gid;
nnode->tn_mode = mode;
nnode->tn_id = alloc_unr(tmp->tm_ino_unr);
+ nnode->tn_refcount = 1;
/* Type-specific initialization. */
switch (nnode->tn_type) {
@@ -258,58 +274,65 @@ tmpfs_alloc_node(struct mount *mp, struct tmpfs_mount *tmp, enum vtype type,
break;
default:
- panic("tmpfs_alloc_node: type %p %d", nnode, (int)nnode->tn_type);
+ panic("tmpfs_alloc_node: type %p %d", nnode,
+ (int)nnode->tn_type);
}
TMPFS_LOCK(tmp);
LIST_INSERT_HEAD(&tmp->tm_nodes_used, nnode, tn_entries);
+ nnode->tn_attached = true;
tmp->tm_nodes_inuse++;
+ tmp->tm_refcount++;
TMPFS_UNLOCK(tmp);
*node = nnode;
- return 0;
+ return (0);
}
/*
* Destroys the node pointed to by node from the file system 'tmp'.
- * If the node does not belong to the given mount point, the results are
- * unpredicted.
- *
- * If the node references a directory; no entries are allowed because
- * their removal could need a recursive algorithm, something forbidden in
- * kernel space. Furthermore, there is not need to provide such
- * functionality (recursive removal) because the only primitives offered
- * to the user are the removal of empty directories and the deletion of
- * individual files.
- *
- * Note that nodes are not really deleted; in fact, when a node has been
- * allocated, it cannot be deleted during the whole life of the file
- * system. Instead, they are moved to the available list and remain there
- * until reused.
+ * If the node references a directory, no entries are allowed.
*/
void
tmpfs_free_node(struct tmpfs_mount *tmp, struct tmpfs_node *node)
{
+
+ TMPFS_LOCK(tmp);
+ TMPFS_NODE_LOCK(node);
+ if (!tmpfs_free_node_locked(tmp, node, false)) {
+ TMPFS_NODE_UNLOCK(node);
+ TMPFS_UNLOCK(tmp);
+ }
+}
+
+bool
+tmpfs_free_node_locked(struct tmpfs_mount *tmp, struct tmpfs_node *node,
+ bool detach)
+{
vm_object_t uobj;
+ TMPFS_MP_ASSERT_LOCKED(tmp);
+ TMPFS_NODE_ASSERT_LOCKED(node);
+ KASSERT(node->tn_refcount > 0, ("node %p refcount zero", node));
+
+ node->tn_refcount--;
+ if (node->tn_attached && (detach || node->tn_refcount == 0)) {
+ MPASS(tmp->tm_nodes_inuse > 0);
+ tmp->tm_nodes_inuse--;
+ LIST_REMOVE(node, tn_entries);
+ node->tn_attached = false;
+ }
+ if (node->tn_refcount > 0)
+ return (false);
+
#ifdef INVARIANTS
- TMPFS_NODE_LOCK(node);
MPASS(node->tn_vnode == NULL);
MPASS((node->tn_vpstate & TMPFS_VNODE_ALLOCATING) == 0);
- TMPFS_NODE_UNLOCK(node);
#endif
-
- TMPFS_LOCK(tmp);
- LIST_REMOVE(node, tn_entries);
- tmp->tm_nodes_inuse--;
+ TMPFS_NODE_UNLOCK(node);
TMPFS_UNLOCK(tmp);
switch (node->tn_type) {
- case VNON:
- /* Do not do anything. VNON is provided to let the
- * allocation routine clean itself easily by avoiding
- * duplicating code in it. */
- /* FALLTHROUGH */
case VBLK:
/* FALLTHROUGH */
case VCHR:
@@ -328,9 +351,7 @@ tmpfs_free_node(struct tmpfs_mount *tmp, struct tmpfs_node *node)
case VREG:
uobj = node->tn_reg.tn_aobj;
if (uobj != NULL) {
- TMPFS_LOCK(tmp);
- tmp->tm_pages_used -= uobj->size;
- TMPFS_UNLOCK(tmp);
+ atomic_subtract_long(&tmp->tm_pages_used, uobj->size);
KASSERT((uobj->flags & OBJ_TMPFS) == 0,
("leaked OBJ_TMPFS node %p vm_obj %p", node, uobj));
vm_object_deallocate(uobj);
@@ -343,6 +364,9 @@ tmpfs_free_node(struct tmpfs_mount *tmp, struct tmpfs_node *node)
free_unr(tmp->tm_ino_unr, node->tn_id);
uma_zfree(tmp->tm_node_pool, node);
+ TMPFS_LOCK(tmp);
+ tmpfs_free_tmp(tmp);
+ return (true);
}
static __inline uint32_t
@@ -488,13 +512,16 @@ tmpfs_alloc_vp(struct mount *mp, struct tmpfs_node *node, int lkflag,
struct vnode **vpp)
{
struct vnode *vp;
+ struct tmpfs_mount *tm;
vm_object_t object;
int error;
error = 0;
-loop:
+ tm = VFS_TO_TMPFS(mp);
TMPFS_NODE_LOCK(node);
-loop1:
+ tmpfs_ref_node_locked(node);
+loop:
+ TMPFS_NODE_ASSERT_LOCKED(node);
if ((vp = node->tn_vnode) != NULL) {
MPASS((node->tn_vpstate & TMPFS_VNODE_DOOMED) == 0);
VI_LOCK(vp);
@@ -514,12 +541,14 @@ loop1:
msleep(&node->tn_vnode, TMPFS_NODE_MTX(node),
0, "tmpfsE", 0);
}
- goto loop1;
+ goto loop;
}
TMPFS_NODE_UNLOCK(node);
error = vget(vp, lkflag | LK_INTERLOCK, curthread);
- if (error == ENOENT)
+ if (error == ENOENT) {
+ TMPFS_NODE_LOCK(node);
goto loop;
+ }
if (error != 0) {
vp = NULL;
goto out;
@@ -531,6 +560,7 @@ loop1:
*/
if (node->tn_vnode == NULL || node->tn_vnode != vp) {
vput(vp);
+ TMPFS_NODE_LOCK(node);
goto loop;
}
@@ -552,11 +582,9 @@ loop1:
if (node->tn_vpstate & TMPFS_VNODE_ALLOCATING) {
node->tn_vpstate |= TMPFS_VNODE_WANT;
error = msleep((caddr_t) &node->tn_vpstate,
- TMPFS_NODE_MTX(node), PDROP | PCATCH,
- "tmpfs_alloc_vp", 0);
- if (error)
- return error;
-
+ TMPFS_NODE_MTX(node), 0, "tmpfs_alloc_vp", 0);
+ if (error != 0)
+ goto out;
goto loop;
} else
node->tn_vpstate |= TMPFS_VNODE_ALLOCATING;
@@ -564,7 +592,8 @@ loop1:
TMPFS_NODE_UNLOCK(node);
/* Get a new vnode and associate it with our node. */
- error = getnewvnode("tmpfs", mp, &tmpfs_vnodeop_entries, &vp);
+ error = getnewvnode("tmpfs", mp, VFS_TO_TMPFS(mp)->tm_nonc ?
+ &tmpfs_vnodeop_nonc_entries : &tmpfs_vnodeop_entries, &vp);
if (error != 0)
goto unlock;
MPASS(vp != NULL);
@@ -612,7 +641,7 @@ loop1:
VN_LOCK_ASHARE(vp);
error = insmntque1(vp, mp, tmpfs_insmntque_dtr, NULL);
- if (error)
+ if (error != 0)
vp = NULL;
unlock:
@@ -630,18 +659,19 @@ unlock:
TMPFS_NODE_UNLOCK(node);
out:
- *vpp = vp;
+ if (error == 0) {
+ *vpp = vp;
#ifdef INVARIANTS
- if (error == 0) {
MPASS(*vpp != NULL && VOP_ISLOCKED(*vpp));
TMPFS_NODE_LOCK(node);
MPASS(*vpp == node->tn_vnode);
TMPFS_NODE_UNLOCK(node);
- }
#endif
+ }
+ tmpfs_free_node(tm, node);
- return error;
+ return (error);
}
/*
@@ -684,7 +714,7 @@ tmpfs_alloc_file(struct vnode *dvp, struct vnode **vpp, struct vattr *vap,
struct tmpfs_node *node;
struct tmpfs_node *parent;
- MPASS(VOP_ISLOCKED(dvp));
+ ASSERT_VOP_ELOCKED(dvp, "tmpfs_alloc_file");
MPASS(cnp->cn_flags & HASBUF);
tmp = VFS_TO_TMPFS(dvp->v_mount);
@@ -709,8 +739,8 @@ tmpfs_alloc_file(struct vnode *dvp, struct vnode **vpp, struct vattr *vap,
/* Allocate a node that represents the new file. */
error = tmpfs_alloc_node(dvp->v_mount, tmp, vap->va_type,
- cnp->cn_cred->cr_uid,
- dnode->tn_gid, vap->va_mode, parent, target, vap->va_rdev, &node);
+ cnp->cn_cred->cr_uid, dnode->tn_gid, vap->va_mode, parent,
+ target, vap->va_rdev, &node);
if (error != 0)
return (error);
@@ -739,7 +769,7 @@ tmpfs_alloc_file(struct vnode *dvp, struct vnode **vpp, struct vattr *vap,
return (0);
}
-static struct tmpfs_dirent *
+struct tmpfs_dirent *
tmpfs_dir_first(struct tmpfs_node *dnode, struct tmpfs_dir_cursor *dc)
{
struct tmpfs_dirent *de;
@@ -753,7 +783,7 @@ tmpfs_dir_first(struct tmpfs_node *dnode, struct tmpfs_dir_cursor *dc)
return (dc->tdc_current);
}
-static struct tmpfs_dirent *
+struct tmpfs_dirent *
tmpfs_dir_next(struct tmpfs_node *dnode, struct tmpfs_dir_cursor *dc)
{
struct tmpfs_dirent *de;
@@ -1093,9 +1123,9 @@ tmpfs_dir_getdotdent(struct tmpfs_node *node, struct uio *uio)
else
error = uiomove(&dent, dent.d_reclen, uio);
- node->tn_status |= TMPFS_NODE_ACCESSED;
+ tmpfs_set_status(node, TMPFS_NODE_ACCESSED);
- return error;
+ return (error);
}
/*
@@ -1118,9 +1148,8 @@ tmpfs_dir_getdotdotdent(struct tmpfs_node *node, struct uio *uio)
* Return ENOENT if the current node is already removed.
*/
TMPFS_ASSERT_LOCKED(node);
- if (node->tn_dir.tn_parent == NULL) {
+ if (node->tn_dir.tn_parent == NULL)
return (ENOENT);
- }
TMPFS_NODE_LOCK(node->tn_dir.tn_parent);
dent.d_fileno = node->tn_dir.tn_parent->tn_id;
@@ -1138,9 +1167,9 @@ tmpfs_dir_getdotdotdent(struct tmpfs_node *node, struct uio *uio)
else
error = uiomove(&dent, dent.d_reclen, uio);
- node->tn_status |= TMPFS_NODE_ACCESSED;
+ tmpfs_set_status(node, TMPFS_NODE_ACCESSED);
- return error;
+ return (error);
}
/*
@@ -1283,7 +1312,7 @@ tmpfs_dir_getdents(struct tmpfs_node *node, struct uio *uio, int maxcookies,
node->tn_dir.tn_readdir_lastn = off;
node->tn_dir.tn_readdir_lastp = de;
- node->tn_status |= TMPFS_NODE_ACCESSED;
+ tmpfs_set_status(node, TMPFS_NODE_ACCESSED);
return error;
}
@@ -1413,9 +1442,7 @@ retry:
uobj->size = newpages;
VM_OBJECT_WUNLOCK(uobj);
- TMPFS_LOCK(tmp);
- tmp->tm_pages_used += (newpages - oldpages);
- TMPFS_UNLOCK(tmp);
+ atomic_add_long(&tmp->tm_pages_used, newpages - oldpages);
node->tn_size = newsize;
return (0);
@@ -1458,7 +1485,7 @@ tmpfs_chflags(struct vnode *vp, u_long flags, struct ucred *cred,
int error;
struct tmpfs_node *node;
- MPASS(VOP_ISLOCKED(vp));
+ ASSERT_VOP_ELOCKED(vp, "chflags");
node = VP_TO_TMPFS_NODE(vp);
@@ -1498,9 +1525,9 @@ tmpfs_chflags(struct vnode *vp, u_long flags, struct ucred *cred,
node->tn_flags = flags;
node->tn_status |= TMPFS_NODE_CHANGED;
- MPASS(VOP_ISLOCKED(vp));
+ ASSERT_VOP_ELOCKED(vp, "chflags2");
- return 0;
+ return (0);
}
/*
@@ -1514,7 +1541,7 @@ tmpfs_chmod(struct vnode *vp, mode_t mode, struct ucred *cred, struct thread *p)
int error;
struct tmpfs_node *node;
- MPASS(VOP_ISLOCKED(vp));
+ ASSERT_VOP_ELOCKED(vp, "chmod");
node = VP_TO_TMPFS_NODE(vp);
@@ -1554,9 +1581,9 @@ tmpfs_chmod(struct vnode *vp, mode_t mode, struct ucred *cred, struct thread *p)
node->tn_status |= TMPFS_NODE_CHANGED;
- MPASS(VOP_ISLOCKED(vp));
+ ASSERT_VOP_ELOCKED(vp, "chmod2");
- return 0;
+ return (0);
}
/*
@@ -1575,7 +1602,7 @@ tmpfs_chown(struct vnode *vp, uid_t uid, gid_t gid, struct ucred *cred,
uid_t ouid;
gid_t ogid;
- MPASS(VOP_ISLOCKED(vp));
+ ASSERT_VOP_ELOCKED(vp, "chown");
node = VP_TO_TMPFS_NODE(vp);
@@ -1625,9 +1652,9 @@ tmpfs_chown(struct vnode *vp, uid_t uid, gid_t gid, struct ucred *cred,
node->tn_mode &= ~(S_ISUID | S_ISGID);
}
- MPASS(VOP_ISLOCKED(vp));
+ ASSERT_VOP_ELOCKED(vp, "chown2");
- return 0;
+ return (0);
}
/*
@@ -1642,7 +1669,7 @@ tmpfs_chsize(struct vnode *vp, u_quad_t size, struct ucred *cred,
int error;
struct tmpfs_node *node;
- MPASS(VOP_ISLOCKED(vp));
+ ASSERT_VOP_ELOCKED(vp, "chsize");
node = VP_TO_TMPFS_NODE(vp);
@@ -1680,9 +1707,9 @@ tmpfs_chsize(struct vnode *vp, u_quad_t size, struct ucred *cred,
/* tmpfs_truncate will raise the NOTE_EXTEND and NOTE_ATTRIB kevents
* for us, as will update tn_status; no need to do that here. */
- MPASS(VOP_ISLOCKED(vp));
+ ASSERT_VOP_ELOCKED(vp, "chsize2");
- return error;
+ return (error);
}
/*
@@ -1697,7 +1724,7 @@ tmpfs_chtimes(struct vnode *vp, struct vattr *vap,
int error;
struct tmpfs_node *node;
- MPASS(VOP_ISLOCKED(vp));
+ ASSERT_VOP_ELOCKED(vp, "chtimes");
node = VP_TO_TMPFS_NODE(vp);
@@ -1726,9 +1753,20 @@ tmpfs_chtimes(struct vnode *vp, struct vattr *vap,
if (vap->va_birthtime.tv_sec != VNOVAL)
node->tn_birthtime = vap->va_birthtime;
- MPASS(VOP_ISLOCKED(vp));
+ ASSERT_VOP_ELOCKED(vp, "chtimes2");
- return 0;
+ return (0);
+}
+
+void
+tmpfs_set_status(struct tmpfs_node *node, int status)
+{
+
+ if ((node->tn_status & status) == status)
+ return;
+ TMPFS_NODE_LOCK(node);
+ node->tn_status |= status;
+ TMPFS_NODE_UNLOCK(node);
}
/* Sync timestamps */
@@ -1739,6 +1777,7 @@ tmpfs_itimes(struct vnode *vp, const struct timespec *acc,
struct tmpfs_node *node;
struct timespec now;
+ ASSERT_VOP_LOCKED(vp, "tmpfs_itimes");
node = VP_TO_TMPFS_NODE(vp);
if ((node->tn_status & (TMPFS_NODE_ACCESSED | TMPFS_NODE_MODIFIED |
@@ -1746,6 +1785,7 @@ tmpfs_itimes(struct vnode *vp, const struct timespec *acc,
return;
vfs_timestamp(&now);
+ TMPFS_NODE_LOCK(node);
if (node->tn_status & TMPFS_NODE_ACCESSED) {
if (acc == NULL)
acc = &now;
@@ -1756,11 +1796,12 @@ tmpfs_itimes(struct vnode *vp, const struct timespec *acc,
mod = &now;
node->tn_mtime = *mod;
}
- if (node->tn_status & TMPFS_NODE_CHANGED) {
+ if (node->tn_status & TMPFS_NODE_CHANGED)
node->tn_ctime = now;
- }
- node->tn_status &=
- ~(TMPFS_NODE_ACCESSED | TMPFS_NODE_MODIFIED | TMPFS_NODE_CHANGED);
+ node->tn_status &= ~(TMPFS_NODE_ACCESSED | TMPFS_NODE_MODIFIED |
+ TMPFS_NODE_CHANGED);
+ TMPFS_NODE_UNLOCK(node);
+
/* XXX: FIX? The entropy here is desirable, but the harvesting may be expensive */
random_harvest_queue(node, sizeof(*node), 1, RANDOM_FS_ATIME);
}
@@ -1794,14 +1835,13 @@ tmpfs_truncate(struct vnode *vp, off_t length)
return (EFBIG);
error = tmpfs_reg_resize(vp, length, FALSE);
- if (error == 0) {
+ if (error == 0)
node->tn_status |= TMPFS_NODE_CHANGED | TMPFS_NODE_MODIFIED;
- }
out:
tmpfs_update(vp);
- return error;
+ return (error);
}
static __inline int
diff --git a/sys/fs/tmpfs/tmpfs_vfsops.c b/sys/fs/tmpfs/tmpfs_vfsops.c
index f1ba960..4b336ba 100644
--- a/sys/fs/tmpfs/tmpfs_vfsops.c
+++ b/sys/fs/tmpfs/tmpfs_vfsops.c
@@ -79,7 +79,7 @@ static void tmpfs_susp_clean(struct mount *);
static const char *tmpfs_opts[] = {
"from", "size", "maxfilesize", "inodes", "uid", "gid", "mode", "export",
- "union", NULL
+ "union", "nonc", NULL
};
static const char *tmpfs_updateopts[] = {
@@ -138,6 +138,7 @@ tmpfs_mount(struct mount *mp)
struct tmpfs_node *root;
struct thread *td = curthread;
int error;
+ bool nonc;
/* Size counters. */
u_quad_t pages;
off_t nodes_max, size_max, maxfilesize;
@@ -186,11 +187,12 @@ tmpfs_mount(struct mount *mp)
size_max = 0;
if (vfs_getopt_size(mp->mnt_optnew, "maxfilesize", &maxfilesize) != 0)
maxfilesize = 0;
+ nonc = vfs_getopt(mp->mnt_optnew, "nonc", NULL, NULL) == 0;
/* Do not allow mounts if we do not have enough memory to preserve
* the minimum reserved pages. */
if (tmpfs_mem_avail() < TMPFS_PAGES_MINRESERVED)
- return ENOSPC;
+ return (ENOSPC);
/* Get the maximum number of memory pages this file system is
* allowed to use, based on the maximum size the user passed in
@@ -219,37 +221,35 @@ tmpfs_mount(struct mount *mp)
tmp = (struct tmpfs_mount *)malloc(sizeof(struct tmpfs_mount),
M_TMPFSMNT, M_WAITOK | M_ZERO);
- mtx_init(&tmp->allnode_lock, "tmpfs allnode lock", NULL, MTX_DEF);
+ mtx_init(&tmp->tm_allnode_lock, "tmpfs allnode lock", NULL, MTX_DEF);
tmp->tm_nodes_max = nodes_max;
tmp->tm_nodes_inuse = 0;
+ tmp->tm_refcount = 1;
tmp->tm_maxfilesize = maxfilesize > 0 ? maxfilesize : OFF_MAX;
LIST_INIT(&tmp->tm_nodes_used);
tmp->tm_pages_max = pages;
tmp->tm_pages_used = 0;
- tmp->tm_ino_unr = new_unrhdr(2, INT_MAX, &tmp->allnode_lock);
+ tmp->tm_ino_unr = new_unrhdr(2, INT_MAX, &tmp->tm_allnode_lock);
tmp->tm_dirent_pool = uma_zcreate("TMPFS dirent",
- sizeof(struct tmpfs_dirent),
- NULL, NULL, NULL, NULL,
+ sizeof(struct tmpfs_dirent), NULL, NULL, NULL, NULL,
UMA_ALIGN_PTR, 0);
tmp->tm_node_pool = uma_zcreate("TMPFS node",
- sizeof(struct tmpfs_node),
- tmpfs_node_ctor, tmpfs_node_dtor,
- tmpfs_node_init, tmpfs_node_fini,
- UMA_ALIGN_PTR, 0);
+ sizeof(struct tmpfs_node), tmpfs_node_ctor, tmpfs_node_dtor,
+ tmpfs_node_init, tmpfs_node_fini, UMA_ALIGN_PTR, 0);
tmp->tm_ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
+ tmp->tm_nonc = nonc;
/* Allocate the root node. */
- error = tmpfs_alloc_node(mp, tmp, VDIR, root_uid,
- root_gid, root_mode & ALLPERMS, NULL, NULL,
- VNOVAL, &root);
+ error = tmpfs_alloc_node(mp, tmp, VDIR, root_uid, root_gid,
+ root_mode & ALLPERMS, NULL, NULL, VNOVAL, &root);
if (error != 0 || root == NULL) {
- uma_zdestroy(tmp->tm_node_pool);
- uma_zdestroy(tmp->tm_dirent_pool);
- delete_unrhdr(tmp->tm_ino_unr);
- free(tmp, M_TMPFSMNT);
- return error;
+ uma_zdestroy(tmp->tm_node_pool);
+ uma_zdestroy(tmp->tm_dirent_pool);
+ delete_unrhdr(tmp->tm_ino_unr);
+ free(tmp, M_TMPFSMNT);
+ return (error);
}
KASSERT(root->tn_id == 2,
("tmpfs root with invalid ino: %ju", (uintmax_t)root->tn_id));
@@ -257,7 +257,7 @@ tmpfs_mount(struct mount *mp)
MNT_ILOCK(mp);
mp->mnt_flag |= MNT_LOCAL;
- mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED;
+ mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED;
MNT_IUNLOCK(mp);
mp->mnt_data = tmp;
@@ -308,25 +308,17 @@ tmpfs_unmount(struct mount *mp, int mntflags)
TMPFS_LOCK(tmp);
while ((node = LIST_FIRST(&tmp->tm_nodes_used)) != NULL) {
- TMPFS_UNLOCK(tmp);
+ TMPFS_NODE_LOCK(node);
if (node->tn_type == VDIR)
tmpfs_dir_destroy(tmp, node);
- tmpfs_free_node(tmp, node);
- TMPFS_LOCK(tmp);
+ if (tmpfs_free_node_locked(tmp, node, true))
+ TMPFS_LOCK(tmp);
+ else
+ TMPFS_NODE_UNLOCK(node);
}
- TMPFS_UNLOCK(tmp);
-
- uma_zdestroy(tmp->tm_dirent_pool);
- uma_zdestroy(tmp->tm_node_pool);
- delete_unrhdr(tmp->tm_ino_unr);
- mtx_destroy(&tmp->allnode_lock);
- MPASS(tmp->tm_pages_used == 0);
- MPASS(tmp->tm_nodes_inuse == 0);
-
- /* Throw away the tmpfs_mount structure. */
- free(mp->mnt_data, M_TMPFSMNT);
mp->mnt_data = NULL;
+ tmpfs_free_tmp(tmp);
vfs_write_resume(mp, VR_START_WRITE);
MNT_ILOCK(mp);
@@ -336,52 +328,74 @@ tmpfs_unmount(struct mount *mp, int mntflags)
return (0);
}
+void
+tmpfs_free_tmp(struct tmpfs_mount *tmp)
+{
+
+ MPASS(tmp->tm_refcount > 0);
+ tmp->tm_refcount--;
+ if (tmp->tm_refcount > 0) {
+ TMPFS_UNLOCK(tmp);
+ return;
+ }
+ TMPFS_UNLOCK(tmp);
+
+ uma_zdestroy(tmp->tm_dirent_pool);
+ uma_zdestroy(tmp->tm_node_pool);
+ delete_unrhdr(tmp->tm_ino_unr);
+
+ mtx_destroy(&tmp->tm_allnode_lock);
+ MPASS(tmp->tm_pages_used == 0);
+ MPASS(tmp->tm_nodes_inuse == 0);
+
+ free(tmp, M_TMPFSMNT);
+}
+
static int
tmpfs_root(struct mount *mp, int flags, struct vnode **vpp)
{
int error;
- error = tmpfs_alloc_vp(mp, VFS_TO_TMPFS(mp)->tm_root, flags, vpp);
- if (!error)
+ error = tmpfs_alloc_vp(mp, VFS_TO_TMPFS(mp)->tm_root, flags, vpp);
+ if (error == 0)
(*vpp)->v_vflag |= VV_ROOT;
-
- return error;
+ return (error);
}
static int
tmpfs_fhtovp(struct mount *mp, struct fid *fhp, int flags,
struct vnode **vpp)
{
- boolean_t found;
struct tmpfs_fid *tfhp;
struct tmpfs_mount *tmp;
struct tmpfs_node *node;
+ int error;
tmp = VFS_TO_TMPFS(mp);
tfhp = (struct tmpfs_fid *)fhp;
if (tfhp->tf_len != sizeof(struct tmpfs_fid))
- return EINVAL;
+ return (EINVAL);
if (tfhp->tf_id >= tmp->tm_nodes_max)
- return EINVAL;
-
- found = FALSE;
+ return (EINVAL);
TMPFS_LOCK(tmp);
LIST_FOREACH(node, &tmp->tm_nodes_used, tn_entries) {
if (node->tn_id == tfhp->tf_id &&
node->tn_gen == tfhp->tf_gen) {
- found = TRUE;
+ tmpfs_ref_node(node);
break;
}
}
TMPFS_UNLOCK(tmp);
- if (found)
- return (tmpfs_alloc_vp(mp, node, LK_EXCLUSIVE, vpp));
-
- return (EINVAL);
+ if (node != NULL) {
+ error = tmpfs_alloc_vp(mp, node, LK_EXCLUSIVE, vpp);
+ tmpfs_free_node(tmp, node);
+ } else
+ error = EINVAL;
+ return (error);
}
/* ARGSUSED2 */
@@ -397,7 +411,7 @@ tmpfs_statfs(struct mount *mp, struct statfs *sbp)
sbp->f_bsize = PAGE_SIZE;
used = tmpfs_pages_used(tmp);
- if (tmp->tm_pages_max != SIZE_MAX)
+ if (tmp->tm_pages_max != ULONG_MAX)
sbp->f_blocks = tmp->tm_pages_max;
else
sbp->f_blocks = used + tmpfs_mem_avail();
diff --git a/sys/fs/tmpfs/tmpfs_vnops.c b/sys/fs/tmpfs/tmpfs_vnops.c
index c302771..a2b0121 100644
--- a/sys/fs/tmpfs/tmpfs_vnops.c
+++ b/sys/fs/tmpfs/tmpfs_vnops.c
@@ -76,13 +76,11 @@ tmpfs_vn_get_ino_alloc(struct mount *mp, void *arg, int lkflags,
}
static int
-tmpfs_lookup(struct vop_cachedlookup_args *v)
+tmpfs_lookup1(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp)
{
- struct vnode *dvp = v->a_dvp;
- struct vnode **vpp = v->a_vpp;
- struct componentname *cnp = v->a_cnp;
struct tmpfs_dirent *de;
- struct tmpfs_node *dnode;
+ struct tmpfs_node *dnode, *pnode;
+ struct tmpfs_mount *tm;
int error;
dnode = VP_TO_TMPFS_DIR(dvp);
@@ -104,8 +102,12 @@ tmpfs_lookup(struct vop_cachedlookup_args *v)
goto out;
}
if (cnp->cn_flags & ISDOTDOT) {
+ tm = VFS_TO_TMPFS(dvp->v_mount);
+ pnode = dnode->tn_dir.tn_parent;
+ tmpfs_ref_node(pnode);
error = vn_vget_ino_gen(dvp, tmpfs_vn_get_ino_alloc,
- dnode->tn_dir.tn_parent, cnp->cn_lkflags, vpp);
+ pnode, cnp->cn_lkflags, vpp);
+ tmpfs_free_node(tm, pnode);
if (error != 0)
goto out;
} else if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') {
@@ -117,10 +119,12 @@ tmpfs_lookup(struct vop_cachedlookup_args *v)
if (de != NULL && de->td_node == NULL)
cnp->cn_flags |= ISWHITEOUT;
if (de == NULL || de->td_node == NULL) {
- /* The entry was not found in the directory.
+ /*
+ * The entry was not found in the directory.
* This is OK if we are creating or renaming an
* entry and are working on the last component of
- * the path name. */
+ * the path name.
+ */
if ((cnp->cn_flags & ISLASTCN) &&
(cnp->cn_nameiop == CREATE || \
cnp->cn_nameiop == RENAME ||
@@ -132,8 +136,10 @@ tmpfs_lookup(struct vop_cachedlookup_args *v)
if (error != 0)
goto out;
- /* Keep the component name in the buffer for
- * future uses. */
+ /*
+ * Keep the component name in the buffer for
+ * future uses.
+ */
cnp->cn_flags |= SAVENAME;
error = EJUSTRETURN;
@@ -142,14 +148,18 @@ tmpfs_lookup(struct vop_cachedlookup_args *v)
} else {
struct tmpfs_node *tnode;
- /* The entry was found, so get its associated
- * tmpfs_node. */
+ /*
+ * The entry was found, so get its associated
+ * tmpfs_node.
+ */
tnode = de->td_node;
- /* If we are not at the last path component and
+ /*
+ * If we are not at the last path component and
* found a non-directory or non-link entry (which
* may itself be pointing to a directory), raise
- * an error. */
+ * an error.
+ */
if ((tnode->tn_type != VDIR &&
tnode->tn_type != VLNK) &&
!(cnp->cn_flags & ISLASTCN)) {
@@ -157,9 +167,11 @@ tmpfs_lookup(struct vop_cachedlookup_args *v)
goto out;
}
- /* If we are deleting or renaming the entry, keep
+ /*
+ * If we are deleting or renaming the entry, keep
* track of its tmpfs_dirent so that it can be
- * easily deleted later. */
+ * easily deleted later.
+ */
if ((cnp->cn_flags & ISLASTCN) &&
(cnp->cn_nameiop == DELETE ||
cnp->cn_nameiop == RENAME)) {
@@ -175,8 +187,9 @@ tmpfs_lookup(struct vop_cachedlookup_args *v)
goto out;
if ((dnode->tn_mode & S_ISTXT) &&
- VOP_ACCESS(dvp, VADMIN, cnp->cn_cred, cnp->cn_thread) &&
- VOP_ACCESS(*vpp, VADMIN, cnp->cn_cred, cnp->cn_thread)) {
+ VOP_ACCESS(dvp, VADMIN, cnp->cn_cred,
+ cnp->cn_thread) && VOP_ACCESS(*vpp, VADMIN,
+ cnp->cn_cred, cnp->cn_thread)) {
error = EPERM;
vput(*vpp);
*vpp = NULL;
@@ -192,18 +205,36 @@ tmpfs_lookup(struct vop_cachedlookup_args *v)
}
}
- /* Store the result of this lookup in the cache. Avoid this if the
+ /*
+ * Store the result of this lookup in the cache. Avoid this if the
* request was for creation, as it does not improve timings on
- * emprical tests. */
- if ((cnp->cn_flags & MAKEENTRY) != 0)
+ * emprical tests.
+ */
+ if ((cnp->cn_flags & MAKEENTRY) != 0 && tmpfs_use_nc(dvp))
cache_enter(dvp, *vpp, cnp);
out:
- /* If there were no errors, *vpp cannot be null and it must be
- * locked. */
+ /*
+ * If there were no errors, *vpp cannot be null and it must be
+ * locked.
+ */
MPASS(IFF(error == 0, *vpp != NULLVP && VOP_ISLOCKED(*vpp)));
- return error;
+ return (error);
+}
+
+static int
+tmpfs_cached_lookup(struct vop_cachedlookup_args *v)
+{
+
+ return (tmpfs_lookup1(v->a_dvp, v->a_vpp, v->a_cnp));
+}
+
+static int
+tmpfs_lookup(struct vop_lookup_args *v)
+{
+
+ return (tmpfs_lookup1(v->a_dvp, v->a_vpp, v->a_cnp));
}
static int
@@ -218,7 +249,7 @@ tmpfs_create(struct vop_create_args *v)
MPASS(vap->va_type == VREG || vap->va_type == VSOCK);
error = tmpfs_alloc_file(dvp, vpp, vap, cnp, NULL);
- if (error == 0 && (cnp->cn_flags & MAKEENTRY) != 0)
+ if (error == 0 && (cnp->cn_flags & MAKEENTRY) != 0 && tmpfs_use_nc(dvp))
cache_enter(dvp, *vpp, cnp);
return (error);
}
@@ -445,7 +476,7 @@ tmpfs_read(struct vop_read_args *v)
if (uio->uio_offset < 0)
return (EINVAL);
node = VP_TO_TMPFS_NODE(vp);
- node->tn_status |= TMPFS_NODE_ACCESSED;
+ tmpfs_set_status(node, TMPFS_NODE_ACCESSED);
return (uiomove_object(node->tn_reg.tn_aobj, node->tn_size, uio));
}
@@ -993,10 +1024,12 @@ tmpfs_rename(struct vop_rename_args *v)
tmpfs_dir_attach(tdvp, de);
- cache_purge(fvp);
- if (tvp != NULL)
- cache_purge(tvp);
- cache_purge_negative(tdvp);
+ if (tmpfs_use_nc(fvp)) {
+ cache_purge(fvp);
+ if (tvp != NULL)
+ cache_purge(tvp);
+ cache_purge_negative(tdvp);
+ }
error = 0;
@@ -1082,8 +1115,8 @@ tmpfs_rmdir(struct vop_rmdir_args *v)
v->a_cnp->cn_namelen));
/* Check flags to see if we are allowed to remove the directory. */
- if (dnode->tn_flags & APPEND
- || node->tn_flags & (NOUNLINK | IMMUTABLE | APPEND)) {
+ if ((dnode->tn_flags & APPEND) != 0 ||
+ (node->tn_flags & (NOUNLINK | IMMUTABLE | APPEND)) != 0) {
error = EPERM;
goto out;
}
@@ -1096,23 +1129,23 @@ tmpfs_rmdir(struct vop_rmdir_args *v)
/* No vnode should be allocated for this entry from this point */
TMPFS_NODE_LOCK(node);
- TMPFS_ASSERT_ELOCKED(node);
node->tn_links--;
node->tn_dir.tn_parent = NULL;
- node->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_CHANGED | \
+ node->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_CHANGED |
TMPFS_NODE_MODIFIED;
TMPFS_NODE_UNLOCK(node);
TMPFS_NODE_LOCK(dnode);
- TMPFS_ASSERT_ELOCKED(dnode);
dnode->tn_links--;
- dnode->tn_status |= TMPFS_NODE_ACCESSED | \
- TMPFS_NODE_CHANGED | TMPFS_NODE_MODIFIED;
+ dnode->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_CHANGED |
+ TMPFS_NODE_MODIFIED;
TMPFS_NODE_UNLOCK(dnode);
- cache_purge(dvp);
- cache_purge(vp);
+ if (tmpfs_use_nc(dvp)) {
+ cache_purge(dvp);
+ cache_purge(vp);
+ }
/* Free the directory entry we just deleted. Note that the node
* referred by it will not be removed until the vnode is really
@@ -1220,9 +1253,9 @@ tmpfs_readlink(struct vop_readlink_args *v)
error = uiomove(node->tn_link, MIN(node->tn_size, uio->uio_resid),
uio);
- node->tn_status |= TMPFS_NODE_ACCESSED;
+ tmpfs_set_status(node, TMPFS_NODE_ACCESSED);
- return error;
+ return (error);
}
static int
@@ -1256,10 +1289,10 @@ tmpfs_reclaim(struct vop_reclaim_args *v)
else
vnode_destroy_vobject(vp);
vp->v_object = NULL;
- cache_purge(vp);
+ if (tmpfs_use_nc(vp))
+ cache_purge(vp);
TMPFS_NODE_LOCK(node);
- TMPFS_ASSERT_ELOCKED(node);
tmpfs_free_vp(vp);
/* If the node referenced by this vnode was deleted by the user,
@@ -1286,8 +1319,8 @@ tmpfs_print(struct vop_print_args *v)
node = VP_TO_TMPFS_NODE(vp);
- printf("tag VT_TMPFS, tmpfs_node %p, flags 0x%lx, links %d\n",
- node, node->tn_flags, node->tn_links);
+ printf("tag VT_TMPFS, tmpfs_node %p, flags 0x%lx, links %jd\n",
+ node, node->tn_flags, (uintmax_t)node->tn_links);
printf("\tmode 0%o, owner %d, group %d, size %jd, status 0x%x\n",
node->tn_mode, node->tn_uid, node->tn_gid,
(intmax_t)node->tn_size, node->tn_status);
@@ -1389,13 +1422,139 @@ tmpfs_whiteout(struct vop_whiteout_args *ap)
}
}
+static int
+tmpfs_vptocnp_dir(struct tmpfs_node *tn, struct tmpfs_node *tnp,
+ struct tmpfs_dirent **pde)
+{
+ struct tmpfs_dir_cursor dc;
+ struct tmpfs_dirent *de;
+
+ for (de = tmpfs_dir_first(tnp, &dc); de != NULL;
+ de = tmpfs_dir_next(tnp, &dc)) {
+ if (de->td_node == tn) {
+ *pde = de;
+ return (0);
+ }
+ }
+ return (ENOENT);
+}
+
+static int
+tmpfs_vptocnp_fill(struct vnode *vp, struct tmpfs_node *tn,
+ struct tmpfs_node *tnp, char *buf, int *buflen, struct vnode **dvp)
+{
+ struct tmpfs_dirent *de;
+ int error, i;
+
+ error = vn_vget_ino_gen(vp, tmpfs_vn_get_ino_alloc, tnp, LK_SHARED,
+ dvp);
+ if (error != 0)
+ return (error);
+ error = tmpfs_vptocnp_dir(tn, tnp, &de);
+ if (error == 0) {
+ i = *buflen;
+ i -= de->td_namelen;
+ if (i < 0) {
+ error = ENOMEM;
+ } else {
+ bcopy(de->ud.td_name, buf + i, de->td_namelen);
+ *buflen = i;
+ }
+ }
+ if (error == 0) {
+ if (vp != *dvp)
+ VOP_UNLOCK(*dvp, 0);
+ } else {
+ if (vp != *dvp)
+ vput(*dvp);
+ else
+ vrele(vp);
+ }
+ return (error);
+}
+
+static int
+tmpfs_vptocnp(struct vop_vptocnp_args *ap)
+{
+ struct vnode *vp, **dvp;
+ struct tmpfs_node *tn, *tnp, *tnp1;
+ struct tmpfs_dirent *de;
+ struct tmpfs_mount *tm;
+ char *buf;
+ int *buflen;
+ int error;
+
+ vp = ap->a_vp;
+ dvp = ap->a_vpp;
+ buf = ap->a_buf;
+ buflen = ap->a_buflen;
+
+ tm = VFS_TO_TMPFS(vp->v_mount);
+ tn = VP_TO_TMPFS_NODE(vp);
+ if (tn->tn_type == VDIR) {
+ tnp = tn->tn_dir.tn_parent;
+ if (tnp == NULL)
+ return (ENOENT);
+ tmpfs_ref_node(tnp);
+ error = tmpfs_vptocnp_fill(vp, tn, tn->tn_dir.tn_parent, buf,
+ buflen, dvp);
+ tmpfs_free_node(tm, tnp);
+ return (error);
+ }
+restart:
+ TMPFS_LOCK(tm);
+ LIST_FOREACH_SAFE(tnp, &tm->tm_nodes_used, tn_entries, tnp1) {
+ if (tnp->tn_type != VDIR)
+ continue;
+ TMPFS_NODE_LOCK(tnp);
+ tmpfs_ref_node_locked(tnp);
+
+ /*
+ * tn_vnode cannot be instantiated while we hold the
+ * node lock, so the directory cannot be changed while
+ * we iterate over it. Do this to avoid instantiating
+ * vnode for directories which cannot point to our
+ * node.
+ */
+ error = tnp->tn_vnode == NULL ? tmpfs_vptocnp_dir(tn, tnp,
+ &de) : 0;
+
+ if (error == 0) {
+ TMPFS_NODE_UNLOCK(tnp);
+ TMPFS_UNLOCK(tm);
+ error = tmpfs_vptocnp_fill(vp, tn, tnp, buf, buflen,
+ dvp);
+ if (error == 0) {
+ tmpfs_free_node(tm, tnp);
+ return (0);
+ }
+ if ((vp->v_iflag & VI_DOOMED) != 0) {
+ tmpfs_free_node(tm, tnp);
+ return (ENOENT);
+ }
+ TMPFS_LOCK(tm);
+ TMPFS_NODE_LOCK(tnp);
+ }
+ if (tmpfs_free_node_locked(tm, tnp, false)) {
+ goto restart;
+ } else {
+ KASSERT(tnp->tn_refcount > 0,
+ ("node %p refcount zero", tnp));
+ tnp1 = LIST_NEXT(tnp, tn_entries);
+ TMPFS_NODE_UNLOCK(tnp);
+ }
+ }
+ TMPFS_UNLOCK(tm);
+ return (ENOENT);
+}
+
/*
- * vnode operations vector used for files stored in a tmpfs file system.
+ * Vnode operations vector used for files stored in a tmpfs file system.
*/
struct vop_vector tmpfs_vnodeop_entries = {
.vop_default = &default_vnodeops,
.vop_lookup = vfs_cache_lookup,
- .vop_cachedlookup = tmpfs_lookup,
+ .vop_cachedlookup = tmpfs_cached_lookup,
.vop_create = tmpfs_create,
.vop_mknod = tmpfs_mknod,
.vop_open = tmpfs_open,
@@ -1421,5 +1580,13 @@ struct vop_vector tmpfs_vnodeop_entries = {
.vop_vptofh = tmpfs_vptofh,
.vop_whiteout = tmpfs_whiteout,
.vop_bmap = VOP_EOPNOTSUPP,
+ .vop_vptocnp = tmpfs_vptocnp,
};
+/*
+ * Same vector for mounts which do not use namecache.
+ */
+struct vop_vector tmpfs_vnodeop_nonc_entries = {
+ .vop_default = &tmpfs_vnodeop_entries,
+ .vop_lookup = tmpfs_lookup,
+};
diff --git a/sys/fs/tmpfs/tmpfs_vnops.h b/sys/fs/tmpfs/tmpfs_vnops.h
index 1e06c13..db614a05 100644
--- a/sys/fs/tmpfs/tmpfs_vnops.h
+++ b/sys/fs/tmpfs/tmpfs_vnops.h
@@ -44,6 +44,7 @@
*/
extern struct vop_vector tmpfs_vnodeop_entries;
+extern struct vop_vector tmpfs_vnodeop_nonc_entries;
vop_access_t tmpfs_access;
vop_getattr_t tmpfs_getattr;
diff --git a/sys/fs/unionfs/union_vfsops.c b/sys/fs/unionfs/union_vfsops.c
index f4b83bb..2ba4f06 100644
--- a/sys/fs/unionfs/union_vfsops.c
+++ b/sys/fs/unionfs/union_vfsops.c
@@ -390,7 +390,7 @@ unionfs_statfs(struct mount *mp, struct statfs *sbp)
{
struct unionfs_mount *ump;
int error;
- struct statfs mstat;
+ struct statfs *mstat;
uint64_t lbsize;
ump = MOUNTTOUNIONFSMOUNT(mp);
@@ -398,39 +398,47 @@ unionfs_statfs(struct mount *mp, struct statfs *sbp)
UNIONFSDEBUG("unionfs_statfs(mp = %p, lvp = %p, uvp = %p)\n",
(void *)mp, (void *)ump->um_lowervp, (void *)ump->um_uppervp);
- bzero(&mstat, sizeof(mstat));
+ mstat = malloc(sizeof(struct statfs), M_STATFS, M_WAITOK | M_ZERO);
- error = VFS_STATFS(ump->um_lowervp->v_mount, &mstat);
- if (error)
+ error = VFS_STATFS(ump->um_lowervp->v_mount, mstat);
+ if (error) {
+ free(mstat, M_STATFS);
return (error);
+ }
/* now copy across the "interesting" information and fake the rest */
- sbp->f_blocks = mstat.f_blocks;
- sbp->f_files = mstat.f_files;
+ sbp->f_blocks = mstat->f_blocks;
+ sbp->f_files = mstat->f_files;
- lbsize = mstat.f_bsize;
+ lbsize = mstat->f_bsize;
- error = VFS_STATFS(ump->um_uppervp->v_mount, &mstat);
- if (error)
+ error = VFS_STATFS(ump->um_uppervp->v_mount, mstat);
+ if (error) {
+ free(mstat, M_STATFS);
return (error);
+ }
+
/*
* The FS type etc is copy from upper vfs.
* (write able vfs have priority)
*/
- sbp->f_type = mstat.f_type;
- sbp->f_flags = mstat.f_flags;
- sbp->f_bsize = mstat.f_bsize;
- sbp->f_iosize = mstat.f_iosize;
-
- if (mstat.f_bsize != lbsize)
- sbp->f_blocks = ((off_t)sbp->f_blocks * lbsize) / mstat.f_bsize;
-
- sbp->f_blocks += mstat.f_blocks;
- sbp->f_bfree = mstat.f_bfree;
- sbp->f_bavail = mstat.f_bavail;
- sbp->f_files += mstat.f_files;
- sbp->f_ffree = mstat.f_ffree;
+ sbp->f_type = mstat->f_type;
+ sbp->f_flags = mstat->f_flags;
+ sbp->f_bsize = mstat->f_bsize;
+ sbp->f_iosize = mstat->f_iosize;
+
+ if (mstat->f_bsize != lbsize)
+ sbp->f_blocks = ((off_t)sbp->f_blocks * lbsize) /
+ mstat->f_bsize;
+
+ sbp->f_blocks += mstat->f_blocks;
+ sbp->f_bfree = mstat->f_bfree;
+ sbp->f_bavail = mstat->f_bavail;
+ sbp->f_files += mstat->f_files;
+ sbp->f_ffree = mstat->f_ffree;
+
+ free(mstat, M_STATFS);
return (0);
}
diff --git a/sys/geom/geom_disk.c b/sys/geom/geom_disk.c
index 8c3ec64..f38a61b 100644
--- a/sys/geom/geom_disk.c
+++ b/sys/geom/geom_disk.c
@@ -586,12 +586,12 @@ g_disk_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp, struct g
* special cases, and there's also a valid range.
*/
sbuf_printf(sb, "%s<rotationrate>", indent);
- if (dp->d_rotation_rate == 0) /* Old drives don't */
- sbuf_printf(sb, "unknown"); /* report RPM. */
- else if (dp->d_rotation_rate == 1) /* Since 0 is used */
- sbuf_printf(sb, "0"); /* above, SSDs use 1. */
- else if ((dp->d_rotation_rate >= 0x041) &&
- (dp->d_rotation_rate <= 0xfffe))
+ if (dp->d_rotation_rate == DISK_RR_UNKNOWN) /* Old drives */
+ sbuf_printf(sb, "unknown"); /* don't report RPM. */
+ else if (dp->d_rotation_rate == DISK_RR_NON_ROTATING)
+ sbuf_printf(sb, "0");
+ else if ((dp->d_rotation_rate >= DISK_RR_MIN) &&
+ (dp->d_rotation_rate <= DISK_RR_MAX))
sbuf_printf(sb, "%u", dp->d_rotation_rate);
else
sbuf_printf(sb, "invalid");
diff --git a/sys/geom/geom_disk.h b/sys/geom/geom_disk.h
index 5fce5b9..642a512 100644
--- a/sys/geom/geom_disk.h
+++ b/sys/geom/geom_disk.h
@@ -119,6 +119,11 @@ struct disk {
#define DISKFLAG_DIRECT_COMPLETION 0x20
#define DISKFLAG_CANZONE 0x80
+#define DISK_RR_UNKNOWN 0
+#define DISK_RR_NON_ROTATING 1
+#define DISK_RR_MIN 0x0401
+#define DISK_RR_MAX 0xfffe
+
struct disk *disk_alloc(void);
void disk_create(struct disk *disk, int version);
void disk_destroy(struct disk *disk);
diff --git a/sys/geom/multipath/g_multipath.c b/sys/geom/multipath/g_multipath.c
index b461747..0c24cd3 100644
--- a/sys/geom/multipath/g_multipath.c
+++ b/sys/geom/multipath/g_multipath.c
@@ -923,6 +923,7 @@ g_multipath_ctl_add_name(struct gctl_req *req, struct g_class *mp,
struct g_provider *pp;
const char *mpname;
static const char devpf[6] = "/dev/";
+ int error;
g_topology_assert();
@@ -972,10 +973,9 @@ g_multipath_ctl_add_name(struct gctl_req *req, struct g_class *mp,
return;
}
- /*
- * Now add....
- */
- (void) g_multipath_add_disk(gp, pp);
+ error = g_multipath_add_disk(gp, pp);
+ if (error != 0)
+ gctl_error(req, "Provider addition error: %d", error);
}
static void
diff --git a/sys/geom/vinum/geom_vinum_state.c b/sys/geom/vinum/geom_vinum_state.c
index 568c784..67486da 100644
--- a/sys/geom/vinum/geom_vinum_state.c
+++ b/sys/geom/vinum/geom_vinum_state.c
@@ -183,7 +183,7 @@ gv_set_sd_state(struct gv_sd *s, int newstate, int flags)
* Only do this if we're forced, since it usually is done
* internally, and then we do use the force flag.
*/
- if (!flags & GV_SETSTATE_FORCE)
+ if (!(flags & GV_SETSTATE_FORCE))
return (GV_ERR_SETSTATE);
break;
diff --git a/sys/i386/i386/machdep.c b/sys/i386/i386/machdep.c
index 9200d00..15f9851 100644
--- a/sys/i386/i386/machdep.c
+++ b/sys/i386/i386/machdep.c
@@ -2444,8 +2444,7 @@ i386_kdb_init(void)
}
register_t
-init386(first)
- int first;
+init386(int first)
{
struct gate_descriptor *gdp;
int gsel_tss, metadata_missing, x, pa;
diff --git a/sys/i386/i386/mem.c b/sys/i386/i386/mem.c
index 003f207f..accd092 100644
--- a/sys/i386/i386/mem.c
+++ b/sys/i386/i386/mem.c
@@ -168,10 +168,9 @@ memmmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr,
if (offset > cpu_getmaxphyaddr())
return (-1);
*paddr = offset;
- } else if (dev2unit(dev) == CDEV_MINOR_KMEM)
- *paddr = vtophys(offset);
- /* else panic! */
- return (0);
+ return (0);
+ }
+ return (-1);
}
/*
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index 83f1bbf..bdc310c 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -508,7 +508,14 @@ pmap_bootstrap(vm_paddr_t firstaddr)
for (i = 1; i < NKPT; i++)
PTD[i] = 0;
- /* Initialize the PAT MSR if present. */
+ /*
+ * Initialize the PAT MSR if present.
+ * pmap_init_pat() clears and sets CR4_PGE, which, as a
+ * side-effect, invalidates stale PG_G TLB entries that might
+ * have been created in our pre-boot environment. We assume
+ * that PAT support implies PGE and in reverse, PGE presence
+ * comes with PAT. Both features were added for Pentium Pro.
+ */
pmap_init_pat();
/* Turn on PG_G on kernel page(s) */
@@ -565,7 +572,10 @@ pmap_init_pat(void)
pat_table[PAT_WRITE_PROTECTED] = 3;
pat_table[PAT_UNCACHED] = 3;
- /* Bail if this CPU doesn't implement PAT. */
+ /*
+ * Bail if this CPU doesn't implement PAT.
+ * We assume that PAT support implies PGE.
+ */
if ((cpu_feature & CPUID_PAT) == 0) {
for (i = 0; i < PAT_INDEX_SIZE; i++)
pat_index[i] = pat_table[i];
@@ -1284,16 +1294,16 @@ pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva, boolean_t force)
return;
#endif
/*
- * Otherwise, do per-cache line flush. Use the mfence
+ * Otherwise, do per-cache line flush. Use the sfence
* instruction to insure that previous stores are
* included in the write-back. The processor
* propagates flush to other processors in the cache
* coherence domain.
*/
- mfence();
+ sfence();
for (; sva < eva; sva += cpu_clflush_line_size)
clflushopt(sva);
- mfence();
+ sfence();
} else if ((cpu_feature & CPUID_CLFSH) != 0 &&
eva - sva < PMAP_CLFLUSH_THRESHOLD) {
#ifdef DEV_APIC
@@ -2633,6 +2643,7 @@ pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
vm_paddr_t mptepa;
vm_page_t mpte;
struct spglist free;
+ vm_offset_t sva;
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
oldpde = *pde;
@@ -2655,8 +2666,9 @@ pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
va >> PDRSHIFT, VM_ALLOC_NOOBJ | VM_ALLOC_NORMAL |
VM_ALLOC_WIRED)) == NULL) {
SLIST_INIT(&free);
- pmap_remove_pde(pmap, pde, trunc_4mpage(va), &free);
- pmap_invalidate_page(pmap, trunc_4mpage(va));
+ sva = trunc_4mpage(va);
+ pmap_remove_pde(pmap, pde, sva, &free);
+ pmap_invalidate_range(pmap, sva, sva + NBPDR - 1);
pmap_free_zero_pages(&free);
CTR2(KTR_PMAP, "pmap_demote_pde: failure for va %#x"
" in pmap %p", va, pmap);
@@ -2827,9 +2839,24 @@ pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
/*
* Machines that don't support invlpg, also don't support
* PG_G.
+ *
+ * When workaround_erratum383 is false, a promotion to a 2M/4M
+ * page mapping does not invalidate the 512/1024 4K page mappings
+ * from the TLB. Consequently, at this point, the TLB may
+ * hold both 4K and 2M/4M page mappings. Therefore, the entire
+ * range of addresses must be invalidated here. In contrast,
+ * when workaround_erratum383 is true, a promotion does
+ * invalidate the 512/1024 4K page mappings, and so a single INVLPG
+ * suffices to invalidate the 2M/4M page mapping.
*/
- if (oldpde & PG_G)
- pmap_invalidate_page(kernel_pmap, sva);
+ if ((oldpde & PG_G) != 0) {
+ if (workaround_erratum383)
+ pmap_invalidate_page(kernel_pmap, sva);
+ else
+ pmap_invalidate_range(kernel_pmap, sva,
+ sva + NBPDR - 1);
+ }
+
pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
if (oldpde & PG_MANAGED) {
pvh = pa_to_pvh(oldpde & PG_PS_FRAME);
@@ -3139,9 +3166,14 @@ retry:
if (newpde != oldpde) {
if (!pde_cmpset(pde, oldpde, newpde))
goto retry;
- if (oldpde & PG_G)
- pmap_invalidate_page(pmap, sva);
- else
+ if (oldpde & PG_G) {
+ /* See pmap_remove_pde() for explanation. */
+ if (workaround_erratum383)
+ pmap_invalidate_page(kernel_pmap, sva);
+ else
+ pmap_invalidate_range(kernel_pmap, sva,
+ sva + NBPDR - 1);
+ } else
anychanged = TRUE;
}
return (anychanged);
@@ -4231,8 +4263,14 @@ pmap_zero_page(vm_page_t m)
invlcaddr(pc->pc_cmap_addr2);
pagezero(pc->pc_cmap_addr2);
*cmap_pte2 = 0;
- mtx_unlock(&pc->pc_cmap_lock);
+
+ /*
+ * Unpin the thread before releasing the lock. Otherwise the thread
+ * could be rescheduled while still bound to the current CPU, only
+ * to unpin itself immediately upon resuming execution.
+ */
sched_unpin();
+ mtx_unlock(&pc->pc_cmap_lock);
}
/*
@@ -4261,8 +4299,8 @@ pmap_zero_page_area(vm_page_t m, int off, int size)
else
bzero(pc->pc_cmap_addr2 + off, size);
*cmap_pte2 = 0;
- mtx_unlock(&pc->pc_cmap_lock);
sched_unpin();
+ mtx_unlock(&pc->pc_cmap_lock);
}
/*
@@ -4316,8 +4354,8 @@ pmap_copy_page(vm_page_t src, vm_page_t dst)
bcopy(pc->pc_cmap_addr1, pc->pc_cmap_addr2, PAGE_SIZE);
*cmap_pte1 = 0;
*cmap_pte2 = 0;
- mtx_unlock(&pc->pc_cmap_lock);
sched_unpin();
+ mtx_unlock(&pc->pc_cmap_lock);
}
int unmapped_buf_allowed = 1;
@@ -4364,8 +4402,8 @@ pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
}
*cmap_pte1 = 0;
*cmap_pte2 = 0;
- mtx_unlock(&pc->pc_cmap_lock);
sched_unpin();
+ mtx_unlock(&pc->pc_cmap_lock);
}
/*
@@ -4953,7 +4991,7 @@ pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
{
pd_entry_t oldpde, *pde;
pt_entry_t *pte;
- vm_offset_t pdnxt;
+ vm_offset_t va, pdnxt;
vm_page_t m;
boolean_t anychanged, pv_lists_locked;
@@ -5014,11 +5052,11 @@ resume:
}
if (pdnxt > eva)
pdnxt = eva;
+ va = pdnxt;
for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++,
sva += PAGE_SIZE) {
- if ((*pte & (PG_MANAGED | PG_V)) != (PG_MANAGED |
- PG_V))
- continue;
+ if ((*pte & (PG_MANAGED | PG_V)) != (PG_MANAGED | PG_V))
+ goto maybe_invlrng;
else if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
if (advice == MADV_DONTNEED) {
/*
@@ -5033,12 +5071,21 @@ resume:
} else if ((*pte & PG_A) != 0)
atomic_clear_int((u_int *)pte, PG_A);
else
- continue;
- if ((*pte & PG_G) != 0)
- pmap_invalidate_page(pmap, sva);
- else
+ goto maybe_invlrng;
+ if ((*pte & PG_G) != 0) {
+ if (va == pdnxt)
+ va = sva;
+ } else
anychanged = TRUE;
+ continue;
+maybe_invlrng:
+ if (va != pdnxt) {
+ pmap_invalidate_range(pmap, va, sva);
+ va = pdnxt;
+ }
}
+ if (va != pdnxt)
+ pmap_invalidate_range(pmap, va, sva);
}
if (anychanged)
pmap_invalidate_all(pmap);
@@ -5333,12 +5380,14 @@ pmap_flush_page(vm_page_t m)
eva = sva + PAGE_SIZE;
/*
- * Use mfence despite the ordering implied by
+ * Use mfence or sfence despite the ordering implied by
* mtx_{un,}lock() because clflush on non-Intel CPUs
* and clflushopt are not guaranteed to be ordered by
* any other instruction.
*/
- if (useclflushopt || cpu_vendor_id != CPU_VENDOR_INTEL)
+ if (useclflushopt)
+ sfence();
+ else if (cpu_vendor_id != CPU_VENDOR_INTEL)
mfence();
for (; sva < eva; sva += cpu_clflush_line_size) {
if (useclflushopt)
@@ -5346,11 +5395,13 @@ pmap_flush_page(vm_page_t m)
else
clflush(sva);
}
- if (useclflushopt || cpu_vendor_id != CPU_VENDOR_INTEL)
+ if (useclflushopt)
+ sfence();
+ else if (cpu_vendor_id != CPU_VENDOR_INTEL)
mfence();
*cmap_pte2 = 0;
- mtx_unlock(&pc->pc_cmap_lock);
sched_unpin();
+ mtx_unlock(&pc->pc_cmap_lock);
} else
pmap_invalidate_cache();
}
diff --git a/sys/i386/i386/vm_machdep.c b/sys/i386/i386/vm_machdep.c
index a73b60d..58fba7c 100644
--- a/sys/i386/i386/vm_machdep.c
+++ b/sys/i386/i386/vm_machdep.c
@@ -176,11 +176,7 @@ alloc_fpusave(int flags)
* ready to run and return to user mode.
*/
void
-cpu_fork(td1, p2, td2, flags)
- register struct thread *td1;
- register struct proc *p2;
- struct thread *td2;
- int flags;
+cpu_fork(struct thread *td1, struct proc *p2, struct thread *td2, int flags)
{
register struct proc *p1;
struct pcb *pcb2;
diff --git a/sys/i386/ibcs2/ibcs2_stat.c b/sys/i386/ibcs2/ibcs2_stat.c
index 55d14af..115c2ae 100644
--- a/sys/i386/ibcs2/ibcs2_stat.c
+++ b/sys/i386/ibcs2/ibcs2_stat.c
@@ -38,6 +38,7 @@ __FBSDID("$FreeBSD$");
#include <sys/filedesc.h>
#include <sys/jail.h>
#include <sys/kernel.h>
+#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
@@ -108,16 +109,18 @@ ibcs2_statfs(td, uap)
struct thread *td;
struct ibcs2_statfs_args *uap;
{
- struct statfs sf;
+ struct statfs *sf;
char *path;
int error;
CHECKALTEXIST(td, uap->path, &path);
- error = kern_statfs(td, path, UIO_SYSSPACE, &sf);
+ sf = malloc(sizeof(struct statfs), M_STATFS, M_WAITOK);
+ error = kern_statfs(td, path, UIO_SYSSPACE, sf);
free(path, M_TEMP);
- if (error)
- return (error);
- return cvt_statfs(&sf, (caddr_t)uap->buf, uap->len);
+ if (error == 0)
+ error = cvt_statfs(sf, (caddr_t)uap->buf, uap->len);
+ free(sf, M_STATFS);
+ return (error);
}
int
@@ -125,13 +128,15 @@ ibcs2_fstatfs(td, uap)
struct thread *td;
struct ibcs2_fstatfs_args *uap;
{
- struct statfs sf;
+ struct statfs *sf;
int error;
- error = kern_fstatfs(td, uap->fd, &sf);
- if (error)
- return (error);
- return cvt_statfs(&sf, (caddr_t)uap->buf, uap->len);
+ sf = malloc(sizeof(struct statfs), M_STATFS, M_WAITOK);
+ error = kern_fstatfs(td, uap->fd, sf);
+ if (error == 0)
+ error = cvt_statfs(sf, (caddr_t)uap->buf, uap->len);
+ free(sf, M_STATFS);
+ return (error);
}
int
diff --git a/sys/i386/include/cpufunc.h b/sys/i386/include/cpufunc.h
index f433194..23be5a3 100644
--- a/sys/i386/include/cpufunc.h
+++ b/sys/i386/include/cpufunc.h
@@ -158,6 +158,13 @@ mfence(void)
__asm __volatile("mfence" : : : "memory");
}
+static __inline void
+sfence(void)
+{
+
+ __asm __volatile("sfence" : : : "memory");
+}
+
#ifdef _KERNEL
#define HAVE_INLINE_FFS
diff --git a/sys/i386/isa/npx.c b/sys/i386/isa/npx.c
index 944ebf7..fc953b6 100644
--- a/sys/i386/isa/npx.c
+++ b/sys/i386/isa/npx.c
@@ -550,8 +550,7 @@ SYSINIT(npxinitstate, SI_SUB_DRIVERS, SI_ORDER_ANY, npxinitstate, NULL);
* Free coprocessor (if we have it).
*/
void
-npxexit(td)
- struct thread *td;
+npxexit(struct thread *td)
{
critical_enter();
@@ -581,7 +580,7 @@ npxexit(td)
}
int
-npxformat()
+npxformat(void)
{
if (!hw_float)
@@ -961,7 +960,7 @@ npxresume(union savefpu *addr)
}
void
-npxdrop()
+npxdrop(void)
{
struct thread *td;
@@ -1297,8 +1296,7 @@ fpu_clean_state(void)
#endif /* CPU_ENABLE_SSE */
static void
-fpurstor(addr)
- union savefpu *addr;
+fpurstor(union savefpu *addr)
{
#ifdef CPU_ENABLE_SSE
diff --git a/sys/kern/imgact_elf.c b/sys/kern/imgact_elf.c
index b1d6c32..a1a24d1 100644
--- a/sys/kern/imgact_elf.c
+++ b/sys/kern/imgact_elf.c
@@ -1160,7 +1160,7 @@ struct coredump_params {
static void cb_put_phdr(vm_map_entry_t, void *);
static void cb_size_segment(vm_map_entry_t, void *);
-static int core_write(struct coredump_params *, void *, size_t, off_t,
+static int core_write(struct coredump_params *, const void *, size_t, off_t,
enum uio_seg);
static void each_writable_segment(struct thread *, segment_callback, void *);
static int __elfN(corehdr)(struct coredump_params *, int, void *, size_t,
@@ -1202,7 +1202,14 @@ compress_chunk(struct coredump_params *p, char *base, char *buf, u_int len)
while (len > 0) {
chunk_len = MIN(len, CORE_BUF_SIZE);
- copyin(base, buf, chunk_len);
+
+ /*
+ * We can get EFAULT error here.
+ * In that case zero out the current chunk of the segment.
+ */
+ error = copyin(base, buf, chunk_len);
+ if (error != 0)
+ bzero(buf, chunk_len);
error = gzio_write(p->gzs, buf, chunk_len);
if (error != 0)
break;
@@ -1222,12 +1229,12 @@ core_gz_write(void *base, size_t len, off_t offset, void *arg)
#endif /* GZIO */
static int
-core_write(struct coredump_params *p, void *base, size_t len, off_t offset,
- enum uio_seg seg)
+core_write(struct coredump_params *p, const void *base, size_t len,
+ off_t offset, enum uio_seg seg)
{
- return (vn_rdwr_inchunks(UIO_WRITE, p->vp, base, len, offset,
- seg, IO_UNIT | IO_DIRECT | IO_RANGELOCKED,
+ return (vn_rdwr_inchunks(UIO_WRITE, p->vp, __DECONST(void *, base),
+ len, offset, seg, IO_UNIT | IO_DIRECT | IO_RANGELOCKED,
p->active_cred, p->file_cred, NULL, p->td));
}
@@ -1235,12 +1242,32 @@ static int
core_output(void *base, size_t len, off_t offset, struct coredump_params *p,
void *tmpbuf)
{
+ int error;
#ifdef GZIO
if (p->gzs != NULL)
return (compress_chunk(p, base, tmpbuf, len));
#endif
- return (core_write(p, base, len, offset, UIO_USERSPACE));
+ /*
+ * EFAULT is a non-fatal error that we can get, for example,
+ * if the segment is backed by a file but extends beyond its
+ * end.
+ */
+ error = core_write(p, base, len, offset, UIO_USERSPACE);
+ if (error == EFAULT) {
+ log(LOG_WARNING, "Failed to fully fault in a core file segment "
+ "at VA %p with size 0x%zx to be written at offset 0x%jx "
+ "for process %s\n", base, len, offset, curproc->p_comm);
+
+ /*
+ * Write a "real" zero byte at the end of the target region
+ * in the case this is the last segment.
+ * The intermediate space will be implicitly zero-filled.
+ */
+ error = core_write(p, zero_region, 1, offset + len - 1,
+ UIO_SYSSPACE);
+ }
+ return (error);
}
/*
diff --git a/sys/kern/init_sysent.c b/sys/kern/init_sysent.c
index 45e829c..e5421b9 100644
--- a/sys/kern/init_sysent.c
+++ b/sys/kern/init_sysent.c
@@ -3,7 +3,7 @@
*
* DO NOT EDIT-- this file is automatically generated.
* $FreeBSD$
- * created from FreeBSD: stable/11/sys/kern/syscalls.master 304977 2016-08-29 05:15:43Z kib
+ * created from FreeBSD: stable/11/sys/kern/syscalls.master 313450 2017-02-08 18:32:35Z jhb
*/
#include "opt_compat.h"
diff --git a/sys/kern/kern_acct.c b/sys/kern/kern_acct.c
index 46e6d9b..035251d 100644
--- a/sys/kern/kern_acct.c
+++ b/sys/kern/kern_acct.c
@@ -78,6 +78,7 @@ __FBSDID("$FreeBSD$");
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/lock.h>
+#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
@@ -552,7 +553,7 @@ encode_long(long val)
static void
acctwatch(void)
{
- struct statfs sb;
+ struct statfs *sp;
sx_assert(&acct_sx, SX_XLOCKED);
@@ -580,21 +581,25 @@ acctwatch(void)
* Stopping here is better than continuing, maybe it will be VBAD
* next time around.
*/
- if (VFS_STATFS(acct_vp->v_mount, &sb) < 0)
+ sp = malloc(sizeof(struct statfs), M_STATFS, M_WAITOK);
+ if (VFS_STATFS(acct_vp->v_mount, sp) < 0) {
+ free(sp, M_STATFS);
return;
+ }
if (acct_suspended) {
- if (sb.f_bavail > (int64_t)(acctresume * sb.f_blocks /
+ if (sp->f_bavail > (int64_t)(acctresume * sp->f_blocks /
100)) {
acct_suspended = 0;
log(LOG_NOTICE, "Accounting resumed\n");
}
} else {
- if (sb.f_bavail <= (int64_t)(acctsuspend * sb.f_blocks /
+ if (sp->f_bavail <= (int64_t)(acctsuspend * sp->f_blocks /
100)) {
acct_suspended = 1;
log(LOG_NOTICE, "Accounting suspended\n");
}
}
+ free(sp, M_STATFS);
}
/*
diff --git a/sys/kern/kern_descrip.c b/sys/kern/kern_descrip.c
index 29a1d4b..cd1ba6f 100644
--- a/sys/kern/kern_descrip.c
+++ b/sys/kern/kern_descrip.c
@@ -1751,21 +1751,23 @@ falloc_noinstall(struct thread *td, struct file **resultfp)
{
struct file *fp;
int maxuserfiles = maxfiles - (maxfiles / 20);
+ int openfiles_new;
static struct timeval lastfail;
static int curfail;
KASSERT(resultfp != NULL, ("%s: resultfp == NULL", __func__));
- if ((openfiles >= maxuserfiles &&
+ openfiles_new = atomic_fetchadd_int(&openfiles, 1) + 1;
+ if ((openfiles_new >= maxuserfiles &&
priv_check(td, PRIV_MAXFILES) != 0) ||
- openfiles >= maxfiles) {
+ openfiles_new >= maxfiles) {
+ atomic_subtract_int(&openfiles, 1);
if (ppsratecheck(&lastfail, &curfail, 1)) {
printf("kern.maxfiles limit exceeded by uid %i, (%s) "
"please see tuning(7).\n", td->td_ucred->cr_ruid, td->td_proc->p_comm);
}
return (ENFILE);
}
- atomic_add_int(&openfiles, 1);
fp = uma_zalloc(file_zone, M_WAITOK | M_ZERO);
refcount_init(&fp->f_count, 1);
fp->f_cred = crhold(td->td_ucred);
@@ -2477,10 +2479,8 @@ fget_unlocked(struct filedesc *fdp, int fd, cap_rights_t *needrightsp,
fde = &fdt->fdt_ofiles[fd];
haverights = *cap_rights_fde(fde);
fp = fde->fde_file;
- if (!seq_consistent(fd_seq(fdt, fd), seq)) {
- cpu_spinwait();
+ if (!seq_consistent(fd_seq(fdt, fd), seq))
continue;
- }
#else
fp = fdt->fdt_ofiles[fd].fde_file;
#endif
diff --git a/sys/kern/kern_procctl.c b/sys/kern/kern_procctl.c
index c3d290a..e875170 100644
--- a/sys/kern/kern_procctl.c
+++ b/sys/kern/kern_procctl.c
@@ -243,7 +243,7 @@ reap_kill(struct thread *td, struct proc *p, struct procctl_reaper_kill *rk)
return (ECAPMODE);
if (rk->rk_sig <= 0 || rk->rk_sig > _SIG_MAXSIG)
return (EINVAL);
- if ((rk->rk_flags & ~REAPER_KILL_CHILDREN) != 0)
+ if ((rk->rk_flags & ~(REAPER_KILL_CHILDREN | REAPER_KILL_SUBTREE)) != 0)
return (EINVAL);
PROC_UNLOCK(p);
reap = (p->p_treeflag & P_TREE_REAPER) == 0 ? p->p_reaper : p;
diff --git a/sys/kern/kern_shutdown.c b/sys/kern/kern_shutdown.c
index a74a230..ea085a0 100644
--- a/sys/kern/kern_shutdown.c
+++ b/sys/kern/kern_shutdown.c
@@ -713,13 +713,13 @@ vpanic(const char *fmt, va_list ap)
CPU_CLR(PCPU_GET(cpuid), &other_cpus);
stop_cpus_hard(other_cpus);
}
+#endif
/*
* Ensure that the scheduler is stopped while panicking, even if panic
* has been entered from kdb.
*/
td->td_stopsched = 1;
-#endif
bootopt = RB_AUTOBOOT;
newpanic = 0;
diff --git a/sys/kern/kern_switch.c b/sys/kern/kern_switch.c
index d0009b1..f58acd5 100644
--- a/sys/kern/kern_switch.c
+++ b/sys/kern/kern_switch.c
@@ -206,7 +206,22 @@ critical_exit(void)
if (td->td_critnest == 1) {
td->td_critnest = 0;
+
+ /*
+ * Interrupt handlers execute critical_exit() on
+ * leave, and td_owepreempt may be left set by an
+ * interrupt handler only when td_critnest > 0. If we
+ * are decrementing td_critnest from 1 to 0, read
+ * td_owepreempt after decrementing, to not miss the
+ * preempt. Disallow compiler to reorder operations.
+ */
+ __compiler_membar();
if (td->td_owepreempt && !kdb_active) {
+ /*
+ * Microoptimization: we committed to switch,
+ * disable preemption in interrupt handlers
+ * while spinning for the thread lock.
+ */
td->td_critnest = 1;
thread_lock(td);
td->td_critnest--;
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index de939ce..cd3ee1f 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -968,8 +968,8 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
sched_load_rem();
td->td_lastcpu = td->td_oncpu;
- preempted = !((td->td_flags & TDF_SLICEEND) ||
- (flags & SWT_RELINQUISH));
+ preempted = (td->td_flags & TDF_SLICEEND) == 0 &&
+ (flags & SW_PREEMPT) != 0;
td->td_flags &= ~(TDF_NEEDRESCHED | TDF_SLICEEND);
td->td_owepreempt = 0;
td->td_oncpu = NOCPU;
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index 8af976b..d83beca 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -1898,8 +1898,8 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
ts->ts_rltick = ticks;
td->td_lastcpu = td->td_oncpu;
td->td_oncpu = NOCPU;
- preempted = !((td->td_flags & TDF_SLICEEND) ||
- (flags & SWT_RELINQUISH));
+ preempted = (td->td_flags & TDF_SLICEEND) == 0 &&
+ (flags & SW_PREEMPT) != 0;
td->td_flags &= ~(TDF_NEEDRESCHED | TDF_SLICEEND);
td->td_owepreempt = 0;
if (!TD_IS_IDLETHREAD(td))
diff --git a/sys/kern/subr_unit.c b/sys/kern/subr_unit.c
index 5a62762..3d51077 100644
--- a/sys/kern/subr_unit.c
+++ b/sys/kern/subr_unit.c
@@ -216,7 +216,7 @@ ub_full(struct unrb *ub, int len)
* Consistency check function.
*
* Checks the internal consistency as well as we can.
- *
+ *
* Called at all boundaries of this API.
*/
static void
@@ -240,7 +240,7 @@ check_unrhdr(struct unrhdr *uh, int line)
w = 0;
bit_count(ub->map, 0, up->len, &w);
y += w;
- } else if (up->ptr != NULL)
+ } else if (up->ptr != NULL)
y += up->len;
}
KASSERT (y == uh->busy,
@@ -375,7 +375,7 @@ is_bitmap(struct unrhdr *uh, struct unr *up)
/*
* Look for sequence of items which can be combined into a bitmap, if
* multiple are present, take the one which saves most memory.
- *
+ *
* Return (1) if a sequence was found to indicate that another call
* might be able to do more. Return (0) if we found no suitable sequence.
*
@@ -591,7 +591,7 @@ alloc_unrl(struct unrhdr *uh)
}
/*
- * We can always allocate from the first list element, so if we have
+ * We can always allocate from the first list element, so if we have
* nothing on the list, we must have run out of unit numbers.
*/
if (up == NULL)
@@ -803,7 +803,7 @@ free_unrl(struct unrhdr *uh, u_int item, void **p1, void **p2)
/* Handle bitmap items */
if (is_bitmap(uh, up)) {
ub = up->ptr;
-
+
KASSERT(bit_test(ub->map, item) != 0,
("UNR: Freeing free item %d (bitmap)\n", item));
bit_clear(ub->map, item);
@@ -909,7 +909,7 @@ print_unr(struct unrhdr *uh, struct unr *up)
for (x = 0; x < up->len; x++) {
if (bit_test(ub->map, x))
printf("#");
- else
+ else
printf(" ");
}
printf("]\n");
@@ -986,7 +986,7 @@ main(int argc, char **argv)
long count = 10000; /* Number of unrs to test */
long reps = 1, m;
int ch;
- u_int i, x, j;
+ u_int i, j;
verbose = false;
@@ -999,7 +999,7 @@ main(int argc, char **argv)
usage(argv);
exit(2);
}
-
+
break;
case 'v':
verbose = true;
@@ -1026,7 +1026,6 @@ main(int argc, char **argv)
printf("sizeof(struct unrb) %zu\n", sizeof(struct unrb));
printf("sizeof(struct unrhdr) %zu\n", sizeof(struct unrhdr));
printf("NBITS %lu\n", (unsigned long)NBITS);
- x = 1;
for (m = 0; m < count * reps; m++) {
j = random();
i = (j >> 1) % count;
diff --git a/sys/kern/syscalls.c b/sys/kern/syscalls.c
index 21c41fa..b0d782e 100644
--- a/sys/kern/syscalls.c
+++ b/sys/kern/syscalls.c
@@ -3,7 +3,7 @@
*
* DO NOT EDIT-- this file is automatically generated.
* $FreeBSD$
- * created from FreeBSD: stable/11/sys/kern/syscalls.master 304977 2016-08-29 05:15:43Z kib
+ * created from FreeBSD: stable/11/sys/kern/syscalls.master 313450 2017-02-08 18:32:35Z jhb
*/
const char *syscallnames[] = {
diff --git a/sys/kern/syscalls.master b/sys/kern/syscalls.master
index 1b6980e..36380bd 100644
--- a/sys/kern/syscalls.master
+++ b/sys/kern/syscalls.master
@@ -85,7 +85,7 @@
17 AUE_NULL STD { int obreak(char *nsize); } break \
obreak_args int
18 AUE_GETFSSTAT COMPAT4 { int getfsstat(struct ostatfs *buf, \
- long bufsize, int flags); }
+ long bufsize, int mode); }
19 AUE_LSEEK COMPAT { long lseek(int fd, long offset, \
int whence); }
20 AUE_GETPID STD { pid_t getpid(void); }
@@ -707,7 +707,7 @@
394 AUE_NULL STD { int mac_syscall(const char *policy, \
int call, void *arg); }
395 AUE_GETFSSTAT STD { int getfsstat(struct statfs *buf, \
- long bufsize, int flags); }
+ long bufsize, int mode); }
396 AUE_STATFS STD { int statfs(char *path, \
struct statfs *buf); }
397 AUE_FSTATFS STD { int fstatfs(int fd, struct statfs *buf); }
diff --git a/sys/kern/systrace_args.c b/sys/kern/systrace_args.c
index 52fd73b..ab98c5a 100644
--- a/sys/kern/systrace_args.c
+++ b/sys/kern/systrace_args.c
@@ -2081,7 +2081,7 @@ systrace_args(int sysnum, void *params, uint64_t *uarg, int *n_args)
struct getfsstat_args *p = params;
uarg[0] = (intptr_t) p->buf; /* struct statfs * */
iarg[1] = p->bufsize; /* long */
- iarg[2] = p->flags; /* int */
+ iarg[2] = p->mode; /* int */
*n_args = 3;
break;
}
diff --git a/sys/kern/uipc_mbuf.c b/sys/kern/uipc_mbuf.c
index 8c5ee78..5f103eb 100644
--- a/sys/kern/uipc_mbuf.c
+++ b/sys/kern/uipc_mbuf.c
@@ -169,7 +169,7 @@ CTASSERT(sizeof(struct m_ext) == 28);
* plain pointer does.
*/
#ifdef INVARIANTS
-static struct mbuf m_assertbuf;
+static struct mbuf __used m_assertbuf;
CTASSERT(sizeof(m_assertbuf.m_slist) == sizeof(m_assertbuf.m_next));
CTASSERT(sizeof(m_assertbuf.m_stailq) == sizeof(m_assertbuf.m_next));
CTASSERT(sizeof(m_assertbuf.m_slistpkt) == sizeof(m_assertbuf.m_nextpkt));
diff --git a/sys/kern/uipc_mbuf2.c b/sys/kern/uipc_mbuf2.c
index a90074e..8b7f4fd 100644
--- a/sys/kern/uipc_mbuf2.c
+++ b/sys/kern/uipc_mbuf2.c
@@ -159,7 +159,7 @@ m_pulldown(struct mbuf *m, int off, int len, int *offp)
* the target data is on <n, off>.
* if we got enough data on the mbuf "n", we're done.
*/
- if ((off == 0 || offp) && len <= n->m_len - off && writable)
+ if ((off == 0 || offp) && len <= n->m_len - off)
goto ok;
/*
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index 81e591f..cd4a5ff 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -3929,10 +3929,8 @@ biodone(struct bio *bp)
bp->bio_flags |= BIO_DONE;
wakeup(bp);
mtx_unlock(mtxp);
- } else {
- bp->bio_flags |= BIO_DONE;
+ } else
done(bp);
- }
}
/*
diff --git a/sys/kern/vfs_cache.c b/sys/kern/vfs_cache.c
index 45aa053..02b8e2b 100644
--- a/sys/kern/vfs_cache.c
+++ b/sys/kern/vfs_cache.c
@@ -229,8 +229,6 @@ SYSCTL_UINT(_vfs, OID_AUTO, ncneghitsrequeue, CTLFLAG_RW, &ncneghitsrequeue, 0,
struct nchstats nchstats; /* cache effectiveness statistics */
static struct mtx ncneg_shrink_lock;
-MTX_SYSINIT(vfscache_shrink_neg, &ncneg_shrink_lock, "Name Cache shrink neg",
- MTX_DEF);
struct neglist {
struct mtx nl_lock;
@@ -242,30 +240,29 @@ static struct neglist ncneg_hot;
static int shrink_list_turn;
-static u_int numneglists;
+#define numneglists (ncneghash + 1)
+static u_int ncneghash;
static inline struct neglist *
NCP2NEGLIST(struct namecache *ncp)
{
- return (&neglists[(((uintptr_t)(ncp) >> 8) % numneglists)]);
+ return (&neglists[(((uintptr_t)(ncp) >> 8) & ncneghash)]);
}
-static u_int numbucketlocks;
+#define numbucketlocks (ncbuckethash + 1)
+static u_int ncbuckethash;
static struct rwlock_padalign *bucketlocks;
#define HASH2BUCKETLOCK(hash) \
- ((struct rwlock *)(&bucketlocks[((hash) % numbucketlocks)]))
+ ((struct rwlock *)(&bucketlocks[((hash) & ncbuckethash)]))
-static u_int numvnodelocks;
+#define numvnodelocks (ncvnodehash + 1)
+static u_int ncvnodehash;
static struct mtx *vnodelocks;
static inline struct mtx *
VP2VNODELOCK(struct vnode *vp)
{
- struct mtx *vlp;
- if (vp == NULL)
- return (NULL);
- vlp = &vnodelocks[(((uintptr_t)(vp) >> 8) % numvnodelocks)];
- return (vlp);
+ return (&vnodelocks[(((uintptr_t)(vp) >> 8) & ncvnodehash)]);
}
/*
@@ -1107,7 +1104,7 @@ cache_lookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
uint32_t hash;
int error, ltype;
- if (!doingcache) {
+ if (__predict_false(!doingcache)) {
cnp->cn_flags &= ~MAKEENTRY;
return (0);
}
@@ -1374,8 +1371,8 @@ cache_lock_vnodes_cel_3(struct celockstate *cel, struct vnode *vp)
cache_assert_vlp_locked(cel->vlp[1]);
MPASS(cel->vlp[2] == NULL);
+ MPASS(vp != NULL);
vlp = VP2VNODELOCK(vp);
- MPASS(vlp != NULL);
ret = true;
if (vlp >= cel->vlp[1]) {
@@ -1547,13 +1544,13 @@ cache_enter_time(struct vnode *dvp, struct vnode *vp, struct componentname *cnp,
VNASSERT(dvp == NULL || (dvp->v_iflag & VI_DOOMED) == 0, dvp,
("cache_enter: Doomed vnode used as src"));
- if (!doingcache)
+ if (__predict_false(!doingcache))
return;
/*
* Avoid blowout in namecache entries.
*/
- if (numcache >= desiredvnodes * ncsizefactor)
+ if (__predict_false(numcache >= desiredvnodes * ncsizefactor))
return;
cache_celockstate_init(&cel);
@@ -1779,21 +1776,21 @@ nchinit(void *dummy __unused)
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
nchashtbl = hashinit(desiredvnodes * 2, M_VFSCACHE, &nchash);
- numbucketlocks = cache_roundup_2(mp_ncpus * 64);
- if (numbucketlocks > nchash + 1)
- numbucketlocks = nchash + 1;
+ ncbuckethash = cache_roundup_2(mp_ncpus * 64) - 1;
+ if (ncbuckethash > nchash)
+ ncbuckethash = nchash;
bucketlocks = malloc(sizeof(*bucketlocks) * numbucketlocks, M_VFSCACHE,
M_WAITOK | M_ZERO);
for (i = 0; i < numbucketlocks; i++)
rw_init_flags(&bucketlocks[i], "ncbuc", RW_DUPOK | RW_RECURSE);
- numvnodelocks = cache_roundup_2(mp_ncpus * 64);
+ ncvnodehash = cache_roundup_2(mp_ncpus * 64) - 1;
vnodelocks = malloc(sizeof(*vnodelocks) * numvnodelocks, M_VFSCACHE,
M_WAITOK | M_ZERO);
for (i = 0; i < numvnodelocks; i++)
mtx_init(&vnodelocks[i], "ncvn", NULL, MTX_DUPOK | MTX_RECURSE);
ncpurgeminvnodes = numbucketlocks;
- numneglists = 4;
+ ncneghash = 3;
neglists = malloc(sizeof(*neglists) * numneglists, M_VFSCACHE,
M_WAITOK | M_ZERO);
for (i = 0; i < numneglists; i++) {
@@ -1803,6 +1800,8 @@ nchinit(void *dummy __unused)
mtx_init(&ncneg_hot.nl_lock, "ncneglh", NULL, MTX_DEF);
TAILQ_INIT(&ncneg_hot.nl_list);
+ mtx_init(&ncneg_shrink_lock, "ncnegs", NULL, MTX_DEF);
+
numcalls = counter_u64_alloc(M_WAITOK);
dothits = counter_u64_alloc(M_WAITOK);
dotdothits = counter_u64_alloc(M_WAITOK);
@@ -2055,9 +2054,9 @@ kern___getcwd(struct thread *td, char *buf, enum uio_seg bufseg, u_int buflen,
struct vnode *cdir, *rdir;
int error;
- if (disablecwd)
+ if (__predict_false(disablecwd))
return (ENODEV);
- if (buflen < 2)
+ if (__predict_false(buflen < 2))
return (EINVAL);
if (buflen > path_max)
buflen = path_max;
@@ -2108,9 +2107,9 @@ vn_fullpath(struct thread *td, struct vnode *vn, char **retbuf, char **freebuf)
struct vnode *rdir;
int error;
- if (disablefullpath)
+ if (__predict_false(disablefullpath))
return (ENODEV);
- if (vn == NULL)
+ if (__predict_false(vn == NULL))
return (EINVAL);
buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
@@ -2142,9 +2141,9 @@ vn_fullpath_global(struct thread *td, struct vnode *vn,
char *buf;
int error;
- if (disablefullpath)
+ if (__predict_false(disablefullpath))
return (ENODEV);
- if (vn == NULL)
+ if (__predict_false(vn == NULL))
return (EINVAL);
buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
error = vn_fullpath1(td, vn, rootvnode, buf, retbuf, MAXPATHLEN);
@@ -2408,7 +2407,7 @@ vn_path_to_global_path(struct thread *td, struct vnode *vp, char *path,
ASSERT_VOP_ELOCKED(vp, __func__);
/* Return ENODEV if sysctl debug.disablefullpath==1 */
- if (disablefullpath)
+ if (__predict_false(disablefullpath))
return (ENODEV);
/* Construct global filesystem path from vp. */
diff --git a/sys/kern/vfs_default.c b/sys/kern/vfs_default.c
index b4e5a9d..a07dd32 100644
--- a/sys/kern/vfs_default.c
+++ b/sys/kern/vfs_default.c
@@ -931,7 +931,8 @@ int
vop_stdallocate(struct vop_allocate_args *ap)
{
#ifdef __notyet__
- struct statfs sfs;
+ struct statfs *sfs;
+ off_t maxfilesize = 0;
#endif
struct iovec aiov;
struct vattr vattr, *vap;
@@ -967,12 +968,16 @@ vop_stdallocate(struct vop_allocate_args *ap)
* Check if the filesystem sets f_maxfilesize; if not use
* VOP_SETATTR to perform the check.
*/
- error = VFS_STATFS(vp->v_mount, &sfs, td);
+ sfs = malloc(sizeof(struct statfs), M_STATFS, M_WAITOK);
+ error = VFS_STATFS(vp->v_mount, sfs, td);
+ if (error == 0)
+ maxfilesize = sfs->f_maxfilesize;
+ free(sfs, M_STATFS);
if (error != 0)
goto out;
- if (sfs.f_maxfilesize) {
- if (offset > sfs.f_maxfilesize || len > sfs.f_maxfilesize ||
- offset + len > sfs.f_maxfilesize) {
+ if (maxfilesize) {
+ if (offset > maxfilesize || len > maxfilesize ||
+ offset + len > maxfilesize) {
error = EFBIG;
goto out;
}
diff --git a/sys/kern/vfs_mount.c b/sys/kern/vfs_mount.c
index 11382da..be565a9 100644
--- a/sys/kern/vfs_mount.c
+++ b/sys/kern/vfs_mount.c
@@ -79,6 +79,7 @@ SYSCTL_INT(_vfs, OID_AUTO, usermount, CTLFLAG_RW, &usermount, 0,
"Unprivileged users may mount and unmount file systems");
MALLOC_DEFINE(M_MOUNT, "mount", "vfs mount structure");
+MALLOC_DEFINE(M_STATFS, "statfs", "statfs structure");
static uma_zone_t mount_zone;
/* List of mounted filesystems. */
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index d9813e3..382c0bd 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -51,6 +51,7 @@ __FBSDID("$FreeBSD$");
#include <sys/buf.h>
#include <sys/condvar.h>
#include <sys/conf.h>
+#include <sys/counter.h>
#include <sys/dirent.h>
#include <sys/event.h>
#include <sys/eventhandler.h>
@@ -123,9 +124,9 @@ static unsigned long numvnodes;
SYSCTL_ULONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0,
"Number of vnodes in existence");
-static u_long vnodes_created;
-SYSCTL_ULONG(_vfs, OID_AUTO, vnodes_created, CTLFLAG_RD, &vnodes_created,
- 0, "Number of vnodes created by getnewvnode");
+static counter_u64_t vnodes_created;
+SYSCTL_COUNTER_U64(_vfs, OID_AUTO, vnodes_created, CTLFLAG_RD, &vnodes_created,
+ "Number of vnodes created by getnewvnode");
/*
* Conversion tables for conversion from vnode types to inode formats
@@ -175,8 +176,8 @@ static u_long freevnodes;
SYSCTL_ULONG(_vfs, OID_AUTO, freevnodes, CTLFLAG_RD,
&freevnodes, 0, "Number of \"free\" vnodes");
-static u_long recycles_count;
-SYSCTL_ULONG(_vfs, OID_AUTO, recycles, CTLFLAG_RD, &recycles_count, 0,
+static counter_u64_t recycles_count;
+SYSCTL_COUNTER_U64(_vfs, OID_AUTO, recycles, CTLFLAG_RD, &recycles_count,
"Number of vnodes recycled to meet vnode cache targets");
/*
@@ -188,8 +189,8 @@ static int reassignbufcalls;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW, &reassignbufcalls, 0,
"Number of calls to reassignbuf");
-static u_long free_owe_inact;
-SYSCTL_ULONG(_vfs, OID_AUTO, free_owe_inact, CTLFLAG_RD, &free_owe_inact, 0,
+static counter_u64_t free_owe_inact;
+SYSCTL_COUNTER_U64(_vfs, OID_AUTO, free_owe_inact, CTLFLAG_RD, &free_owe_inact,
"Number of times free vnodes kept on active list due to VFS "
"owing inactivation");
@@ -472,6 +473,11 @@ vntblinit(void *dummy __unused)
NULL, NULL, pctrie_zone_init, NULL, UMA_ALIGN_PTR,
UMA_ZONE_NOFREE | UMA_ZONE_VM);
uma_prealloc(buf_trie_zone, nbuf);
+
+ vnodes_created = counter_u64_alloc(M_WAITOK);
+ recycles_count = counter_u64_alloc(M_WAITOK);
+ free_owe_inact = counter_u64_alloc(M_WAITOK);
+
/*
* Initialize the filesystem syncer.
*/
@@ -918,7 +924,7 @@ vlrureclaim(struct mount *mp, int reclaim_nc_src, int trigger)
}
KASSERT((vp->v_iflag & VI_DOOMED) == 0,
("VI_DOOMED unexpectedly detected in vlrureclaim()"));
- atomic_add_long(&recycles_count, 1);
+ counter_u64_add(recycles_count, 1);
vgonel(vp);
VOP_UNLOCK(vp, 0);
vdropl(vp);
@@ -1217,7 +1223,7 @@ vtryrecycle(struct vnode *vp)
return (EBUSY);
}
if ((vp->v_iflag & VI_DOOMED) == 0) {
- atomic_add_long(&recycles_count, 1);
+ counter_u64_add(recycles_count, 1);
vgonel(vp);
}
VOP_UNLOCK(vp, LK_INTERLOCK);
@@ -1376,7 +1382,7 @@ getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops,
atomic_add_long(&numvnodes, 1);
mtx_unlock(&vnode_free_list_mtx);
alloc:
- atomic_add_long(&vnodes_created, 1);
+ counter_u64_add(vnodes_created, 1);
vp = (struct vnode *) uma_zalloc(vnode_zone, M_WAITOK);
/*
* Locks are given the generic name "vnode" when created.
@@ -2855,7 +2861,7 @@ _vdrop(struct vnode *vp, bool locked)
vp->v_iflag |= VI_FREE;
mtx_unlock(&vnode_free_list_mtx);
} else {
- atomic_add_long(&free_owe_inact, 1);
+ counter_u64_add(free_owe_inact, 1);
}
VI_UNLOCK(vp);
return;
@@ -2942,7 +2948,7 @@ vinactive(struct vnode *vp, struct thread *td)
obj = vp->v_object;
if (obj != NULL && (obj->flags & OBJ_MIGHTBEDIRTY) != 0) {
VM_OBJECT_WLOCK(obj);
- vm_object_page_clean(obj, 0, 0, OBJPC_NOSYNC);
+ vm_object_page_clean(obj, 0, 0, 0);
VM_OBJECT_WUNLOCK(obj);
}
VOP_INACTIVE(vp, td);
diff --git a/sys/kern/vfs_syscalls.c b/sys/kern/vfs_syscalls.c
index 5ebfb03..0490694 100644
--- a/sys/kern/vfs_syscalls.c
+++ b/sys/kern/vfs_syscalls.c
@@ -298,12 +298,14 @@ sys_statfs(td, uap)
struct statfs *buf;
} */ *uap;
{
- struct statfs sf;
+ struct statfs *sfp;
int error;
- error = kern_statfs(td, uap->path, UIO_USERSPACE, &sf);
+ sfp = malloc(sizeof(struct statfs), M_STATFS, M_WAITOK);
+ error = kern_statfs(td, uap->path, UIO_USERSPACE, sfp);
if (error == 0)
- error = copyout(&sf, uap->buf, sizeof(sf));
+ error = copyout(sfp, uap->buf, sizeof(struct statfs));
+ free(sfp, M_STATFS);
return (error);
}
@@ -344,12 +346,14 @@ sys_fstatfs(td, uap)
struct statfs *buf;
} */ *uap;
{
- struct statfs sf;
+ struct statfs *sfp;
int error;
- error = kern_fstatfs(td, uap->fd, &sf);
+ sfp = malloc(sizeof(struct statfs), M_STATFS, M_WAITOK);
+ error = kern_fstatfs(td, uap->fd, sfp);
if (error == 0)
- error = copyout(&sf, uap->buf, sizeof(sf));
+ error = copyout(sfp, uap->buf, sizeof(struct statfs));
+ free(sfp, M_STATFS);
return (error);
}
@@ -386,7 +390,7 @@ kern_fstatfs(struct thread *td, int fd, struct statfs *buf)
struct getfsstat_args {
struct statfs *buf;
long bufsize;
- int flags;
+ int mode;
};
#endif
int
@@ -395,7 +399,7 @@ sys_getfsstat(td, uap)
register struct getfsstat_args /* {
struct statfs *buf;
long bufsize;
- int flags;
+ int mode;
} */ *uap;
{
size_t count;
@@ -404,7 +408,7 @@ sys_getfsstat(td, uap)
if (uap->bufsize < 0 || uap->bufsize > SIZE_MAX)
return (EINVAL);
error = kern_getfsstat(td, &uap->buf, uap->bufsize, &count,
- UIO_USERSPACE, uap->flags);
+ UIO_USERSPACE, uap->mode);
if (error == 0)
td->td_retval[0] = count;
return (error);
@@ -417,13 +421,20 @@ sys_getfsstat(td, uap)
*/
int
kern_getfsstat(struct thread *td, struct statfs **buf, size_t bufsize,
- size_t *countp, enum uio_seg bufseg, int flags)
+ size_t *countp, enum uio_seg bufseg, int mode)
{
struct mount *mp, *nmp;
- struct statfs *sfsp, *sp, sb, *tofree;
+ struct statfs *sfsp, *sp, *sptmp, *tofree;
size_t count, maxcount;
int error;
+ switch (mode) {
+ case MNT_WAIT:
+ case MNT_NOWAIT:
+ break;
+ default:
+ return (EINVAL);
+ }
restart:
maxcount = bufsize / sizeof(struct statfs);
if (bufsize == 0) {
@@ -442,7 +453,7 @@ restart:
if (maxcount > count)
maxcount = count;
tofree = sfsp = *buf = malloc(maxcount * sizeof(struct statfs),
- M_TEMP, M_WAITOK);
+ M_STATFS, M_WAITOK);
}
count = 0;
mtx_lock(&mountlist_mtx);
@@ -457,7 +468,7 @@ restart:
continue;
}
#endif
- if (flags == MNT_WAIT) {
+ if (mode == MNT_WAIT) {
if (vfs_busy(mp, MBF_MNTLSTLOCK) != 0) {
/*
* If vfs_busy() failed, and MBF_NOWAIT
@@ -467,7 +478,7 @@ restart:
* no other choice than to start over.
*/
mtx_unlock(&mountlist_mtx);
- free(tofree, M_TEMP);
+ free(tofree, M_STATFS);
goto restart;
}
} else {
@@ -476,7 +487,7 @@ restart:
continue;
}
}
- if (sfsp && count < maxcount) {
+ if (sfsp != NULL && count < maxcount) {
sp = &mp->mnt_stat;
/*
* Set these in case the underlying filesystem
@@ -486,10 +497,10 @@ restart:
sp->f_namemax = NAME_MAX;
sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
/*
- * If MNT_NOWAIT or MNT_LAZY is specified, do not
- * refresh the fsstat cache.
+ * If MNT_NOWAIT is specified, do not refresh
+ * the fsstat cache.
*/
- if (flags != MNT_LAZY && flags != MNT_NOWAIT) {
+ if (mode != MNT_NOWAIT) {
error = VFS_STATFS(mp, sp);
if (error != 0) {
mtx_lock(&mountlist_mtx);
@@ -499,15 +510,20 @@ restart:
}
}
if (priv_check(td, PRIV_VFS_GENERATION)) {
- bcopy(sp, &sb, sizeof(sb));
- sb.f_fsid.val[0] = sb.f_fsid.val[1] = 0;
- prison_enforce_statfs(td->td_ucred, mp, &sb);
- sp = &sb;
- }
- if (bufseg == UIO_SYSSPACE)
+ sptmp = malloc(sizeof(struct statfs), M_STATFS,
+ M_WAITOK);
+ *sptmp = *sp;
+ sptmp->f_fsid.val[0] = sptmp->f_fsid.val[1] = 0;
+ prison_enforce_statfs(td->td_ucred, mp, sptmp);
+ sp = sptmp;
+ } else
+ sptmp = NULL;
+ if (bufseg == UIO_SYSSPACE) {
bcopy(sp, sfsp, sizeof(*sp));
- else /* if (bufseg == UIO_USERSPACE) */ {
+ free(sptmp, M_STATFS);
+ } else /* if (bufseg == UIO_USERSPACE) */ {
error = copyout(sp, sfsp, sizeof(*sp));
+ free(sptmp, M_STATFS);
if (error != 0) {
vfs_unbusy(mp);
return (error);
@@ -521,7 +537,7 @@ restart:
vfs_unbusy(mp);
}
mtx_unlock(&mountlist_mtx);
- if (sfsp && count > maxcount)
+ if (sfsp != NULL && count > maxcount)
*countp = maxcount;
else
*countp = count;
@@ -549,14 +565,17 @@ freebsd4_statfs(td, uap)
} */ *uap;
{
struct ostatfs osb;
- struct statfs sf;
+ struct statfs *sfp;
int error;
- error = kern_statfs(td, uap->path, UIO_USERSPACE, &sf);
- if (error != 0)
- return (error);
- cvtstatfs(&sf, &osb);
- return (copyout(&osb, uap->buf, sizeof(osb)));
+ sfp = malloc(sizeof(struct statfs), M_STATFS, M_WAITOK);
+ error = kern_statfs(td, uap->path, UIO_USERSPACE, sfp);
+ if (error == 0) {
+ cvtstatfs(sfp, &osb);
+ error = copyout(&osb, uap->buf, sizeof(osb));
+ }
+ free(sfp, M_STATFS);
+ return (error);
}
/*
@@ -577,14 +596,17 @@ freebsd4_fstatfs(td, uap)
} */ *uap;
{
struct ostatfs osb;
- struct statfs sf;
+ struct statfs *sfp;
int error;
- error = kern_fstatfs(td, uap->fd, &sf);
- if (error != 0)
- return (error);
- cvtstatfs(&sf, &osb);
- return (copyout(&osb, uap->buf, sizeof(osb)));
+ sfp = malloc(sizeof(struct statfs), M_STATFS, M_WAITOK);
+ error = kern_fstatfs(td, uap->fd, sfp);
+ if (error == 0) {
+ cvtstatfs(sfp, &osb);
+ error = copyout(&osb, uap->buf, sizeof(osb));
+ }
+ free(sfp, M_STATFS);
+ return (error);
}
/*
@@ -594,7 +616,7 @@ freebsd4_fstatfs(td, uap)
struct freebsd4_getfsstat_args {
struct ostatfs *buf;
long bufsize;
- int flags;
+ int mode;
};
#endif
int
@@ -603,7 +625,7 @@ freebsd4_getfsstat(td, uap)
register struct freebsd4_getfsstat_args /* {
struct ostatfs *buf;
long bufsize;
- int flags;
+ int mode;
} */ *uap;
{
struct statfs *buf, *sp;
@@ -618,7 +640,7 @@ freebsd4_getfsstat(td, uap)
return (EINVAL);
size = count * sizeof(struct statfs);
error = kern_getfsstat(td, &buf, size, &count, UIO_SYSSPACE,
- uap->flags);
+ uap->mode);
td->td_retval[0] = count;
if (size != 0) {
sp = buf;
@@ -629,7 +651,7 @@ freebsd4_getfsstat(td, uap)
uap->buf++;
count--;
}
- free(buf, M_TEMP);
+ free(buf, M_STATFS);
}
return (error);
}
@@ -652,18 +674,21 @@ freebsd4_fhstatfs(td, uap)
} */ *uap;
{
struct ostatfs osb;
- struct statfs sf;
+ struct statfs *sfp;
fhandle_t fh;
int error;
error = copyin(uap->u_fhp, &fh, sizeof(fhandle_t));
if (error != 0)
return (error);
- error = kern_fhstatfs(td, fh, &sf);
- if (error != 0)
- return (error);
- cvtstatfs(&sf, &osb);
- return (copyout(&osb, uap->buf, sizeof(osb)));
+ sfp = malloc(sizeof(struct statfs), M_STATFS, M_WAITOK);
+ error = kern_fhstatfs(td, fh, sfp);
+ if (error == 0) {
+ cvtstatfs(sfp, &osb);
+ error = copyout(&osb, uap->buf, sizeof(osb));
+ }
+ free(sfp, M_STATFS);
+ return (error);
}
/*
@@ -4398,17 +4423,19 @@ sys_fhstatfs(td, uap)
struct statfs *buf;
} */ *uap;
{
- struct statfs sf;
+ struct statfs *sfp;
fhandle_t fh;
int error;
error = copyin(uap->u_fhp, &fh, sizeof(fhandle_t));
if (error != 0)
return (error);
- error = kern_fhstatfs(td, fh, &sf);
- if (error != 0)
- return (error);
- return (copyout(&sf, uap->buf, sizeof(sf)));
+ sfp = malloc(sizeof(struct statfs), M_STATFS, M_WAITOK);
+ error = kern_fhstatfs(td, fh, sfp);
+ if (error == 0)
+ error = copyout(sfp, uap->buf, sizeof(*sfp));
+ free(sfp, M_STATFS);
+ return (error);
}
int
diff --git a/sys/mips/mips/mem.c b/sys/mips/mips/mem.c
index 08bb6b0..30f4b34 100644
--- a/sys/mips/mips/mem.c
+++ b/sys/mips/mips/mem.c
@@ -151,12 +151,6 @@ int
memmmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr,
int prot, vm_memattr_t *memattr)
{
- /*
- * /dev/mem is the only one that makes sense through this
- * interface. For /dev/kmem any physaddr we return here
- * could be transient and hence incorrect or invalid at
- * a later time.
- */
if (dev2unit(dev) != CDEV_MINOR_MEM)
return (-1);
diff --git a/sys/modules/Makefile b/sys/modules/Makefile
index 0011024..d141aa9 100644
--- a/sys/modules/Makefile
+++ b/sys/modules/Makefile
@@ -50,7 +50,6 @@ SUBDIR= \
${_auxio} \
${_bce} \
bfe \
- bhnd \
bge \
bhnd \
${_bxe} \
@@ -330,6 +329,7 @@ SUBDIR= \
scd \
${_scsi_low} \
sdhci \
+ ${_sdhci_acpi} \
sdhci_pci \
sem \
send \
@@ -662,6 +662,7 @@ _padlock_rng= padlock_rng
_rdrand_rng= rdrand_rng
.endif
_s3= s3
+_sdhci_acpi= sdhci_acpi
_tpm= tpm
_twa= twa
_vesa= vesa
diff --git a/sys/modules/cam/Makefile b/sys/modules/cam/Makefile
index e9fd47f..577c710 100644
--- a/sys/modules/cam/Makefile
+++ b/sys/modules/cam/Makefile
@@ -14,6 +14,7 @@ SRCS+= opt_cd.h
SRCS+= opt_pt.h
SRCS+= opt_sa.h
SRCS+= opt_ses.h
+SRCS+= opt_ddb.h
SRCS+= device_if.h bus_if.h vnode_if.h
SRCS+= cam.c
SRCS+= cam_compat.c
diff --git a/sys/modules/i2c/controllers/ichiic/Makefile b/sys/modules/i2c/controllers/ichiic/Makefile
index 1cebbeb..97db5e8 100644
--- a/sys/modules/i2c/controllers/ichiic/Makefile
+++ b/sys/modules/i2c/controllers/ichiic/Makefile
@@ -2,7 +2,12 @@
.PATH: ${.CURDIR}/../../../../dev/ichiic
KMOD = ig4
-SRCS = device_if.h bus_if.h iicbus_if.h pci_if.h smbus_if.h \
- ig4_iic.c ig4_pci.c ig4_reg.h ig4_var.h
+SRCS = acpi_if.h device_if.h bus_if.h iicbus_if.h pci_if.h \
+ smbus_if.h ${ig4_acpi} ig4_iic.c ig4_pci.c ig4_reg.h \
+ ig4_var.h opt_acpi.h
+
+.if ${MACHINE_CPUARCH} == "amd64" || ${MACHINE_CPUARCH} == "i386"
+ig4_acpi= ig4_acpi.c
+.endif
.include <bsd.kmod.mk>
diff --git a/sys/modules/sdhci_acpi/Makefile b/sys/modules/sdhci_acpi/Makefile
new file mode 100644
index 0000000..0d34805
--- /dev/null
+++ b/sys/modules/sdhci_acpi/Makefile
@@ -0,0 +1,9 @@
+# $FreeBSD$
+
+.PATH: ${.CURDIR}/../../dev/sdhci
+
+KMOD= sdhci_acpi
+SRCS= sdhci_acpi.c sdhci.h sdhci_if.h
+SRCS+= acpi_if.h device_if.h bus_if.h opt_acpi.h pci_if.h mmcbr_if.h
+
+.include <bsd.kmod.mk>
diff --git a/sys/net/ieee8023ad_lacp.c b/sys/net/ieee8023ad_lacp.c
index 4863ac9..c01cec1 100644
--- a/sys/net/ieee8023ad_lacp.c
+++ b/sys/net/ieee8023ad_lacp.c
@@ -526,9 +526,6 @@ lacp_port_create(struct lagg_port *lgp)
struct ifmultiaddr *rifma = NULL;
int error;
- boolean_t active = TRUE; /* XXX should be configurable */
- boolean_t fast = FALSE; /* Configurable via ioctl */
-
link_init_sdl(ifp, (struct sockaddr *)&sdl, IFT_ETHER);
sdl.sdl_alen = ETHER_ADDR_LEN;
@@ -557,9 +554,7 @@ lacp_port_create(struct lagg_port *lgp)
lacp_fill_actorinfo(lp, &lp->lp_actor);
lacp_fill_markerinfo(lp, &lp->lp_marker);
- lp->lp_state =
- (active ? LACP_STATE_ACTIVITY : 0) |
- (fast ? LACP_STATE_TIMEOUT : 0);
+ lp->lp_state = LACP_STATE_ACTIVITY;
lp->lp_aggregator = NULL;
lacp_sm_rx_set_expired(lp);
LACP_UNLOCK(lsc);
diff --git a/sys/net/if_bridge.c b/sys/net/if_bridge.c
index b484191..1b43a28 100644
--- a/sys/net/if_bridge.c
+++ b/sys/net/if_bridge.c
@@ -911,14 +911,18 @@ bridge_mutecaps(struct bridge_softc *sc)
mask &= bif->bif_savedcaps;
}
+ BRIDGE_XLOCK(sc);
LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
enabled = bif->bif_ifp->if_capenable;
enabled &= ~BRIDGE_IFCAPS_STRIP;
/* strip off mask bits and enable them again if allowed */
enabled &= ~BRIDGE_IFCAPS_MASK;
enabled |= mask;
+ BRIDGE_UNLOCK(sc);
bridge_set_ifcap(sc, bif, enabled);
+ BRIDGE_LOCK(sc);
}
+ BRIDGE_XDROP(sc);
}
@@ -929,6 +933,8 @@ bridge_set_ifcap(struct bridge_softc *sc, struct bridge_iflist *bif, int set)
struct ifreq ifr;
int error;
+ BRIDGE_UNLOCK_ASSERT(sc);
+
bzero(&ifr, sizeof(ifr));
ifr.ifr_reqcap = set;
diff --git a/sys/net/if_bridgevar.h b/sys/net/if_bridgevar.h
index 3210c03..480c90a 100644
--- a/sys/net/if_bridgevar.h
+++ b/sys/net/if_bridgevar.h
@@ -280,6 +280,7 @@ struct ifbpstpconf {
#define BRIDGE_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx)
#define BRIDGE_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx)
#define BRIDGE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->sc_mtx, MA_OWNED)
+#define BRIDGE_UNLOCK_ASSERT(_sc) mtx_assert(&(_sc)->sc_mtx, MA_NOTOWNED)
#define BRIDGE_LOCK2REF(_sc, _err) do { \
mtx_assert(&(_sc)->sc_mtx, MA_OWNED); \
if ((_sc)->sc_iflist_xcnt > 0) \
diff --git a/sys/net/if_lagg.c b/sys/net/if_lagg.c
index 16f872a..0a5fc15 100644
--- a/sys/net/if_lagg.c
+++ b/sys/net/if_lagg.c
@@ -1022,7 +1022,7 @@ lagg_port_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
return (error);
fallback:
- if (lp->lp_ioctl != NULL)
+ if (lp != NULL && lp->lp_ioctl != NULL)
return ((*lp->lp_ioctl)(ifp, cmd, data));
return (EINVAL);
diff --git a/sys/net/if_media.c b/sys/net/if_media.c
index abdfa2b..9834d52 100644
--- a/sys/net/if_media.c
+++ b/sys/net/if_media.c
@@ -107,6 +107,7 @@ ifmedia_removeall(ifm)
LIST_REMOVE(entry, ifm_list);
free(entry, M_IFADDR);
}
+ ifm->ifm_cur = NULL;
}
/*
diff --git a/sys/netgraph/ng_mppc.c b/sys/netgraph/ng_mppc.c
index 76f4c3b..3ffcdcd 100644
--- a/sys/netgraph/ng_mppc.c
+++ b/sys/netgraph/ng_mppc.c
@@ -66,7 +66,7 @@
#if !defined(NETGRAPH_MPPC_COMPRESSION) && !defined(NETGRAPH_MPPC_ENCRYPTION)
#ifdef KLD_MODULE
-/* XXX NETGRAPH_MPPC_COMPRESSION isn't functional yet */
+#define NETGRAPH_MPPC_COMPRESSION
#define NETGRAPH_MPPC_ENCRYPTION
#else
/* This case is indicative of an error in sys/conf files */
@@ -81,7 +81,6 @@ static MALLOC_DEFINE(M_NETGRAPH_MPPC, "netgraph_mppc", "netgraph mppc node");
#endif
#ifdef NETGRAPH_MPPC_COMPRESSION
-/* XXX this file doesn't exist yet, but hopefully someday it will... */
#include <net/mppc.h>
#endif
#ifdef NETGRAPH_MPPC_ENCRYPTION
@@ -543,7 +542,7 @@ err1:
&destCnt, d->history, flags, 0);
/* Check return value */
- KASSERT(rtn != MPPC_INVALID, ("%s: invalid", __func__));
+ /* KASSERT(rtn != MPPC_INVALID, ("%s: invalid", __func__)); */
if ((rtn & MPPC_EXPANDED) == 0
&& (rtn & MPPC_COMP_OK) == MPPC_COMP_OK) {
outlen -= destCnt;
@@ -805,7 +804,7 @@ failed:
&sourceCnt, &destCnt, d->history, flags);
/* Check return value */
- KASSERT(rtn != MPPC_INVALID, ("%s: invalid", __func__));
+ /* KASSERT(rtn != MPPC_INVALID, ("%s: invalid", __func__)); */
if ((rtn & MPPC_DEST_EXHAUSTED) != 0
|| (rtn & MPPC_DECOMP_OK) != MPPC_DECOMP_OK) {
log(LOG_ERR, "%s: decomp returned 0x%x",
diff --git a/sys/netinet/ip_input.c b/sys/netinet/ip_input.c
index 6543cca..7023fe1 100644
--- a/sys/netinet/ip_input.c
+++ b/sys/netinet/ip_input.c
@@ -553,24 +553,35 @@ tooshort:
m_adj(m, ip_len - m->m_pkthdr.len);
}
- /* Try to forward the packet, but if we fail continue */
+ /*
+ * Try to forward the packet, but if we fail continue.
+ * ip_tryforward() does inbound and outbound packet firewall
+ * processing. If firewall has decided that destination becomes
+ * our local address, it sets M_FASTFWD_OURS flag. In this
+ * case skip another inbound firewall processing and update
+ * ip pointer.
+ */
+ if (V_ipforwarding != 0
#ifdef IPSEC
- /* For now we do not handle IPSEC in tryforward. */
- if (!key_havesp(IPSEC_DIR_INBOUND) && !key_havesp(IPSEC_DIR_OUTBOUND) &&
- (V_ipforwarding == 1))
- if (ip_tryforward(m) == NULL)
+ && !key_havesp(IPSEC_DIR_INBOUND)
+ && !key_havesp(IPSEC_DIR_OUTBOUND)
+#endif
+ ) {
+ if ((m = ip_tryforward(m)) == NULL)
return;
+ if (m->m_flags & M_FASTFWD_OURS) {
+ m->m_flags &= ~M_FASTFWD_OURS;
+ ip = mtod(m, struct ip *);
+ goto ours;
+ }
+ }
+#ifdef IPSEC
/*
* Bypass packet filtering for packets previously handled by IPsec.
*/
if (ip_ipsec_filtertunnel(m))
goto passin;
-#else
- if (V_ipforwarding == 1)
- if (ip_tryforward(m) == NULL)
- return;
-#endif /* IPSEC */
-
+#endif
/*
* Run through list of hooks for input packets.
*
diff --git a/sys/netinet/tcp_hostcache.c b/sys/netinet/tcp_hostcache.c
index 848c922..a1db048 100644
--- a/sys/netinet/tcp_hostcache.c
+++ b/sys/netinet/tcp_hostcache.c
@@ -69,10 +69,12 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
+#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
+#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
@@ -623,6 +625,9 @@ sysctl_tcp_hc_list(SYSCTL_HANDLER_ARGS)
char ip6buf[INET6_ADDRSTRLEN];
#endif
+ if (jailed_without_vnet(curthread->td_ucred) != 0)
+ return (EPERM);
+
sbuf_new(&sb, NULL, linesize * (V_tcp_hostcache.cache_count + 1),
SBUF_INCLUDENUL);
diff --git a/sys/netinet/tcp_output.c b/sys/netinet/tcp_output.c
index eba8ca3..6519f99 100644
--- a/sys/netinet/tcp_output.c
+++ b/sys/netinet/tcp_output.c
@@ -1262,8 +1262,8 @@ send:
#ifdef INET6
if (isipv6) {
/*
- * ip6_plen is not need to be filled now, and will be filled
- * in ip6_output.
+ * There is no need to fill in ip6_plen right now.
+ * It will be filled later by ip6_output.
*/
m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
th->th_sum = in6_cksum_pseudo(ip6, sizeof(struct tcphdr) +
diff --git a/sys/netinet6/in6_proto.c b/sys/netinet6/in6_proto.c
index cfb7628..0678b4c 100644
--- a/sys/netinet6/in6_proto.c
+++ b/sys/netinet6/in6_proto.c
@@ -507,19 +507,21 @@ sysctl_ip6_tempvltime(SYSCTL_HANDLER_ARGS)
SYSCTL_INT(_net_inet6_ip6, IPV6CTL_FORWARDING, forwarding,
CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_forwarding), 0,
- "Enable IPv6 forwarding between interfaces");
+ "Enable forwarding of IPv6 packets between interfaces");
SYSCTL_INT(_net_inet6_ip6, IPV6CTL_SENDREDIRECTS, redirect,
CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_sendredirects), 0,
- "Send a redirect message when forwarding back to a source link");
+ "Send ICMPv6 redirects for unforwardable IPv6 packets");
SYSCTL_INT(_net_inet6_ip6, IPV6CTL_DEFHLIM, hlim,
CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_defhlim), 0,
- "Default hop limit");
+ "Default hop limit to use for outgoing IPv6 packets");
SYSCTL_VNET_PCPUSTAT(_net_inet6_ip6, IPV6CTL_STATS, stats, struct ip6stat,
ip6stat,
"IP6 statistics (struct ip6stat, netinet6/ip6_var.h)");
SYSCTL_INT(_net_inet6_ip6, IPV6CTL_MAXFRAGPACKETS, maxfragpackets,
CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_maxfragpackets), 0,
- "Maximum allowed number of outstanding fragmented IPv6 packets");
+ "Default maximum number of outstanding fragmented IPv6 packets. "
+ "A value of 0 means no fragmented packets will be accepted, while a "
+ "a value of -1 means no limit");
SYSCTL_INT(_net_inet6_ip6, IPV6CTL_ACCEPT_RTADV, accept_rtadv,
CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_accept_rtadv), 0,
"Default value of per-interface flag for accepting ICMPv6 RA messages");
@@ -541,7 +543,8 @@ SYSCTL_INT(_net_inet6_ip6, IPV6CTL_LOG_INTERVAL, log_interval,
"Frequency in seconds at which to log IPv6 forwarding errors");
SYSCTL_INT(_net_inet6_ip6, IPV6CTL_HDRNESTLIMIT, hdrnestlimit,
CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_hdrnestlimit), 0,
- "Maximum allowed number of nested protocol headers");
+ "Default maximum number of IPv6 extension headers permitted on "
+ "incoming IPv6 packets, 0 for no artificial limit");
SYSCTL_INT(_net_inet6_ip6, IPV6CTL_DAD_COUNT, dad_count,
CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_dad_count), 0,
"Number of ICMPv6 NS messages sent during duplicate address detection");
@@ -550,7 +553,8 @@ SYSCTL_INT(_net_inet6_ip6, IPV6CTL_AUTO_FLOWLABEL, auto_flowlabel,
"Provide an IPv6 flowlabel in outbound packets");
SYSCTL_INT(_net_inet6_ip6, IPV6CTL_DEFMCASTHLIM, defmcasthlim,
CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_defmcasthlim), 0,
- "Default hop limit for multicast packets");
+ "Default hop limit for IPv6 multicast packets originating from this "
+ "node");
SYSCTL_STRING(_net_inet6_ip6, IPV6CTL_KAME_VERSION, kame_version,
CTLFLAG_RD, __KAME_VERSION, 0,
"KAME version string");
diff --git a/sys/netinet6/ip6_input.c b/sys/netinet6/ip6_input.c
index f5e6cb2..99b5fe6 100644
--- a/sys/netinet6/ip6_input.c
+++ b/sys/netinet6/ip6_input.c
@@ -553,6 +553,7 @@ ip6_input(struct mbuf *m)
struct in6_addr odst;
struct ip6_hdr *ip6;
struct in6_ifaddr *ia;
+ struct ifnet *rcvif;
u_int32_t plen;
u_int32_t rtalert = ~0;
int off = sizeof(struct ip6_hdr), nest;
@@ -562,7 +563,8 @@ ip6_input(struct mbuf *m)
/*
* Drop the packet if IPv6 operation is disabled on the interface.
*/
- if ((ND_IFINFO(m->m_pkthdr.rcvif)->flags & ND6_IFF_IFDISABLED))
+ rcvif = m->m_pkthdr.rcvif;
+ if ((ND_IFINFO(rcvif)->flags & ND6_IFF_IFDISABLED))
goto bad;
#ifdef IPSEC
@@ -599,16 +601,15 @@ ip6_input(struct mbuf *m)
if (m->m_next) {
if (m->m_flags & M_LOOP) {
IP6STAT_INC(ip6s_m2m[V_loif->if_index]);
- } else if (m->m_pkthdr.rcvif->if_index < IP6S_M2MMAX)
- IP6STAT_INC(
- ip6s_m2m[m->m_pkthdr.rcvif->if_index]);
+ } else if (rcvif->if_index < IP6S_M2MMAX)
+ IP6STAT_INC(ip6s_m2m[rcvif->if_index]);
else
IP6STAT_INC(ip6s_m2m[0]);
} else
IP6STAT_INC(ip6s_m1);
}
- in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_receive);
+ in6_ifstat_inc(rcvif, ifs6_in_receive);
IP6STAT_INC(ip6s_total);
#ifndef PULLDOWN_TEST
@@ -624,10 +625,8 @@ ip6_input(struct mbuf *m)
n = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
else
n = m_gethdr(M_NOWAIT, MT_DATA);
- if (n == NULL) {
- m_freem(m);
- return; /* ENOBUFS */
- }
+ if (n == NULL)
+ goto bad;
m_move_pkthdr(n, m);
m_copydata(m, 0, n->m_pkthdr.len, mtod(n, caddr_t));
@@ -639,26 +638,22 @@ ip6_input(struct mbuf *m)
#endif
if (m->m_len < sizeof(struct ip6_hdr)) {
- struct ifnet *inifp;
- inifp = m->m_pkthdr.rcvif;
if ((m = m_pullup(m, sizeof(struct ip6_hdr))) == NULL) {
IP6STAT_INC(ip6s_toosmall);
- in6_ifstat_inc(inifp, ifs6_in_hdrerr);
- return;
+ in6_ifstat_inc(rcvif, ifs6_in_hdrerr);
+ goto bad;
}
}
ip6 = mtod(m, struct ip6_hdr *);
-
if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
IP6STAT_INC(ip6s_badvers);
- in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_hdrerr);
+ in6_ifstat_inc(rcvif, ifs6_in_hdrerr);
goto bad;
}
IP6STAT_INC(ip6s_nxthist[ip6->ip6_nxt]);
-
- IP_PROBE(receive, NULL, NULL, ip6, m->m_pkthdr.rcvif, NULL, ip6);
+ IP_PROBE(receive, NULL, NULL, ip6, rcvif, NULL, ip6);
/*
* Check against address spoofing/corruption.
@@ -669,7 +664,7 @@ ip6_input(struct mbuf *m)
* XXX: "badscope" is not very suitable for a multicast source.
*/
IP6STAT_INC(ip6s_badscope);
- in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_addrerr);
+ in6_ifstat_inc(rcvif, ifs6_in_addrerr);
goto bad;
}
if (IN6_IS_ADDR_MC_INTFACELOCAL(&ip6->ip6_dst) &&
@@ -681,7 +676,7 @@ ip6_input(struct mbuf *m)
* as the outgoing/incoming interface.
*/
IP6STAT_INC(ip6s_badscope);
- in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_addrerr);
+ in6_ifstat_inc(rcvif, ifs6_in_addrerr);
goto bad;
}
if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) &&
@@ -693,7 +688,7 @@ ip6_input(struct mbuf *m)
* a packet is received, it must be silently dropped.
*/
IP6STAT_INC(ip6s_badscope);
- in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_addrerr);
+ in6_ifstat_inc(rcvif, ifs6_in_addrerr);
goto bad;
}
#ifdef ALTQ
@@ -717,7 +712,7 @@ ip6_input(struct mbuf *m)
if (IN6_IS_ADDR_V4MAPPED(&ip6->ip6_src) ||
IN6_IS_ADDR_V4MAPPED(&ip6->ip6_dst)) {
IP6STAT_INC(ip6s_badscope);
- in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_addrerr);
+ in6_ifstat_inc(rcvif, ifs6_in_addrerr);
goto bad;
}
#if 0
@@ -735,23 +730,36 @@ ip6_input(struct mbuf *m)
goto bad;
}
#endif
- /* Try to forward the packet, but if we fail continue */
+ /*
+ * Try to forward the packet, but if we fail continue.
+ * ip6_tryforward() does inbound and outbound packet firewall
+ * processing. If firewall has decided that destination becomes
+ * our local address, it sets M_FASTFWD_OURS flag. In this
+ * case skip another inbound firewall processing and update
+ * ip6 pointer.
+ */
+ if (V_ip6_forwarding != 0
#ifdef IPSEC
- if (V_ip6_forwarding != 0 && !key_havesp(IPSEC_DIR_INBOUND) &&
- !key_havesp(IPSEC_DIR_OUTBOUND))
- if (ip6_tryforward(m) == NULL)
+ && !key_havesp(IPSEC_DIR_INBOUND)
+ && !key_havesp(IPSEC_DIR_OUTBOUND)
+#endif
+ ) {
+ if ((m = ip6_tryforward(m)) == NULL)
return;
+ if (m->m_flags & M_FASTFWD_OURS) {
+ m->m_flags &= ~M_FASTFWD_OURS;
+ ours = 1;
+ ip6 = mtod(m, struct ip6_hdr *);
+ goto hbhcheck;
+ }
+ }
+#ifdef IPSEC
/*
* Bypass packet filtering for packets previously handled by IPsec.
*/
if (ip6_ipsec_filtertunnel(m))
goto passin;
-#else
- if (V_ip6_forwarding != 0)
- if (ip6_tryforward(m) == NULL)
- return;
-#endif /* IPSEC */
-
+#endif
/*
* Run through list of hooks for input packets.
*
@@ -759,12 +767,12 @@ ip6_input(struct mbuf *m)
* (e.g. by NAT rewriting). When this happens,
* tell ip6_forward to do the right thing.
*/
- odst = ip6->ip6_dst;
/* Jump over all PFIL processing if hooks are not active. */
if (!PFIL_HOOKED(&V_inet6_pfil_hook))
goto passin;
+ odst = ip6->ip6_dst;
if (pfil_run_hooks(&V_inet6_pfil_hook, &m,
m->m_pkthdr.rcvif, PFIL_IN, NULL))
return;
@@ -804,8 +812,8 @@ passin:
IP6STAT_INC(ip6s_badscope); /* XXX */
goto bad;
}
- if (in6_setscope(&ip6->ip6_src, m->m_pkthdr.rcvif, NULL) ||
- in6_setscope(&ip6->ip6_dst, m->m_pkthdr.rcvif, NULL)) {
+ if (in6_setscope(&ip6->ip6_src, rcvif, NULL) ||
+ in6_setscope(&ip6->ip6_dst, rcvif, NULL)) {
IP6STAT_INC(ip6s_badscope);
goto bad;
}
@@ -815,7 +823,7 @@ passin:
*/
if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst)) {
ours = 1;
- in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_mcast);
+ in6_ifstat_inc(rcvif, ifs6_in_mcast);
goto hbhcheck;
}
/*
@@ -850,7 +858,6 @@ passin:
*/
if (!V_ip6_forwarding) {
IP6STAT_INC(ip6s_cantforward);
- in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_discard);
goto bad;
}
@@ -882,7 +889,7 @@ passin:
*/
if (m->m_pkthdr.len - sizeof(struct ip6_hdr) < plen) {
IP6STAT_INC(ip6s_tooshort);
- in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_truncated);
+ in6_ifstat_inc(rcvif, ifs6_in_truncated);
goto bad;
}
if (m->m_pkthdr.len > sizeof(struct ip6_hdr) + plen) {
@@ -909,10 +916,8 @@ passin:
* XXX TODO: Check hlim and multicast scope here to avoid
* unnecessarily calling into ip6_mforward().
*/
- if (ip6_mforward &&
- ip6_mforward(ip6, m->m_pkthdr.rcvif, m)) {
+ if (ip6_mforward && ip6_mforward(ip6, rcvif, m)) {
IP6STAT_INC(ip6s_cantforward);
- in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_discard);
goto bad;
}
} else if (!ours) {
@@ -934,7 +939,7 @@ passin:
if (IN6_IS_ADDR_V4MAPPED(&ip6->ip6_src) ||
IN6_IS_ADDR_V4MAPPED(&ip6->ip6_dst)) {
IP6STAT_INC(ip6s_badscope);
- in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_addrerr);
+ in6_ifstat_inc(rcvif, ifs6_in_addrerr);
goto bad;
}
@@ -942,7 +947,7 @@ passin:
* Tell launch routine the next header
*/
IP6STAT_INC(ip6s_delivered);
- in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_deliver);
+ in6_ifstat_inc(rcvif, ifs6_in_deliver);
nest = 0;
while (nxt != IPPROTO_DONE) {
@@ -957,7 +962,7 @@ passin:
*/
if (m->m_pkthdr.len < off) {
IP6STAT_INC(ip6s_tooshort);
- in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_truncated);
+ in6_ifstat_inc(rcvif, ifs6_in_truncated);
goto bad;
}
@@ -975,7 +980,9 @@ passin:
}
return;
bad:
- m_freem(m);
+ in6_ifstat_inc(rcvif, ifs6_in_discard);
+ if (m != NULL)
+ m_freem(m);
}
/*
diff --git a/sys/netipsec/ipsec.c b/sys/netipsec/ipsec.c
index 6cdfc65..413455f 100644
--- a/sys/netipsec/ipsec.c
+++ b/sys/netipsec/ipsec.c
@@ -241,7 +241,7 @@ SYSCTL_VNET_PCPUSTAT(_net_inet6_ipsec6, IPSECCTL_STATS, ipsecstats,
#endif /* INET6 */
static int ipsec_in_reject(struct secpolicy *, const struct mbuf *);
-static int ipsec_setspidx_inpcb(const struct mbuf *, struct inpcb *);
+static int ipsec_setspidx_inpcb(const struct mbuf *, struct inpcb *, u_int);
static int ipsec_setspidx(const struct mbuf *, struct secpolicyindex *, int);
static void ipsec4_get_ulp(const struct mbuf *m, struct secpolicyindex *, int);
static int ipsec4_setspidx_ipaddr(const struct mbuf *, struct secpolicyindex *);
@@ -344,7 +344,7 @@ ipsec_getpolicybysock(const struct mbuf *m, u_int dir, struct inpcb *inp,
}
/* Set spidx in pcb. */
- *error = ipsec_setspidx_inpcb(m, inp);
+ *error = ipsec_setspidx_inpcb(m, inp, dir);
if (*error)
return (NULL);
@@ -501,8 +501,9 @@ ipsec4_checkpolicy(const struct mbuf *m, u_int dir, int *error,
}
static int
-ipsec_setspidx_inpcb(const struct mbuf *m, struct inpcb *inp)
+ipsec_setspidx_inpcb(const struct mbuf *m, struct inpcb *inp, u_int dir)
{
+ struct secpolicyindex *spidx;
int error;
IPSEC_ASSERT(inp != NULL, ("null inp"));
@@ -510,11 +511,13 @@ ipsec_setspidx_inpcb(const struct mbuf *m, struct inpcb *inp)
IPSEC_ASSERT(inp->inp_sp->sp_out != NULL && inp->inp_sp->sp_in != NULL,
("null sp_in || sp_out"));
- error = ipsec_setspidx(m, &inp->inp_sp->sp_in->spidx, 1);
+ if (dir == IPSEC_DIR_INBOUND)
+ spidx = &inp->inp_sp->sp_in->spidx;
+ else
+ spidx = &inp->inp_sp->sp_out->spidx;
+ error = ipsec_setspidx(m, spidx, 1);
if (error == 0) {
- inp->inp_sp->sp_in->spidx.dir = IPSEC_DIR_INBOUND;
- inp->inp_sp->sp_out->spidx = inp->inp_sp->sp_in->spidx;
- inp->inp_sp->sp_out->spidx.dir = IPSEC_DIR_OUTBOUND;
+ spidx->dir = dir;
} else {
bzero(&inp->inp_sp->sp_in->spidx,
sizeof (inp->inp_sp->sp_in->spidx));
diff --git a/sys/netpfil/ipfw/ip_fw_private.h b/sys/netpfil/ipfw/ip_fw_private.h
index e90781a..758479f 100644
--- a/sys/netpfil/ipfw/ip_fw_private.h
+++ b/sys/netpfil/ipfw/ip_fw_private.h
@@ -412,7 +412,7 @@ struct ipfw_ifc {
#define IPFW_PF_RUNLOCK(p) IPFW_RUNLOCK(p)
#else /* FreeBSD */
#define IPFW_LOCK_INIT(_chain) do { \
- rm_init(&(_chain)->rwmtx, "IPFW static rules"); \
+ rm_init_flags(&(_chain)->rwmtx, "IPFW static rules", RM_RECURSE); \
rw_init(&(_chain)->uh_lock, "IPFW UH lock"); \
} while (0)
diff --git a/sys/ofed/drivers/net/mlx4/main.c b/sys/ofed/drivers/net/mlx4/main.c
index 78e8f1b..c4e9cb4 100644
--- a/sys/ofed/drivers/net/mlx4/main.c
+++ b/sys/ofed/drivers/net/mlx4/main.c
@@ -3635,47 +3635,61 @@ int mlx4_restart_one(struct pci_dev *pdev)
static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = {
/* MT25408 "Hermon" SDR */
- { PCI_VDEVICE(MELLANOX, 0x6340), MLX4_PCI_DEV_FORCE_SENSE_PORT },
+ { PCI_VDEVICE(MELLANOX, 0x6340),
+ .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
/* MT25408 "Hermon" DDR */
- { PCI_VDEVICE(MELLANOX, 0x634a), MLX4_PCI_DEV_FORCE_SENSE_PORT },
+ { PCI_VDEVICE(MELLANOX, 0x634a),
+ .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
/* MT25408 "Hermon" QDR */
- { PCI_VDEVICE(MELLANOX, 0x6354), MLX4_PCI_DEV_FORCE_SENSE_PORT },
+ { PCI_VDEVICE(MELLANOX, 0x6354),
+ .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
/* MT25408 "Hermon" DDR PCIe gen2 */
- { PCI_VDEVICE(MELLANOX, 0x6732), MLX4_PCI_DEV_FORCE_SENSE_PORT },
+ { PCI_VDEVICE(MELLANOX, 0x6732),
+ .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
/* MT25408 "Hermon" QDR PCIe gen2 */
- { PCI_VDEVICE(MELLANOX, 0x673c), MLX4_PCI_DEV_FORCE_SENSE_PORT },
+ { PCI_VDEVICE(MELLANOX, 0x673c),
+ .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
/* MT25408 "Hermon" EN 10GigE */
- { PCI_VDEVICE(MELLANOX, 0x6368), MLX4_PCI_DEV_FORCE_SENSE_PORT },
+ { PCI_VDEVICE(MELLANOX, 0x6368),
+ .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
/* MT25408 "Hermon" EN 10GigE PCIe gen2 */
- { PCI_VDEVICE(MELLANOX, 0x6750), MLX4_PCI_DEV_FORCE_SENSE_PORT },
+ { PCI_VDEVICE(MELLANOX, 0x6750),
+ .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
/* MT25458 ConnectX EN 10GBASE-T 10GigE */
- { PCI_VDEVICE(MELLANOX, 0x6372), MLX4_PCI_DEV_FORCE_SENSE_PORT },
+ { PCI_VDEVICE(MELLANOX, 0x6372),
+ .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
/* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
- { PCI_VDEVICE(MELLANOX, 0x675a), MLX4_PCI_DEV_FORCE_SENSE_PORT },
+ { PCI_VDEVICE(MELLANOX, 0x675a),
+ .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
/* MT26468 ConnectX EN 10GigE PCIe gen2*/
- { PCI_VDEVICE(MELLANOX, 0x6764), MLX4_PCI_DEV_FORCE_SENSE_PORT },
+ { PCI_VDEVICE(MELLANOX, 0x6764),
+ .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
/* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
- { PCI_VDEVICE(MELLANOX, 0x6746), MLX4_PCI_DEV_FORCE_SENSE_PORT },
+ { PCI_VDEVICE(MELLANOX, 0x6746),
+ .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
/* MT26478 ConnectX2 40GigE PCIe gen2 */
- { PCI_VDEVICE(MELLANOX, 0x676e), MLX4_PCI_DEV_FORCE_SENSE_PORT },
+ { PCI_VDEVICE(MELLANOX, 0x676e),
+ .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
/* MT25400 Family [ConnectX-2 Virtual Function] */
- { PCI_VDEVICE(MELLANOX, 0x1002), MLX4_PCI_DEV_IS_VF },
+ { PCI_VDEVICE(MELLANOX, 0x1002),
+ .driver_data = MLX4_PCI_DEV_IS_VF },
/* MT27500 Family [ConnectX-3] */
- { PCI_VDEVICE(MELLANOX, 0x1003), 0 },
+ { PCI_VDEVICE(MELLANOX, 0x1003) },
/* MT27500 Family [ConnectX-3 Virtual Function] */
- { PCI_VDEVICE(MELLANOX, 0x1004), MLX4_PCI_DEV_IS_VF },
- { PCI_VDEVICE(MELLANOX, 0x1005), 0 }, /* MT27510 Family */
- { PCI_VDEVICE(MELLANOX, 0x1006), 0 }, /* MT27511 Family */
- { PCI_VDEVICE(MELLANOX, 0x1007), 0 }, /* MT27520 Family */
- { PCI_VDEVICE(MELLANOX, 0x1008), 0 }, /* MT27521 Family */
- { PCI_VDEVICE(MELLANOX, 0x1009), 0 }, /* MT27530 Family */
- { PCI_VDEVICE(MELLANOX, 0x100a), 0 }, /* MT27531 Family */
- { PCI_VDEVICE(MELLANOX, 0x100b), 0 }, /* MT27540 Family */
- { PCI_VDEVICE(MELLANOX, 0x100c), 0 }, /* MT27541 Family */
- { PCI_VDEVICE(MELLANOX, 0x100d), 0 }, /* MT27550 Family */
- { PCI_VDEVICE(MELLANOX, 0x100e), 0 }, /* MT27551 Family */
- { PCI_VDEVICE(MELLANOX, 0x100f), 0 }, /* MT27560 Family */
- { PCI_VDEVICE(MELLANOX, 0x1010), 0 }, /* MT27561 Family */
+ { PCI_VDEVICE(MELLANOX, 0x1004),
+ .driver_data = MLX4_PCI_DEV_IS_VF },
+ { PCI_VDEVICE(MELLANOX, 0x1005) }, /* MT27510 Family */
+ { PCI_VDEVICE(MELLANOX, 0x1006) }, /* MT27511 Family */
+ { PCI_VDEVICE(MELLANOX, 0x1007) }, /* MT27520 Family */
+ { PCI_VDEVICE(MELLANOX, 0x1008) }, /* MT27521 Family */
+ { PCI_VDEVICE(MELLANOX, 0x1009) }, /* MT27530 Family */
+ { PCI_VDEVICE(MELLANOX, 0x100a) }, /* MT27531 Family */
+ { PCI_VDEVICE(MELLANOX, 0x100b) }, /* MT27540 Family */
+ { PCI_VDEVICE(MELLANOX, 0x100c) }, /* MT27541 Family */
+ { PCI_VDEVICE(MELLANOX, 0x100d) }, /* MT27550 Family */
+ { PCI_VDEVICE(MELLANOX, 0x100e) }, /* MT27551 Family */
+ { PCI_VDEVICE(MELLANOX, 0x100f) }, /* MT27560 Family */
+ { PCI_VDEVICE(MELLANOX, 0x1010) }, /* MT27561 Family */
{ 0, }
};
diff --git a/sys/powerpc/powerpc/mem.c b/sys/powerpc/powerpc/mem.c
index a7fbfa3..2bc89fd 100644
--- a/sys/powerpc/powerpc/mem.c
+++ b/sys/powerpc/powerpc/mem.c
@@ -179,8 +179,6 @@ memmmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr,
if (dev2unit(dev) == CDEV_MINOR_MEM)
*paddr = offset;
- else if (dev2unit(dev) == CDEV_MINOR_KMEM)
- *paddr = vtophys(offset);
else
return (EFAULT);
diff --git a/sys/sparc64/sparc64/mem.c b/sys/sparc64/sparc64/mem.c
index 6bd5225..a25bef4 100644
--- a/sys/sparc64/sparc64/mem.c
+++ b/sys/sparc64/sparc64/mem.c
@@ -43,7 +43,7 @@ __FBSDID("$FreeBSD$");
/*
* Memory special file
*
- * NOTE: other architectures support mmap()'ing the mem and kmem devices; this
+ * NOTE: other architectures support mmap()'ing the mem device; this
* might cause illegal aliases to be created for the locked kernel page(s), so
* it is not implemented.
*/
diff --git a/sys/sys/cdefs.h b/sys/sys/cdefs.h
index 453b395..179a609 100644
--- a/sys/sys/cdefs.h
+++ b/sys/sys/cdefs.h
@@ -543,22 +543,6 @@
__attribute__((__format__ (__strftime__, fmtarg, firstvararg)))
#endif
-/*
- * FORTIFY_SOURCE, and perhaps other compiler-specific features, require
- * the use of non-standard inlining. In general we should try to avoid
- * using these but GCC-compatible compilers tend to support the extensions
- * well enough to use them in limited cases.
- */
-#if defined(__GNUC_GNU_INLINE__) || defined(__GNUC_STDC_INLINE__)
-#if __GNUC_PREREQ__(4, 3) || __has_attribute(__artificial__)
-#define __gnu_inline __attribute__((__gnu_inline__, __artificial__))
-#else
-#define __gnu_inline __attribute__((__gnu_inline__))
-#endif /* artificial */
-#else
-#define __gnu_inline
-#endif
-
/* Compiler-dependent macros that rely on FreeBSD-specific extensions. */
#if defined(__FreeBSD_cc_version) && __FreeBSD_cc_version >= 300001 && \
defined(__GNUC__) && !defined(__INTEL_COMPILER)
@@ -809,6 +793,13 @@
#if !(defined(__clang__) && __has_feature(nullability))
#define _Nonnull
#define _Nullable
+#define _Null_unspecified
+#define __NULLABILITY_PRAGMA_PUSH
+#define __NULLABILITY_PRAGMA_POP
+#else
+#define __NULLABILITY_PRAGMA_PUSH _Pragma("clang diagnostic push") \
+ _Pragma("clang diagnostic ignored \"-Wnullability-completeness\"")
+#define __NULLABILITY_PRAGMA_POP _Pragma("clang diagnostic pop")
#endif
/*
diff --git a/sys/sys/mount.h b/sys/sys/mount.h
index ae14f6e..12d2e41 100644
--- a/sys/sys/mount.h
+++ b/sys/sys/mount.h
@@ -593,6 +593,7 @@ struct uio;
#ifdef MALLOC_DECLARE
MALLOC_DECLARE(M_MOUNT);
+MALLOC_DECLARE(M_STATFS);
#endif
extern int maxvfsconf; /* highest defined filesystem type */
diff --git a/sys/sys/seq.h b/sys/sys/seq.h
index f54a4a9..7e6de5b 100644
--- a/sys/sys/seq.h
+++ b/sys/sys/seq.h
@@ -59,7 +59,6 @@ typedef uint32_t seq_t;
* lobj = gobj;
* if (seq_consistent(&gobj->seq, seq))
* break;
- * cpu_spinwait();
* }
* foo(lobj);
*/
diff --git a/sys/sys/syscall.h b/sys/sys/syscall.h
index b19126f..a39a9f2 100644
--- a/sys/sys/syscall.h
+++ b/sys/sys/syscall.h
@@ -3,7 +3,7 @@
*
* DO NOT EDIT-- this file is automatically generated.
* $FreeBSD$
- * created from FreeBSD: stable/11/sys/kern/syscalls.master 304977 2016-08-29 05:15:43Z kib
+ * created from FreeBSD: stable/11/sys/kern/syscalls.master 313450 2017-02-08 18:32:35Z jhb
*/
#define SYS_syscall 0
diff --git a/sys/sys/syscall.mk b/sys/sys/syscall.mk
index b6f6c21..1a073ef 100644
--- a/sys/sys/syscall.mk
+++ b/sys/sys/syscall.mk
@@ -1,7 +1,7 @@
# FreeBSD system call object files.
# DO NOT EDIT-- this file is automatically generated.
# $FreeBSD$
-# created from FreeBSD: stable/11/sys/kern/syscalls.master 304977 2016-08-29 05:15:43Z kib
+# created from FreeBSD: stable/11/sys/kern/syscalls.master 313450 2017-02-08 18:32:35Z jhb
MIASM = \
syscall.o \
exit.o \
diff --git a/sys/sys/syscallsubr.h b/sys/sys/syscallsubr.h
index 42ee515..eff7aca 100644
--- a/sys/sys/syscallsubr.h
+++ b/sys/sys/syscallsubr.h
@@ -109,7 +109,7 @@ int kern_futimens(struct thread *td, int fd, struct timespec *tptr,
int kern_getdirentries(struct thread *td, int fd, char *buf, u_int count,
long *basep, ssize_t *residp, enum uio_seg bufseg);
int kern_getfsstat(struct thread *td, struct statfs **buf, size_t bufsize,
- size_t *countp, enum uio_seg bufseg, int flags);
+ size_t *countp, enum uio_seg bufseg, int mode);
int kern_getitimer(struct thread *, u_int, struct itimerval *);
int kern_getppid(struct thread *);
int kern_getpeername(struct thread *td, int fd, struct sockaddr **sa,
diff --git a/sys/sys/sysproto.h b/sys/sys/sysproto.h
index 3eb0c00..df6e8fa 100644
--- a/sys/sys/sysproto.h
+++ b/sys/sys/sysproto.h
@@ -3,7 +3,7 @@
*
* DO NOT EDIT-- this file is automatically generated.
* $FreeBSD$
- * created from FreeBSD: stable/11/sys/kern/syscalls.master 304977 2016-08-29 05:15:43Z kib
+ * created from FreeBSD: stable/11/sys/kern/syscalls.master 313450 2017-02-08 18:32:35Z jhb
*/
#ifndef _SYS_SYSPROTO_H_
@@ -1107,7 +1107,7 @@ struct mac_syscall_args {
struct getfsstat_args {
char buf_l_[PADL_(struct statfs *)]; struct statfs * buf; char buf_r_[PADR_(struct statfs *)];
char bufsize_l_[PADL_(long)]; long bufsize; char bufsize_r_[PADR_(long)];
- char flags_l_[PADL_(int)]; int flags; char flags_r_[PADR_(int)];
+ char mode_l_[PADL_(int)]; int mode; char mode_r_[PADR_(int)];
};
struct statfs_args {
char path_l_[PADL_(char *)]; char * path; char path_r_[PADR_(char *)];
@@ -2356,7 +2356,7 @@ int ogetdirentries(struct thread *, struct ogetdirentries_args *);
struct freebsd4_getfsstat_args {
char buf_l_[PADL_(struct ostatfs *)]; struct ostatfs * buf; char buf_r_[PADR_(struct ostatfs *)];
char bufsize_l_[PADL_(long)]; long bufsize; char bufsize_r_[PADR_(long)];
- char flags_l_[PADL_(int)]; int flags; char flags_r_[PADR_(int)];
+ char mode_l_[PADL_(int)]; int mode; char mode_r_[PADR_(int)];
};
struct freebsd4_statfs_args {
char path_l_[PADL_(char *)]; char * path; char path_r_[PADR_(char *)];
diff --git a/sys/sys/unistd.h b/sys/sys/unistd.h
index 2429134..0875509 100644
--- a/sys/sys/unistd.h
+++ b/sys/sys/unistd.h
@@ -65,7 +65,7 @@
#define _POSIX_MONOTONIC_CLOCK 200112L
#define _POSIX_NO_TRUNC 1
#define _POSIX_PRIORITIZED_IO (-1)
-#define _POSIX_PRIORITY_SCHEDULING 200112L
+#define _POSIX_PRIORITY_SCHEDULING 0
#define _POSIX_RAW_SOCKETS 200112L
#define _POSIX_REALTIME_SIGNALS 200112L
#define _POSIX_SEMAPHORES 200112L
diff --git a/sys/tools/embed_mfs.sh b/sys/tools/embed_mfs.sh
index 3f20257..a7ac80c 100644
--- a/sys/tools/embed_mfs.sh
+++ b/sys/tools/embed_mfs.sh
@@ -36,12 +36,12 @@ mfs_size=`stat -f '%z' $2 2> /dev/null`
# If we can't determine MFS image size - bail.
[ -z ${mfs_size} ] && echo "Can't determine MFS image size" && exit 1
-sec_info=`objdump -h $1 2> /dev/null | grep " oldmfs "`
+sec_info=`elfdump -c $1 2> /dev/null | grep -A 5 -E "sh_name: oldmfs$"`
# If we can't find the mfs section within the given kernel - bail.
[ -z "${sec_info}" ] && echo "Can't locate mfs section within kernel" && exit 1
-sec_size=`echo ${sec_info} | awk '{printf("%d", "0x" $3)}' 2> /dev/null`
-sec_start=`echo ${sec_info} | awk '{printf("%d", "0x" $6)}' 2> /dev/null`
+sec_size=`echo "${sec_info}" | awk '/sh_size/ {print $2}' 2> /dev/null`
+sec_start=`echo "${sec_info}" | awk '/sh_offset/ {print $2}' 2> /dev/null`
# If the mfs section size is smaller than the mfs image - bail.
[ ${sec_size} -lt ${mfs_size} ] && echo "MFS image too large" && exit 1
diff --git a/sys/ufs/ffs/ffs_softdep.c b/sys/ufs/ffs/ffs_softdep.c
index 812cc56..94ac101 100644
--- a/sys/ufs/ffs/ffs_softdep.c
+++ b/sys/ufs/ffs/ffs_softdep.c
@@ -11525,7 +11525,8 @@ handle_written_inodeblock(inodedep, bp, flags)
panic("handle_written_inodeblock: bad size");
if (inodedep->id_savednlink > LINK_MAX)
panic("handle_written_inodeblock: Invalid link count "
- "%d for inodedep %p", inodedep->id_savednlink, inodedep);
+ "%jd for inodedep %p", (uintmax_t)inodedep->id_savednlink,
+ inodedep);
if (fstype == UFS1) {
if (dp1->di_nlink != inodedep->id_savednlink) {
dp1->di_nlink = inodedep->id_savednlink;
@@ -14271,13 +14272,14 @@ softdep_error(func, error)
static void
inodedep_print(struct inodedep *inodedep, int verbose)
{
- db_printf("%p fs %p st %x ino %jd inoblk %jd delta %d nlink %d"
+ db_printf("%p fs %p st %x ino %jd inoblk %jd delta %jd nlink %jd"
" saveino %p\n",
inodedep, inodedep->id_fs, inodedep->id_state,
(intmax_t)inodedep->id_ino,
(intmax_t)fsbtodb(inodedep->id_fs,
ino_to_fsba(inodedep->id_fs, inodedep->id_ino)),
- inodedep->id_nlinkdelta, inodedep->id_savednlink,
+ (intmax_t)inodedep->id_nlinkdelta,
+ (intmax_t)inodedep->id_savednlink,
inodedep->id_savedino1);
if (verbose == 0)
diff --git a/sys/ufs/ufs/ufs_vnops.c b/sys/ufs/ufs/ufs_vnops.c
index f7b6ea2..2e8bded 100644
--- a/sys/ufs/ufs/ufs_vnops.c
+++ b/sys/ufs/ufs/ufs_vnops.c
@@ -948,8 +948,8 @@ print_bad_link_count(const char *funcname, struct vnode *dvp)
struct inode *dip;
dip = VTOI(dvp);
- uprintf("%s: Bad link count %d on parent inode %d in file system %s\n",
- funcname, dip->i_effnlink, dip->i_number,
+ uprintf("%s: Bad link count %d on parent inode %jd in file system %s\n",
+ funcname, dip->i_effnlink, (intmax_t)dip->i_number,
dvp->v_mount->mnt_stat.f_mntonname);
}
diff --git a/sys/vm/device_pager.c b/sys/vm/device_pager.c
index bce8b12..0887a70 100644
--- a/sys/vm/device_pager.c
+++ b/sys/vm/device_pager.c
@@ -63,6 +63,8 @@ static int dev_pager_getpages(vm_object_t, vm_page_t *, int, int *, int *);
static void dev_pager_putpages(vm_object_t, vm_page_t *, int, int, int *);
static boolean_t dev_pager_haspage(vm_object_t, vm_pindex_t, int *, int *);
static void dev_pager_free_page(vm_object_t object, vm_page_t m);
+static int dev_pager_populate(vm_object_t object, vm_pindex_t pidx,
+ int fault_type, vm_prot_t, vm_pindex_t *first, vm_pindex_t *last);
/* list of device pager objects */
static struct pagerlst dev_pager_object_list;
@@ -84,6 +86,7 @@ struct pagerops mgtdevicepagerops = {
.pgo_getpages = dev_pager_getpages,
.pgo_putpages = dev_pager_putpages,
.pgo_haspage = dev_pager_haspage,
+ .pgo_populate = dev_pager_populate,
};
static int old_dev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
@@ -127,6 +130,8 @@ cdev_pager_allocate(void *handle, enum obj_type tp, struct cdev_pager_ops *ops,
if (tp != OBJT_DEVICE && tp != OBJT_MGTDEVICE)
return (NULL);
+ KASSERT(tp == OBJT_MGTDEVICE || ops->cdev_pg_populate == NULL,
+ ("populate on unmanaged device pager"));
/*
* Offset should be page aligned.
@@ -179,6 +184,8 @@ cdev_pager_allocate(void *handle, enum obj_type tp, struct cdev_pager_ops *ops,
object->handle = handle;
TAILQ_INSERT_TAIL(&dev_pager_object_list, object,
pager_object_list);
+ if (ops->cdev_pg_populate != NULL)
+ vm_object_set_flag(object, OBJ_POPULATE);
}
} else {
if (pindex > object->size)
@@ -268,6 +275,8 @@ dev_pager_getpages(vm_object_t object, vm_page_t *ma, int count, int *rbehind,
/* Since our haspage reports zero after/before, the count is 1. */
KASSERT(count == 1, ("%s: count %d", __func__, count));
VM_OBJECT_ASSERT_WLOCKED(object);
+ if (object->un_pager.devp.ops->cdev_pg_fault == NULL)
+ return (VM_PAGER_FAIL);
error = object->un_pager.devp.ops->cdev_pg_fault(object,
IDX_TO_OFF(ma[0]->pindex), PROT_READ, &ma[0]);
@@ -293,6 +302,18 @@ dev_pager_getpages(vm_object_t object, vm_page_t *ma, int count, int *rbehind,
}
static int
+dev_pager_populate(vm_object_t object, vm_pindex_t pidx, int fault_type,
+ vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last)
+{
+
+ VM_OBJECT_ASSERT_WLOCKED(object);
+ if (object->un_pager.devp.ops->cdev_pg_populate == NULL)
+ return (VM_PAGER_FAIL);
+ return (object->un_pager.devp.ops->cdev_pg_populate(object, pidx,
+ fault_type, max_prot, first, last));
+}
+
+static int
old_dev_pager_fault(vm_object_t object, vm_ooffset_t offset, int prot,
vm_page_t *mres)
{
diff --git a/sys/vm/phys_pager.c b/sys/vm/phys_pager.c
index f3b23b0..7adc7c6 100644
--- a/sys/vm/phys_pager.c
+++ b/sys/vm/phys_pager.c
@@ -41,6 +41,7 @@ __FBSDID("$FreeBSD$");
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
+#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
/* list of phys pager objects */
@@ -98,6 +99,7 @@ phys_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
object = object1;
object1 = NULL;
object->handle = handle;
+ vm_object_set_flag(object, OBJ_POPULATE);
TAILQ_INSERT_TAIL(&phys_pager_object_list,
object, pager_object_list);
}
@@ -109,6 +111,7 @@ phys_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
vm_object_deallocate(object1);
} else {
object = vm_object_allocate(OBJT_PHYS, pindex);
+ vm_object_set_flag(object, OBJ_POPULATE);
}
return (object);
@@ -157,32 +160,101 @@ phys_pager_getpages(vm_object_t object, vm_page_t *m, int count, int *rbehind,
return (VM_PAGER_OK);
}
-static void
-phys_pager_putpages(vm_object_t object, vm_page_t *m, int count, boolean_t sync,
- int *rtvals)
-{
-
- panic("phys_pager_putpage called");
-}
-
/*
* Implement a pretty aggressive clustered getpages strategy. Hint that
* everything in an entire 4MB window should be prefaulted at once.
*
- * XXX 4MB (1024 slots per page table page) is convenient for x86,
+ * 4MB (1024 slots per page table page) is convenient for x86,
* but may not be for other arches.
*/
#ifndef PHYSCLUSTER
#define PHYSCLUSTER 1024
#endif
+static int phys_pager_cluster = PHYSCLUSTER;
+SYSCTL_INT(_vm, OID_AUTO, phys_pager_cluster, CTLFLAG_RWTUN,
+ &phys_pager_cluster, 0,
+ "prefault window size for phys pager");
+
+/*
+ * Max hint to vm_page_alloc() about the further allocation needs
+ * inside the phys_pager_populate() loop. The number of bits used to
+ * implement VM_ALLOC_COUNT() determines the hard limit on this value.
+ * That limit is currently 65535.
+ */
+#define PHYSALLOC 16
+
+static int
+phys_pager_populate(vm_object_t object, vm_pindex_t pidx,
+ int fault_type __unused, vm_prot_t max_prot __unused, vm_pindex_t *first,
+ vm_pindex_t *last)
+{
+ vm_page_t m;
+ vm_pindex_t base, end, i;
+ int ahead;
+
+ base = rounddown(pidx, phys_pager_cluster);
+ end = base + phys_pager_cluster - 1;
+ if (end >= object->size)
+ end = object->size - 1;
+ if (*first > base)
+ base = *first;
+ if (end > *last)
+ end = *last;
+ *first = base;
+ *last = end;
+
+ for (i = base; i <= end; i++) {
+retry:
+ m = vm_page_lookup(object, i);
+ if (m == NULL) {
+ ahead = MIN(end - i, PHYSALLOC);
+ m = vm_page_alloc(object, i, VM_ALLOC_NORMAL |
+ VM_ALLOC_ZERO | VM_ALLOC_COUNT(ahead));
+ if (m == NULL) {
+ VM_OBJECT_WUNLOCK(object);
+ VM_WAIT;
+ VM_OBJECT_WLOCK(object);
+ goto retry;
+ }
+ if ((m->flags & PG_ZERO) == 0)
+ pmap_zero_page(m);
+ m->valid = VM_PAGE_BITS_ALL;
+ } else if (vm_page_xbusied(m)) {
+ vm_page_lock(m);
+ VM_OBJECT_WUNLOCK(object);
+ vm_page_busy_sleep(m, "physb", true);
+ VM_OBJECT_WLOCK(object);
+ goto retry;
+ } else {
+ vm_page_xbusy(m);
+ if (m->valid != VM_PAGE_BITS_ALL)
+ vm_page_zero_invalid(m, TRUE);
+ }
+
+ KASSERT(m->valid == VM_PAGE_BITS_ALL,
+ ("phys_pager_populate: partially valid page %p", m));
+ KASSERT(m->dirty == 0,
+ ("phys_pager_populate: dirty page %p", m));
+ }
+ return (VM_PAGER_OK);
+}
+
+static void
+phys_pager_putpages(vm_object_t object, vm_page_t *m, int count, boolean_t sync,
+ int *rtvals)
+{
+
+ panic("phys_pager_putpage called");
+}
+
static boolean_t
phys_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
int *after)
{
vm_pindex_t base, end;
- base = rounddown2(pindex, PHYSCLUSTER);
- end = base + (PHYSCLUSTER - 1);
+ base = rounddown(pindex, phys_pager_cluster);
+ end = base + phys_pager_cluster - 1;
if (before != NULL)
*before = pindex - base;
if (after != NULL)
@@ -197,4 +269,5 @@ struct pagerops physpagerops = {
.pgo_getpages = phys_pager_getpages,
.pgo_putpages = phys_pager_putpages,
.pgo_haspage = phys_pager_haspage,
+ .pgo_populate = phys_pager_populate,
};
diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c
index e54db2a..90ed297 100644
--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -845,8 +845,7 @@ static void
keg_drain(uma_keg_t keg)
{
struct slabhead freeslabs = { 0 };
- uma_slab_t slab;
- uma_slab_t n;
+ uma_slab_t slab, tmp;
/*
* We don't want to take pages from statically allocated kegs at this
@@ -862,15 +861,10 @@ keg_drain(uma_keg_t keg)
if (keg->uk_free == 0)
goto finished;
- slab = LIST_FIRST(&keg->uk_free_slab);
- while (slab) {
- n = LIST_NEXT(slab, us_link);
-
- /* We have no where to free these to */
- if (slab->us_flags & UMA_SLAB_BOOT) {
- slab = n;
+ LIST_FOREACH_SAFE(slab, &keg->uk_free_slab, us_link, tmp) {
+ /* We have nowhere to free these to. */
+ if (slab->us_flags & UMA_SLAB_BOOT)
continue;
- }
LIST_REMOVE(slab, us_link);
keg->uk_pages -= keg->uk_ppera;
@@ -880,8 +874,6 @@ keg_drain(uma_keg_t keg)
UMA_HASH_REMOVE(&keg->uk_hash, slab, slab->us_data);
SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink);
-
- slab = n;
}
finished:
KEG_UNLOCK(keg);
diff --git a/sys/vm/vm_domain.c b/sys/vm/vm_domain.c
index 4bf5cfd..4945496 100644
--- a/sys/vm/vm_domain.c
+++ b/sys/vm/vm_domain.c
@@ -140,7 +140,6 @@ vm_domain_policy_localcopy(struct vm_domain_policy *dst,
*dst = *src;
if (seq_consistent(&src->seq, seq))
return;
- cpu_spinwait();
}
}
@@ -168,7 +167,6 @@ vm_domain_policy_copy(struct vm_domain_policy *dst,
seq_write_end(&dst->seq);
return;
}
- cpu_spinwait();
}
}
@@ -330,7 +328,6 @@ vm_domain_iterator_set_policy(struct vm_domain_iterator *vi,
_vm_domain_iterator_set_policy(vi, &vt_lcl);
return;
}
- cpu_spinwait();
}
}
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index b5c4fdc..7d44af6 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -289,6 +289,157 @@ vm_fault_soft_fast(struct faultstate *fs, vm_offset_t vaddr, vm_prot_t prot,
return (KERN_SUCCESS);
}
+static void
+vm_fault_restore_map_lock(struct faultstate *fs)
+{
+
+ VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
+ MPASS(fs->first_object->paging_in_progress > 0);
+
+ if (!vm_map_trylock_read(fs->map)) {
+ VM_OBJECT_WUNLOCK(fs->first_object);
+ vm_map_lock_read(fs->map);
+ VM_OBJECT_WLOCK(fs->first_object);
+ }
+ fs->lookup_still_valid = true;
+}
+
+static void
+vm_fault_populate_check_page(vm_page_t m)
+{
+
+ /*
+ * Check each page to ensure that the pager is obeying the
+ * interface: the page must be installed in the object, fully
+ * valid, and exclusively busied.
+ */
+ MPASS(m != NULL);
+ MPASS(m->valid == VM_PAGE_BITS_ALL);
+ MPASS(vm_page_xbusied(m));
+}
+
+static void
+vm_fault_populate_cleanup(vm_object_t object, vm_pindex_t first,
+ vm_pindex_t last)
+{
+ vm_page_t m;
+ vm_pindex_t pidx;
+
+ VM_OBJECT_ASSERT_WLOCKED(object);
+ MPASS(first <= last);
+ for (pidx = first, m = vm_page_lookup(object, pidx);
+ pidx <= last; pidx++, m = vm_page_next(m)) {
+ vm_fault_populate_check_page(m);
+ vm_page_lock(m);
+ vm_page_deactivate(m);
+ vm_page_unlock(m);
+ vm_page_xunbusy(m);
+ }
+}
+
+static int
+vm_fault_populate(struct faultstate *fs, vm_offset_t vaddr, vm_prot_t prot,
+ int fault_type, int fault_flags, boolean_t wired, vm_page_t *m_hold)
+{
+ vm_page_t m;
+ vm_pindex_t map_first, map_last, pager_first, pager_last, pidx;
+ int rv;
+
+ MPASS(fs->object == fs->first_object);
+ VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
+ MPASS(fs->first_object->paging_in_progress > 0);
+ MPASS(fs->first_object->backing_object == NULL);
+ MPASS(fs->lookup_still_valid);
+
+ pager_first = OFF_TO_IDX(fs->entry->offset);
+ pager_last = OFF_TO_IDX(fs->entry->offset + fs->entry->end -
+ fs->entry->start) - 1;
+ unlock_map(fs);
+ unlock_vp(fs);
+
+ /*
+ * Call the pager (driver) populate() method.
+ *
+ * There is no guarantee that the method will be called again
+ * if the current fault is for read, and a future fault is
+ * for write. Report the entry's maximum allowed protection
+ * to the driver.
+ */
+ rv = vm_pager_populate(fs->first_object, fs->first_pindex,
+ fault_type, fs->entry->max_protection, &pager_first, &pager_last);
+
+ VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
+ if (rv == VM_PAGER_BAD) {
+ /*
+ * VM_PAGER_BAD is the backdoor for a pager to request
+ * normal fault handling.
+ */
+ vm_fault_restore_map_lock(fs);
+ if (fs->map->timestamp != fs->map_generation)
+ return (KERN_RESOURCE_SHORTAGE); /* RetryFault */
+ return (KERN_NOT_RECEIVER);
+ }
+ if (rv != VM_PAGER_OK)
+ return (KERN_FAILURE); /* AKA SIGSEGV */
+
+ /* Ensure that the driver is obeying the interface. */
+ MPASS(pager_first <= pager_last);
+ MPASS(fs->first_pindex <= pager_last);
+ MPASS(fs->first_pindex >= pager_first);
+ MPASS(pager_last < fs->first_object->size);
+
+ vm_fault_restore_map_lock(fs);
+ if (fs->map->timestamp != fs->map_generation) {
+ vm_fault_populate_cleanup(fs->first_object, pager_first,
+ pager_last);
+ return (KERN_RESOURCE_SHORTAGE); /* RetryFault */
+ }
+
+ /*
+ * The map is unchanged after our last unlock. Process the fault.
+ *
+ * The range [pager_first, pager_last] that is given to the
+ * pager is only a hint. The pager may populate any range
+ * within the object that includes the requested page index.
+ * In case the pager expanded the range, clip it to fit into
+ * the map entry.
+ */
+ map_first = MAX(OFF_TO_IDX(fs->entry->offset), pager_first);
+ if (map_first > pager_first)
+ vm_fault_populate_cleanup(fs->first_object, pager_first,
+ map_first - 1);
+ map_last = MIN(OFF_TO_IDX(fs->entry->end - fs->entry->start +
+ fs->entry->offset), pager_last);
+ if (map_last < pager_last)
+ vm_fault_populate_cleanup(fs->first_object, map_last + 1,
+ pager_last);
+
+ for (pidx = map_first, m = vm_page_lookup(fs->first_object, pidx);
+ pidx <= map_last; pidx++, m = vm_page_next(m)) {
+ vm_fault_populate_check_page(m);
+ vm_fault_dirty(fs->entry, m, prot, fault_type, fault_flags,
+ true);
+ VM_OBJECT_WUNLOCK(fs->first_object);
+ pmap_enter(fs->map->pmap, fs->entry->start + IDX_TO_OFF(pidx) -
+ fs->entry->offset, m, prot, fault_type | (wired ?
+ PMAP_ENTER_WIRED : 0), 0);
+ VM_OBJECT_WLOCK(fs->first_object);
+ if (pidx == fs->first_pindex)
+ vm_fault_fill_hold(m_hold, m);
+ vm_page_lock(m);
+ if ((fault_flags & VM_FAULT_WIRE) != 0) {
+ KASSERT(wired, ("VM_FAULT_WIRE && !wired"));
+ vm_page_wire(m);
+ } else {
+ vm_page_activate(m);
+ }
+ vm_page_unlock(m);
+ vm_page_xunbusy(m);
+ }
+ curthread->td_ru.ru_majflt++;
+ return (KERN_SUCCESS);
+}
+
/*
* vm_fault:
*
@@ -554,6 +705,30 @@ RetryFault:;
return (KERN_PROTECTION_FAILURE);
}
+ if (fs.object == fs.first_object &&
+ (fs.first_object->flags & OBJ_POPULATE) != 0 &&
+ fs.first_object->shadow_count == 0) {
+ rv = vm_fault_populate(&fs, vaddr, prot,
+ fault_type, fault_flags, wired, m_hold);
+ switch (rv) {
+ case KERN_SUCCESS:
+ case KERN_FAILURE:
+ unlock_and_deallocate(&fs);
+ return (rv);
+ case KERN_RESOURCE_SHORTAGE:
+ unlock_and_deallocate(&fs);
+ goto RetryFault;
+ case KERN_NOT_RECEIVER:
+ /*
+ * Pager's populate() method
+ * returned VM_PAGER_BAD.
+ */
+ break;
+ default:
+ panic("inconsistent return codes");
+ }
+ }
+
/*
* Allocate a new page for this object/offset pair.
*
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 66f619d..3b237c7 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -4313,7 +4313,7 @@ DB_SHOW_COMMAND(procvm, procvm)
struct proc *p;
if (have_addr) {
- p = (struct proc *) addr;
+ p = db_lookup_proc(addr);
} else {
p = curproc;
}
diff --git a/sys/vm/vm_object.h b/sys/vm/vm_object.h
index 73998aa..2377eec 100644
--- a/sys/vm/vm_object.h
+++ b/sys/vm/vm_object.h
@@ -182,6 +182,7 @@ struct vm_object {
*/
#define OBJ_FICTITIOUS 0x0001 /* (c) contains fictitious pages */
#define OBJ_UNMANAGED 0x0002 /* (c) contains unmanaged pages */
+#define OBJ_POPULATE 0x0004 /* pager implements populate() */
#define OBJ_DEAD 0x0008 /* dead objects (during rundown) */
#define OBJ_NOSPLIT 0x0010 /* dont split this object */
#define OBJ_UMTXDEAD 0x0020 /* umtx pshared was terminated */
@@ -194,8 +195,8 @@ struct vm_object {
#define OBJ_DISCONNECTWNT 0x4000 /* disconnect from vnode wanted */
#define OBJ_TMPFS 0x8000 /* has tmpfs vnode allocated */
-#define IDX_TO_OFF(idx) (((vm_ooffset_t)(idx)) << PAGE_SHIFT)
-#define OFF_TO_IDX(off) ((vm_pindex_t)(((vm_ooffset_t)(off)) >> PAGE_SHIFT))
+#define IDX_TO_OFF(idx) (((vm_ooffset_t)(idx)) << PAGE_SHIFT)
+#define OFF_TO_IDX(off) ((vm_pindex_t)(((vm_ooffset_t)(off)) >> PAGE_SHIFT))
#ifdef _KERNEL
diff --git a/sys/vm/vm_pager.h b/sys/vm/vm_pager.h
index f73cd00..506e2bc 100644
--- a/sys/vm/vm_pager.h
+++ b/sys/vm/vm_pager.h
@@ -56,6 +56,8 @@ typedef int pgo_getpages_async_t(vm_object_t, vm_page_t *, int, int *, int *,
pgo_getpages_iodone_t, void *);
typedef void pgo_putpages_t(vm_object_t, vm_page_t *, int, int, int *);
typedef boolean_t pgo_haspage_t(vm_object_t, vm_pindex_t, int *, int *);
+typedef int pgo_populate_t(vm_object_t, vm_pindex_t, int, vm_prot_t,
+ vm_pindex_t *, vm_pindex_t *);
typedef void pgo_pageunswapped_t(vm_page_t);
struct pagerops {
@@ -66,6 +68,7 @@ struct pagerops {
pgo_getpages_async_t *pgo_getpages_async; /* Get page asyncly. */
pgo_putpages_t *pgo_putpages; /* Put (write) page. */
pgo_haspage_t *pgo_haspage; /* Query page. */
+ pgo_populate_t *pgo_populate; /* Bulk spec pagein. */
pgo_pageunswapped_t *pgo_pageunswapped;
};
@@ -151,6 +154,19 @@ vm_pager_has_page(
return (ret);
}
+static __inline int
+vm_pager_populate(vm_object_t object, vm_pindex_t pidx, int fault_type,
+ vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last)
+{
+
+ MPASS((object->flags & OBJ_POPULATE) != 0);
+ MPASS(pidx < object->size);
+ MPASS(object->paging_in_progress > 0);
+ return ((*pagertab[object->type]->pgo_populate)(object, pidx,
+ fault_type, max_prot, first, last));
+}
+
+
/*
* vm_pager_page_unswapped
*
@@ -176,6 +192,9 @@ vm_pager_page_unswapped(vm_page_t m)
struct cdev_pager_ops {
int (*cdev_pg_fault)(vm_object_t vm_obj, vm_ooffset_t offset,
int prot, vm_page_t *mres);
+ int (*cdev_pg_populate)(vm_object_t vm_obj, vm_pindex_t pidx,
+ int fault_type, vm_prot_t max_prot, vm_pindex_t *first,
+ vm_pindex_t *last);
int (*cdev_pg_ctor)(void *handle, vm_ooffset_t size, vm_prot_t prot,
vm_ooffset_t foff, struct ucred *cred, u_short *color);
void (*cdev_pg_dtor)(void *handle);
diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c
index a80a9c2..aab7ad6 100644
--- a/sys/vm/vnode_pager.c
+++ b/sys/vm/vnode_pager.c
@@ -265,7 +265,7 @@ retry:
#endif
VM_OBJECT_WUNLOCK(object);
}
- vref(vp);
+ vrefact(vp);
return (object);
}
diff --git a/sys/x86/acpica/acpi_wakeup.c b/sys/x86/acpica/acpi_wakeup.c
index 869f2eb..d551b02 100644
--- a/sys/x86/acpica/acpi_wakeup.c
+++ b/sys/x86/acpica/acpi_wakeup.c
@@ -88,7 +88,7 @@ static cpuset_t suspcpus;
static struct susppcb **susppcbs;
#endif
-static void *acpi_alloc_wakeup_handler(void);
+static void *acpi_alloc_wakeup_handler(void **);
static void acpi_stop_beep(void *);
#ifdef SMP
@@ -97,18 +97,14 @@ static void acpi_wakeup_cpus(struct acpi_softc *);
#endif
#ifdef __amd64__
-#define ACPI_PAGETABLES 3
+#define ACPI_WAKEPAGES 4
#else
-#define ACPI_PAGETABLES 0
+#define ACPI_WAKEPAGES 1
#endif
-#define WAKECODE_VADDR(sc) \
- ((sc)->acpi_wakeaddr + (ACPI_PAGETABLES * PAGE_SIZE))
-#define WAKECODE_PADDR(sc) \
- ((sc)->acpi_wakephys + (ACPI_PAGETABLES * PAGE_SIZE))
#define WAKECODE_FIXUP(offset, type, val) do { \
type *addr; \
- addr = (type *)(WAKECODE_VADDR(sc) + offset); \
+ addr = (type *)(sc->acpi_wakeaddr + (offset)); \
*addr = val; \
} while (0)
@@ -125,7 +121,7 @@ static int
acpi_wakeup_ap(struct acpi_softc *sc, int cpu)
{
struct pcb *pcb;
- int vector = (WAKECODE_PADDR(sc) >> 12) & 0xff;
+ int vector = (sc->acpi_wakephys >> 12) & 0xff;
int apic_id = cpu_apic_ids[cpu];
int ms;
@@ -168,7 +164,7 @@ acpi_wakeup_cpus(struct acpi_softc *sc)
/* setup a vector to our boot code */
*((volatile u_short *)WARMBOOT_OFF) = WARMBOOT_TARGET;
- *((volatile u_short *)WARMBOOT_SEG) = WAKECODE_PADDR(sc) >> 4;
+ *((volatile u_short *)WARMBOOT_SEG) = sc->acpi_wakephys >> 4;
outb(CMOS_REG, BIOS_RESET);
outb(CMOS_DATA, BIOS_WARM); /* 'warm-start' */
@@ -209,7 +205,7 @@ acpi_sleep_machdep(struct acpi_softc *sc, int state)
if (acpi_resume_beep != 0)
timer_spkr_acquire();
- AcpiSetFirmwareWakingVector(WAKECODE_PADDR(sc), 0);
+ AcpiSetFirmwareWakingVector(sc->acpi_wakephys, 0);
intr_suspend();
@@ -309,11 +305,12 @@ acpi_wakeup_machdep(struct acpi_softc *sc, int state, int sleep_result,
}
static void *
-acpi_alloc_wakeup_handler(void)
+acpi_alloc_wakeup_handler(void *wakepages[ACPI_WAKEPAGES])
{
- void *wakeaddr;
int i;
+ memset(wakepages, 0, ACPI_WAKEPAGES * sizeof(*wakepages));
+
/*
* Specify the region for our wakeup code. We want it in the low 1 MB
* region, excluding real mode IVT (0-0x3ff), BDA (0x400-0x4ff), EBDA
@@ -321,18 +318,18 @@ acpi_alloc_wakeup_handler(void)
* and ROM area (0xa0000 and above). The temporary page tables must be
* page-aligned.
*/
- wakeaddr = contigmalloc((ACPI_PAGETABLES + 1) * PAGE_SIZE, M_DEVBUF,
- M_NOWAIT, 0x500, 0xa0000, PAGE_SIZE, 0ul);
- if (wakeaddr == NULL) {
- printf("%s: can't alloc wake memory\n", __func__);
- return (NULL);
+ for (i = 0; i < ACPI_WAKEPAGES; i++) {
+ wakepages[i] = contigmalloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT,
+ 0x500, 0xa0000, PAGE_SIZE, 0ul);
+ if (wakepages[i] == NULL) {
+ printf("%s: can't alloc wake memory\n", __func__);
+ goto freepages;
+ }
}
if (EVENTHANDLER_REGISTER(power_resume, acpi_stop_beep, NULL,
EVENTHANDLER_PRI_LAST) == NULL) {
printf("%s: can't register event handler\n", __func__);
- contigfree(wakeaddr, (ACPI_PAGETABLES + 1) * PAGE_SIZE,
- M_DEVBUF);
- return (NULL);
+ goto freepages;
}
susppcbs = malloc(mp_ncpus * sizeof(*susppcbs), M_DEVBUF, M_WAITOK);
for (i = 0; i < mp_ncpus; i++) {
@@ -340,39 +337,56 @@ acpi_alloc_wakeup_handler(void)
susppcbs[i]->sp_fpususpend = alloc_fpusave(M_WAITOK);
}
- return (wakeaddr);
+ return (wakepages);
+
+freepages:
+ for (i = 0; i < ACPI_WAKEPAGES; i++)
+ if (wakepages[i] != NULL)
+ contigfree(wakepages[i], PAGE_SIZE, M_DEVBUF);
+ return (NULL);
}
void
acpi_install_wakeup_handler(struct acpi_softc *sc)
{
- static void *wakeaddr = NULL;
+ static void *wakeaddr;
+ void *wakepages[ACPI_WAKEPAGES];
#ifdef __amd64__
uint64_t *pt4, *pt3, *pt2;
+ vm_paddr_t pt4pa, pt3pa, pt2pa;
int i;
#endif
if (wakeaddr != NULL)
return;
- wakeaddr = acpi_alloc_wakeup_handler();
- if (wakeaddr == NULL)
+ if (acpi_alloc_wakeup_handler(wakepages) == NULL)
return;
+ wakeaddr = wakepages[0];
sc->acpi_wakeaddr = (vm_offset_t)wakeaddr;
sc->acpi_wakephys = vtophys(wakeaddr);
- bcopy(wakecode, (void *)WAKECODE_VADDR(sc), sizeof(wakecode));
+#ifdef __amd64__
+ pt4 = wakepages[1];
+ pt3 = wakepages[2];
+ pt2 = wakepages[3];
+ pt4pa = vtophys(pt4);
+ pt3pa = vtophys(pt3);
+ pt2pa = vtophys(pt2);
+#endif
+
+ bcopy(wakecode, (void *)sc->acpi_wakeaddr, sizeof(wakecode));
/* Patch GDT base address, ljmp targets. */
WAKECODE_FIXUP((bootgdtdesc + 2), uint32_t,
- WAKECODE_PADDR(sc) + bootgdt);
+ sc->acpi_wakephys + bootgdt);
WAKECODE_FIXUP((wakeup_sw32 + 2), uint32_t,
- WAKECODE_PADDR(sc) + wakeup_32);
+ sc->acpi_wakephys + wakeup_32);
#ifdef __amd64__
WAKECODE_FIXUP((wakeup_sw64 + 1), uint32_t,
- WAKECODE_PADDR(sc) + wakeup_64);
- WAKECODE_FIXUP(wakeup_pagetables, uint32_t, sc->acpi_wakephys);
+ sc->acpi_wakephys + wakeup_64);
+ WAKECODE_FIXUP(wakeup_pagetables, uint32_t, pt4pa);
#endif
/* Save pointers to some global data. */
@@ -384,33 +398,28 @@ acpi_install_wakeup_handler(struct acpi_softc *sc)
WAKECODE_FIXUP(wakeup_cr3, register_t, vtophys(kernel_pmap->pm_pdir));
#endif
-#else
- /* Build temporary page tables below realmode code. */
- pt4 = wakeaddr;
- pt3 = pt4 + (PAGE_SIZE) / sizeof(uint64_t);
- pt2 = pt3 + (PAGE_SIZE) / sizeof(uint64_t);
-
+#else /* __amd64__ */
/* Create the initial 1GB replicated page tables */
for (i = 0; i < 512; i++) {
/*
* Each slot of the level 4 pages points
* to the same level 3 page
*/
- pt4[i] = (uint64_t)(sc->acpi_wakephys + PAGE_SIZE);
+ pt4[i] = (uint64_t)pt3pa;
pt4[i] |= PG_V | PG_RW | PG_U;
/*
* Each slot of the level 3 pages points
* to the same level 2 page
*/
- pt3[i] = (uint64_t)(sc->acpi_wakephys + (2 * PAGE_SIZE));
+ pt3[i] = (uint64_t)pt2pa;
pt3[i] |= PG_V | PG_RW | PG_U;
/* The level 2 page slots are mapped with 2MB pages for 1GB. */
pt2[i] = i * (2 * 1024 * 1024);
pt2[i] |= PG_V | PG_RW | PG_PS | PG_U;
}
-#endif
+#endif /* !__amd64__ */
if (bootverbose)
device_printf(sc->acpi_dev, "wakeup code va %#jx pa %#jx\n",
diff --git a/sys/x86/x86/mca.c b/sys/x86/x86/mca.c
index 1d99efc..a917cc4 100644
--- a/sys/x86/x86/mca.c
+++ b/sys/x86/x86/mca.c
@@ -247,7 +247,7 @@ mca_error_mmtype(uint16_t mca_error)
return ("???");
}
-static int __nonnull(1)
+static int
mca_mute(const struct mca_record *rec)
{
@@ -276,7 +276,7 @@ mca_mute(const struct mca_record *rec)
}
/* Dump details about a single machine check. */
-static void __nonnull(1)
+static void
mca_log(const struct mca_record *rec)
{
uint16_t mca_error;
@@ -415,7 +415,7 @@ mca_log(const struct mca_record *rec)
printf("MCA: Misc 0x%llx\n", (long long)rec->mr_misc);
}
-static int __nonnull(2)
+static int
mca_check_status(int bank, struct mca_record *rec)
{
uint64_t status;
@@ -482,7 +482,7 @@ mca_refill(void *context, int pending)
mca_fill_freelist();
}
-static void __nonnull(2)
+static void
mca_record_entry(enum scan_mode mode, const struct mca_record *record)
{
struct mca_internal *rec;
OpenPOWER on IntegriCloud