summaryrefslogtreecommitdiffstats
path: root/sys
diff options
context:
space:
mode:
Diffstat (limited to 'sys')
-rw-r--r--sys/amd64/amd64/initcpu.c13
-rw-r--r--sys/amd64/amd64/pmap.c89
-rw-r--r--sys/amd64/include/cpufunc.h7
-rw-r--r--sys/amd64/linux/linux_sysvec.c15
-rw-r--r--sys/amd64/vmm/vmm_dev.c2
-rw-r--r--sys/boot/common/reloc_elf.c1
-rw-r--r--sys/boot/efi/boot1/boot_module.h6
-rw-r--r--sys/boot/efi/include/efiapi.h1
-rw-r--r--sys/boot/forth/loader.conf1
-rw-r--r--sys/boot/libstand32/Makefile188
-rw-r--r--sys/boot/userboot/libstand/Makefile164
-rw-r--r--sys/cam/cam_ccb.h14
-rw-r--r--sys/cam/cam_periph.c5
-rw-r--r--sys/cam/ctl/ctl.c1598
-rw-r--r--sys/cam/ctl/ctl.h10
-rw-r--r--sys/cam/ctl/ctl_backend.c73
-rw-r--r--sys/cam/ctl/ctl_backend.h63
-rw-r--r--sys/cam/ctl/ctl_backend_block.c83
-rw-r--r--sys/cam/ctl/ctl_backend_ramdisk.c880
-rw-r--r--sys/cam/ctl/ctl_cmd_table.c4
-rw-r--r--sys/cam/ctl/ctl_error.c14
-rw-r--r--sys/cam/ctl/ctl_error.h1
-rw-r--r--sys/cam/ctl/ctl_frontend.c77
-rw-r--r--sys/cam/ctl/ctl_frontend.h17
-rw-r--r--sys/cam/ctl/ctl_frontend_cam_sim.c71
-rw-r--r--sys/cam/ctl/ctl_frontend_ioctl.c51
-rw-r--r--sys/cam/ctl/ctl_frontend_iscsi.c45
-rw-r--r--sys/cam/ctl/ctl_ha.c2
-rw-r--r--sys/cam/ctl/ctl_io.h20
-rw-r--r--sys/cam/ctl/ctl_ioctl.h97
-rw-r--r--sys/cam/ctl/ctl_private.h12
-rw-r--r--sys/cam/ctl/ctl_tpc.c159
-rw-r--r--sys/cam/ctl/ctl_tpc_local.c37
-rw-r--r--sys/cam/ctl/ctl_util.c4
-rw-r--r--sys/cam/ctl/ctl_util.h4
-rw-r--r--sys/cam/ctl/scsi_ctl.c153
-rw-r--r--sys/cam/scsi/scsi_all.c66
-rw-r--r--sys/cam/scsi/scsi_all.h30
-rw-r--r--sys/cam/scsi/scsi_ch.c6
-rw-r--r--sys/cam/scsi/scsi_pass.c2
-rw-r--r--sys/compat/freebsd32/freebsd32_misc.c3
-rw-r--r--sys/conf/NOTES16
-rw-r--r--sys/conf/options1
-rw-r--r--sys/dev/age/if_age.c2
-rw-r--r--sys/dev/ahci/ahci.c21
-rw-r--r--sys/dev/ahci/ahci.h4
-rw-r--r--sys/dev/ahci/ahci_pci.c11
-rw-r--r--sys/dev/alc/if_alc.c34
-rw-r--r--sys/dev/alc/if_alcreg.h3
-rw-r--r--sys/dev/alc/if_alcvar.h3
-rw-r--r--sys/dev/ale/if_ale.c2
-rw-r--r--sys/dev/amdsbwd/amdsbwd.c4
-rw-r--r--sys/dev/arcmsr/arcmsr.c37
-rw-r--r--sys/dev/cxgbe/t4_main.c1
-rw-r--r--sys/dev/cxgbe/t4_sge.c5
-rw-r--r--sys/dev/cxgbe/tom/t4_connect.c20
-rw-r--r--sys/dev/cxgbe/tom/t4_cpl_io.c29
-rw-r--r--sys/dev/cxgbe/tom/t4_ddp.c3
-rw-r--r--sys/dev/cxgbe/tom/t4_listen.c45
-rw-r--r--sys/dev/cxgbe/tom/t4_tom.c136
-rw-r--r--sys/dev/cxgbe/tom/t4_tom.h8
-rw-r--r--sys/dev/hpt27xx/hpt27xx_os_bsd.c8
-rw-r--r--sys/dev/hptmv/entry.c4
-rw-r--r--sys/dev/hptnr/hptnr_os_bsd.c8
-rw-r--r--sys/dev/hptrr/hptrr_os_bsd.c4
-rw-r--r--sys/dev/iir/iir.c6
-rw-r--r--sys/dev/isci/isci_controller.c10
-rw-r--r--sys/dev/isci/isci_io_request.c18
-rw-r--r--sys/dev/isci/isci_task_request.c5
-rw-r--r--sys/dev/ixgbe/if_ix.c1
-rw-r--r--sys/dev/jme/if_jme.c2
-rw-r--r--sys/dev/kbd/kbd.c2
-rw-r--r--sys/dev/mlx5/mlx5_en/mlx5_en_main.c26
-rw-r--r--sys/dev/mmc/mmc.c22
-rw-r--r--sys/dev/mmc/mmcreg.h9
-rw-r--r--sys/dev/mmc/mmcsd.c8
-rw-r--r--sys/dev/mpr/mpr_sas.c7
-rw-r--r--sys/dev/mpr/mpr_sas_lsi.c15
-rw-r--r--sys/dev/msk/if_msk.c2
-rw-r--r--sys/dev/nand/nand_geom.c2
-rw-r--r--sys/dev/ntb/if_ntb/if_ntb.c5
-rw-r--r--sys/dev/nvd/nvd.c6
-rw-r--r--sys/dev/pci/pci.c7
-rw-r--r--sys/dev/ppbus/vpo.c16
-rw-r--r--sys/dev/qlxgbe/ql_def.h15
-rw-r--r--sys/dev/qlxgbe/ql_glbl.h5
-rw-r--r--sys/dev/qlxgbe/ql_hw.c71
-rw-r--r--sys/dev/qlxgbe/ql_hw.h1
-rw-r--r--sys/dev/qlxgbe/ql_isr.c32
-rw-r--r--sys/dev/qlxgbe/ql_os.c512
-rw-r--r--sys/dev/qlxgbe/ql_os.h4
-rw-r--r--sys/dev/qlxgbe/ql_ver.h2
-rw-r--r--sys/dev/rl/if_rl.c16
-rw-r--r--sys/dev/sdhci/sdhci.c199
-rw-r--r--sys/dev/sdhci/sdhci.h25
-rw-r--r--sys/dev/sdhci/sdhci_if.m6
-rw-r--r--sys/dev/sdhci/sdhci_pci.c50
-rw-r--r--sys/dev/sfxge/common/efx_mcdi.c24
-rw-r--r--sys/dev/sfxge/common/efx_mcdi.h2
-rw-r--r--sys/dev/sfxge/sfxge.c14
-rw-r--r--sys/dev/sfxge/sfxge.h3
-rw-r--r--sys/dev/sfxge/sfxge_port.c87
-rw-r--r--sys/dev/sfxge/sfxge_tx.c14
-rw-r--r--sys/dev/sound/pci/als4000.c4
-rw-r--r--sys/dev/sound/pci/cs4281.c11
-rw-r--r--sys/dev/sound/pci/hda/hdaa_patches.c9
-rw-r--r--sys/dev/sound/pci/hda/hdac.h1
-rw-r--r--sys/dev/sound/pci/hda/hdacc.c1
-rw-r--r--sys/dev/sound/pci/vibes.c15
-rw-r--r--sys/dev/stge/if_stge.c2
-rw-r--r--sys/dev/tws/tws.c4
-rw-r--r--sys/dev/usb/usb_hub.c25
-rw-r--r--sys/dev/usb/usb_process.c3
-rw-r--r--sys/dev/vte/if_vte.c2
-rw-r--r--sys/fs/nfsserver/nfs_nfsdport.c39
-rw-r--r--sys/fs/pseudofs/pseudofs_vncache.c13
-rw-r--r--sys/fs/tmpfs/tmpfs.h289
-rw-r--r--sys/fs/tmpfs/tmpfs_fifoops.c28
-rw-r--r--sys/fs/tmpfs/tmpfs_subr.c220
-rw-r--r--sys/fs/tmpfs/tmpfs_vfsops.c108
-rw-r--r--sys/fs/tmpfs/tmpfs_vnops.c257
-rw-r--r--sys/fs/tmpfs/tmpfs_vnops.h1
-rw-r--r--sys/geom/geom_disk.c12
-rw-r--r--sys/geom/geom_disk.h5
-rw-r--r--sys/geom/multipath/g_multipath.c8
-rw-r--r--sys/geom/vinum/geom_vinum_state.c2
-rw-r--r--sys/i386/i386/initcpu.c12
-rw-r--r--sys/i386/i386/machdep.c3
-rw-r--r--sys/i386/i386/pmap.c101
-rw-r--r--sys/i386/i386/vm_machdep.c6
-rw-r--r--sys/i386/include/cpufunc.h7
-rw-r--r--sys/i386/isa/npx.c10
-rw-r--r--sys/kern/imgact_elf.c33
-rw-r--r--sys/kern/kern_procctl.c2
-rw-r--r--sys/kern/kern_switch.c15
-rw-r--r--sys/kern/sched_4bsd.c4
-rw-r--r--sys/kern/sched_ule.c4
-rw-r--r--sys/kern/subr_unit.c12
-rw-r--r--sys/kern/uipc_mbuf2.c2
-rw-r--r--sys/kern/vfs_syscalls.c4
-rw-r--r--sys/modules/cam/Makefile1
-rw-r--r--sys/net/ieee8023ad_lacp.c7
-rw-r--r--sys/net/if_bridge.c6
-rw-r--r--sys/net/if_bridgevar.h1
-rw-r--r--sys/net/if_lagg.c2
-rw-r--r--sys/net/if_media.c1
-rw-r--r--sys/netgraph/ng_mppc.c7
-rw-r--r--sys/netinet/tcp_output.c4
-rw-r--r--sys/ofed/drivers/net/mlx4/main.c68
-rw-r--r--sys/sys/unistd.h2
-rw-r--r--sys/vm/vm_map.c2
-rw-r--r--sys/vm/vm_object.h4
-rw-r--r--sys/x86/x86/mca.c8
153 files changed, 4022 insertions, 3113 deletions
diff --git a/sys/amd64/amd64/initcpu.c b/sys/amd64/amd64/initcpu.c
index 66e465e..1b358bc 100644
--- a/sys/amd64/amd64/initcpu.c
+++ b/sys/amd64/amd64/initcpu.c
@@ -253,12 +253,17 @@ initializecpucache(void)
* CPUID_SS feature even though the native CPU supports it.
*/
TUNABLE_INT_FETCH("hw.clflush_disable", &hw_clflush_disable);
- if (vm_guest != VM_GUEST_NO && hw_clflush_disable == -1)
+ if (vm_guest != VM_GUEST_NO && hw_clflush_disable == -1) {
cpu_feature &= ~CPUID_CLFSH;
+ cpu_stdext_feature &= ~CPUID_STDEXT_CLFLUSHOPT;
+ }
+
/*
- * Allow to disable CLFLUSH feature manually by
- * hw.clflush_disable tunable.
+ * The kernel's use of CLFLUSH{,OPT} can be disabled manually
+ * by setting the hw.clflush_disable tunable.
*/
- if (hw_clflush_disable == 1)
+ if (hw_clflush_disable == 1) {
cpu_feature &= ~CPUID_CLFSH;
+ cpu_stdext_feature &= ~CPUID_STDEXT_CLFLUSHOPT;
+ }
}
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index d7c5213..1f6aaf5 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -912,7 +912,12 @@ pmap_bootstrap(vm_paddr_t *firstaddr)
virtual_avail = va;
- /* Initialize the PAT MSR. */
+ /*
+ * Initialize the PAT MSR.
+ * pmap_init_pat() clears and sets CR4_PGE, which, as a
+ * side-effect, invalidates stale PG_G TLB entries that might
+ * have been created in our pre-boot environment.
+ */
pmap_init_pat();
/* Initialize TLB Context Id. */
@@ -1789,9 +1794,8 @@ pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva, boolean_t force)
if ((cpu_feature & CPUID_SS) != 0 && !force)
; /* If "Self Snoop" is supported and allowed, do nothing. */
- else if ((cpu_feature & CPUID_CLFSH) != 0 &&
+ else if ((cpu_stdext_feature & CPUID_STDEXT_CLFLUSHOPT) != 0 &&
eva - sva < PMAP_CLFLUSH_THRESHOLD) {
-
/*
* XXX: Some CPUs fault, hang, or trash the local APIC
* registers if we use CLFLUSH on the local APIC
@@ -1802,16 +1806,29 @@ pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva, boolean_t force)
return;
/*
- * Otherwise, do per-cache line flush. Use the mfence
+ * Otherwise, do per-cache line flush. Use the sfence
* instruction to insure that previous stores are
* included in the write-back. The processor
* propagates flush to other processors in the cache
* coherence domain.
*/
- mfence();
+ sfence();
+ for (; sva < eva; sva += cpu_clflush_line_size)
+ clflushopt(sva);
+ sfence();
+ } else if ((cpu_feature & CPUID_CLFSH) != 0 &&
+ eva - sva < PMAP_CLFLUSH_THRESHOLD) {
+ if (pmap_kextract(sva) == lapic_paddr)
+ return;
+ /*
+ * Writes are ordered by CLFLUSH on Intel CPUs.
+ */
+ if (cpu_vendor_id != CPU_VENDOR_INTEL)
+ mfence();
for (; sva < eva; sva += cpu_clflush_line_size)
clflush(sva);
- mfence();
+ if (cpu_vendor_id != CPU_VENDOR_INTEL)
+ mfence();
} else {
/*
@@ -1835,19 +1852,31 @@ pmap_invalidate_cache_pages(vm_page_t *pages, int count)
{
vm_offset_t daddr, eva;
int i;
+ bool useclflushopt;
+ useclflushopt = (cpu_stdext_feature & CPUID_STDEXT_CLFLUSHOPT) != 0;
if (count >= PMAP_CLFLUSH_THRESHOLD / PAGE_SIZE ||
- (cpu_feature & CPUID_CLFSH) == 0)
+ ((cpu_feature & CPUID_CLFSH) == 0 && !useclflushopt))
pmap_invalidate_cache();
else {
- mfence();
+ if (useclflushopt)
+ sfence();
+ else if (cpu_vendor_id != CPU_VENDOR_INTEL)
+ mfence();
for (i = 0; i < count; i++) {
daddr = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pages[i]));
eva = daddr + PAGE_SIZE;
- for (; daddr < eva; daddr += cpu_clflush_line_size)
- clflush(daddr);
+ for (; daddr < eva; daddr += cpu_clflush_line_size) {
+ if (useclflushopt)
+ clflushopt(daddr);
+ else
+ clflush(daddr);
+ }
}
- mfence();
+ if (useclflushopt)
+ sfence();
+ else if (cpu_vendor_id != CPU_VENDOR_INTEL)
+ mfence();
}
}
@@ -3348,6 +3377,7 @@ pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
vm_paddr_t mptepa;
vm_page_t mpte;
struct spglist free;
+ vm_offset_t sva;
int PG_PTE_CACHE;
PG_G = pmap_global_bit(pmap);
@@ -3386,9 +3416,9 @@ pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
DMAP_MAX_ADDRESS ? VM_ALLOC_INTERRUPT : VM_ALLOC_NORMAL) |
VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
SLIST_INIT(&free);
- pmap_remove_pde(pmap, pde, trunc_2mpage(va), &free,
- lockp);
- pmap_invalidate_page(pmap, trunc_2mpage(va));
+ sva = trunc_2mpage(va);
+ pmap_remove_pde(pmap, pde, sva, &free, lockp);
+ pmap_invalidate_range(pmap, sva, sva + NBPDR - 1);
pmap_free_zero_pages(&free);
CTR2(KTR_PMAP, "pmap_demote_pde: failure for va %#lx"
" in pmap %p", va, pmap);
@@ -3531,11 +3561,23 @@ pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
pmap->pm_stats.wired_count -= NBPDR / PAGE_SIZE;
/*
- * Machines that don't support invlpg, also don't support
- * PG_G.
+ * When workaround_erratum383 is false, a promotion to a 2M
+ * page mapping does not invalidate the 512 4K page mappings
+ * from the TLB. Consequently, at this point, the TLB may
+ * hold both 4K and 2M page mappings. Therefore, the entire
+ * range of addresses must be invalidated here. In contrast,
+ * when workaround_erratum383 is true, a promotion does
+ * invalidate the 512 4K page mappings, and so a single INVLPG
+ * suffices to invalidate the 2M page mapping.
*/
- if (oldpde & PG_G)
- pmap_invalidate_page(kernel_pmap, sva);
+ if ((oldpde & PG_G) != 0) {
+ if (workaround_erratum383)
+ pmap_invalidate_page(kernel_pmap, sva);
+ else
+ pmap_invalidate_range(kernel_pmap, sva,
+ sva + NBPDR - 1);
+ }
+
pmap_resident_count_dec(pmap, NBPDR / PAGE_SIZE);
if (oldpde & PG_MANAGED) {
CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, oldpde & PG_PS_FRAME);
@@ -3890,9 +3932,14 @@ retry:
if (newpde != oldpde) {
if (!atomic_cmpset_long(pde, oldpde, newpde))
goto retry;
- if (oldpde & PG_G)
- pmap_invalidate_page(pmap, sva);
- else
+ if (oldpde & PG_G) {
+ /* See pmap_remove_pde() for explanation. */
+ if (workaround_erratum383)
+ pmap_invalidate_page(kernel_pmap, sva);
+ else
+ pmap_invalidate_range(kernel_pmap, sva,
+ sva + NBPDR - 1);
+ } else
anychanged = TRUE;
}
return (anychanged);
diff --git a/sys/amd64/include/cpufunc.h b/sys/amd64/include/cpufunc.h
index a3d82e8..dacfe32 100644
--- a/sys/amd64/include/cpufunc.h
+++ b/sys/amd64/include/cpufunc.h
@@ -327,6 +327,13 @@ mfence(void)
}
static __inline void
+sfence(void)
+{
+
+ __asm __volatile("sfence" : : : "memory");
+}
+
+static __inline void
ia32_pause(void)
{
__asm __volatile("pause");
diff --git a/sys/amd64/linux/linux_sysvec.c b/sys/amd64/linux/linux_sysvec.c
index 13f9199..dba213b 100644
--- a/sys/amd64/linux/linux_sysvec.c
+++ b/sys/amd64/linux/linux_sysvec.c
@@ -718,7 +718,7 @@ exec_linux_imgact_try(struct image_params *imgp)
{
const char *head = (const char *)imgp->image_header;
char *rpath;
- int error = -1, len;
+ int error = -1;
/*
* The interpreter for shell scripts run from a linux binary needs
@@ -736,17 +736,12 @@ exec_linux_imgact_try(struct image_params *imgp)
linux_emul_convpath(FIRST_THREAD_IN_PROC(imgp->proc),
imgp->interpreter_name, UIO_SYSSPACE,
&rpath, 0, AT_FDCWD);
- if (rpath != NULL) {
- len = strlen(rpath) + 1;
-
- if (len <= MAXSHELLCMDLEN)
- memcpy(imgp->interpreter_name,
- rpath, len);
- free(rpath, M_TEMP);
- }
+ if (rpath != NULL)
+ imgp->args->fname_buf =
+ imgp->interpreter_name = rpath;
}
}
- return(error);
+ return (error);
}
#define LINUX_VSYSCALL_START (-10UL << 20)
diff --git a/sys/amd64/vmm/vmm_dev.c b/sys/amd64/vmm/vmm_dev.c
index 5cb4150..53a8bdc 100644
--- a/sys/amd64/vmm/vmm_dev.c
+++ b/sys/amd64/vmm/vmm_dev.c
@@ -258,7 +258,7 @@ alloc_memseg(struct vmmdev_softc *sc, struct vm_memseg *mseg)
if (VM_MEMSEG_NAME(mseg)) {
sysmem = false;
name = malloc(SPECNAMELEN + 1, M_VMMDEV, M_WAITOK);
- error = copystr(VM_MEMSEG_NAME(mseg), name, SPECNAMELEN + 1, 0);
+ error = copystr(mseg->name, name, SPECNAMELEN + 1, 0);
if (error)
goto done;
}
diff --git a/sys/boot/common/reloc_elf.c b/sys/boot/common/reloc_elf.c
index 2b60d18..6d4a00f 100644
--- a/sys/boot/common/reloc_elf.c
+++ b/sys/boot/common/reloc_elf.c
@@ -33,7 +33,6 @@ __FBSDID("$FreeBSD$");
#include <sys/types.h>
#include <machine/elf.h>
-#include <errno.h>
#include <stand.h>
#define FREEBSD_ELF
diff --git a/sys/boot/efi/boot1/boot_module.h b/sys/boot/efi/boot1/boot_module.h
index 296d5a6..3a6b827 100644
--- a/sys/boot/efi/boot1/boot_module.h
+++ b/sys/boot/efi/boot1/boot_module.h
@@ -64,7 +64,7 @@ typedef struct boot_module_t
const char *name;
/* init is the optional initialiser for the module. */
- void (*init)();
+ void (*init)(void);
/*
* probe checks to see if the module can handle dev.
@@ -89,10 +89,10 @@ typedef struct boot_module_t
void **buf, size_t *bufsize);
/* status outputs information about the probed devices. */
- void (*status)();
+ void (*status)(void);
/* valid devices as found by probe. */
- dev_info_t *(*devices)();
+ dev_info_t *(*devices)(void);
} boot_module_t;
/* Standard boot modules. */
diff --git a/sys/boot/efi/include/efiapi.h b/sys/boot/efi/include/efiapi.h
index 25e40a2..1ce7f62 100644
--- a/sys/boot/efi/include/efiapi.h
+++ b/sys/boot/efi/include/efiapi.h
@@ -536,6 +536,7 @@ EFI_STATUS
typedef
EFI_STATUS
(EFIAPI *EFI_RESERVED_SERVICE) (
+ VOID
);
typedef
diff --git a/sys/boot/forth/loader.conf b/sys/boot/forth/loader.conf
index 7cf0bcf..afc63f0 100644
--- a/sys/boot/forth/loader.conf
+++ b/sys/boot/forth/loader.conf
@@ -84,6 +84,7 @@ module_path="/boot/modules" # Set the module search path
#prompt="\\${interpret}" # Set the command prompt
#root_disk_unit="0" # Force the root disk unit number
#rootdev="disk1s1a" # Set the root filesystem
+#dumpdev="disk1s1b" # Set a dump device early in the boot process
#tftp.blksize="1428" # Set the RFC 2348 TFTP block size.
# If the TFTP server does not support RFC 2348,
# the block size is set to 512. If the value
diff --git a/sys/boot/libstand32/Makefile b/sys/boot/libstand32/Makefile
index c2bb701..be7a4f4 100644
--- a/sys/boot/libstand32/Makefile
+++ b/sys/boot/libstand32/Makefile
@@ -1,189 +1,27 @@
# $FreeBSD$
-# Originally from $NetBSD: Makefile,v 1.21 1997/10/26 22:08:38 lukem Exp $
-#
-# Notes:
-# - We don't use the libc strerror/sys_errlist because the string table is
-# quite large.
-#
-
-MAN=
.include <bsd.own.mk>
-MK_SSP= no
-
-S= ${.CURDIR}/../../../lib/libstand
-
-.PATH: ${S}
-LIB= stand
-INTERNALLIB=
-NO_PROFILE=
-NO_PIC=
-
-WARNS?= 0
-
-CFLAGS+= -ffreestanding -Wformat
-CFLAGS+= -I${S}
-
-.if ${MACHINE_CPUARCH} == "i386" || ${MACHINE_CPUARCH} == "amd64"
-CFLAGS.gcc+= -mpreferred-stack-boundary=2
-CFLAGS+= -mno-mmx -mno-3dnow -mno-sse -mno-sse2 -mno-sse3 -msoft-float
-.endif
-.if ${MACHINE} == "pc98"
-CFLAGS+= -Os
-.endif
-.if ${MACHINE_CPUARCH} == "powerpc"
-CFLAGS+= -msoft-float -D_STANDALONE -DNETIF_DEBUG
-.endif
-.if ${MACHINE_ARCH} == "amd64" || ${MACHINE_ARCH} == "powerpc64"
-CFLAGS+= -m32 -I.
-.endif
-.if ${MACHINE_CPUARCH} == "arm"
-CFLAGS+= -msoft-float -D_STANDALONE
-.endif
-.if ${MACHINE_CPUARCH} == "mips"
-CFLAGS+= -G0 -fno-pic -mno-abicalls
-.endif
-
-# standalone components and stuff we have modified locally
-SRCS+= gzguts.h zutil.h __main.c assert.c bcd.c environment.c getopt.c gets.c \
- globals.c pager.c printf.c strdup.c strerror.c strtol.c strtoul.c random.c \
- sbrk.c twiddle.c zalloc.c zalloc_malloc.c
-
-# private (pruned) versions of libc string functions
-SRCS+= strcasecmp.c
-
-LIBC= ${S}/../libc
-
-.PATH: ${LIBC}/net
-
-SRCS+= ntoh.c
-
-# string functions from libc
-.PATH: ${LIBC}/string
-.if ${MACHINE_CPUARCH} != "ia64"
-SRCS+= bcmp.c bcopy.c bzero.c ffs.c memccpy.c memchr.c memcmp.c memcpy.c \
- memmove.c memset.c qdivrem.c strcat.c strchr.c strcmp.c strcpy.c \
- strcspn.c strlcat.c strlcpy.c strlen.c strncat.c strncmp.c strncpy.c \
- strpbrk.c strrchr.c strsep.c strspn.c strstr.c strtok.c swab.c
-.endif
-.if ${MACHINE_CPUARCH} == "arm"
-.PATH: ${LIBC}/arm/gen
-
-.if ${MK_ARM_EABI} == "no"
-SRCS+= divsi3.S
-.else
-# Compiler support functions
-.PATH: ${.CURDIR}/../../../contrib/compiler-rt/lib/
-# __clzsi2 and ctzsi2 for various builtin functions
-SRCS+= clzsi2.c ctzsi2.c
-# Divide and modulus functions called by the compiler
-SRCS+= divmoddi4.c divmodsi4.c divdi3.c divsi3.c moddi3.c modsi3.c
-SRCS+= udivmoddi4.c udivmodsi4.c udivdi3.c udivsi3.c umoddi3.c umodsi3.c
-
-.PATH: ${.CURDIR}/../../../contrib/compiler-rt/lib/arm/
-SRCS+= aeabi_idivmod.S aeabi_ldivmod.S aeabi_uidivmod.S aeabi_uldivmod.S
-SRCS+= aeabi_memcmp.S aeabi_memcpy.S aeabi_memmove.S aeabi_memset.S
-.endif
-
-.endif
-.if ${MACHINE_CPUARCH} == "ia64"
-.PATH: ${LIBC}/ia64/string
-SRCS+= bcmp.c bcopy.S bzero.S ffs.S memccpy.c memchr.c memcmp.c memcpy.S \
- memmove.S memset.c strcat.c strchr.c strcmp.c strcpy.c strcspn.c \
- strlen.c strncat.c strncmp.c strncpy.c strpbrk.c strrchr.c strsep.c \
- strspn.c strstr.c strtok.c swab.c
-
-.PATH: ${LIBC}/ia64/gen
-SRCS+= __divdi3.S __divsi3.S __moddi3.S __modsi3.S
-SRCS+= __udivdi3.S __udivsi3.S __umoddi3.S __umodsi3.S
-.endif
-.if ${MACHINE_CPUARCH} == "powerpc"
-.PATH: ${LIBC}/quad
-SRCS+= ashldi3.c ashrdi3.c
-.PATH: ${LIBC}/powerpc/gen
-SRCS+= syncicache.c
-.endif
-# uuid functions from libc
-.PATH: ${LIBC}/uuid
-SRCS+= uuid_equal.c uuid_is_nil.c
+LIBSTAND_SRC= ${.CURDIR}/../../../lib/libstand
-# _setjmp/_longjmp
.if ${MACHINE_CPUARCH} == "amd64"
-.PATH: ${S}/i386
-.elif ${MACHINE_ARCH} == "powerpc64"
-.PATH: ${S}/powerpc
+LIBSTAND_CPUARCH=i386
.else
-.PATH: ${S}/${MACHINE_CPUARCH}
+LIBSTAND_CPUARCH=${MACHINE_CPUARCH}
.endif
-SRCS+= _setjmp.S
-
-# decompression functionality from libbz2
-# NOTE: to actually test this functionality after libbz2 upgrade compile
-# loader(8) with LOADER_BZIP2_SUPPORT defined
-.PATH: ${.CURDIR}/../../../contrib/bzip2
-CFLAGS+= -DBZ_NO_STDIO -DBZ_NO_COMPRESS
-SRCS+= libstand_bzlib_private.h
-
-.for file in bzlib.c crctable.c decompress.c huffman.c randtable.c
-SRCS+= _${file}
-CLEANFILES+= _${file}
-
-_${file}: ${file}
- sed "s|bzlib_private\.h|libstand_bzlib_private.h|" ${.ALLSRC} > ${.TARGET}
-.endfor
-
-CLEANFILES+= libstand_bzlib_private.h
-libstand_bzlib_private.h: bzlib_private.h
- sed -e 's|<stdlib.h>|"stand.h"|' \
- ${.ALLSRC} > ${.TARGET}
-
-# decompression functionality from libz
-.PATH: ${S}/../libz
-CFLAGS+=-DHAVE_MEMCPY -I${S}/../libz
-SRCS+= adler32.c crc32.c libstand_zutil.h libstand_gzguts.h
-
-.for file in infback.c inffast.c inflate.c inftrees.c zutil.c
-SRCS+= _${file}
-CLEANFILES+= _${file}
-
-_${file}: ${file}
- sed -e "s|zutil\.h|libstand_zutil.h|" \
- -e "s|gzguts\.h|libstand_gzguts.h|" \
- ${.ALLSRC} > ${.TARGET}
-.endfor
-
-# depend on stand.h being able to be included multiple times
-.for file in zutil.h gzguts.h
-CLEANFILES+= libstand_${file}
-libstand_${file}: ${file}
- sed -e 's|<fcntl.h>|"stand.h"|' \
- -e 's|<stddef.h>|"stand.h"|' \
- -e 's|<string.h>|"stand.h"|' \
- -e 's|<stdio.h>|"stand.h"|' \
- -e 's|<stdlib.h>|"stand.h"|' \
- ${.ALLSRC} > ${.TARGET}
-.endfor
-
-# io routines
-SRCS+= closeall.c dev.c ioctl.c nullfs.c stat.c \
- fstat.c close.c lseek.c open.c read.c write.c readdir.c
-
-# network routines
-SRCS+= arp.c ether.c inet_ntoa.c in_cksum.c net.c udp.c netif.c rpc.c
-
-# network info services:
-SRCS+= bootp.c rarp.c bootparam.c
+LIBC_SRC= ${LIBSTAND_SRC}/../libc
+INTERNALLIB=
+INCS=
+MAN=
+.PATH: ${LIBSTAND_SRC}
-# boot filesystems
-SRCS+= ufs.c nfs.c cd9660.c tftp.c gzipfs.c bzipfs.c
-SRCS+= dosfs.c ext2fs.c
-SRCS+= splitfs.c
-.if ${MK_NAND} != "no"
-SRCS+= nandfs.c
+.if ${MACHINE_ARCH} == "amd64" || ${MACHINE_ARCH} == "powerpc64"
+CFLAGS+= -m32 -I.
.endif
-.include <bsd.lib.mk>
+WARNS?= 0
+
+.include "${LIBSTAND_SRC}/Makefile"
.if ${MACHINE_CPUARCH} == "amd64"
CLEANFILES+= machine
diff --git a/sys/boot/userboot/libstand/Makefile b/sys/boot/userboot/libstand/Makefile
index 53d6e85..11bfdee 100644
--- a/sys/boot/userboot/libstand/Makefile
+++ b/sys/boot/userboot/libstand/Makefile
@@ -1,166 +1,12 @@
# $FreeBSD$
-# Originally from $NetBSD: Makefile,v 1.21 1997/10/26 22:08:38 lukem Exp $
-#
-# Notes:
-# - We don't use the libc strerror/sys_errlist because the string table is
-# quite large.
-#
-
-MAN=
.include <bsd.own.mk>
-MK_SSP= no
-S= ${.CURDIR}/../../../../lib/libstand
+LIBSTAND_SRC= ${.CURDIR}/../../../../lib/libstand
-.PATH: ${S}
-LIB= stand
INTERNALLIB=
-NO_PROFILE=
-NO_PIC=
-
-WARNS?= 0
-
-CFLAGS+= -ffreestanding -Wformat -fPIC
-CFLAGS+= -I${.CURDIR}/../../../../lib/libstand
-
-.if ${MACHINE_CPUARCH} == "i386" || ${MACHINE_CPUARCH} == "amd64"
-CFLAGS+= -mno-mmx -mno-3dnow -mno-sse -mno-sse2
-.endif
-.if ${MACHINE_CPUARCH} == "i386"
-CFLAGS.gcc+= -mpreferred-stack-boundary=2
-CFLAGS+= -mno-sse3
-.endif
-.if ${MACHINE} == "pc98"
-CFLAGS+= -Os
-.endif
-.if ${MACHINE_CPUARCH} == "powerpc"
-CFLAGS+= -msoft-float -D_STANDALONE -DNETIF_DEBUG
-.endif
-.if ${MACHINE_CPUARCH} == "arm"
-CFLAGS+= -msoft-float -D_STANDALONE
-.endif
-
-# standalone components and stuff we have modified locally
-SRCS+= gzguts.h zutil.h __main.c assert.c bcd.c environment.c getopt.c gets.c \
- globals.c pager.c printf.c strdup.c strerror.c strtol.c random.c \
- sbrk.c twiddle.c zalloc.c zalloc_malloc.c
-
-# private (pruned) versions of libc string functions
-SRCS+= strcasecmp.c
-
-LIBC= ${.CURDIR}/../../../../lib/libc
-
-.PATH: ${LIBC}/net
-
-SRCS+= ntoh.c
-
-# string functions from libc
-.PATH: ${LIBC}/string
-.if ${MACHINE_CPUARCH} == "i386" || ${MACHINE_CPUARCH} == "powerpc" || \
- ${MACHINE_CPUARCH} == "sparc64" || ${MACHINE_CPUARCH} == "amd64" || \
- ${MACHINE_CPUARCH} == "arm"
-SRCS+= bcmp.c bcopy.c bzero.c ffs.c memccpy.c memchr.c memcmp.c memcpy.c \
- memmove.c memset.c qdivrem.c strcat.c strchr.c strcmp.c strcpy.c \
- strcspn.c strlen.c strncat.c strncmp.c strncpy.c strpbrk.c \
- strrchr.c strsep.c strspn.c strstr.c strtok.c swab.c
-.endif
-.if ${MACHINE_CPUARCH} == "arm"
-.PATH: ${LIBC}/arm/gen
-SRCS+= divsi3.S
-.endif
-.if ${MACHINE_CPUARCH} == "ia64"
-.PATH: ${LIBC}/ia64/string
-SRCS+= bcmp.c bcopy.S bzero.S ffs.S index.c memccpy.c memchr.c memcmp.c \
- memcpy.S memmove.S memset.c rindex.c strcat.c strchr.c \
- strcmp.c strcpy.c strcspn.c strlen.c \
- strncat.c strncmp.c strncpy.c strpbrk.c strrchr.c strsep.c \
- strspn.c strstr.c strtok.c swab.c
-
-.PATH: ${LIBC}/ia64/gen
-SRCS+= __divdi3.S __divsi3.S __moddi3.S __modsi3.S
-SRCS+= __udivdi3.S __udivsi3.S __umoddi3.S __umodsi3.S
-.endif
-.if ${MACHINE_CPUARCH} == "powerpc"
-.PATH: ${LIBC}/libc/quad
-SRCS+= ashldi3.c ashrdi3.c
-.PATH: ${LIBC}/powerpc/gen
-SRCS+= syncicache.c
-.endif
-
-# uuid functions from libc
-.PATH: ${LIBC}/uuid
-SRCS+= uuid_equal.c uuid_is_nil.c
-
-# _setjmp/_longjmp
-.if ${MACHINE_CPUARCH} == "amd64"
-.PATH: ${S}/amd64
-.elif ${MACHINE_ARCH} == "powerpc64"
-.PATH: ${S}/powerpc
-.else
-.PATH: ${S}/${MACHINE_CPUARCH}
-.endif
-SRCS+= _setjmp.S
-
-# decompression functionality from libbz2
-# NOTE: to actually test this functionality after libbz2 upgrade compile
-# loader(8) with LOADER_BZIP2_SUPPORT defined
-.PATH: ${.CURDIR}/../../../../contrib/bzip2
-CFLAGS+= -DBZ_NO_STDIO -DBZ_NO_COMPRESS
-SRCS+= libstand_bzlib_private.h
-
-.for file in bzlib.c crctable.c decompress.c huffman.c randtable.c
-SRCS+= _${file}
-CLEANFILES+= _${file}
-
-_${file}: ${file}
- sed "s|bzlib_private\.h|libstand_bzlib_private.h|" ${.ALLSRC} > ${.TARGET}
-.endfor
-
-CLEANFILES+= libstand_bzlib_private.h
-libstand_bzlib_private.h: bzlib_private.h
- sed -e 's|<stdlib.h>|"stand.h"|' \
- ${.ALLSRC} > ${.TARGET}
-
-# decompression functionality from libz
-.PATH: ${.CURDIR}/../../../../lib/libz
-CFLAGS+=-DHAVE_MEMCPY -I${.CURDIR}/../../../../lib/libz
-SRCS+= adler32.c crc32.c libstand_zutil.h libstand_gzguts.h
-
-.for file in infback.c inffast.c inflate.c inftrees.c zutil.c
-SRCS+= _${file}
-CLEANFILES+= _${file}
-
-_${file}: ${file}
- sed -e "s|zutil\.h|libstand_zutil.h|" \
- -e "s|gzguts\.h|libstand_gzguts.h|" ${.ALLSRC} > ${.TARGET}
-.endfor
-
-# depend on stand.h being able to be included multiple times
-.for file in zutil.h gzguts.h
-CLEANFILES+= libstand_${file}
-libstand_${file}: ${file}
- sed -e 's|<fcntl.h>|"stand.h"|' \
- -e 's|<stddef.h>|"stand.h"|' \
- -e 's|<string.h>|"stand.h"|' \
- -e 's|<stdio.h>|"stand.h"|' \
- -e 's|<stdlib.h>|"stand.h"|' \
- ${.ALLSRC} > ${.TARGET}
-.endfor
-
-# io routines
-SRCS+= closeall.c dev.c ioctl.c nullfs.c stat.c \
- fstat.c close.c lseek.c open.c read.c write.c readdir.c
-
-# network routines
-SRCS+= arp.c ether.c inet_ntoa.c in_cksum.c net.c udp.c netif.c rpc.c
-
-# network info services:
-SRCS+= bootp.c rarp.c bootparam.c
-
-# boot filesystems
-SRCS+= ufs.c nfs.c cd9660.c tftp.c gzipfs.c bzipfs.c
-SRCS+= dosfs.c ext2fs.c
-SRCS+= splitfs.c
+INCS=
+MAN=
+.PATH: ${LIBSTAND_SRC}
-.include <bsd.lib.mk>
+.include "${LIBSTAND_SRC}/Makefile"
diff --git a/sys/cam/cam_ccb.h b/sys/cam/cam_ccb.h
index 1d56ac7..1cbb082 100644
--- a/sys/cam/cam_ccb.h
+++ b/sys/cam/cam_ccb.h
@@ -727,6 +727,13 @@ struct ccb_scsiio {
u_int init_id; /* initiator id of who selected */
};
+static __inline uint8_t *
+scsiio_cdb_ptr(struct ccb_scsiio *ccb)
+{
+ return ((ccb->ccb_h.flags & CAM_CDB_POINTER) ?
+ ccb->cdb_io.cdb_ptr : ccb->cdb_io.cdb_bytes);
+}
+
/*
* ATA I/O Request CCB used for the XPT_ATA_IO function code.
*/
@@ -760,6 +767,13 @@ struct ccb_accept_tio {
struct scsi_sense_data sense_data;
};
+static __inline uint8_t *
+atio_cdb_ptr(struct ccb_accept_tio *ccb)
+{
+ return ((ccb->ccb_h.flags & CAM_CDB_POINTER) ?
+ ccb->cdb_io.cdb_ptr : ccb->cdb_io.cdb_bytes);
+}
+
/* Release SIM Queue */
struct ccb_relsim {
struct ccb_hdr ccb_h;
diff --git a/sys/cam/cam_periph.c b/sys/cam/cam_periph.c
index 9a59155..c540780 100644
--- a/sys/cam/cam_periph.c
+++ b/sys/cam/cam_periph.c
@@ -1909,10 +1909,7 @@ cam_periph_devctl_notify(union ccb *ccb)
if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
sbuf_printf(&sb, "CDB=\"");
- if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0)
- scsi_cdb_sbuf(ccb->csio.cdb_io.cdb_ptr, &sb);
- else
- scsi_cdb_sbuf(ccb->csio.cdb_io.cdb_bytes, &sb);
+ scsi_cdb_sbuf(scsiio_cdb_ptr(&ccb->csio), &sb);
sbuf_printf(&sb, "\" ");
} else if (ccb->ccb_h.func_code == XPT_ATA_IO) {
sbuf_printf(&sb, "ACB=\"");
diff --git a/sys/cam/ctl/ctl.c b/sys/cam/ctl/ctl.c
index 1a24641..1b3a57f 100644
--- a/sys/cam/ctl/ctl.c
+++ b/sys/cam/ctl/ctl.c
@@ -1,7 +1,7 @@
/*-
* Copyright (c) 2003-2009 Silicon Graphics International Corp.
* Copyright (c) 2012 The FreeBSD Foundation
- * Copyright (c) 2015 Alexander Motin <mav@FreeBSD.org>
+ * Copyright (c) 2014-2017 Alexander Motin <mav@FreeBSD.org>
* All rights reserved.
*
* Portions of this software were developed by Edward Tomasz Napierala
@@ -409,6 +409,9 @@ static int ctl_debug = CTL_DEBUG_NONE;
TUNABLE_INT("kern.cam.ctl.debug", &ctl_debug);
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, debug, CTLFLAG_RWTUN,
&ctl_debug, 0, "Enabled debug flags");
+static int ctl_lun_map_size = 1024;
+SYSCTL_INT(_kern_cam_ctl, OID_AUTO, lun_map_size, CTLFLAG_RWTUN,
+ &ctl_lun_map_size, 0, "Size of per-port LUN map (max LUN + 1)");
/*
* Supported pages (0x00), Serial number (0x80), Device ID (0x83),
@@ -423,10 +426,10 @@ static void ctl_isc_event_handler(ctl_ha_channel chanel, ctl_ha_event event,
static void ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest);
static void ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest);
static int ctl_init(void);
-void ctl_shutdown(void);
+static int ctl_shutdown(void);
static int ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td);
static int ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td);
-static int ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio);
+static void ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio);
static void ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
struct ctl_ooa *ooa_hdr,
struct ctl_ooa_entry *kern_entries);
@@ -436,7 +439,6 @@ static int ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *lun,
struct ctl_be_lun *be_lun);
static int ctl_free_lun(struct ctl_lun *lun);
static void ctl_create_lun(struct ctl_be_lun *be_lun);
-static struct ctl_port * ctl_io_port(struct ctl_io_hdr *io_hdr);
static int ctl_do_mode_select(union ctl_io *io);
static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun,
@@ -447,7 +449,7 @@ static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun,
struct scsi_per_res_out_parms* param);
static void ctl_pro_preempt_other(struct ctl_lun *lun,
union ctl_ha_msg *msg);
-static void ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg);
+static void ctl_hndl_per_res_out_on_other_sc(union ctl_io *io);
static int ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len);
@@ -520,6 +522,8 @@ static const struct ctl_cmd_entry *
ctl_validate_command(struct ctl_scsiio *ctsio);
static int ctl_cmd_applicable(uint8_t lun_type,
const struct ctl_cmd_entry *entry);
+static int ctl_ha_init(void);
+static int ctl_ha_shutdown(void);
static uint64_t ctl_get_prkey(struct ctl_lun *lun, uint32_t residx);
static void ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx);
@@ -561,18 +565,60 @@ MODULE_VERSION(ctl, 1);
static struct ctl_frontend ha_frontend =
{
.name = "ha",
+ .init = ctl_ha_init,
+ .shutdown = ctl_ha_shutdown,
+};
+
+static int
+ctl_ha_init(void)
+{
+ struct ctl_softc *softc = control_softc;
+
+ if (ctl_pool_create(softc, "othersc", CTL_POOL_ENTRIES_OTHER_SC,
+ &softc->othersc_pool) != 0)
+ return (ENOMEM);
+ if (ctl_ha_msg_init(softc) != CTL_HA_STATUS_SUCCESS) {
+ ctl_pool_free(softc->othersc_pool);
+ return (EIO);
+ }
+ if (ctl_ha_msg_register(CTL_HA_CHAN_CTL, ctl_isc_event_handler)
+ != CTL_HA_STATUS_SUCCESS) {
+ ctl_ha_msg_destroy(softc);
+ ctl_pool_free(softc->othersc_pool);
+ return (EIO);
+ }
+ return (0);
+};
+
+static int
+ctl_ha_shutdown(void)
+{
+ struct ctl_softc *softc = control_softc;
+ struct ctl_port *port;
+
+ ctl_ha_msg_shutdown(softc);
+ if (ctl_ha_msg_deregister(CTL_HA_CHAN_CTL) != CTL_HA_STATUS_SUCCESS)
+ return (EIO);
+ if (ctl_ha_msg_destroy(softc) != CTL_HA_STATUS_SUCCESS)
+ return (EIO);
+ ctl_pool_free(softc->othersc_pool);
+ while ((port = STAILQ_FIRST(&ha_frontend.port_list)) != NULL) {
+ ctl_port_deregister(port);
+ free(port->port_name, M_CTL);
+ free(port, M_CTL);
+ }
+ return (0);
};
static void
ctl_ha_datamove(union ctl_io *io)
{
- struct ctl_lun *lun;
+ struct ctl_lun *lun = CTL_LUN(io);
struct ctl_sg_entry *sgl;
union ctl_ha_msg msg;
uint32_t sg_entries_sent;
int do_sg_copy, i, j;
- lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
memset(&msg.dt, 0, sizeof(msg.dt));
msg.hdr.msg_type = CTL_MSG_DATAMOVE;
msg.hdr.original_sc = io->io_hdr.original_sc;
@@ -697,8 +743,6 @@ ctl_ha_done(union ctl_io *io)
msg.scsi.tag_num = io->scsiio.tag_num;
msg.scsi.tag_type = io->scsiio.tag_type;
msg.scsi.sense_len = io->scsiio.sense_len;
- msg.scsi.sense_residual = io->scsiio.sense_residual;
- msg.scsi.residual = io->scsiio.residual;
memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data,
io->scsiio.sense_len);
ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
@@ -726,8 +770,6 @@ ctl_isc_handler_finish_xfer(struct ctl_softc *ctl_softc,
ctsio->io_hdr.status = msg_info->hdr.status;
ctsio->scsi_status = msg_info->scsi.scsi_status;
ctsio->sense_len = msg_info->scsi.sense_len;
- ctsio->sense_residual = msg_info->scsi.sense_residual;
- ctsio->residual = msg_info->scsi.residual;
memcpy(&ctsio->sense_data, &msg_info->scsi.sense_data,
msg_info->scsi.sense_len);
ctl_enqueue_isc((union ctl_io *)ctsio);
@@ -828,7 +870,7 @@ ctl_isc_announce_port(struct ctl_port *port)
return;
i = sizeof(msg->port) + strlen(port->port_name) + 1;
if (port->lun_map)
- i += sizeof(uint32_t) * CTL_MAX_LUNS;
+ i += port->lun_map_size * sizeof(uint32_t);
if (port->port_devid)
i += port->port_devid->len;
if (port->target_devid)
@@ -848,7 +890,7 @@ ctl_isc_announce_port(struct ctl_port *port)
"%d:%s", softc->ha_id, port->port_name) + 1;
i += msg->port.name_len;
if (port->lun_map) {
- msg->port.lun_map_len = sizeof(uint32_t) * CTL_MAX_LUNS;
+ msg->port.lun_map_len = port->lun_map_size * sizeof(uint32_t);
memcpy(&msg->port.data[i], port->lun_map,
msg->port.lun_map_len);
i += msg->port.lun_map_len;
@@ -1026,27 +1068,27 @@ ctl_isc_ua(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
uint32_t iid = ctl_get_initindex(&msg->hdr.nexus);
mtx_lock(&softc->ctl_lock);
- if (msg->hdr.nexus.targ_lun < CTL_MAX_LUNS &&
- (lun = softc->ctl_luns[msg->hdr.nexus.targ_mapped_lun]) != NULL) {
- mtx_lock(&lun->lun_lock);
- mtx_unlock(&softc->ctl_lock);
- if (msg->ua.ua_type == CTL_UA_THIN_PROV_THRES &&
- msg->ua.ua_set)
- memcpy(lun->ua_tpt_info, msg->ua.ua_info, 8);
- if (msg->ua.ua_all) {
- if (msg->ua.ua_set)
- ctl_est_ua_all(lun, iid, msg->ua.ua_type);
- else
- ctl_clr_ua_all(lun, iid, msg->ua.ua_type);
- } else {
- if (msg->ua.ua_set)
- ctl_est_ua(lun, iid, msg->ua.ua_type);
- else
- ctl_clr_ua(lun, iid, msg->ua.ua_type);
- }
- mtx_unlock(&lun->lun_lock);
- } else
+ if (msg->hdr.nexus.targ_mapped_lun >= CTL_MAX_LUNS ||
+ (lun = softc->ctl_luns[msg->hdr.nexus.targ_mapped_lun]) == NULL) {
mtx_unlock(&softc->ctl_lock);
+ return;
+ }
+ mtx_lock(&lun->lun_lock);
+ mtx_unlock(&softc->ctl_lock);
+ if (msg->ua.ua_type == CTL_UA_THIN_PROV_THRES && msg->ua.ua_set)
+ memcpy(lun->ua_tpt_info, msg->ua.ua_info, 8);
+ if (msg->ua.ua_all) {
+ if (msg->ua.ua_set)
+ ctl_est_ua_all(lun, iid, msg->ua.ua_type);
+ else
+ ctl_clr_ua_all(lun, iid, msg->ua.ua_type);
+ } else {
+ if (msg->ua.ua_set)
+ ctl_est_ua(lun, iid, msg->ua.ua_type);
+ else
+ ctl_clr_ua(lun, iid, msg->ua.ua_type);
+ }
+ mtx_unlock(&lun->lun_lock);
}
static void
@@ -1060,8 +1102,8 @@ ctl_isc_lun_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
targ_lun = msg->hdr.nexus.targ_mapped_lun;
mtx_lock(&softc->ctl_lock);
- if ((targ_lun >= CTL_MAX_LUNS) ||
- ((lun = softc->ctl_luns[targ_lun]) == NULL)) {
+ if (targ_lun >= CTL_MAX_LUNS ||
+ (lun = softc->ctl_luns[targ_lun]) == NULL) {
mtx_unlock(&softc->ctl_lock);
return;
}
@@ -1076,7 +1118,7 @@ ctl_isc_lun_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
memcmp(&msg->lun.data[0], lun->lun_devid->data, i) != 0)) {
mtx_unlock(&lun->lun_lock);
printf("%s: Received conflicting HA LUN %d\n",
- __func__, msg->hdr.nexus.targ_lun);
+ __func__, targ_lun);
return;
} else {
/* Record whether peer is primary. */
@@ -1110,7 +1152,7 @@ ctl_isc_lun_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
mtx_unlock(&lun->lun_lock);
CTL_DEBUG_PRINT(("%s: Known LUN %d, peer is %s\n",
- __func__, msg->hdr.nexus.targ_lun,
+ __func__, targ_lun,
(msg->lun.flags & CTL_LUN_PRIMARY_SC) ?
"primary" : "secondary"));
@@ -1157,19 +1199,25 @@ ctl_isc_port_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
M_CTL);
i += msg->port.name_len;
if (msg->port.lun_map_len != 0) {
- if (port->lun_map == NULL)
- port->lun_map = malloc(sizeof(uint32_t) * CTL_MAX_LUNS,
+ if (port->lun_map == NULL ||
+ port->lun_map_size * sizeof(uint32_t) <
+ msg->port.lun_map_len) {
+ port->lun_map_size = 0;
+ free(port->lun_map, M_CTL);
+ port->lun_map = malloc(msg->port.lun_map_len,
M_CTL, M_WAITOK);
- memcpy(port->lun_map, &msg->port.data[i],
- sizeof(uint32_t) * CTL_MAX_LUNS);
+ }
+ memcpy(port->lun_map, &msg->port.data[i], msg->port.lun_map_len);
+ port->lun_map_size = msg->port.lun_map_len / sizeof(uint32_t);
i += msg->port.lun_map_len;
} else {
+ port->lun_map_size = 0;
free(port->lun_map, M_CTL);
port->lun_map = NULL;
}
if (msg->port.port_devid_len != 0) {
if (port->port_devid == NULL ||
- port->port_devid->len != msg->port.port_devid_len) {
+ port->port_devid->len < msg->port.port_devid_len) {
free(port->port_devid, M_CTL);
port->port_devid = malloc(sizeof(struct ctl_devid) +
msg->port.port_devid_len, M_CTL, M_WAITOK);
@@ -1184,7 +1232,7 @@ ctl_isc_port_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
}
if (msg->port.target_devid_len != 0) {
if (port->target_devid == NULL ||
- port->target_devid->len != msg->port.target_devid_len) {
+ port->target_devid->len < msg->port.target_devid_len) {
free(port->target_devid, M_CTL);
port->target_devid = malloc(sizeof(struct ctl_devid) +
msg->port.target_devid_len, M_CTL, M_WAITOK);
@@ -1199,7 +1247,7 @@ ctl_isc_port_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
}
if (msg->port.init_devid_len != 0) {
if (port->init_devid == NULL ||
- port->init_devid->len != msg->port.init_devid_len) {
+ port->init_devid->len < msg->port.init_devid_len) {
free(port->init_devid, M_CTL);
port->init_devid = malloc(sizeof(struct ctl_devid) +
msg->port.init_devid_len, M_CTL, M_WAITOK);
@@ -1220,7 +1268,7 @@ ctl_isc_port_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
}
mtx_lock(&softc->ctl_lock);
STAILQ_FOREACH(lun, &softc->lun_list, links) {
- if (ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS)
+ if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX)
continue;
mtx_lock(&lun->lun_lock);
ctl_est_ua_all(lun, -1, CTL_UA_INQ_CHANGE);
@@ -1291,8 +1339,8 @@ ctl_isc_mode_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
targ_lun = msg->hdr.nexus.targ_mapped_lun;
mtx_lock(&softc->ctl_lock);
- if ((targ_lun >= CTL_MAX_LUNS) ||
- ((lun = softc->ctl_luns[targ_lun]) == NULL)) {
+ if (targ_lun >= CTL_MAX_LUNS ||
+ (lun = softc->ctl_luns[targ_lun]) == NULL) {
mtx_unlock(&softc->ctl_lock);
return;
}
@@ -1490,13 +1538,12 @@ ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
io->io_hdr.msg_type = CTL_MSG_DATAMOVE_DONE;
io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG;
io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
- io->io_hdr.port_status = msg->scsi.fetd_status;
- io->scsiio.residual = msg->scsi.residual;
+ io->io_hdr.port_status = msg->scsi.port_status;
+ io->scsiio.kern_data_resid = msg->scsi.kern_data_resid;
if (msg->hdr.status != CTL_STATUS_NONE) {
io->io_hdr.status = msg->hdr.status;
io->scsiio.scsi_status = msg->scsi.scsi_status;
io->scsiio.sense_len = msg->scsi.sense_len;
- io->scsiio.sense_residual =msg->scsi.sense_residual;
memcpy(&io->scsiio.sense_data,
&msg->scsi.sense_data,
msg->scsi.sense_len);
@@ -1782,7 +1829,6 @@ ctl_init(void)
{
struct make_dev_args args;
struct ctl_softc *softc;
- void *other_pool;
int i, error;
softc = control_softc = malloc(sizeof(*control_softc), M_DEVBUF,
@@ -1796,7 +1842,8 @@ ctl_init(void)
args.mda_si_drv1 = softc;
error = make_dev_s(&args, &softc->dev, "cam/ctl");
if (error != 0) {
- free(control_softc, M_DEVBUF);
+ free(softc, M_DEVBUF);
+ control_softc = NULL;
return (error);
}
@@ -1808,7 +1855,7 @@ ctl_init(void)
if (softc->sysctl_tree == NULL) {
printf("%s: unable to allocate sysctl tree\n", __func__);
destroy_dev(softc->dev);
- free(control_softc, M_DEVBUF);
+ free(softc, M_DEVBUF);
control_softc = NULL;
return (ENOMEM);
}
@@ -1856,15 +1903,6 @@ ctl_init(void)
STAILQ_INIT(&softc->be_list);
ctl_tpc_init(softc);
- if (ctl_pool_create(softc, "othersc", CTL_POOL_ENTRIES_OTHER_SC,
- &other_pool) != 0)
- {
- printf("ctl: can't allocate %d entry other SC pool, "
- "exiting\n", CTL_POOL_ENTRIES_OTHER_SC);
- return (ENOMEM);
- }
- softc->othersc_pool = other_pool;
-
if (worker_threads <= 0)
worker_threads = max(1, mp_ncpus / 4);
if (worker_threads > CTL_MAX_THREADS)
@@ -1884,22 +1922,19 @@ ctl_init(void)
&softc->ctl_proc, &thr->thread, 0, 0, "ctl", "work%d", i);
if (error != 0) {
printf("error creating CTL work thread!\n");
- ctl_pool_free(other_pool);
return (error);
}
}
error = kproc_kthread_add(ctl_lun_thread, softc,
- &softc->ctl_proc, NULL, 0, 0, "ctl", "lun");
+ &softc->ctl_proc, &softc->lun_thread, 0, 0, "ctl", "lun");
if (error != 0) {
printf("error creating CTL lun thread!\n");
- ctl_pool_free(other_pool);
return (error);
}
error = kproc_kthread_add(ctl_thresh_thread, softc,
- &softc->ctl_proc, NULL, 0, 0, "ctl", "thresh");
+ &softc->ctl_proc, &softc->thresh_thread, 0, 0, "ctl", "thresh");
if (error != 0) {
printf("error creating CTL threshold thread!\n");
- ctl_pool_free(other_pool);
return (error);
}
@@ -1908,58 +1943,54 @@ ctl_init(void)
softc, 0, ctl_ha_role_sysctl, "I", "HA role for this head");
if (softc->is_single == 0) {
- ctl_frontend_register(&ha_frontend);
- if (ctl_ha_msg_init(softc) != CTL_HA_STATUS_SUCCESS) {
- printf("ctl_init: ctl_ha_msg_init failed.\n");
- softc->is_single = 1;
- } else
- if (ctl_ha_msg_register(CTL_HA_CHAN_CTL, ctl_isc_event_handler)
- != CTL_HA_STATUS_SUCCESS) {
- printf("ctl_init: ctl_ha_msg_register failed.\n");
+ if (ctl_frontend_register(&ha_frontend) != 0)
softc->is_single = 1;
- }
}
return (0);
}
-void
+static int
ctl_shutdown(void)
{
struct ctl_softc *softc = control_softc;
- struct ctl_lun *lun, *next_lun;
+ int i;
- if (softc->is_single == 0) {
- ctl_ha_msg_shutdown(softc);
- if (ctl_ha_msg_deregister(CTL_HA_CHAN_CTL)
- != CTL_HA_STATUS_SUCCESS)
- printf("%s: ctl_ha_msg_deregister failed.\n", __func__);
- if (ctl_ha_msg_destroy(softc) != CTL_HA_STATUS_SUCCESS)
- printf("%s: ctl_ha_msg_destroy failed.\n", __func__);
+ if (softc->is_single == 0)
ctl_frontend_deregister(&ha_frontend);
- }
- mtx_lock(&softc->ctl_lock);
-
- STAILQ_FOREACH_SAFE(lun, &softc->lun_list, links, next_lun)
- ctl_free_lun(lun);
-
- mtx_unlock(&softc->ctl_lock);
+ destroy_dev(softc->dev);
-#if 0
- ctl_shutdown_thread(softc->work_thread);
- mtx_destroy(&softc->queue_lock);
-#endif
+ /* Shutdown CTL threads. */
+ softc->shutdown = 1;
+ for (i = 0; i < worker_threads; i++) {
+ struct ctl_thread *thr = &softc->threads[i];
+ while (thr->thread != NULL) {
+ wakeup(thr);
+ if (thr->thread != NULL)
+ pause("CTL thr shutdown", 1);
+ }
+ mtx_destroy(&thr->queue_lock);
+ }
+ while (softc->lun_thread != NULL) {
+ wakeup(&softc->pending_lun_queue);
+ if (softc->lun_thread != NULL)
+ pause("CTL thr shutdown", 1);
+ }
+ while (softc->thresh_thread != NULL) {
+ wakeup(softc->thresh_thread);
+ if (softc->thresh_thread != NULL)
+ pause("CTL thr shutdown", 1);
+ }
ctl_tpc_shutdown(softc);
uma_zdestroy(softc->io_zone);
mtx_destroy(&softc->ctl_lock);
- destroy_dev(softc->dev);
-
sysctl_ctx_free(&softc->sysctl_ctx);
- free(control_softc, M_DEVBUF);
+ free(softc, M_DEVBUF);
control_softc = NULL;
+ return (0);
}
static int
@@ -1970,7 +2001,7 @@ ctl_module_event_handler(module_t mod, int what, void *arg)
case MOD_LOAD:
return (ctl_init());
case MOD_UNLOAD:
- return (EBUSY);
+ return (ctl_shutdown());
default:
return (EOPNOTSUPP);
}
@@ -2197,22 +2228,19 @@ ctl_create_iid(struct ctl_port *port, int iid, uint8_t *buf)
* command on this side (XFER mode) or tell the other side to execute it
* (SER_ONLY mode).
*/
-static int
+static void
ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio)
{
- struct ctl_softc *softc = control_softc;
+ struct ctl_softc *softc = CTL_SOFTC(ctsio);
+ struct ctl_port *port = CTL_PORT(ctsio);
union ctl_ha_msg msg_info;
- struct ctl_port *port;
struct ctl_lun *lun;
const struct ctl_cmd_entry *entry;
- int retval = 0;
uint32_t targ_lun;
targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun;
- mtx_lock(&softc->ctl_lock);
/* Make sure that we know about this port. */
- port = ctl_io_port(&ctsio->io_hdr);
if (port == NULL || (port->status & CTL_PORT_STATUS_ONLINE) == 0) {
ctl_set_internal_failure(ctsio, /*sks_valid*/ 0,
/*retry_count*/ 1);
@@ -2220,24 +2248,11 @@ ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio)
}
/* Make sure that we know about this LUN. */
- if ((targ_lun < CTL_MAX_LUNS) &&
- ((lun = softc->ctl_luns[targ_lun]) != NULL)) {
- mtx_lock(&lun->lun_lock);
- mtx_unlock(&softc->ctl_lock);
- /*
- * If the LUN is invalid, pretend that it doesn't exist.
- * It will go away as soon as all pending I/O has been
- * completed.
- */
- if (lun->flags & CTL_LUN_DISABLED) {
- mtx_unlock(&lun->lun_lock);
- lun = NULL;
- }
- } else {
+ mtx_lock(&softc->ctl_lock);
+ if (targ_lun >= CTL_MAX_LUNS ||
+ (lun = softc->ctl_luns[targ_lun]) == NULL) {
mtx_unlock(&softc->ctl_lock);
- lun = NULL;
- }
- if (lun == NULL) {
+
/*
* The other node would not send this request to us unless
* received announce that we are primary node for this LUN.
@@ -2247,6 +2262,18 @@ ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio)
ctl_set_busy(ctsio);
goto badjuju;
}
+ mtx_lock(&lun->lun_lock);
+ mtx_unlock(&softc->ctl_lock);
+
+ /*
+ * If the LUN is invalid, pretend that it doesn't exist.
+ * It will go away as soon as all pending I/Os completed.
+ */
+ if (lun->flags & CTL_LUN_DISABLED) {
+ mtx_unlock(&lun->lun_lock);
+ ctl_set_busy(ctsio);
+ goto badjuju;
+ }
entry = ctl_get_cmd_entry(ctsio, NULL);
if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) {
@@ -2254,8 +2281,8 @@ ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio)
goto badjuju;
}
- ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = lun;
- ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = lun->be_lun;
+ CTL_LUN(ctsio) = lun;
+ CTL_BACKEND_LUN(ctsio) = lun->be_lun;
/*
* Every I/O goes into the OOA queue for a
@@ -2318,10 +2345,9 @@ badjuju:
msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
sizeof(msg_info.scsi), M_WAITOK);
- retval = 1;
+ ctl_free_io((union ctl_io *)ctsio);
break;
}
- return (retval);
}
/*
@@ -2555,6 +2581,7 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
struct thread *td)
{
struct ctl_softc *softc = dev->si_drv1;
+ struct ctl_port *port;
struct ctl_lun *lun;
int retval;
@@ -2676,9 +2703,9 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
}
mtx_lock(&softc->ctl_lock);
- if (((ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) == 0)
- && ((ooa_hdr->lun_num >= CTL_MAX_LUNS)
- || (softc->ctl_luns[ooa_hdr->lun_num] == NULL))) {
+ if ((ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) == 0 &&
+ (ooa_hdr->lun_num >= CTL_MAX_LUNS ||
+ softc->ctl_luns[ooa_hdr->lun_num] == NULL)) {
mtx_unlock(&softc->ctl_lock);
free(entries, M_CTL);
printf("%s: CTL_GET_OOA: invalid LUN %ju\n",
@@ -2730,89 +2757,75 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
#ifdef CTL_IO_DELAY
mtx_lock(&softc->ctl_lock);
-
- if ((delay_info->lun_id >= CTL_MAX_LUNS)
- || (softc->ctl_luns[delay_info->lun_id] == NULL)) {
+ if (delay_info->lun_id >= CTL_MAX_LUNS ||
+ (lun = softc->ctl_luns[delay_info->lun_id]) == NULL) {
+ mtx_unlock(&softc->ctl_lock);
delay_info->status = CTL_DELAY_STATUS_INVALID_LUN;
- } else {
- lun = softc->ctl_luns[delay_info->lun_id];
- mtx_lock(&lun->lun_lock);
-
- delay_info->status = CTL_DELAY_STATUS_OK;
-
- switch (delay_info->delay_type) {
- case CTL_DELAY_TYPE_CONT:
- break;
- case CTL_DELAY_TYPE_ONESHOT:
- break;
- default:
- delay_info->status =
- CTL_DELAY_STATUS_INVALID_TYPE;
- break;
- }
-
- switch (delay_info->delay_loc) {
- case CTL_DELAY_LOC_DATAMOVE:
- lun->delay_info.datamove_type =
- delay_info->delay_type;
- lun->delay_info.datamove_delay =
- delay_info->delay_secs;
- break;
- case CTL_DELAY_LOC_DONE:
- lun->delay_info.done_type =
- delay_info->delay_type;
- lun->delay_info.done_delay =
- delay_info->delay_secs;
- break;
- default:
- delay_info->status =
- CTL_DELAY_STATUS_INVALID_LOC;
- break;
- }
- mtx_unlock(&lun->lun_lock);
+ break;
}
-
+ mtx_lock(&lun->lun_lock);
mtx_unlock(&softc->ctl_lock);
+ delay_info->status = CTL_DELAY_STATUS_OK;
+ switch (delay_info->delay_type) {
+ case CTL_DELAY_TYPE_CONT:
+ case CTL_DELAY_TYPE_ONESHOT:
+ break;
+ default:
+ delay_info->status = CTL_DELAY_STATUS_INVALID_TYPE;
+ break;
+ }
+ switch (delay_info->delay_loc) {
+ case CTL_DELAY_LOC_DATAMOVE:
+ lun->delay_info.datamove_type = delay_info->delay_type;
+ lun->delay_info.datamove_delay = delay_info->delay_secs;
+ break;
+ case CTL_DELAY_LOC_DONE:
+ lun->delay_info.done_type = delay_info->delay_type;
+ lun->delay_info.done_delay = delay_info->delay_secs;
+ break;
+ default:
+ delay_info->status = CTL_DELAY_STATUS_INVALID_LOC;
+ break;
+ }
+ mtx_unlock(&lun->lun_lock);
#else
delay_info->status = CTL_DELAY_STATUS_NOT_IMPLEMENTED;
#endif /* CTL_IO_DELAY */
break;
}
+#ifdef CTL_LEGACY_STATS
case CTL_GETSTATS: {
- struct ctl_stats *stats;
+ struct ctl_stats *stats = (struct ctl_stats *)addr;
int i;
- stats = (struct ctl_stats *)addr;
-
- if ((sizeof(struct ctl_lun_io_stats) * softc->num_luns) >
- stats->alloc_len) {
- stats->status = CTL_SS_NEED_MORE_SPACE;
- stats->num_luns = softc->num_luns;
- break;
- }
/*
* XXX KDM no locking here. If the LUN list changes,
* things can blow up.
*/
i = 0;
+ stats->status = CTL_SS_OK;
+ stats->fill_len = 0;
STAILQ_FOREACH(lun, &softc->lun_list, links) {
- retval = copyout(&lun->stats, &stats->lun_stats[i++],
- sizeof(lun->stats));
+ if (stats->fill_len + sizeof(lun->legacy_stats) >
+ stats->alloc_len) {
+ stats->status = CTL_SS_NEED_MORE_SPACE;
+ break;
+ }
+ retval = copyout(&lun->legacy_stats, &stats->lun_stats[i++],
+ sizeof(lun->legacy_stats));
if (retval != 0)
break;
+ stats->fill_len += sizeof(lun->legacy_stats);
}
stats->num_luns = softc->num_luns;
- stats->fill_len = sizeof(struct ctl_lun_io_stats) *
- softc->num_luns;
- stats->status = CTL_SS_OK;
-#ifdef CTL_TIME_IO
- stats->flags = CTL_STATS_FLAG_TIME_VALID;
-#else
stats->flags = CTL_STATS_FLAG_NONE;
+#ifdef CTL_TIME_IO
+ stats->flags |= CTL_STATS_FLAG_TIME_VALID;
#endif
getnanouptime(&stats->timestamp);
break;
}
+#endif /* CTL_LEGACY_STATS */
case CTL_ERROR_INJECT: {
struct ctl_error_desc *err_desc, *new_err_desc;
@@ -2823,8 +2836,8 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
bcopy(err_desc, new_err_desc, sizeof(*new_err_desc));
mtx_lock(&softc->ctl_lock);
- lun = softc->ctl_luns[err_desc->lun_id];
- if (lun == NULL) {
+ if (err_desc->lun_id >= CTL_MAX_LUNS ||
+ (lun = softc->ctl_luns[err_desc->lun_id]) == NULL) {
mtx_unlock(&softc->ctl_lock);
free(new_err_desc, M_CTL);
printf("%s: CTL_ERROR_INJECT: invalid LUN %ju\n",
@@ -2867,8 +2880,8 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
delete_done = 0;
mtx_lock(&softc->ctl_lock);
- lun = softc->ctl_luns[delete_desc->lun_id];
- if (lun == NULL) {
+ if (delete_desc->lun_id >= CTL_MAX_LUNS ||
+ (lun = softc->ctl_luns[delete_desc->lun_id]) == NULL) {
mtx_unlock(&softc->ctl_lock);
printf("%s: CTL_ERROR_INJECT_DELETE: invalid LUN %ju\n",
__func__, (uintmax_t)delete_desc->lun_id);
@@ -2897,18 +2910,18 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
break;
}
case CTL_DUMP_STRUCTS: {
- int i, j, k;
+ int j, k;
struct ctl_port *port;
struct ctl_frontend *fe;
mtx_lock(&softc->ctl_lock);
printf("CTL Persistent Reservation information start:\n");
- for (i = 0; i < CTL_MAX_LUNS; i++) {
- lun = softc->ctl_luns[i];
-
- if ((lun == NULL)
- || ((lun->flags & CTL_LUN_DISABLED) != 0))
+ STAILQ_FOREACH(lun, &softc->lun_list, links) {
+ mtx_lock(&lun->lun_lock);
+ if ((lun->flags & CTL_LUN_DISABLED) != 0) {
+ mtx_unlock(&lun->lun_lock);
continue;
+ }
for (j = 0; j < CTL_MAX_PORTS; j++) {
if (lun->pr_keys[j] == NULL)
@@ -2916,11 +2929,12 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
for (k = 0; k < CTL_MAX_INIT_PER_PORT; k++){
if (lun->pr_keys[j][k] == 0)
continue;
- printf(" LUN %d port %d iid %d key "
- "%#jx\n", i, j, k,
+ printf(" LUN %ju port %d iid %d key "
+ "%#jx\n", lun->lun, j, k,
(uintmax_t)lun->pr_keys[j][k]);
}
}
+ mtx_unlock(&lun->lun_lock);
}
printf("CTL Persistent Reservation information end\n");
printf("CTL Ports:\n");
@@ -3301,9 +3315,9 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
if (port->lun_map != NULL) {
sbuf_printf(sb, "\t<lun_map>on</lun_map>\n");
- for (j = 0; j < CTL_MAX_LUNS; j++) {
+ for (j = 0; j < port->lun_map_size; j++) {
plun = ctl_lun_map_from_port(port, j);
- if (plun >= CTL_MAX_LUNS)
+ if (plun == UINT32_MAX)
continue;
sbuf_printf(sb,
"\t<lun id=\"%u\">%u</lun>\n",
@@ -3371,8 +3385,8 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
}
if (port->status & CTL_PORT_STATUS_ONLINE) {
STAILQ_FOREACH(lun, &softc->lun_list, links) {
- if (ctl_lun_map_to_port(port, lun->lun) >=
- CTL_MAX_LUNS)
+ if (ctl_lun_map_to_port(port, lun->lun) ==
+ UINT32_MAX)
continue;
mtx_lock(&lun->lun_lock);
ctl_est_ua_port(lun, lm->port, -1,
@@ -3381,7 +3395,7 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
}
}
mtx_unlock(&softc->ctl_lock); // XXX: port_enable sleeps
- if (lm->plun < CTL_MAX_LUNS) {
+ if (lm->plun != UINT32_MAX) {
if (lm->lun == UINT32_MAX)
retval = ctl_lun_map_unset(port, lm->plun);
else if (lm->lun < CTL_MAX_LUNS &&
@@ -3389,17 +3403,82 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
retval = ctl_lun_map_set(port, lm->plun, lm->lun);
else
return (ENXIO);
- } else if (lm->plun == UINT32_MAX) {
+ } else {
if (lm->lun == UINT32_MAX)
retval = ctl_lun_map_deinit(port);
else
retval = ctl_lun_map_init(port);
- } else
- return (ENXIO);
+ }
if (port->status & CTL_PORT_STATUS_ONLINE)
ctl_isc_announce_port(port);
break;
}
+ case CTL_GET_LUN_STATS: {
+ struct ctl_get_io_stats *stats = (struct ctl_get_io_stats *)addr;
+ int i;
+
+ /*
+ * XXX KDM no locking here. If the LUN list changes,
+ * things can blow up.
+ */
+ i = 0;
+ stats->status = CTL_SS_OK;
+ stats->fill_len = 0;
+ STAILQ_FOREACH(lun, &softc->lun_list, links) {
+ if (lun->lun < stats->first_item)
+ continue;
+ if (stats->fill_len + sizeof(lun->stats) >
+ stats->alloc_len) {
+ stats->status = CTL_SS_NEED_MORE_SPACE;
+ break;
+ }
+ retval = copyout(&lun->stats, &stats->stats[i++],
+ sizeof(lun->stats));
+ if (retval != 0)
+ break;
+ stats->fill_len += sizeof(lun->stats);
+ }
+ stats->num_items = softc->num_luns;
+ stats->flags = CTL_STATS_FLAG_NONE;
+#ifdef CTL_TIME_IO
+ stats->flags |= CTL_STATS_FLAG_TIME_VALID;
+#endif
+ getnanouptime(&stats->timestamp);
+ break;
+ }
+ case CTL_GET_PORT_STATS: {
+ struct ctl_get_io_stats *stats = (struct ctl_get_io_stats *)addr;
+ int i;
+
+ /*
+ * XXX KDM no locking here. If the LUN list changes,
+ * things can blow up.
+ */
+ i = 0;
+ stats->status = CTL_SS_OK;
+ stats->fill_len = 0;
+ STAILQ_FOREACH(port, &softc->port_list, links) {
+ if (port->targ_port < stats->first_item)
+ continue;
+ if (stats->fill_len + sizeof(port->stats) >
+ stats->alloc_len) {
+ stats->status = CTL_SS_NEED_MORE_SPACE;
+ break;
+ }
+ retval = copyout(&port->stats, &stats->stats[i++],
+ sizeof(port->stats));
+ if (retval != 0)
+ break;
+ stats->fill_len += sizeof(port->stats);
+ }
+ stats->num_items = softc->num_ports;
+ stats->flags = CTL_STATS_FLAG_NONE;
+#ifdef CTL_TIME_IO
+ stats->flags |= CTL_STATS_FLAG_TIME_VALID;
+#endif
+ getnanouptime(&stats->timestamp);
+ break;
+ }
default: {
/* XXX KDM should we fix this? */
#if 0
@@ -3448,15 +3527,20 @@ ctl_lun_map_init(struct ctl_port *port)
{
struct ctl_softc *softc = port->ctl_softc;
struct ctl_lun *lun;
+ int size = ctl_lun_map_size;
uint32_t i;
- if (port->lun_map == NULL)
- port->lun_map = malloc(sizeof(uint32_t) * CTL_MAX_LUNS,
+ if (port->lun_map == NULL || port->lun_map_size < size) {
+ port->lun_map_size = 0;
+ free(port->lun_map, M_CTL);
+ port->lun_map = malloc(size * sizeof(uint32_t),
M_CTL, M_NOWAIT);
+ }
if (port->lun_map == NULL)
return (ENOMEM);
- for (i = 0; i < CTL_MAX_LUNS; i++)
+ for (i = 0; i < size; i++)
port->lun_map[i] = UINT32_MAX;
+ port->lun_map_size = size;
if (port->status & CTL_PORT_STATUS_ONLINE) {
if (port->lun_disable != NULL) {
STAILQ_FOREACH(lun, &softc->lun_list, links)
@@ -3475,6 +3559,7 @@ ctl_lun_map_deinit(struct ctl_port *port)
if (port->lun_map == NULL)
return (0);
+ port->lun_map_size = 0;
free(port->lun_map, M_CTL);
port->lun_map = NULL;
if (port->status & CTL_PORT_STATUS_ONLINE) {
@@ -3498,9 +3583,11 @@ ctl_lun_map_set(struct ctl_port *port, uint32_t plun, uint32_t glun)
if (status != 0)
return (status);
}
+ if (plun >= port->lun_map_size)
+ return (EINVAL);
old = port->lun_map[plun];
port->lun_map[plun] = glun;
- if ((port->status & CTL_PORT_STATUS_ONLINE) && old >= CTL_MAX_LUNS) {
+ if ((port->status & CTL_PORT_STATUS_ONLINE) && old == UINT32_MAX) {
if (port->lun_enable != NULL)
port->lun_enable(port->targ_lun_arg, plun);
ctl_isc_announce_port(port);
@@ -3513,11 +3600,11 @@ ctl_lun_map_unset(struct ctl_port *port, uint32_t plun)
{
uint32_t old;
- if (port->lun_map == NULL)
+ if (port->lun_map == NULL || plun >= port->lun_map_size)
return (0);
old = port->lun_map[plun];
port->lun_map[plun] = UINT32_MAX;
- if ((port->status & CTL_PORT_STATUS_ONLINE) && old < CTL_MAX_LUNS) {
+ if ((port->status & CTL_PORT_STATUS_ONLINE) && old != UINT32_MAX) {
if (port->lun_disable != NULL)
port->lun_disable(port->targ_lun_arg, plun);
ctl_isc_announce_port(port);
@@ -3531,8 +3618,10 @@ ctl_lun_map_from_port(struct ctl_port *port, uint32_t lun_id)
if (port == NULL)
return (UINT32_MAX);
- if (port->lun_map == NULL || lun_id >= CTL_MAX_LUNS)
+ if (port->lun_map == NULL)
return (lun_id);
+ if (lun_id > port->lun_map_size)
+ return (UINT32_MAX);
return (port->lun_map[lun_id]);
}
@@ -3545,7 +3634,7 @@ ctl_lun_map_to_port(struct ctl_port *port, uint32_t lun_id)
return (UINT32_MAX);
if (port->lun_map == NULL)
return (lun_id);
- for (i = 0; i < CTL_MAX_LUNS; i++) {
+ for (i = 0; i < port->lun_map_size; i++) {
if (port->lun_map[i] == lun_id)
return (i);
}
@@ -3613,13 +3702,6 @@ ctl_encode_lun(uint32_t decoded)
return ((((uint64_t)RPL_LUNDATA_ATYP_EXTLUN | 0x22) << 56) | (l << 16));
}
-static struct ctl_port *
-ctl_io_port(struct ctl_io_hdr *io_hdr)
-{
-
- return (control_softc->ctl_ports[io_hdr->nexus.targ_port]);
-}
-
int
ctl_ffz(uint32_t *mask, uint32_t first, uint32_t last)
{
@@ -3737,7 +3819,6 @@ int
ctl_pool_create(struct ctl_softc *ctl_softc, const char *pool_name,
uint32_t total_ctl_io, void **npool)
{
-#ifdef IO_POOLS
struct ctl_io_pool *pool;
pool = (struct ctl_io_pool *)malloc(sizeof(*pool), M_CTL,
@@ -3747,14 +3828,15 @@ ctl_pool_create(struct ctl_softc *ctl_softc, const char *pool_name,
snprintf(pool->name, sizeof(pool->name), "CTL IO %s", pool_name);
pool->ctl_softc = ctl_softc;
+#ifdef IO_POOLS
pool->zone = uma_zsecond_create(pool->name, NULL,
NULL, NULL, NULL, ctl_softc->io_zone);
/* uma_prealloc(pool->zone, total_ctl_io); */
-
- *npool = pool;
#else
- *npool = ctl_softc->io_zone;
+ pool->zone = ctl_softc->io_zone;
#endif
+
+ *npool = pool;
return (0);
}
@@ -3767,64 +3849,54 @@ ctl_pool_free(struct ctl_io_pool *pool)
#ifdef IO_POOLS
uma_zdestroy(pool->zone);
- free(pool, M_CTL);
#endif
+ free(pool, M_CTL);
}
union ctl_io *
ctl_alloc_io(void *pool_ref)
{
- union ctl_io *io;
-#ifdef IO_POOLS
struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref;
+ union ctl_io *io;
io = uma_zalloc(pool->zone, M_WAITOK);
-#else
- io = uma_zalloc((uma_zone_t)pool_ref, M_WAITOK);
-#endif
- if (io != NULL)
+ if (io != NULL) {
io->io_hdr.pool = pool_ref;
+ CTL_SOFTC(io) = pool->ctl_softc;
+ }
return (io);
}
union ctl_io *
ctl_alloc_io_nowait(void *pool_ref)
{
- union ctl_io *io;
-#ifdef IO_POOLS
struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref;
+ union ctl_io *io;
io = uma_zalloc(pool->zone, M_NOWAIT);
-#else
- io = uma_zalloc((uma_zone_t)pool_ref, M_NOWAIT);
-#endif
- if (io != NULL)
+ if (io != NULL) {
io->io_hdr.pool = pool_ref;
+ CTL_SOFTC(io) = pool->ctl_softc;
+ }
return (io);
}
void
ctl_free_io(union ctl_io *io)
{
-#ifdef IO_POOLS
struct ctl_io_pool *pool;
-#endif
if (io == NULL)
return;
-#ifdef IO_POOLS
pool = (struct ctl_io_pool *)io->io_hdr.pool;
uma_zfree(pool->zone, io);
-#else
- uma_zfree((uma_zone_t)io->io_hdr.pool, io);
-#endif
}
void
ctl_zero_io(union ctl_io *io)
{
- void *pool_ref;
+ struct ctl_io_pool *pool;
if (io == NULL)
return;
@@ -3832,9 +3904,10 @@ ctl_zero_io(union ctl_io *io)
/*
* May need to preserve linked list pointers at some point too.
*/
- pool_ref = io->io_hdr.pool;
+ pool = io->io_hdr.pool;
memset(io, 0, sizeof(*io));
- io->io_hdr.pool = pool_ref;
+ io->io_hdr.pool = pool;
+ CTL_SOFTC(io) = pool->ctl_softc;
}
int
@@ -4400,7 +4473,7 @@ ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun,
struct scsi_vpd_id_descriptor *desc;
struct scsi_vpd_id_t10 *t10id;
const char *eui, *naa, *scsiname, *uuid, *vendor, *value;
- int lun_number, i, lun_malloced;
+ int lun_number, lun_malloced;
int devidlen, idlen1, idlen2 = 0, len;
if (be_lun == NULL)
@@ -4534,6 +4607,8 @@ ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun,
printf("ctl: requested LUN ID %d is already "
"in use\n", be_lun->req_lun_id);
}
+fail:
+ free(lun->lun_devid, M_CTL);
if (lun->flags & CTL_LUN_MALLOCED)
free(lun, M_CTL);
be_lun->lun_config_status(be_lun->be_lun,
@@ -4546,14 +4621,11 @@ ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun,
if (lun_number == -1) {
mtx_unlock(&ctl_softc->ctl_lock);
printf("ctl: can't allocate LUN, out of LUNs\n");
- if (lun->flags & CTL_LUN_MALLOCED)
- free(lun, M_CTL);
- be_lun->lun_config_status(be_lun->be_lun,
- CTL_LUN_CONFIG_FAILURE);
- return (ENOSPC);
+ goto fail;
}
}
ctl_set_mask(ctl_softc->ctl_lun_mask, lun_number);
+ mtx_unlock(&ctl_softc->ctl_lock);
mtx_init(&lun->lun_lock, "CTL LUN", NULL, MTX_DEF);
lun->lun = lun_number;
@@ -4594,6 +4666,10 @@ ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun,
lun->ie_reported = 1;
callout_init_mtx(&lun->ie_callout, &lun->lun_lock, 0);
ctl_tpc_lun_init(lun);
+ if (lun->flags & CTL_LUN_REMOVABLE) {
+ lun->prevent = malloc((CTL_MAX_INITIATORS + 31) / 32 * 4,
+ M_CTL, M_WAITOK);
+ }
/*
* Initialize the mode and log page index.
@@ -4601,31 +4677,31 @@ ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun,
ctl_init_page_index(lun);
ctl_init_log_page_index(lun);
+ /* Setup statistics gathering */
+#ifdef CTL_LEGACY_STATS
+ lun->legacy_stats.device_type = be_lun->lun_type;
+ lun->legacy_stats.lun_number = lun_number;
+ lun->legacy_stats.blocksize = be_lun->blocksize;
+ if (be_lun->blocksize == 0)
+ lun->legacy_stats.flags = CTL_LUN_STATS_NO_BLOCKSIZE;
+ for (len = 0; len < CTL_MAX_PORTS; len++)
+ lun->legacy_stats.ports[len].targ_port = len;
+#endif /* CTL_LEGACY_STATS */
+ lun->stats.item = lun_number;
+
/*
* Now, before we insert this lun on the lun list, set the lun
* inventory changed UA for all other luns.
*/
+ mtx_lock(&ctl_softc->ctl_lock);
STAILQ_FOREACH(nlun, &ctl_softc->lun_list, links) {
mtx_lock(&nlun->lun_lock);
ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE);
mtx_unlock(&nlun->lun_lock);
}
-
STAILQ_INSERT_TAIL(&ctl_softc->lun_list, lun, links);
-
ctl_softc->ctl_luns[lun_number] = lun;
-
ctl_softc->num_luns++;
-
- /* Setup statistics gathering */
- lun->stats.device_type = be_lun->lun_type;
- lun->stats.lun_number = lun_number;
- lun->stats.blocksize = be_lun->blocksize;
- if (be_lun->blocksize == 0)
- lun->stats.flags = CTL_LUN_STATS_NO_BLOCKSIZE;
- for (i = 0;i < CTL_MAX_PORTS;i++)
- lun->stats.ports[i].targ_port = i;
-
mtx_unlock(&ctl_softc->ctl_lock);
lun->be_lun->lun_config_status(lun->be_lun->be_lun, CTL_LUN_CONFIG_OK);
@@ -4641,12 +4717,10 @@ ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun,
static int
ctl_free_lun(struct ctl_lun *lun)
{
- struct ctl_softc *softc;
+ struct ctl_softc *softc = lun->ctl_softc;
struct ctl_lun *nlun;
int i;
- softc = lun->ctl_softc;
-
mtx_assert(&softc->ctl_lock, MA_OWNED);
STAILQ_REMOVE(&softc->lun_list, lun, ctl_lun, links);
@@ -4677,6 +4751,7 @@ ctl_free_lun(struct ctl_lun *lun)
for (i = 0; i < CTL_MAX_PORTS; i++)
free(lun->pr_keys[i], M_CTL);
free(lun->write_buffer, M_CTL);
+ free(lun->prevent, M_CTL);
if (lun->flags & CTL_LUN_MALLOCED)
free(lun, M_CTL);
@@ -4992,18 +5067,13 @@ ctl_config_move_done(union ctl_io *io)
if ((io->io_hdr.port_status != 0) &&
((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
(io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
- /*
- * For hardware error sense keys, the sense key
- * specific value is defined to be a retry count,
- * but we use it to pass back an internal FETD
- * error code. XXX KDM Hopefully the FETD is only
- * using 16 bits for an error code, since that's
- * all the space we have in the sks field.
- */
- ctl_set_internal_failure(&io->scsiio,
- /*sks_valid*/ 1,
- /*retry_count*/
- io->io_hdr.port_status);
+ ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1,
+ /*retry_count*/ io->io_hdr.port_status);
+ } else if (io->scsiio.kern_data_resid != 0 &&
+ (io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT &&
+ ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
+ (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
+ ctl_set_invalid_field_ciu(&io->scsiio);
}
if (ctl_debug & CTL_DEBUG_CDB_DATA)
@@ -5147,13 +5217,12 @@ ctl_config_read_done(union ctl_io *io)
int
ctl_scsi_release(struct ctl_scsiio *ctsio)
{
- struct ctl_lun *lun;
+ struct ctl_lun *lun = CTL_LUN(ctsio);
uint32_t residx;
CTL_DEBUG_PRINT(("ctl_scsi_release\n"));
residx = ctl_get_initindex(&ctsio->io_hdr.nexus);
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
/*
* XXX KDM right now, we only support LUN reservation. We don't
@@ -5185,13 +5254,12 @@ ctl_scsi_release(struct ctl_scsiio *ctsio)
int
ctl_scsi_reserve(struct ctl_scsiio *ctsio)
{
- struct ctl_lun *lun;
+ struct ctl_lun *lun = CTL_LUN(ctsio);
uint32_t residx;
CTL_DEBUG_PRINT(("ctl_reserve\n"));
residx = ctl_get_initindex(&ctsio->io_hdr.nexus);
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
/*
* XXX KDM right now, we only support LUN reservation. We don't
@@ -5226,13 +5294,12 @@ bailout:
int
ctl_start_stop(struct ctl_scsiio *ctsio)
{
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_start_stop_unit *cdb;
- struct ctl_lun *lun;
int retval;
CTL_DEBUG_PRINT(("ctl_start_stop\n"));
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
cdb = (struct scsi_start_stop_unit *)ctsio->cdb;
if ((cdb->how & SSS_PC_MASK) == 0) {
@@ -5281,17 +5348,16 @@ ctl_start_stop(struct ctl_scsiio *ctsio)
int
ctl_prevent_allow(struct ctl_scsiio *ctsio)
{
- struct ctl_lun *lun;
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_prevent *cdb;
int retval;
uint32_t initidx;
CTL_DEBUG_PRINT(("ctl_prevent_allow\n"));
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
cdb = (struct scsi_prevent *)ctsio->cdb;
- if ((lun->flags & CTL_LUN_REMOVABLE) == 0) {
+ if ((lun->flags & CTL_LUN_REMOVABLE) == 0 || lun->prevent == NULL) {
ctl_set_invalid_opcode(ctsio);
ctl_done((union ctl_io *)ctsio);
return (CTL_RETVAL_COMPLETE);
@@ -5322,8 +5388,7 @@ ctl_prevent_allow(struct ctl_scsiio *ctsio)
int
ctl_sync_cache(struct ctl_scsiio *ctsio)
{
- struct ctl_lun *lun;
- struct ctl_softc *softc;
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct ctl_lba_len_flags *lbalen;
uint64_t starting_lba;
uint32_t block_count;
@@ -5332,8 +5397,6 @@ ctl_sync_cache(struct ctl_scsiio *ctsio)
CTL_DEBUG_PRINT(("ctl_sync_cache\n"));
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
- softc = lun->ctl_softc;
retval = 0;
switch (ctsio->cdb[0]) {
@@ -5389,13 +5452,10 @@ int
ctl_format(struct ctl_scsiio *ctsio)
{
struct scsi_format *cdb;
- struct ctl_lun *lun;
int length, defect_list_len;
CTL_DEBUG_PRINT(("ctl_format\n"));
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
cdb = (struct scsi_format *)ctsio->cdb;
length = 0;
@@ -5411,7 +5471,6 @@ ctl_format(struct ctl_scsiio *ctsio)
ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK);
ctsio->kern_data_len = length;
ctsio->kern_total_len = length;
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
@@ -5474,7 +5533,7 @@ bailout:
int
ctl_read_buffer(struct ctl_scsiio *ctsio)
{
- struct ctl_lun *lun;
+ struct ctl_lun *lun = CTL_LUN(ctsio);
uint64_t buffer_offset;
uint32_t len;
uint8_t byte2;
@@ -5482,7 +5541,7 @@ ctl_read_buffer(struct ctl_scsiio *ctsio)
static uint8_t echo_descr[4] = { 0 };
CTL_DEBUG_PRINT(("ctl_read_buffer\n"));
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+
switch (ctsio->cdb[0]) {
case READ_BUFFER: {
struct scsi_read_buffer *cdb;
@@ -5537,7 +5596,6 @@ ctl_read_buffer(struct ctl_scsiio *ctsio)
}
ctsio->kern_data_len = len;
ctsio->kern_total_len = len;
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
ctl_set_success(ctsio);
@@ -5549,13 +5607,12 @@ ctl_read_buffer(struct ctl_scsiio *ctsio)
int
ctl_write_buffer(struct ctl_scsiio *ctsio)
{
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_write_buffer *cdb;
- struct ctl_lun *lun;
int buffer_offset, len;
CTL_DEBUG_PRINT(("ctl_write_buffer\n"));
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
cdb = (struct scsi_write_buffer *)ctsio->cdb;
len = scsi_3btoul(cdb->length);
@@ -5584,7 +5641,6 @@ ctl_write_buffer(struct ctl_scsiio *ctsio)
ctsio->kern_data_ptr = lun->write_buffer + buffer_offset;
ctsio->kern_data_len = len;
ctsio->kern_total_len = len;
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
@@ -5602,7 +5658,7 @@ ctl_write_buffer(struct ctl_scsiio *ctsio)
int
ctl_write_same(struct ctl_scsiio *ctsio)
{
- struct ctl_lun *lun;
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct ctl_lba_len_flags *lbalen;
uint64_t lba;
uint32_t num_blocks;
@@ -5611,8 +5667,6 @@ ctl_write_same(struct ctl_scsiio *ctsio)
CTL_DEBUG_PRINT(("ctl_write_same\n"));
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
switch (ctsio->cdb[0]) {
case WRITE_SAME_10: {
struct scsi_write_same_10 *cdb;
@@ -5694,7 +5748,6 @@ ctl_write_same(struct ctl_scsiio *ctsio)
ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
ctsio->kern_data_len = len;
ctsio->kern_total_len = len;
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
@@ -5716,7 +5769,7 @@ ctl_write_same(struct ctl_scsiio *ctsio)
int
ctl_unmap(struct ctl_scsiio *ctsio)
{
- struct ctl_lun *lun;
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_unmap *cdb;
struct ctl_ptr_len_flags *ptrlen;
struct scsi_unmap_header *hdr;
@@ -5728,9 +5781,7 @@ ctl_unmap(struct ctl_scsiio *ctsio)
CTL_DEBUG_PRINT(("ctl_unmap\n"));
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
cdb = (struct scsi_unmap *)ctsio->cdb;
-
len = scsi_2btoul(cdb->length);
byte2 = cdb->byte2;
@@ -5742,7 +5793,6 @@ ctl_unmap(struct ctl_scsiio *ctsio)
ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
ctsio->kern_data_len = len;
ctsio->kern_total_len = len;
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
@@ -5820,24 +5870,20 @@ int
ctl_default_page_handler(struct ctl_scsiio *ctsio,
struct ctl_page_index *page_index, uint8_t *page_ptr)
{
- struct ctl_lun *lun;
- uint8_t *current_cp, *saved_cp;
+ struct ctl_lun *lun = CTL_LUN(ctsio);
+ uint8_t *current_cp;
int set_ua;
uint32_t initidx;
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
set_ua = 0;
current_cp = (page_index->page_data + (page_index->page_len *
CTL_PAGE_CURRENT));
- saved_cp = (page_index->page_data + (page_index->page_len *
- CTL_PAGE_SAVED));
mtx_lock(&lun->lun_lock);
if (memcmp(current_cp, page_ptr, page_index->page_len)) {
memcpy(current_cp, page_ptr, page_index->page_len);
- memcpy(saved_cp, page_ptr, page_index->page_len);
set_ua = 1;
}
if (set_ua != 0)
@@ -5878,13 +5924,12 @@ int
ctl_ie_page_handler(struct ctl_scsiio *ctsio,
struct ctl_page_index *page_index, uint8_t *page_ptr)
{
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_info_exceptions_page *pg;
- struct ctl_lun *lun;
uint64_t t;
(void)ctl_default_page_handler(ctsio, page_index, page_ptr);
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
pg = (struct scsi_info_exceptions_page *)page_ptr;
mtx_lock(&lun->lun_lock);
if (pg->info_flags & SIEP_FLAGS_TEST) {
@@ -5921,19 +5966,18 @@ ctl_ie_page_handler(struct ctl_scsiio *ctsio,
static int
ctl_do_mode_select(union ctl_io *io)
{
+ struct ctl_lun *lun = CTL_LUN(io);
struct scsi_mode_page_header *page_header;
struct ctl_page_index *page_index;
struct ctl_scsiio *ctsio;
int page_len, page_len_offset, page_len_size;
union ctl_modepage_info *modepage_info;
- struct ctl_lun *lun;
- int *len_left, *len_used;
+ uint16_t *len_left, *len_used;
int retval, i;
ctsio = &io->scsiio;
page_index = NULL;
page_len = 0;
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
modepage_info = (union ctl_modepage_info *)
ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes;
@@ -6146,10 +6190,12 @@ bailout_no_done:
int
ctl_mode_select(struct ctl_scsiio *ctsio)
{
- int param_len, pf, sp;
- int header_size, bd_len;
+ struct ctl_lun *lun = CTL_LUN(ctsio);
union ctl_modepage_info *modepage_info;
+ int bd_len, i, header_size, param_len, pf, rtd, sp;
+ uint32_t initidx;
+ initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
switch (ctsio->cdb[0]) {
case MODE_SELECT_6: {
struct scsi_mode_select_6 *cdb;
@@ -6157,6 +6203,7 @@ ctl_mode_select(struct ctl_scsiio *ctsio)
cdb = (struct scsi_mode_select_6 *)ctsio->cdb;
pf = (cdb->byte2 & SMS_PF) ? 1 : 0;
+ rtd = (cdb->byte2 & SMS_RTD) ? 1 : 0;
sp = (cdb->byte2 & SMS_SP) ? 1 : 0;
param_len = cdb->length;
header_size = sizeof(struct scsi_mode_header_6);
@@ -6168,6 +6215,7 @@ ctl_mode_select(struct ctl_scsiio *ctsio)
cdb = (struct scsi_mode_select_10 *)ctsio->cdb;
pf = (cdb->byte2 & SMS_PF) ? 1 : 0;
+ rtd = (cdb->byte2 & SMS_RTD) ? 1 : 0;
sp = (cdb->byte2 & SMS_SP) ? 1 : 0;
param_len = scsi_2btoul(cdb->length);
header_size = sizeof(struct scsi_mode_header_10);
@@ -6179,6 +6227,30 @@ ctl_mode_select(struct ctl_scsiio *ctsio)
return (CTL_RETVAL_COMPLETE);
}
+ if (rtd) {
+ if (param_len != 0) {
+ ctl_set_invalid_field(ctsio, /*sks_valid*/ 0,
+ /*command*/ 1, /*field*/ 0,
+ /*bit_valid*/ 0, /*bit*/ 0);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+
+ /* Revert to defaults. */
+ ctl_init_page_index(lun);
+ mtx_lock(&lun->lun_lock);
+ ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE);
+ mtx_unlock(&lun->lun_lock);
+ for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
+ ctl_isc_announce_mode(lun, -1,
+ lun->mode_pages.index[i].page_code & SMPH_PC_MASK,
+ lun->mode_pages.index[i].subpage);
+ }
+ ctl_set_success(ctsio);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+
/*
* From SPC-3:
* "A parameter list length of zero indicates that the Data-Out Buffer
@@ -6210,7 +6282,6 @@ ctl_mode_select(struct ctl_scsiio *ctsio)
ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK);
ctsio->kern_data_len = param_len;
ctsio->kern_total_len = param_len;
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
@@ -6267,7 +6338,7 @@ ctl_mode_select(struct ctl_scsiio *ctsio)
int
ctl_mode_sense(struct ctl_scsiio *ctsio)
{
- struct ctl_lun *lun;
+ struct ctl_lun *lun = CTL_LUN(ctsio);
int pc, page_code, dbd, llba, subpage;
int alloc_len, page_len, header_len, total_len;
struct scsi_mode_block_descr *block_desc;
@@ -6279,7 +6350,6 @@ ctl_mode_sense(struct ctl_scsiio *ctsio)
CTL_DEBUG_PRINT(("ctl_mode_sense\n"));
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
switch (ctsio->cdb[0]) {
case MODE_SENSE_6: {
struct scsi_mode_sense_6 *cdb;
@@ -6441,17 +6511,9 @@ ctl_mode_sense(struct ctl_scsiio *ctsio)
ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
ctsio->kern_sg_entries = 0;
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
- if (total_len < alloc_len) {
- ctsio->residual = alloc_len - total_len;
- ctsio->kern_data_len = total_len;
- ctsio->kern_total_len = total_len;
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
+ ctsio->kern_data_len = min(total_len, alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
switch (ctsio->cdb[0]) {
case MODE_SENSE_6: {
@@ -6615,12 +6677,11 @@ ctl_lbp_log_sense_handler(struct ctl_scsiio *ctsio,
struct ctl_page_index *page_index,
int pc)
{
- struct ctl_lun *lun;
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_log_param_header *phdr;
uint8_t *data;
uint64_t val;
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
data = page_index->page_data;
if (lun->backend->lun_attr != NULL &&
@@ -6684,41 +6745,31 @@ ctl_sap_log_sense_handler(struct ctl_scsiio *ctsio,
struct ctl_page_index *page_index,
int pc)
{
- struct ctl_lun *lun;
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct stat_page *data;
- uint64_t rn, wn, rb, wb;
- struct bintime rt, wt;
- int i;
+ struct bintime *t;
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
data = (struct stat_page *)page_index->page_data;
scsi_ulto2b(SLP_SAP, data->sap.hdr.param_code);
data->sap.hdr.param_control = SLP_LBIN;
data->sap.hdr.param_len = sizeof(struct scsi_log_stat_and_perf) -
sizeof(struct scsi_log_param_header);
- rn = wn = rb = wb = 0;
- bintime_clear(&rt);
- bintime_clear(&wt);
- for (i = 0; i < CTL_MAX_PORTS; i++) {
- rn += lun->stats.ports[i].operations[CTL_STATS_READ];
- wn += lun->stats.ports[i].operations[CTL_STATS_WRITE];
- rb += lun->stats.ports[i].bytes[CTL_STATS_READ];
- wb += lun->stats.ports[i].bytes[CTL_STATS_WRITE];
- bintime_add(&rt, &lun->stats.ports[i].time[CTL_STATS_READ]);
- bintime_add(&wt, &lun->stats.ports[i].time[CTL_STATS_WRITE]);
- }
- scsi_u64to8b(rn, data->sap.read_num);
- scsi_u64to8b(wn, data->sap.write_num);
- if (lun->stats.blocksize > 0) {
- scsi_u64to8b(wb / lun->stats.blocksize,
- data->sap.recvieved_lba);
- scsi_u64to8b(rb / lun->stats.blocksize,
- data->sap.transmitted_lba);
- }
- scsi_u64to8b((uint64_t)rt.sec * 1000 + rt.frac / (UINT64_MAX / 1000),
+ scsi_u64to8b(lun->stats.operations[CTL_STATS_READ],
+ data->sap.read_num);
+ scsi_u64to8b(lun->stats.operations[CTL_STATS_WRITE],
+ data->sap.write_num);
+ if (lun->be_lun->blocksize > 0) {
+ scsi_u64to8b(lun->stats.bytes[CTL_STATS_WRITE] /
+ lun->be_lun->blocksize, data->sap.recvieved_lba);
+ scsi_u64to8b(lun->stats.bytes[CTL_STATS_READ] /
+ lun->be_lun->blocksize, data->sap.transmitted_lba);
+ }
+ t = &lun->stats.time[CTL_STATS_READ];
+ scsi_u64to8b((uint64_t)t->sec * 1000 + t->frac / (UINT64_MAX / 1000),
data->sap.read_int);
- scsi_u64to8b((uint64_t)wt.sec * 1000 + wt.frac / (UINT64_MAX / 1000),
+ t = &lun->stats.time[CTL_STATS_WRITE];
+ scsi_u64to8b((uint64_t)t->sec * 1000 + t->frac / (UINT64_MAX / 1000),
data->sap.write_int);
scsi_u64to8b(0, data->sap.weighted_num);
scsi_u64to8b(0, data->sap.weighted_int);
@@ -6743,10 +6794,9 @@ ctl_ie_log_sense_handler(struct ctl_scsiio *ctsio,
struct ctl_page_index *page_index,
int pc)
{
- struct ctl_lun *lun;
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_log_informational_exceptions *data;
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
data = (struct scsi_log_informational_exceptions *)page_index->page_data;
scsi_ulto2b(SLP_IE_GEN, data->hdr.param_code);
@@ -6762,7 +6812,7 @@ ctl_ie_log_sense_handler(struct ctl_scsiio *ctsio,
int
ctl_log_sense(struct ctl_scsiio *ctsio)
{
- struct ctl_lun *lun;
+ struct ctl_lun *lun = CTL_LUN(ctsio);
int i, pc, page_code, subpage;
int alloc_len, total_len;
struct ctl_page_index *page_index;
@@ -6771,7 +6821,6 @@ ctl_log_sense(struct ctl_scsiio *ctsio)
CTL_DEBUG_PRINT(("ctl_log_sense\n"));
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
cdb = (struct scsi_log_sense *)ctsio->cdb;
pc = (cdb->page & SLS_PAGE_CTRL_MASK) >> 6;
page_code = cdb->page & SLS_PAGE_CODE;
@@ -6807,17 +6856,9 @@ ctl_log_sense(struct ctl_scsiio *ctsio)
ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
ctsio->kern_sg_entries = 0;
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
- if (total_len < alloc_len) {
- ctsio->residual = alloc_len - total_len;
- ctsio->kern_data_len = total_len;
- ctsio->kern_total_len = total_len;
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
+ ctsio->kern_data_len = min(total_len, alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
header = (struct scsi_log_header *)ctsio->kern_data_ptr;
header->page = page_index->page_code;
@@ -6848,9 +6889,9 @@ ctl_log_sense(struct ctl_scsiio *ctsio)
int
ctl_read_capacity(struct ctl_scsiio *ctsio)
{
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_read_capacity *cdb;
struct scsi_read_capacity_data *data;
- struct ctl_lun *lun;
uint32_t lba;
CTL_DEBUG_PRINT(("ctl_read_capacity\n"));
@@ -6870,14 +6911,10 @@ ctl_read_capacity(struct ctl_scsiio *ctsio)
return (CTL_RETVAL_COMPLETE);
}
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO);
data = (struct scsi_read_capacity_data *)ctsio->kern_data_ptr;
- ctsio->residual = 0;
ctsio->kern_data_len = sizeof(*data);
ctsio->kern_total_len = sizeof(*data);
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
@@ -6906,9 +6943,9 @@ ctl_read_capacity(struct ctl_scsiio *ctsio)
int
ctl_read_capacity_16(struct ctl_scsiio *ctsio)
{
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_read_capacity_16 *cdb;
struct scsi_read_capacity_data_long *data;
- struct ctl_lun *lun;
uint64_t lba;
uint32_t alloc_len;
@@ -6931,23 +6968,12 @@ ctl_read_capacity_16(struct ctl_scsiio *ctsio)
return (CTL_RETVAL_COMPLETE);
}
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO);
data = (struct scsi_read_capacity_data_long *)ctsio->kern_data_ptr;
-
- if (sizeof(*data) < alloc_len) {
- ctsio->residual = alloc_len - sizeof(*data);
- ctsio->kern_data_len = sizeof(*data);
- ctsio->kern_total_len = sizeof(*data);
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
+ ctsio->kern_data_len = min(sizeof(*data), alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
scsi_u64to8b(lun->be_lun->maxlba, data->addr);
/* XXX KDM this may not be 512 bytes... */
@@ -6967,9 +6993,9 @@ ctl_read_capacity_16(struct ctl_scsiio *ctsio)
int
ctl_get_lba_status(struct ctl_scsiio *ctsio)
{
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_get_lba_status *cdb;
struct scsi_get_lba_status_data *data;
- struct ctl_lun *lun;
struct ctl_lba_len_flags *lbalen;
uint64_t lba;
uint32_t alloc_len, total_len;
@@ -6977,7 +7003,6 @@ ctl_get_lba_status(struct ctl_scsiio *ctsio)
CTL_DEBUG_PRINT(("ctl_get_lba_status\n"));
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
cdb = (struct scsi_get_lba_status *)ctsio->cdb;
lba = scsi_8btou64(cdb->addr);
alloc_len = scsi_4btoul(cdb->alloc_len);
@@ -6991,19 +7016,10 @@ ctl_get_lba_status(struct ctl_scsiio *ctsio)
total_len = sizeof(*data) + sizeof(data->descr[0]);
ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
data = (struct scsi_get_lba_status_data *)ctsio->kern_data_ptr;
-
- if (total_len < alloc_len) {
- ctsio->residual = alloc_len - total_len;
- ctsio->kern_data_len = total_len;
- ctsio->kern_total_len = total_len;
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
+ ctsio->kern_data_len = min(total_len, alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
/* Fill dummy data in case backend can't tell anything. */
scsi_ulto4b(4 + sizeof(data->descr[0]), data->length);
@@ -7054,18 +7070,10 @@ ctl_read_defect(struct ctl_scsiio *ctsio)
}
ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
- if (data_len < alloc_len) {
- ctsio->residual = alloc_len - data_len;
- ctsio->kern_data_len = data_len;
- ctsio->kern_total_len = data_len;
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
+ ctsio->kern_data_len = min(data_len, alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
if (ctsio->cdb[0] == READ_DEFECT_DATA_10) {
data10 = (struct scsi_read_defect_data_hdr_10 *)
@@ -7090,12 +7098,12 @@ ctl_read_defect(struct ctl_scsiio *ctsio)
int
ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio)
{
+ struct ctl_softc *softc = CTL_SOFTC(ctsio);
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_maintenance_in *cdb;
int retval;
int alloc_len, ext, total_len = 0, g, pc, pg, ts, os;
int num_ha_groups, num_target_ports, shared_group;
- struct ctl_lun *lun;
- struct ctl_softc *softc;
struct ctl_port *port;
struct scsi_target_group_data *rtg_ptr;
struct scsi_target_group_data_extended *rtg_ext_ptr;
@@ -7104,9 +7112,6 @@ ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio)
CTL_DEBUG_PRINT(("ctl_report_tagret_port_groups\n"));
cdb = (struct scsi_maintenance_in *)ctsio->cdb;
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
- softc = lun->ctl_softc;
-
retval = CTL_RETVAL_COMPLETE;
switch (cdb->byte2 & STG_PDF_MASK) {
@@ -7133,7 +7138,7 @@ ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio)
STAILQ_FOREACH(port, &softc->port_list, links) {
if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
continue;
- if (ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS)
+ if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX)
continue;
num_target_ports++;
if (port->status & CTL_PORT_STATUS_HA_SHARED)
@@ -7153,20 +7158,10 @@ ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio)
alloc_len = scsi_4btoul(cdb->length);
ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
-
ctsio->kern_sg_entries = 0;
-
- if (total_len < alloc_len) {
- ctsio->residual = alloc_len - total_len;
- ctsio->kern_data_len = total_len;
- ctsio->kern_total_len = total_len;
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
+ ctsio->kern_data_len = min(total_len, alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
if (ext) {
rtg_ext_ptr = (struct scsi_target_group_data_extended *)
@@ -7225,7 +7220,7 @@ ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio)
if (!softc->is_single &&
(port->status & CTL_PORT_STATUS_HA_SHARED) == 0)
continue;
- if (ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS)
+ if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX)
continue;
scsi_ulto2b(port->targ_port, tpg_desc->descriptors[pc].
relative_target_port_identifier);
@@ -7250,7 +7245,7 @@ ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio)
continue;
if (port->status & CTL_PORT_STATUS_HA_SHARED)
continue;
- if (ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS)
+ if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX)
continue;
scsi_ulto2b(port->targ_port, tpg_desc->descriptors[pc].
relative_target_port_identifier);
@@ -7272,7 +7267,7 @@ ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio)
int
ctl_report_supported_opcodes(struct ctl_scsiio *ctsio)
{
- struct ctl_lun *lun;
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_report_supported_opcodes *cdb;
const struct ctl_cmd_entry *entry, *sentry;
struct scsi_report_supported_opcodes_all *all;
@@ -7285,8 +7280,6 @@ ctl_report_supported_opcodes(struct ctl_scsiio *ctsio)
CTL_DEBUG_PRINT(("ctl_report_supported_opcodes\n"));
cdb = (struct scsi_report_supported_opcodes *)ctsio->cdb;
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
retval = CTL_RETVAL_COMPLETE;
opcode = cdb->requested_opcode;
@@ -7356,20 +7349,10 @@ ctl_report_supported_opcodes(struct ctl_scsiio *ctsio)
alloc_len = scsi_4btoul(cdb->length);
ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
-
ctsio->kern_sg_entries = 0;
-
- if (total_len < alloc_len) {
- ctsio->residual = alloc_len - total_len;
- ctsio->kern_data_len = total_len;
- ctsio->kern_total_len = total_len;
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
+ ctsio->kern_data_len = min(total_len, alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
switch (cdb->options & RSO_OPTIONS_MASK) {
case RSO_OPTIONS_ALL:
@@ -7470,20 +7453,10 @@ ctl_report_supported_tmf(struct ctl_scsiio *ctsio)
alloc_len = scsi_4btoul(cdb->length);
ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
-
ctsio->kern_sg_entries = 0;
-
- if (total_len < alloc_len) {
- ctsio->residual = alloc_len - total_len;
- ctsio->kern_data_len = total_len;
- ctsio->kern_total_len = total_len;
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
+ ctsio->kern_data_len = min(total_len, alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
data = (struct scsi_report_supported_tmf_ext_data *)ctsio->kern_data_ptr;
data->byte1 |= RST_ATS | RST_ATSS | RST_CTSS | RST_LURS | RST_QTS |
@@ -7518,20 +7491,10 @@ ctl_report_timestamp(struct ctl_scsiio *ctsio)
alloc_len = scsi_4btoul(cdb->length);
ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
-
ctsio->kern_sg_entries = 0;
-
- if (total_len < alloc_len) {
- ctsio->residual = alloc_len - total_len;
- ctsio->kern_data_len = total_len;
- ctsio->kern_total_len = total_len;
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
+ ctsio->kern_data_len = min(total_len, alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
data = (struct scsi_report_timestamp_data *)ctsio->kern_data_ptr;
scsi_ulto2b(sizeof(*data) - 2, data->length);
@@ -7551,11 +7514,11 @@ ctl_report_timestamp(struct ctl_scsiio *ctsio)
int
ctl_persistent_reserve_in(struct ctl_scsiio *ctsio)
{
+ struct ctl_softc *softc = CTL_SOFTC(ctsio);
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_per_res_in *cdb;
int alloc_len, total_len = 0;
/* struct scsi_per_res_in_rsrv in_data; */
- struct ctl_lun *lun;
- struct ctl_softc *softc;
uint64_t key;
CTL_DEBUG_PRINT(("ctl_persistent_reserve_in\n"));
@@ -7564,9 +7527,6 @@ ctl_persistent_reserve_in(struct ctl_scsiio *ctsio)
alloc_len = scsi_2btoul(cdb->length);
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
- softc = lun->ctl_softc;
-
retry:
mtx_lock(&lun->lun_lock);
switch (cdb->action) {
@@ -7595,20 +7555,10 @@ retry:
mtx_unlock(&lun->lun_lock);
ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
-
- if (total_len < alloc_len) {
- ctsio->residual = alloc_len - total_len;
- ctsio->kern_data_len = total_len;
- ctsio->kern_total_len = total_len;
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
-
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
+ ctsio->kern_data_len = min(total_len, alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
mtx_lock(&lun->lun_lock);
switch (cdb->action) {
@@ -8129,12 +8079,12 @@ ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg)
int
ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
{
+ struct ctl_softc *softc = CTL_SOFTC(ctsio);
+ struct ctl_lun *lun = CTL_LUN(ctsio);
int retval;
u_int32_t param_len;
struct scsi_per_res_out *cdb;
- struct ctl_lun *lun;
struct scsi_per_res_out_parms* param;
- struct ctl_softc *softc;
uint32_t residx;
uint64_t res_key, sa_res_key, key;
uint8_t type;
@@ -8143,11 +8093,8 @@ ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
CTL_DEBUG_PRINT(("ctl_persistent_reserve_out\n"));
- retval = CTL_RETVAL_COMPLETE;
-
cdb = (struct scsi_per_res_out *)ctsio->cdb;
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
- softc = lun->ctl_softc;
+ retval = CTL_RETVAL_COMPLETE;
/*
* We only support whole-LUN scope. The scope & type are ignored for
@@ -8187,7 +8134,6 @@ ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK);
ctsio->kern_data_len = param_len;
ctsio->kern_total_len = param_len;
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
@@ -8521,17 +8467,18 @@ done:
* in sync.
*/
static void
-ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg)
+ctl_hndl_per_res_out_on_other_sc(union ctl_io *io)
{
- struct ctl_softc *softc = control_softc;
+ struct ctl_softc *softc = CTL_SOFTC(io);
+ union ctl_ha_msg *msg = (union ctl_ha_msg *)&io->presio.pr_msg;
struct ctl_lun *lun;
int i;
uint32_t residx, targ_lun;
targ_lun = msg->hdr.nexus.targ_mapped_lun;
mtx_lock(&softc->ctl_lock);
- if ((targ_lun >= CTL_MAX_LUNS) ||
- ((lun = softc->ctl_luns[targ_lun]) == NULL)) {
+ if (targ_lun >= CTL_MAX_LUNS ||
+ (lun = softc->ctl_luns[targ_lun]) == NULL) {
mtx_unlock(&softc->ctl_lock);
return;
}
@@ -8642,15 +8589,13 @@ ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg)
int
ctl_read_write(struct ctl_scsiio *ctsio)
{
- struct ctl_lun *lun;
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct ctl_lba_len_flags *lbalen;
uint64_t lba;
uint32_t num_blocks;
int flags, retval;
int isread;
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
CTL_DEBUG_PRINT(("ctl_read_write: command: %#x\n", ctsio->cdb[0]));
flags = 0;
@@ -8835,15 +8780,14 @@ ctl_read_write(struct ctl_scsiio *ctsio)
static int
ctl_cnw_cont(union ctl_io *io)
{
+ struct ctl_lun *lun = CTL_LUN(io);
struct ctl_scsiio *ctsio;
- struct ctl_lun *lun;
struct ctl_lba_len_flags *lbalen;
int retval;
ctsio = &io->scsiio;
ctsio->io_hdr.status = CTL_STATUS_NONE;
ctsio->io_hdr.flags &= ~CTL_FLAG_IO_CONT;
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
lbalen = (struct ctl_lba_len_flags *)
&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
lbalen->flags &= ~CTL_LLF_COMPARE;
@@ -8857,14 +8801,12 @@ ctl_cnw_cont(union ctl_io *io)
int
ctl_cnw(struct ctl_scsiio *ctsio)
{
- struct ctl_lun *lun;
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct ctl_lba_len_flags *lbalen;
uint64_t lba;
uint32_t num_blocks;
int flags, retval;
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
CTL_DEBUG_PRINT(("ctl_cnw: command: %#x\n", ctsio->cdb[0]));
flags = 0;
@@ -8945,15 +8887,13 @@ ctl_cnw(struct ctl_scsiio *ctsio)
int
ctl_verify(struct ctl_scsiio *ctsio)
{
- struct ctl_lun *lun;
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct ctl_lba_len_flags *lbalen;
uint64_t lba;
uint32_t num_blocks;
int bytchk, flags;
int retval;
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
CTL_DEBUG_PRINT(("ctl_verify: command: %#x\n", ctsio->cdb[0]));
bytchk = 0;
@@ -9049,27 +8989,25 @@ ctl_verify(struct ctl_scsiio *ctsio)
int
ctl_report_luns(struct ctl_scsiio *ctsio)
{
- struct ctl_softc *softc;
+ struct ctl_softc *softc = CTL_SOFTC(ctsio);
+ struct ctl_port *port = CTL_PORT(ctsio);
+ struct ctl_lun *lun, *request_lun = CTL_LUN(ctsio);
struct scsi_report_luns *cdb;
struct scsi_report_luns_data *lun_data;
- struct ctl_lun *lun, *request_lun;
- struct ctl_port *port;
- int num_luns, retval;
+ int num_filled, num_luns, num_port_luns, retval;
uint32_t alloc_len, lun_datalen;
- int num_filled;
uint32_t initidx, targ_lun_id, lun_id;
retval = CTL_RETVAL_COMPLETE;
cdb = (struct scsi_report_luns *)ctsio->cdb;
- port = ctl_io_port(&ctsio->io_hdr);
- softc = port->ctl_softc;
CTL_DEBUG_PRINT(("ctl_report_luns\n"));
- mtx_lock(&softc->ctl_lock);
num_luns = 0;
- for (targ_lun_id = 0; targ_lun_id < CTL_MAX_LUNS; targ_lun_id++) {
- if (ctl_lun_map_from_port(port, targ_lun_id) < CTL_MAX_LUNS)
+ num_port_luns = port->lun_map ? port->lun_map_size : CTL_MAX_LUNS;
+ mtx_lock(&softc->ctl_lock);
+ for (targ_lun_id = 0; targ_lun_id < num_port_luns; targ_lun_id++) {
+ if (ctl_lun_map_from_port(port, targ_lun_id) != UINT32_MAX)
num_luns++;
}
mtx_unlock(&softc->ctl_lock);
@@ -9114,9 +9052,6 @@ ctl_report_luns(struct ctl_scsiio *ctsio)
return (retval);
}
- request_lun = (struct ctl_lun *)
- ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
lun_datalen = sizeof(*lun_data) +
(num_luns * sizeof(struct scsi_report_luns_lundata));
@@ -9127,9 +9062,11 @@ ctl_report_luns(struct ctl_scsiio *ctsio)
initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
mtx_lock(&softc->ctl_lock);
- for (targ_lun_id = 0, num_filled = 0; targ_lun_id < CTL_MAX_LUNS && num_filled < num_luns; targ_lun_id++) {
+ for (targ_lun_id = 0, num_filled = 0;
+ targ_lun_id < num_port_luns && num_filled < num_luns;
+ targ_lun_id++) {
lun_id = ctl_lun_map_from_port(port, targ_lun_id);
- if (lun_id >= CTL_MAX_LUNS)
+ if (lun_id == UINT32_MAX)
continue;
lun = softc->ctl_luns[lun_id];
if (lun == NULL)
@@ -9168,19 +9105,10 @@ ctl_report_luns(struct ctl_scsiio *ctsio)
*/
lun_datalen = sizeof(*lun_data) +
(num_filled * sizeof(struct scsi_report_luns_lundata));
-
- if (lun_datalen < alloc_len) {
- ctsio->residual = alloc_len - lun_datalen;
- ctsio->kern_data_len = lun_datalen;
- ctsio->kern_total_len = lun_datalen;
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
+ ctsio->kern_data_len = min(lun_datalen, alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
/*
* We set this to the actual data length, regardless of how much
@@ -9205,10 +9133,10 @@ ctl_report_luns(struct ctl_scsiio *ctsio)
int
ctl_request_sense(struct ctl_scsiio *ctsio)
{
+ struct ctl_softc *softc = CTL_SOFTC(ctsio);
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_request_sense *cdb;
struct scsi_sense_data *sense_ptr;
- struct ctl_softc *softc;
- struct ctl_lun *lun;
uint32_t initidx;
int have_error;
u_int sense_len = SSD_FULL_SIZE;
@@ -9218,9 +9146,6 @@ ctl_request_sense(struct ctl_scsiio *ctsio)
cdb = (struct scsi_request_sense *)ctsio->cdb;
- softc = control_softc;
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
CTL_DEBUG_PRINT(("ctl_request_sense\n"));
/*
@@ -9234,20 +9159,16 @@ ctl_request_sense(struct ctl_scsiio *ctsio)
ctsio->kern_data_ptr = malloc(sizeof(*sense_ptr), M_CTL, M_WAITOK);
sense_ptr = (struct scsi_sense_data *)ctsio->kern_data_ptr;
ctsio->kern_sg_entries = 0;
+ ctsio->kern_rel_offset = 0;
/*
* struct scsi_sense_data, which is currently set to 256 bytes, is
* larger than the largest allowed value for the length field in the
* REQUEST SENSE CDB, which is 252 bytes as of SPC-4.
*/
- ctsio->residual = 0;
ctsio->kern_data_len = cdb->length;
ctsio->kern_total_len = cdb->length;
- ctsio->kern_data_resid = 0;
- ctsio->kern_rel_offset = 0;
- ctsio->kern_sg_entries = 0;
-
/*
* If we don't have a LUN, we don't have any pending sense.
*/
@@ -9363,31 +9284,19 @@ ctl_tur(struct ctl_scsiio *ctsio)
static int
ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len)
{
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_vpd_supported_pages *pages;
int sup_page_size;
- struct ctl_lun *lun;
int p;
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
sup_page_size = sizeof(struct scsi_vpd_supported_pages) *
SCSI_EVPD_NUM_SUPPORTED_PAGES;
ctsio->kern_data_ptr = malloc(sup_page_size, M_CTL, M_WAITOK | M_ZERO);
pages = (struct scsi_vpd_supported_pages *)ctsio->kern_data_ptr;
- ctsio->kern_sg_entries = 0;
-
- if (sup_page_size < alloc_len) {
- ctsio->residual = alloc_len - sup_page_size;
- ctsio->kern_data_len = sup_page_size;
- ctsio->kern_total_len = sup_page_size;
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
+ ctsio->kern_data_len = min(sup_page_size, alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
/*
* The control device is always connected. The disk device, on the
@@ -9438,27 +9347,17 @@ ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len)
static int
ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len)
{
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_vpd_unit_serial_number *sn_ptr;
- struct ctl_lun *lun;
int data_len;
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
data_len = 4 + CTL_SN_LEN;
ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
sn_ptr = (struct scsi_vpd_unit_serial_number *)ctsio->kern_data_ptr;
- if (data_len < alloc_len) {
- ctsio->residual = alloc_len - data_len;
- ctsio->kern_data_len = data_len;
- ctsio->kern_total_len = data_len;
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
+ ctsio->kern_data_len = min(data_len, alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
/*
* The control device is always connected. The disk device, on the
@@ -9497,29 +9396,17 @@ ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len)
static int
ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len)
{
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_vpd_extended_inquiry_data *eid_ptr;
- struct ctl_lun *lun;
int data_len;
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
data_len = sizeof(struct scsi_vpd_extended_inquiry_data);
ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
eid_ptr = (struct scsi_vpd_extended_inquiry_data *)ctsio->kern_data_ptr;
ctsio->kern_sg_entries = 0;
-
- if (data_len < alloc_len) {
- ctsio->residual = alloc_len - data_len;
- ctsio->kern_data_len = data_len;
- ctsio->kern_total_len = data_len;
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
- ctsio->kern_sg_entries = 0;
+ ctsio->kern_data_len = min(data_len, alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
/*
* The control device is always connected. The disk device, on the
@@ -9549,6 +9436,11 @@ ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len)
eid_ptr->flags4 = SVPD_EID_LUICLR;
/*
+ * We support revert to defaults (RTD) bit in MODE SELECT.
+ */
+ eid_ptr->flags5 = SVPD_EID_RTD_SUP;
+
+ /*
* XXX KDM in order to correctly answer this, we would need
* information from the SIM to determine how much sense data it
* can send. So this would really be a path inquiry field, most
@@ -9568,31 +9460,19 @@ ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len)
static int
ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len)
{
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_vpd_mode_page_policy *mpp_ptr;
- struct ctl_lun *lun;
int data_len;
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
data_len = sizeof(struct scsi_vpd_mode_page_policy) +
sizeof(struct scsi_vpd_mode_page_policy_descr);
ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
mpp_ptr = (struct scsi_vpd_mode_page_policy *)ctsio->kern_data_ptr;
- ctsio->kern_sg_entries = 0;
-
- if (data_len < alloc_len) {
- ctsio->residual = alloc_len - data_len;
- ctsio->kern_data_len = data_len;
- ctsio->kern_total_len = data_len;
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
+ ctsio->kern_data_len = min(data_len, alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
/*
* The control device is always connected. The disk device, on the
@@ -9622,19 +9502,14 @@ ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len)
static int
ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len)
{
+ struct ctl_softc *softc = CTL_SOFTC(ctsio);
+ struct ctl_port *port = CTL_PORT(ctsio);
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_vpd_device_id *devid_ptr;
struct scsi_vpd_id_descriptor *desc;
- struct ctl_softc *softc;
- struct ctl_lun *lun;
- struct ctl_port *port;
int data_len, g;
uint8_t proto;
- softc = control_softc;
-
- port = ctl_io_port(&ctsio->io_hdr);
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
data_len = sizeof(struct scsi_vpd_device_id) +
sizeof(struct scsi_vpd_id_descriptor) +
sizeof(struct scsi_vpd_id_rel_trgt_port_id) +
@@ -9650,19 +9525,10 @@ ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len)
ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
devid_ptr = (struct scsi_vpd_device_id *)ctsio->kern_data_ptr;
ctsio->kern_sg_entries = 0;
-
- if (data_len < alloc_len) {
- ctsio->residual = alloc_len - data_len;
- ctsio->kern_data_len = data_len;
- ctsio->kern_total_len = data_len;
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
+ ctsio->kern_data_len = min(data_len, alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
/*
* The control device is always connected. The disk device, on the
@@ -9747,16 +9613,14 @@ ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len)
static int
ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len)
{
- struct ctl_softc *softc = control_softc;
+ struct ctl_softc *softc = CTL_SOFTC(ctsio);
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_vpd_scsi_ports *sp;
struct scsi_vpd_port_designation *pd;
struct scsi_vpd_port_designation_cont *pdc;
- struct ctl_lun *lun;
struct ctl_port *port;
int data_len, num_target_ports, iid_len, id_len;
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
num_target_ports = 0;
iid_len = 0;
id_len = 0;
@@ -9765,7 +9629,7 @@ ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len)
if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
continue;
if (lun != NULL &&
- ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS)
+ ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX)
continue;
num_target_ports++;
if (port->init_devid)
@@ -9781,19 +9645,10 @@ ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len)
ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
sp = (struct scsi_vpd_scsi_ports *)ctsio->kern_data_ptr;
ctsio->kern_sg_entries = 0;
-
- if (data_len < alloc_len) {
- ctsio->residual = alloc_len - data_len;
- ctsio->kern_data_len = data_len;
- ctsio->kern_total_len = data_len;
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
+ ctsio->kern_data_len = min(data_len, alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
/*
* The control device is always connected. The disk device, on the
@@ -9816,7 +9671,7 @@ ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len)
if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
continue;
if (lun != NULL &&
- ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS)
+ ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX)
continue;
scsi_ulto2b(port->targ_port, pd->relative_port_id);
if (port->init_devid) {
@@ -9850,28 +9705,17 @@ ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len)
static int
ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, int alloc_len)
{
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_vpd_block_limits *bl_ptr;
- struct ctl_lun *lun;
uint64_t ival;
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
ctsio->kern_data_ptr = malloc(sizeof(*bl_ptr), M_CTL, M_WAITOK | M_ZERO);
bl_ptr = (struct scsi_vpd_block_limits *)ctsio->kern_data_ptr;
ctsio->kern_sg_entries = 0;
-
- if (sizeof(*bl_ptr) < alloc_len) {
- ctsio->residual = alloc_len - sizeof(*bl_ptr);
- ctsio->kern_data_len = sizeof(*bl_ptr);
- ctsio->kern_total_len = sizeof(*bl_ptr);
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
+ ctsio->kern_data_len = min(sizeof(*bl_ptr), alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
/*
* The control device is always connected. The disk device, on the
@@ -9927,29 +9771,17 @@ ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, int alloc_len)
static int
ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len)
{
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_vpd_block_device_characteristics *bdc_ptr;
- struct ctl_lun *lun;
const char *value;
u_int i;
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
ctsio->kern_data_ptr = malloc(sizeof(*bdc_ptr), M_CTL, M_WAITOK | M_ZERO);
bdc_ptr = (struct scsi_vpd_block_device_characteristics *)ctsio->kern_data_ptr;
ctsio->kern_sg_entries = 0;
-
- if (sizeof(*bdc_ptr) < alloc_len) {
- ctsio->residual = alloc_len - sizeof(*bdc_ptr);
- ctsio->kern_data_len = sizeof(*bdc_ptr);
- ctsio->kern_total_len = sizeof(*bdc_ptr);
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
- ctsio->kern_sg_entries = 0;
+ ctsio->kern_data_len = min(sizeof(*bdc_ptr), alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
/*
* The control device is always connected. The disk device, on the
@@ -9987,28 +9819,16 @@ ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len)
static int
ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len)
{
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_vpd_logical_block_prov *lbp_ptr;
- struct ctl_lun *lun;
const char *value;
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
ctsio->kern_data_ptr = malloc(sizeof(*lbp_ptr), M_CTL, M_WAITOK | M_ZERO);
lbp_ptr = (struct scsi_vpd_logical_block_prov *)ctsio->kern_data_ptr;
ctsio->kern_sg_entries = 0;
-
- if (sizeof(*lbp_ptr) < alloc_len) {
- ctsio->residual = alloc_len - sizeof(*lbp_ptr);
- ctsio->kern_data_len = sizeof(*lbp_ptr);
- ctsio->kern_total_len = sizeof(*lbp_ptr);
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
- ctsio->kern_sg_entries = 0;
+ ctsio->kern_data_len = min(sizeof(*lbp_ptr), alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
/*
* The control device is always connected. The disk device, on the
@@ -10050,11 +9870,10 @@ ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len)
static int
ctl_inquiry_evpd(struct ctl_scsiio *ctsio)
{
- struct ctl_lun *lun;
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_inquiry *cdb;
int alloc_len, retval;
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
cdb = (struct scsi_inquiry *)ctsio->cdb;
alloc_len = scsi_2btoul(cdb->length);
@@ -10117,21 +9936,19 @@ err:
static int
ctl_inquiry_std(struct ctl_scsiio *ctsio)
{
+ struct ctl_softc *softc = CTL_SOFTC(ctsio);
+ struct ctl_port *port = CTL_PORT(ctsio);
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_inquiry_data *inq_ptr;
struct scsi_inquiry *cdb;
- struct ctl_softc *softc = control_softc;
- struct ctl_port *port;
- struct ctl_lun *lun;
char *val;
uint32_t alloc_len, data_len;
ctl_port_type port_type;
- port = ctl_io_port(&ctsio->io_hdr);
port_type = port->port_type;
if (port_type == CTL_PORT_IOCTL || port_type == CTL_PORT_INTERNAL)
port_type = CTL_PORT_SCSI;
- lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
cdb = (struct scsi_inquiry *)ctsio->cdb;
alloc_len = scsi_2btoul(cdb->length);
@@ -10144,18 +9961,9 @@ ctl_inquiry_std(struct ctl_scsiio *ctsio)
ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
inq_ptr = (struct scsi_inquiry_data *)ctsio->kern_data_ptr;
ctsio->kern_sg_entries = 0;
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
-
- if (data_len < alloc_len) {
- ctsio->residual = alloc_len - data_len;
- ctsio->kern_data_len = data_len;
- ctsio->kern_total_len = data_len;
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
+ ctsio->kern_data_len = min(data_len, alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
if (lun != NULL) {
if ((lun->flags & CTL_LUN_PRIMARY_SC) ||
@@ -10345,14 +10153,13 @@ ctl_inquiry(struct ctl_scsiio *ctsio)
int
ctl_get_config(struct ctl_scsiio *ctsio)
{
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_get_config_header *hdr;
struct scsi_get_config_feature *feature;
struct scsi_get_config *cdb;
- struct ctl_lun *lun;
uint32_t alloc_len, data_len;
int rt, starting;
- lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
cdb = (struct scsi_get_config *)ctsio->cdb;
rt = (cdb->rt & SGC_RT_MASK);
starting = scsi_2btoul(cdb->starting_feature);
@@ -10373,7 +10180,6 @@ ctl_get_config(struct ctl_scsiio *ctsio)
sizeof(struct scsi_get_config_feature) + 4;
ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
ctsio->kern_sg_entries = 0;
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
hdr = (struct scsi_get_config_header *)ctsio->kern_data_ptr;
@@ -10541,15 +10347,8 @@ done:
data_len = (uint8_t *)feature - (uint8_t *)hdr;
}
scsi_ulto4b(data_len - 4, hdr->data_length);
- if (data_len < alloc_len) {
- ctsio->residual = alloc_len - data_len;
- ctsio->kern_data_len = data_len;
- ctsio->kern_total_len = data_len;
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
+ ctsio->kern_data_len = min(data_len, alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
ctl_set_success(ctsio);
ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
@@ -10563,11 +10362,9 @@ ctl_get_event_status(struct ctl_scsiio *ctsio)
{
struct scsi_get_event_status_header *hdr;
struct scsi_get_event_status *cdb;
- struct ctl_lun *lun;
uint32_t alloc_len, data_len;
int notif_class;
- lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
cdb = (struct scsi_get_event_status *)ctsio->cdb;
if ((cdb->byte2 & SGESN_POLLED) == 0) {
ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
@@ -10581,18 +10378,9 @@ ctl_get_event_status(struct ctl_scsiio *ctsio)
data_len = sizeof(struct scsi_get_event_status_header);
ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
ctsio->kern_sg_entries = 0;
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
-
- if (data_len < alloc_len) {
- ctsio->residual = alloc_len - data_len;
- ctsio->kern_data_len = data_len;
- ctsio->kern_total_len = data_len;
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
+ ctsio->kern_data_len = min(data_len, alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
hdr = (struct scsi_get_event_status_header *)ctsio->kern_data_ptr;
scsi_ulto2b(0, hdr->descr_length);
@@ -10611,28 +10399,17 @@ ctl_mechanism_status(struct ctl_scsiio *ctsio)
{
struct scsi_mechanism_status_header *hdr;
struct scsi_mechanism_status *cdb;
- struct ctl_lun *lun;
uint32_t alloc_len, data_len;
- lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
cdb = (struct scsi_mechanism_status *)ctsio->cdb;
alloc_len = scsi_2btoul(cdb->length);
data_len = sizeof(struct scsi_mechanism_status_header);
ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
ctsio->kern_sg_entries = 0;
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
-
- if (data_len < alloc_len) {
- ctsio->residual = alloc_len - data_len;
- ctsio->kern_data_len = data_len;
- ctsio->kern_total_len = data_len;
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
+ ctsio->kern_data_len = min(data_len, alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
hdr = (struct scsi_mechanism_status_header *)ctsio->kern_data_ptr;
hdr->state1 = 0x00;
@@ -10662,14 +10439,13 @@ ctl_ultomsf(uint32_t lba, uint8_t *buf)
int
ctl_read_toc(struct ctl_scsiio *ctsio)
{
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_read_toc_hdr *hdr;
struct scsi_read_toc_type01_descr *descr;
struct scsi_read_toc *cdb;
- struct ctl_lun *lun;
uint32_t alloc_len, data_len;
int format, msf;
- lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
cdb = (struct scsi_read_toc *)ctsio->cdb;
msf = (cdb->byte2 & CD_MSF) != 0;
format = cdb->format;
@@ -10682,18 +10458,9 @@ ctl_read_toc(struct ctl_scsiio *ctsio)
data_len += sizeof(struct scsi_read_toc_type01_descr);
ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
ctsio->kern_sg_entries = 0;
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
-
- if (data_len < alloc_len) {
- ctsio->residual = alloc_len - data_len;
- ctsio->kern_data_len = data_len;
- ctsio->kern_total_len = data_len;
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
+ ctsio->kern_data_len = min(data_len, alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
hdr = (struct scsi_read_toc_hdr *)ctsio->kern_data_ptr;
if (format == 0) {
@@ -11416,7 +11183,7 @@ ctl_failover_io(union ctl_io *io, int have_lock)
static void
ctl_failover_lun(union ctl_io *rio)
{
- struct ctl_softc *softc = control_softc;
+ struct ctl_softc *softc = CTL_SOFTC(rio);
struct ctl_lun *lun;
struct ctl_io_hdr *io, *next_io;
uint32_t targ_lun;
@@ -11426,18 +11193,17 @@ ctl_failover_lun(union ctl_io *rio)
/* Find and lock the LUN. */
mtx_lock(&softc->ctl_lock);
- if ((targ_lun < CTL_MAX_LUNS) &&
- ((lun = softc->ctl_luns[targ_lun]) != NULL)) {
- mtx_lock(&lun->lun_lock);
- mtx_unlock(&softc->ctl_lock);
- if (lun->flags & CTL_LUN_DISABLED) {
- mtx_unlock(&lun->lun_lock);
- return;
- }
- } else {
+ if (targ_lun >= CTL_MAX_LUNS ||
+ (lun = softc->ctl_luns[targ_lun]) == NULL) {
mtx_unlock(&softc->ctl_lock);
return;
}
+ mtx_lock(&lun->lun_lock);
+ mtx_unlock(&softc->ctl_lock);
+ if (lun->flags & CTL_LUN_DISABLED) {
+ mtx_unlock(&lun->lun_lock);
+ return;
+ }
if (softc->ha_mode == CTL_HA_MODE_XFER) {
TAILQ_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) {
@@ -11505,15 +11271,13 @@ ctl_scsiio_precheck(struct ctl_softc *softc, struct ctl_scsiio *ctsio)
struct ctl_lun *lun;
const struct ctl_cmd_entry *entry;
uint32_t initidx, targ_lun;
- int retval;
-
- retval = 0;
+ int retval = 0;
lun = NULL;
-
targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun;
- if ((targ_lun < CTL_MAX_LUNS)
- && ((lun = softc->ctl_luns[targ_lun]) != NULL)) {
+ if (targ_lun < CTL_MAX_LUNS)
+ lun = softc->ctl_luns[targ_lun];
+ if (lun) {
/*
* If the LUN is invalid, pretend that it doesn't exist.
* It will go away as soon as all pending I/O has been
@@ -11523,29 +11287,21 @@ ctl_scsiio_precheck(struct ctl_softc *softc, struct ctl_scsiio *ctsio)
if (lun->flags & CTL_LUN_DISABLED) {
mtx_unlock(&lun->lun_lock);
lun = NULL;
- ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = NULL;
- ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = NULL;
- } else {
- ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = lun;
- ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr =
- lun->be_lun;
+ }
+ }
+ CTL_LUN(ctsio) = lun;
+ if (lun) {
+ CTL_BACKEND_LUN(ctsio) = lun->be_lun;
- /*
- * Every I/O goes into the OOA queue for a
- * particular LUN, and stays there until completion.
- */
+ /*
+ * Every I/O goes into the OOA queue for a particular LUN,
+ * and stays there until completion.
+ */
#ifdef CTL_TIME_IO
- if (TAILQ_EMPTY(&lun->ooa_queue)) {
- lun->idle_time += getsbinuptime() -
- lun->last_busy;
- }
+ if (TAILQ_EMPTY(&lun->ooa_queue))
+ lun->idle_time += getsbinuptime() - lun->last_busy;
#endif
- TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr,
- ooa_links);
- }
- } else {
- ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = NULL;
- ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = NULL;
+ TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
}
/* Get command entry and return error if it is unsuppotyed. */
@@ -11849,7 +11605,7 @@ static int
ctl_target_reset(struct ctl_softc *softc, union ctl_io *io,
ctl_ua_type ua_type)
{
- struct ctl_port *port;
+ struct ctl_port *port = CTL_PORT(io);
struct ctl_lun *lun;
int retval;
@@ -11870,10 +11626,9 @@ ctl_target_reset(struct ctl_softc *softc, union ctl_io *io,
retval = 0;
mtx_lock(&softc->ctl_lock);
- port = ctl_io_port(&io->io_hdr);
STAILQ_FOREACH(lun, &softc->lun_list, links) {
if (port != NULL &&
- ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS)
+ ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX)
continue;
retval += ctl_do_lun_reset(lun, io, ua_type);
}
@@ -11946,8 +11701,10 @@ ctl_do_lun_reset(struct ctl_lun *lun, union ctl_io *io, ctl_ua_type ua_type)
ctl_clear_mask(lun->have_ca, i);
#endif
lun->prevent_count = 0;
- for (i = 0; i < CTL_MAX_INITIATORS; i++)
- ctl_clear_mask(lun->prevent, i);
+ if (lun->prevent) {
+ for (i = 0; i < CTL_MAX_INITIATORS; i++)
+ ctl_clear_mask(lun->prevent, i);
+ }
mtx_unlock(&lun->lun_lock);
return (0);
@@ -11962,7 +11719,7 @@ ctl_lun_reset(struct ctl_softc *softc, union ctl_io *io)
targ_lun = io->io_hdr.nexus.targ_mapped_lun;
mtx_lock(&softc->ctl_lock);
- if ((targ_lun >= CTL_MAX_LUNS) ||
+ if (targ_lun >= CTL_MAX_LUNS ||
(lun = softc->ctl_luns[targ_lun]) == NULL) {
mtx_unlock(&softc->ctl_lock);
io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
@@ -12032,7 +11789,7 @@ ctl_abort_tasks_lun(struct ctl_lun *lun, uint32_t targ_port, uint32_t init_id,
static int
ctl_abort_task_set(union ctl_io *io)
{
- struct ctl_softc *softc = control_softc;
+ struct ctl_softc *softc = CTL_SOFTC(io);
struct ctl_lun *lun;
uint32_t targ_lun;
@@ -12041,7 +11798,7 @@ ctl_abort_task_set(union ctl_io *io)
*/
targ_lun = io->io_hdr.nexus.targ_mapped_lun;
mtx_lock(&softc->ctl_lock);
- if ((targ_lun >= CTL_MAX_LUNS) ||
+ if (targ_lun >= CTL_MAX_LUNS ||
(lun = softc->ctl_luns[targ_lun]) == NULL) {
mtx_unlock(&softc->ctl_lock);
io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
@@ -12066,7 +11823,7 @@ ctl_abort_task_set(union ctl_io *io)
static int
ctl_i_t_nexus_reset(union ctl_io *io)
{
- struct ctl_softc *softc = control_softc;
+ struct ctl_softc *softc = CTL_SOFTC(io);
struct ctl_lun *lun;
uint32_t initidx;
@@ -12093,7 +11850,7 @@ ctl_i_t_nexus_reset(union ctl_io *io)
#endif
if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == initidx))
lun->flags &= ~CTL_LUN_RESERVED;
- if (ctl_is_set(lun->prevent, initidx)) {
+ if (lun->prevent && ctl_is_set(lun->prevent, initidx)) {
ctl_clear_mask(lun->prevent, initidx);
lun->prevent_count--;
}
@@ -12108,9 +11865,9 @@ ctl_i_t_nexus_reset(union ctl_io *io)
static int
ctl_abort_task(union ctl_io *io)
{
+ struct ctl_softc *softc = CTL_SOFTC(io);
union ctl_io *xio;
struct ctl_lun *lun;
- struct ctl_softc *softc;
#if 0
struct sbuf sb;
char printbuf[128];
@@ -12118,7 +11875,6 @@ ctl_abort_task(union ctl_io *io)
int found;
uint32_t targ_lun;
- softc = control_softc;
found = 0;
/*
@@ -12126,7 +11882,7 @@ ctl_abort_task(union ctl_io *io)
*/
targ_lun = io->io_hdr.nexus.targ_mapped_lun;
mtx_lock(&softc->ctl_lock);
- if ((targ_lun >= CTL_MAX_LUNS) ||
+ if (targ_lun >= CTL_MAX_LUNS ||
(lun = softc->ctl_luns[targ_lun]) == NULL) {
mtx_unlock(&softc->ctl_lock);
io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
@@ -12242,16 +11998,15 @@ ctl_abort_task(union ctl_io *io)
static int
ctl_query_task(union ctl_io *io, int task_set)
{
+ struct ctl_softc *softc = CTL_SOFTC(io);
union ctl_io *xio;
struct ctl_lun *lun;
- struct ctl_softc *softc;
int found = 0;
uint32_t targ_lun;
- softc = control_softc;
targ_lun = io->io_hdr.nexus.targ_mapped_lun;
mtx_lock(&softc->ctl_lock);
- if ((targ_lun >= CTL_MAX_LUNS) ||
+ if (targ_lun >= CTL_MAX_LUNS ||
(lun = softc->ctl_luns[targ_lun]) == NULL) {
mtx_unlock(&softc->ctl_lock);
io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
@@ -12283,15 +12038,14 @@ ctl_query_task(union ctl_io *io, int task_set)
static int
ctl_query_async_event(union ctl_io *io)
{
+ struct ctl_softc *softc = CTL_SOFTC(io);
struct ctl_lun *lun;
- struct ctl_softc *softc;
ctl_ua_type ua;
uint32_t targ_lun, initidx;
- softc = control_softc;
targ_lun = io->io_hdr.nexus.targ_mapped_lun;
mtx_lock(&softc->ctl_lock);
- if ((targ_lun >= CTL_MAX_LUNS) ||
+ if (targ_lun >= CTL_MAX_LUNS ||
(lun = softc->ctl_luns[targ_lun]) == NULL) {
mtx_unlock(&softc->ctl_lock);
io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
@@ -12312,7 +12066,7 @@ ctl_query_async_event(union ctl_io *io)
static void
ctl_run_task(union ctl_io *io)
{
- struct ctl_softc *softc = control_softc;
+ struct ctl_softc *softc = CTL_SOFTC(io);
int retval = 1;
CTL_DEBUG_PRINT(("ctl_run_task\n"));
@@ -12374,29 +12128,25 @@ ctl_run_task(union ctl_io *io)
static void
ctl_handle_isc(union ctl_io *io)
{
- int free_io;
+ struct ctl_softc *softc = CTL_SOFTC(io);
struct ctl_lun *lun;
- struct ctl_softc *softc = control_softc;
+ const struct ctl_cmd_entry *entry;
uint32_t targ_lun;
targ_lun = io->io_hdr.nexus.targ_mapped_lun;
- lun = softc->ctl_luns[targ_lun];
-
switch (io->io_hdr.msg_type) {
case CTL_MSG_SERIALIZE:
- free_io = ctl_serialize_other_sc_cmd(&io->scsiio);
+ ctl_serialize_other_sc_cmd(&io->scsiio);
break;
- case CTL_MSG_R2R: {
- const struct ctl_cmd_entry *entry;
-
- /*
- * This is only used in SER_ONLY mode.
- */
- free_io = 0;
+ case CTL_MSG_R2R: /* Only used in SER_ONLY mode. */
entry = ctl_get_cmd_entry(&io->scsiio, NULL);
+ if (targ_lun >= CTL_MAX_LUNS ||
+ (lun = softc->ctl_luns[targ_lun]) == NULL) {
+ ctl_done(io);
+ break;
+ }
mtx_lock(&lun->lun_lock);
- if (ctl_scsiio_lun_check(lun,
- entry, (struct ctl_scsiio *)io) != 0) {
+ if (ctl_scsiio_lun_check(lun, entry, &io->scsiio) != 0) {
mtx_unlock(&lun->lun_lock);
ctl_done(io);
break;
@@ -12405,51 +12155,45 @@ ctl_handle_isc(union ctl_io *io)
mtx_unlock(&lun->lun_lock);
ctl_enqueue_rtr(io);
break;
- }
case CTL_MSG_FINISH_IO:
if (softc->ha_mode == CTL_HA_MODE_XFER) {
- free_io = 0;
ctl_done(io);
- } else {
- free_io = 1;
- mtx_lock(&lun->lun_lock);
- TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr,
- ooa_links);
- ctl_check_blocked(lun);
- mtx_unlock(&lun->lun_lock);
+ break;
+ }
+ if (targ_lun >= CTL_MAX_LUNS ||
+ (lun = softc->ctl_luns[targ_lun]) == NULL) {
+ ctl_free_io(io);
+ break;
}
+ mtx_lock(&lun->lun_lock);
+ TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links);
+ ctl_check_blocked(lun);
+ mtx_unlock(&lun->lun_lock);
+ ctl_free_io(io);
break;
case CTL_MSG_PERS_ACTION:
- ctl_hndl_per_res_out_on_other_sc(
- (union ctl_ha_msg *)&io->presio.pr_msg);
- free_io = 1;
+ ctl_hndl_per_res_out_on_other_sc(io);
+ ctl_free_io(io);
break;
case CTL_MSG_BAD_JUJU:
- free_io = 0;
ctl_done(io);
break;
- case CTL_MSG_DATAMOVE:
- /* Only used in XFER mode */
- free_io = 0;
+ case CTL_MSG_DATAMOVE: /* Only used in XFER mode */
ctl_datamove_remote(io);
break;
- case CTL_MSG_DATAMOVE_DONE:
- /* Only used in XFER mode */
- free_io = 0;
+ case CTL_MSG_DATAMOVE_DONE: /* Only used in XFER mode */
io->scsiio.be_move_done(io);
break;
case CTL_MSG_FAILOVER:
ctl_failover_lun(io);
- free_io = 1;
+ ctl_free_io(io);
break;
default:
- free_io = 1;
printf("%s: Invalid message type %d\n",
__func__, io->io_hdr.msg_type);
+ ctl_free_io(io);
break;
}
- if (free_io)
- ctl_free_io(io);
}
@@ -12601,14 +12345,15 @@ ctl_datamove_timer_wakeup(void *arg)
void
ctl_datamove(union ctl_io *io)
{
- struct ctl_lun *lun;
void (*fe_datamove)(union ctl_io *io);
- mtx_assert(&control_softc->ctl_lock, MA_NOTOWNED);
+ mtx_assert(&((struct ctl_softc *)CTL_SOFTC(io))->ctl_lock, MA_NOTOWNED);
CTL_DEBUG_PRINT(("ctl_datamove\n"));
- lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+ /* No data transferred yet. Frontend must update this when done. */
+ io->scsiio.kern_data_resid = io->scsiio.kern_data_len;
+
#ifdef CTL_TIME_IO
if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) {
char str[256];
@@ -12689,7 +12434,7 @@ ctl_datamove(union ctl_io *io)
return;
}
- fe_datamove = ctl_io_port(&io->io_hdr)->fe_datamove;
+ fe_datamove = CTL_PORT(io)->fe_datamove;
fe_datamove(io);
}
@@ -12707,15 +12452,14 @@ ctl_send_datamove_done(union ctl_io *io, int have_lock)
msg.hdr.serializing_sc = io->io_hdr.serializing_sc;
msg.hdr.nexus = io->io_hdr.nexus;
msg.hdr.status = io->io_hdr.status;
+ msg.scsi.kern_data_resid = io->scsiio.kern_data_resid;
msg.scsi.tag_num = io->scsiio.tag_num;
msg.scsi.tag_type = io->scsiio.tag_type;
msg.scsi.scsi_status = io->scsiio.scsi_status;
memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data,
io->scsiio.sense_len);
msg.scsi.sense_len = io->scsiio.sense_len;
- msg.scsi.sense_residual = io->scsiio.sense_residual;
- msg.scsi.fetd_status = io->io_hdr.port_status;
- msg.scsi.residual = io->scsiio.residual;
+ msg.scsi.port_status = io->io_hdr.port_status;
io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
if (io->io_hdr.flags & CTL_FLAG_FAILOVER) {
ctl_failover_io(io, /*have_lock*/ have_lock);
@@ -12807,7 +12551,7 @@ ctl_datamove_remote_write(union ctl_io *io)
*/
io->scsiio.be_move_done = ctl_datamove_remote_dm_write_cb;
- fe_datamove = ctl_io_port(&io->io_hdr)->fe_datamove;
+ fe_datamove = CTL_PORT(io)->fe_datamove;
fe_datamove(io);
}
@@ -12882,7 +12626,7 @@ ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq)
/* XXX KDM add checks like the ones in ctl_datamove? */
- fe_datamove = ctl_io_port(&io->io_hdr)->fe_datamove;
+ fe_datamove = CTL_PORT(io)->fe_datamove;
fe_datamove(io);
}
@@ -13098,7 +12842,7 @@ static void
ctl_datamove_remote(union ctl_io *io)
{
- mtx_assert(&control_softc->ctl_lock, MA_NOTOWNED);
+ mtx_assert(&((struct ctl_softc *)CTL_SOFTC(io))->ctl_lock, MA_NOTOWNED);
if (io->io_hdr.flags & CTL_FLAG_FAILOVER) {
ctl_failover_io(io, /*have_lock*/ 0);
@@ -13134,14 +12878,14 @@ ctl_datamove_remote(union ctl_io *io)
static void
ctl_process_done(union ctl_io *io)
{
- struct ctl_lun *lun;
- struct ctl_softc *softc = control_softc;
+ struct ctl_softc *softc = CTL_SOFTC(io);
+ struct ctl_port *port = CTL_PORT(io);
+ struct ctl_lun *lun = CTL_LUN(io);
void (*fe_done)(union ctl_io *io);
union ctl_ha_msg msg;
- uint32_t targ_port = io->io_hdr.nexus.targ_port;
CTL_DEBUG_PRINT(("ctl_process_done\n"));
- fe_done = softc->ctl_ports[targ_port]->fe_done;
+ fe_done = port->fe_done;
#ifdef CTL_TIME_IO
if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) {
@@ -13191,7 +12935,6 @@ ctl_process_done(union ctl_io *io)
__func__, io->io_hdr.io_type);
}
- lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
if (lun == NULL) {
CTL_DEBUG_PRINT(("NULL LUN for lun %d\n",
io->io_hdr.nexus.targ_mapped_lun));
@@ -13245,11 +12988,13 @@ ctl_process_done(union ctl_io *io)
*/
if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS &&
io->io_hdr.io_type == CTL_IO_SCSI) {
-#ifdef CTL_TIME_IO
- struct bintime cur_bt;
-#endif
int type;
+#ifdef CTL_TIME_IO
+ struct bintime bt;
+ getbinuptime(&bt);
+ bintime_sub(&bt, &io->io_hdr.start_bt);
+#endif
if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
CTL_FLAG_DATA_IN)
type = CTL_STATS_READ;
@@ -13259,18 +13004,38 @@ ctl_process_done(union ctl_io *io)
else
type = CTL_STATS_NO_IO;
- lun->stats.ports[targ_port].bytes[type] +=
+#ifdef CTL_LEGACY_STATS
+ uint32_t targ_port = port->targ_port;
+ lun->legacy_stats.ports[targ_port].bytes[type] +=
io->scsiio.kern_total_len;
- lun->stats.ports[targ_port].operations[type]++;
+ lun->legacy_stats.ports[targ_port].operations[type] ++;
+ lun->legacy_stats.ports[targ_port].num_dmas[type] +=
+ io->io_hdr.num_dmas;
#ifdef CTL_TIME_IO
- bintime_add(&lun->stats.ports[targ_port].dma_time[type],
+ bintime_add(&lun->legacy_stats.ports[targ_port].dma_time[type],
&io->io_hdr.dma_bt);
- getbinuptime(&cur_bt);
- bintime_sub(&cur_bt, &io->io_hdr.start_bt);
- bintime_add(&lun->stats.ports[targ_port].time[type], &cur_bt);
+ bintime_add(&lun->legacy_stats.ports[targ_port].time[type],
+ &bt);
#endif
- lun->stats.ports[targ_port].num_dmas[type] +=
- io->io_hdr.num_dmas;
+#endif /* CTL_LEGACY_STATS */
+
+ lun->stats.bytes[type] += io->scsiio.kern_total_len;
+ lun->stats.operations[type] ++;
+ lun->stats.dmas[type] += io->io_hdr.num_dmas;
+#ifdef CTL_TIME_IO
+ bintime_add(&lun->stats.dma_time[type], &io->io_hdr.dma_bt);
+ bintime_add(&lun->stats.time[type], &bt);
+#endif
+
+ mtx_lock(&port->port_lock);
+ port->stats.bytes[type] += io->scsiio.kern_total_len;
+ port->stats.operations[type] ++;
+ port->stats.dmas[type] += io->io_hdr.num_dmas;
+#ifdef CTL_TIME_IO
+ bintime_add(&port->stats.dma_time[type], &io->io_hdr.dma_bt);
+ bintime_add(&port->stats.time[type], &bt);
+#endif
+ mtx_unlock(&port->port_lock);
}
/*
@@ -13345,42 +13110,36 @@ bailout:
int
ctl_queue_sense(union ctl_io *io)
{
+ struct ctl_softc *softc = CTL_SOFTC(io);
+ struct ctl_port *port = CTL_PORT(io);
struct ctl_lun *lun;
- struct ctl_port *port;
- struct ctl_softc *softc;
uint32_t initidx, targ_lun;
- softc = control_softc;
-
CTL_DEBUG_PRINT(("ctl_queue_sense\n"));
+ targ_lun = ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun);
+
/*
* LUN lookup will likely move to the ctl_work_thread() once we
* have our new queueing infrastructure (that doesn't put things on
* a per-LUN queue initially). That is so that we can handle
* things like an INQUIRY to a LUN that we don't have enabled. We
* can't deal with that right now.
+ * If we don't have a LUN for this, just toss the sense information.
*/
mtx_lock(&softc->ctl_lock);
-
- /*
- * If we don't have a LUN for this, just toss the sense
- * information.
- */
- port = ctl_io_port(&ctsio->io_hdr);
- targ_lun = ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun);
- if ((targ_lun < CTL_MAX_LUNS)
- && (softc->ctl_luns[targ_lun] != NULL))
- lun = softc->ctl_luns[targ_lun];
- else
+ if (targ_lun >= CTL_MAX_LUNS ||
+ (lun = softc->ctl_luns[targ_lun]) == NULL) {
+ mtx_unlock(&softc->ctl_lock);
goto bailout;
-
- initidx = ctl_get_initindex(&io->io_hdr.nexus);
-
+ }
mtx_lock(&lun->lun_lock);
+ mtx_unlock(&softc->ctl_lock);
+
/*
* Already have CA set for this LUN...toss the sense information.
*/
+ initidx = ctl_get_initindex(&io->io_hdr.nexus);
if (ctl_is_set(lun->have_ca, initidx)) {
mtx_unlock(&lun->lun_lock);
goto bailout;
@@ -13393,10 +13152,7 @@ ctl_queue_sense(union ctl_io *io)
mtx_unlock(&lun->lun_lock);
bailout:
- mtx_unlock(&softc->ctl_lock);
-
ctl_free_io(io);
-
return (CTL_RETVAL_COMPLETE);
}
#endif
@@ -13408,7 +13164,7 @@ bailout:
int
ctl_queue(union ctl_io *io)
{
- struct ctl_port *port;
+ struct ctl_port *port = CTL_PORT(io);
CTL_DEBUG_PRINT(("ctl_queue cdb[0]=%02X\n", io->scsiio.cdb[0]));
@@ -13418,7 +13174,6 @@ ctl_queue(union ctl_io *io)
#endif /* CTL_TIME_IO */
/* Map FE-specific LUN ID into global one. */
- port = ctl_io_port(&io->io_hdr);
io->io_hdr.nexus.targ_mapped_lun =
ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun);
@@ -13451,9 +13206,8 @@ ctl_done_timer_wakeup(void *arg)
void
ctl_serseq_done(union ctl_io *io)
{
- struct ctl_lun *lun;
+	struct ctl_lun *lun = CTL_LUN(io);
- lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
if (lun->be_lun == NULL ||
lun->be_lun->serseq == CTL_LUN_SERSEQ_OFF)
return;
@@ -13503,9 +13257,7 @@ ctl_done(union ctl_io *io)
if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) {
io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE;
} else {
- struct ctl_lun *lun;
-
- lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+ struct ctl_lun *lun = CTL_LUN(io);
if ((lun != NULL)
&& (lun->delay_info.done_delay > 0)) {
@@ -13535,7 +13287,7 @@ ctl_work_thread(void *arg)
CTL_DEBUG_PRINT(("ctl_work_thread starting\n"));
- for (;;) {
+ while (!softc->shutdown) {
/*
* We handle the queues in this order:
* - ISC
@@ -13585,6 +13337,8 @@ ctl_work_thread(void *arg)
/* Sleep until we have something to do. */
mtx_sleep(thr, &thr->queue_lock, PDROP | PRIBIO, "-", 0);
}
+ thr->thread = NULL;
+ kthread_exit();
}
static void
@@ -13595,7 +13349,7 @@ ctl_lun_thread(void *arg)
CTL_DEBUG_PRINT(("ctl_lun_thread starting\n"));
- for (;;) {
+ while (!softc->shutdown) {
mtx_lock(&softc->ctl_lock);
be_lun = STAILQ_FIRST(&softc->pending_lun_queue);
if (be_lun != NULL) {
@@ -13609,6 +13363,8 @@ ctl_lun_thread(void *arg)
mtx_sleep(&softc->pending_lun_queue, &softc->ctl_lock,
PDROP | PRIBIO, "-", 0);
}
+ softc->lun_thread = NULL;
+ kthread_exit();
}
static void
@@ -13624,7 +13380,7 @@ ctl_thresh_thread(void *arg)
CTL_DEBUG_PRINT(("ctl_thresh_thread starting\n"));
- for (;;) {
+ while (!softc->shutdown) {
mtx_lock(&softc->ctl_lock);
STAILQ_FOREACH(lun, &softc->lun_list, links) {
if ((lun->flags & CTL_LUN_DISABLED) ||
@@ -13709,15 +13465,17 @@ ctl_thresh_thread(void *arg)
mtx_lock(&softc->ctl_lock);
}
}
- mtx_unlock(&softc->ctl_lock);
- pause("-", CTL_LBP_PERIOD * hz);
+ mtx_sleep(&softc->thresh_thread, &softc->ctl_lock,
+ PDROP | PRIBIO, "-", CTL_LBP_PERIOD * hz);
}
+ softc->thresh_thread = NULL;
+ kthread_exit();
}
static void
ctl_enqueue_incoming(union ctl_io *io)
{
- struct ctl_softc *softc = control_softc;
+ struct ctl_softc *softc = CTL_SOFTC(io);
struct ctl_thread *thr;
u_int idx;
@@ -13733,7 +13491,7 @@ ctl_enqueue_incoming(union ctl_io *io)
static void
ctl_enqueue_rtr(union ctl_io *io)
{
- struct ctl_softc *softc = control_softc;
+ struct ctl_softc *softc = CTL_SOFTC(io);
struct ctl_thread *thr;
thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
@@ -13746,7 +13504,7 @@ ctl_enqueue_rtr(union ctl_io *io)
static void
ctl_enqueue_done(union ctl_io *io)
{
- struct ctl_softc *softc = control_softc;
+ struct ctl_softc *softc = CTL_SOFTC(io);
struct ctl_thread *thr;
thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
@@ -13759,7 +13517,7 @@ ctl_enqueue_done(union ctl_io *io)
static void
ctl_enqueue_isc(union ctl_io *io)
{
- struct ctl_softc *softc = control_softc;
+ struct ctl_softc *softc = CTL_SOFTC(io);
struct ctl_thread *thr;
thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
diff --git a/sys/cam/ctl/ctl.h b/sys/cam/ctl/ctl.h
index 7b4d06c..d9a4f5a 100644
--- a/sys/cam/ctl/ctl.h
+++ b/sys/cam/ctl/ctl.h
@@ -74,18 +74,12 @@ struct ctl_port_entry {
struct ctl_modepage_header {
uint8_t page_code;
uint8_t subpage;
- int32_t len_used;
- int32_t len_left;
-};
-
-struct ctl_modepage_aps {
- struct ctl_modepage_header header;
- uint8_t lock_active;
+ uint16_t len_used;
+ uint16_t len_left;
};
union ctl_modepage_info {
struct ctl_modepage_header header;
- struct ctl_modepage_aps aps;
};
/*
diff --git a/sys/cam/ctl/ctl_backend.c b/sys/cam/ctl/ctl_backend.c
index 86f7d3c..bac7e85 100644
--- a/sys/cam/ctl/ctl_backend.c
+++ b/sys/cam/ctl/ctl_backend.c
@@ -67,11 +67,10 @@ ctl_backend_register(struct ctl_backend_driver *be)
{
struct ctl_softc *softc = control_softc;
struct ctl_backend_driver *be_tmp;
+ int error;
+ /* Sanity check, make sure this isn't a duplicate registration. */
mtx_lock(&softc->ctl_lock);
- /*
- * Sanity check, make sure this isn't a duplicate registration.
- */
STAILQ_FOREACH(be_tmp, &softc->be_list, links) {
if (strcmp(be_tmp->name, be->name) == 0) {
mtx_unlock(&softc->ctl_lock);
@@ -79,39 +78,24 @@ ctl_backend_register(struct ctl_backend_driver *be)
}
}
mtx_unlock(&softc->ctl_lock);
-
- /*
- * Call the backend's initialization routine.
- */
- be->init();
-
- mtx_lock(&softc->ctl_lock);
-
- STAILQ_INSERT_TAIL(&softc->be_list, be, links);
-
- softc->num_backends++;
-
- /*
- * Don't want to increment the usage count for internal consumers,
- * we won't be able to unload otherwise.
- */
- /* XXX KDM find a substitute for this? */
-#if 0
- if ((be->flags & CTL_BE_FLAG_INTERNAL) == 0)
- MOD_INC_USE_COUNT;
-#endif
-
#ifdef CS_BE_CONFIG_MOVE_DONE_IS_NOT_USED
be->config_move_done = ctl_config_move_done;
#endif
- /* XXX KDM fix this! */
be->num_luns = 0;
-#if 0
- atomic_set(&be->num_luns, 0);
-#endif
- mtx_unlock(&softc->ctl_lock);
+ /* Call the backend's initialization routine. */
+ if (be->init != NULL) {
+ if ((error = be->init()) != 0) {
+ printf("%s backend init error: %d\n",
+ be->name, error);
+ return (error);
+ }
+ }
+ mtx_lock(&softc->ctl_lock);
+ STAILQ_INSERT_TAIL(&softc->be_list, be, links);
+ softc->num_backends++;
+ mtx_unlock(&softc->ctl_lock);
return (0);
}
@@ -119,30 +103,21 @@ int
ctl_backend_deregister(struct ctl_backend_driver *be)
{
struct ctl_softc *softc = control_softc;
-
- mtx_lock(&softc->ctl_lock);
-
-#if 0
- if (atomic_read(&be->num_luns) != 0) {
-#endif
- /* XXX KDM fix this! */
- if (be->num_luns != 0) {
- mtx_unlock(&softc->ctl_lock);
- return (-1);
+ int error;
+
+ /* Call the backend's shutdown routine. */
+ if (be->shutdown != NULL) {
+ if ((error = be->shutdown()) != 0) {
+ printf("%s backend shutdown error: %d\n",
+ be->name, error);
+ return (error);
+ }
}
+ mtx_lock(&softc->ctl_lock);
STAILQ_REMOVE(&softc->be_list, be, ctl_backend_driver, links);
-
softc->num_backends--;
-
- /* XXX KDM find a substitute for this? */
-#if 0
- if ((be->flags & CTL_BE_FLAG_INTERNAL) == 0)
- MOD_DEC_USE_COUNT;
-#endif
-
mtx_unlock(&softc->ctl_lock);
-
return (0);
}
diff --git a/sys/cam/ctl/ctl_backend.h b/sys/cam/ctl/ctl_backend.h
index 4177e2d..4202efc 100644
--- a/sys/cam/ctl/ctl_backend.h
+++ b/sys/cam/ctl/ctl_backend.h
@@ -1,6 +1,6 @@
/*-
* Copyright (c) 2003 Silicon Graphics International Corp.
- * Copyright (c) 2014-2015 Alexander Motin <mav@FreeBSD.org>
+ * Copyright (c) 2014-2017 Alexander Motin <mav@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -40,54 +40,7 @@
#ifndef _CTL_BACKEND_H_
#define _CTL_BACKEND_H_
-/*
- * XXX KDM move this to another header file?
- */
-#define CTL_BE_NAME_LEN 32
-
-/*
- * The ID_REQ flag is used to say that the caller has requested a
- * particular LUN ID in the req_lun_id field. If we cannot allocate that
- * LUN ID, the ctl_add_lun() call will fail.
- *
- * The STOPPED flag tells us that the LUN should default to the powered
- * off state. It will return 0x04,0x02 until it is powered up. ("Logical
- * unit not ready, initializing command required.")
- *
- * The NO_MEDIA flag tells us that the LUN has no media inserted.
- *
- * The PRIMARY flag tells us that this LUN is registered as a Primary LUN
- * which is accessible via the Master shelf controller in an HA. This flag
- * being set indicates a Primary LUN. This flag being reset represents a
- * Secondary LUN controlled by the Secondary controller in an HA
- * configuration. Flag is applicable at this time to T_DIRECT types.
- *
- * The SERIAL_NUM flag tells us that the serial_num field is filled in and
- * valid for use in SCSI INQUIRY VPD page 0x80.
- *
- * The DEVID flag tells us that the device_id field is filled in and
- * valid for use in SCSI INQUIRY VPD page 0x83.
- *
- * The DEV_TYPE flag tells us that the device_type field is filled in.
- *
- * The EJECTED flag tells us that the removable LUN has tray open.
- *
- * The UNMAP flag tells us that this LUN supports UNMAP.
- *
- * The OFFLINE flag tells us that this LUN can not access backing store.
- */
-typedef enum {
- CTL_LUN_FLAG_ID_REQ = 0x01,
- CTL_LUN_FLAG_STOPPED = 0x02,
- CTL_LUN_FLAG_NO_MEDIA = 0x04,
- CTL_LUN_FLAG_PRIMARY = 0x08,
- CTL_LUN_FLAG_SERIAL_NUM = 0x10,
- CTL_LUN_FLAG_DEVID = 0x20,
- CTL_LUN_FLAG_DEV_TYPE = 0x40,
- CTL_LUN_FLAG_UNMAP = 0x80,
- CTL_LUN_FLAG_EJECTED = 0x100,
- CTL_LUN_FLAG_READONLY = 0x200
-} ctl_backend_lun_flags;
+#include <cam/ctl/ctl_ioctl.h>
typedef enum {
CTL_LUN_SERSEQ_OFF,
@@ -102,12 +55,13 @@ typedef enum {
{ \
switch (type) { \
case MOD_LOAD: \
- ctl_backend_register( \
- (struct ctl_backend_driver *)data); \
+ return (ctl_backend_register( \
+ (struct ctl_backend_driver *)data)); \
break; \
case MOD_UNLOAD: \
- printf(#name " module unload - not possible for this module type\n"); \
- return EINVAL; \
+ return (ctl_backend_deregister( \
+ (struct ctl_backend_driver *)data)); \
+ break; \
default: \
return EOPNOTSUPP; \
} \
@@ -226,10 +180,10 @@ struct ctl_be_lun {
typedef enum {
CTL_BE_FLAG_NONE = 0x00, /* no flags */
CTL_BE_FLAG_HAS_CONFIG = 0x01, /* can do config reads, writes */
- CTL_BE_FLAG_INTERNAL = 0x02 /* don't inc mod refcount */
} ctl_backend_flags;
typedef int (*be_init_t)(void);
+typedef int (*be_shutdown_t)(void);
typedef int (*be_func_t)(union ctl_io *io);
typedef void (*be_vfunc_t)(union ctl_io *io);
typedef int (*be_ioctl_t)(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
@@ -241,6 +195,7 @@ struct ctl_backend_driver {
char name[CTL_BE_NAME_LEN]; /* passed to CTL */
ctl_backend_flags flags; /* passed to CTL */
be_init_t init; /* passed to CTL */
+ be_shutdown_t shutdown; /* passed to CTL */
be_func_t data_submit; /* passed to CTL */
be_func_t data_move_done; /* passed to CTL */
be_func_t config_read; /* passed to CTL */
diff --git a/sys/cam/ctl/ctl_backend_block.c b/sys/cam/ctl/ctl_backend_block.c
index 8fa93cf..2ac0cdd 100644
--- a/sys/cam/ctl/ctl_backend_block.c
+++ b/sys/cam/ctl/ctl_backend_block.c
@@ -185,6 +185,7 @@ struct ctl_be_block_lun {
*/
struct ctl_be_block_softc {
struct mtx lock;
+ uma_zone_t beio_zone;
int num_luns;
STAILQ_HEAD(, ctl_be_block_lun) lun_list;
};
@@ -276,13 +277,15 @@ static int ctl_be_block_config_write(union ctl_io *io);
static int ctl_be_block_config_read(union ctl_io *io);
static int ctl_be_block_lun_info(void *be_lun, struct sbuf *sb);
static uint64_t ctl_be_block_lun_attr(void *be_lun, const char *attrname);
-int ctl_be_block_init(void);
+static int ctl_be_block_init(void);
+static int ctl_be_block_shutdown(void);
static struct ctl_backend_driver ctl_be_block_driver =
{
.name = "block",
.flags = CTL_BE_FLAG_HAS_CONFIG,
.init = ctl_be_block_init,
+ .shutdown = ctl_be_block_shutdown,
.data_submit = ctl_be_block_submit,
.data_move_done = ctl_be_block_move_done,
.config_read = ctl_be_block_config_read,
@@ -295,14 +298,12 @@ static struct ctl_backend_driver ctl_be_block_driver =
MALLOC_DEFINE(M_CTLBLK, "ctlblk", "Memory used for CTL block backend");
CTL_BACKEND_DECLARE(cbb, ctl_be_block_driver);
-static uma_zone_t beio_zone;
-
static struct ctl_be_block_io *
ctl_alloc_beio(struct ctl_be_block_softc *softc)
{
struct ctl_be_block_io *beio;
- beio = uma_zalloc(beio_zone, M_WAITOK | M_ZERO);
+ beio = uma_zalloc(softc->beio_zone, M_WAITOK | M_ZERO);
beio->softc = softc;
return (beio);
}
@@ -335,7 +336,7 @@ ctl_free_beio(struct ctl_be_block_io *beio)
duplicate_free, beio->num_segs);
}
- uma_zfree(beio_zone, beio);
+ uma_zfree(beio->softc->beio_zone, beio);
}
static void
@@ -422,6 +423,16 @@ ctl_be_block_move_done(union ctl_io *io)
*/
if (io->io_hdr.flags & CTL_FLAG_ABORT) {
;
+ } else if ((io->io_hdr.port_status != 0) &&
+ ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
+ (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
+ ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1,
+ /*retry_count*/ io->io_hdr.port_status);
+ } else if (io->scsiio.kern_data_resid != 0 &&
+ (io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT &&
+ ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
+ (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
+ ctl_set_invalid_field_ciu(&io->scsiio);
} else if ((io->io_hdr.port_status == 0) &&
((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) {
lbalen = ARGS(beio->io);
@@ -431,21 +442,6 @@ ctl_be_block_move_done(union ctl_io *io)
/* We have two data blocks ready for comparison. */
ctl_be_block_compare(io);
}
- } else if ((io->io_hdr.port_status != 0) &&
- ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
- (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
- /*
- * For hardware error sense keys, the sense key
- * specific value is defined to be a retry count,
- * but we use it to pass back an internal FETD
- * error code. XXX KDM Hopefully the FETD is only
- * using 16 bits for an error code, since that's
- * all the space we have in the sks field.
- */
- ctl_set_internal_failure(&io->scsiio,
- /*sks_valid*/ 1,
- /*retry_count*/
- io->io_hdr.port_status);
}
/*
@@ -1637,7 +1633,6 @@ ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun,
else
io->scsiio.kern_data_ptr = (uint8_t *)beio->sg_segs;
io->scsiio.kern_data_len = beio->io_len;
- io->scsiio.kern_data_resid = 0;
io->scsiio.kern_sg_entries = beio->num_segs;
io->io_hdr.flags |= CTL_FLAG_ALLOCATED;
@@ -1754,8 +1749,7 @@ ctl_be_block_submit(union ctl_io *io)
DPRINTF("entered\n");
- cbe_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
- CTL_PRIV_BACKEND_LUN].ptr;
+ cbe_lun = CTL_BACKEND_LUN(io);
be_lun = (struct ctl_be_block_lun *)cbe_lun->be_lun;
/*
@@ -2731,8 +2725,7 @@ ctl_be_block_config_write(union ctl_io *io)
DPRINTF("entered\n");
- cbe_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
- CTL_PRIV_BACKEND_LUN].ptr;
+ cbe_lun = CTL_BACKEND_LUN(io);
be_lun = (struct ctl_be_block_lun *)cbe_lun->be_lun;
retval = 0;
@@ -2817,8 +2810,7 @@ ctl_be_block_config_read(union ctl_io *io)
DPRINTF("entered\n");
- cbe_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
- CTL_PRIV_BACKEND_LUN].ptr;
+ cbe_lun = CTL_BACKEND_LUN(io);
be_lun = (struct ctl_be_block_lun *)cbe_lun->be_lun;
switch (io->scsiio.cdb[0]) {
@@ -2882,19 +2874,40 @@ ctl_be_block_lun_attr(void *be_lun, const char *attrname)
return (lun->getattr(lun, attrname));
}
-int
+static int
ctl_be_block_init(void)
{
- struct ctl_be_block_softc *softc;
- int retval;
-
- softc = &backend_block_softc;
- retval = 0;
+ struct ctl_be_block_softc *softc = &backend_block_softc;
mtx_init(&softc->lock, "ctlblock", NULL, MTX_DEF);
- beio_zone = uma_zcreate("beio", sizeof(struct ctl_be_block_io),
+ softc->beio_zone = uma_zcreate("beio", sizeof(struct ctl_be_block_io),
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
STAILQ_INIT(&softc->lun_list);
+ return (0);
+}
- return (retval);
+
+static int
+ctl_be_block_shutdown(void)
+{
+ struct ctl_be_block_softc *softc = &backend_block_softc;
+ struct ctl_be_block_lun *lun, *next_lun;
+
+ mtx_lock(&softc->lock);
+ STAILQ_FOREACH_SAFE(lun, &softc->lun_list, links, next_lun) {
+ /*
+ * Drop our lock here. Since ctl_invalidate_lun() can call
+ * back into us, this could potentially lead to a recursive
+ * lock of the same mutex, which would cause a hang.
+ */
+ mtx_unlock(&softc->lock);
+ ctl_disable_lun(&lun->cbe_lun);
+ ctl_invalidate_lun(&lun->cbe_lun);
+ mtx_lock(&softc->lock);
+ }
+ mtx_unlock(&softc->lock);
+
+ uma_zdestroy(softc->beio_zone);
+ mtx_destroy(&softc->lock);
+ return (0);
}
diff --git a/sys/cam/ctl/ctl_backend_ramdisk.c b/sys/cam/ctl/ctl_backend_ramdisk.c
index d170446..b743bc6 100644
--- a/sys/cam/ctl/ctl_backend_ramdisk.c
+++ b/sys/cam/ctl/ctl_backend_ramdisk.c
@@ -1,7 +1,7 @@
/*-
* Copyright (c) 2003, 2008 Silicon Graphics International Corp.
* Copyright (c) 2012 The FreeBSD Foundation
- * Copyright (c) 2014-2015 Alexander Motin <mav@FreeBSD.org>
+ * Copyright (c) 2014-2017 Alexander Motin <mav@FreeBSD.org>
* All rights reserved.
*
* Portions of this software were developed by Edward Tomasz Napierala
@@ -35,7 +35,7 @@
* $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_ramdisk.c#3 $
*/
/*
- * CAM Target Layer backend for a "fake" ramdisk.
+ * CAM Target Layer black hole and RAM disk backend.
*
* Author: Ken Merry <ken@FreeBSD.org>
*/
@@ -48,9 +48,11 @@ __FBSDID("$FreeBSD$");
#include <sys/kernel.h>
#include <sys/condvar.h>
#include <sys/types.h>
+#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
+#include <sys/sx.h>
#include <sys/taskqueue.h>
#include <sys/time.h>
#include <sys/queue.h>
@@ -71,6 +73,29 @@ __FBSDID("$FreeBSD$");
#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_error.h>
+#define PRIV(io) \
+ ((struct ctl_ptr_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_BACKEND])
+#define ARGS(io) \
+ ((struct ctl_lba_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_LBA_LEN])
+
+#define PPP (PAGE_SIZE / sizeof(uint8_t **))
+#ifdef __LP64__
+#define PPPS (PAGE_SHIFT - 3)
+#else
+#define PPPS (PAGE_SHIFT - 2)
+#endif
+#define SGPP (PAGE_SIZE / sizeof(struct ctl_sg_entry))
+
+#define P_UNMAPPED NULL /* Page is unmapped. */
+#define P_ANCHORED ((void *)(uintptr_t)1) /* Page is anchored. */
+
+typedef enum {
+ GP_READ, /* Return data page or zero page. */
+ GP_WRITE, /* Return data page, try allocate if none. */
+ GP_ANCHOR, /* Return data page, try anchor if none. */
+ GP_OTHER, /* Return what present, do not allocate/anchor. */
+} getpage_op_t;
+
typedef enum {
CTL_BE_RAMDISK_LUN_UNCONFIGURED = 0x01,
CTL_BE_RAMDISK_LUN_CONFIG_ERR = 0x02,
@@ -79,28 +104,29 @@ typedef enum {
struct ctl_be_ramdisk_lun {
struct ctl_lun_create_params params;
- char lunname[32];
- uint64_t size_bytes;
- uint64_t size_blocks;
+ char lunname[32];
+ int indir;
+ uint8_t **pages;
+ uint8_t *zero_page;
+ struct sx page_lock;
+ u_int pblocksize;
+ u_int pblockmul;
+ uint64_t size_bytes;
+ uint64_t size_blocks;
+ uint64_t cap_bytes;
+ uint64_t cap_used;
struct ctl_be_ramdisk_softc *softc;
ctl_be_ramdisk_lun_flags flags;
STAILQ_ENTRY(ctl_be_ramdisk_lun) links;
- struct ctl_be_lun cbe_lun;
- struct taskqueue *io_taskqueue;
- struct task io_task;
+ struct ctl_be_lun cbe_lun;
+ struct taskqueue *io_taskqueue;
+ struct task io_task;
STAILQ_HEAD(, ctl_io_hdr) cont_queue;
- struct mtx_padalign queue_lock;
+ struct mtx_padalign queue_lock;
};
struct ctl_be_ramdisk_softc {
struct mtx lock;
- int rd_size;
-#ifdef CTL_RAMDISK_PAGES
- uint8_t **ramdisk_pages;
- int num_pages;
-#else
- uint8_t *ramdisk_buffer;
-#endif
int num_luns;
STAILQ_HEAD(, ctl_be_ramdisk_lun) lun_list;
};
@@ -108,11 +134,16 @@ struct ctl_be_ramdisk_softc {
static struct ctl_be_ramdisk_softc rd_softc;
extern struct ctl_softc *control_softc;
-int ctl_backend_ramdisk_init(void);
-void ctl_backend_ramdisk_shutdown(void);
+static int ctl_backend_ramdisk_init(void);
+static int ctl_backend_ramdisk_shutdown(void);
static int ctl_backend_ramdisk_move_done(union ctl_io *io);
+static void ctl_backend_ramdisk_compare(union ctl_io *io);
+static void ctl_backend_ramdisk_rw(union ctl_io *io);
static int ctl_backend_ramdisk_submit(union ctl_io *io);
-static void ctl_backend_ramdisk_continue(union ctl_io *io);
+static void ctl_backend_ramdisk_worker(void *context, int pending);
+static int ctl_backend_ramdisk_config_read(union ctl_io *io);
+static int ctl_backend_ramdisk_config_write(union ctl_io *io);
+static uint64_t ctl_backend_ramdisk_lun_attr(void *be_lun, const char *attrname);
static int ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd,
caddr_t addr, int flag, struct thread *td);
static int ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
@@ -121,63 +152,43 @@ static int ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
struct ctl_lun_req *req);
static int ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc,
struct ctl_lun_req *req);
-static void ctl_backend_ramdisk_worker(void *context, int pending);
static void ctl_backend_ramdisk_lun_shutdown(void *be_lun);
static void ctl_backend_ramdisk_lun_config_status(void *be_lun,
ctl_lun_config_status status);
-static int ctl_backend_ramdisk_config_write(union ctl_io *io);
-static int ctl_backend_ramdisk_config_read(union ctl_io *io);
static struct ctl_backend_driver ctl_be_ramdisk_driver =
{
.name = "ramdisk",
.flags = CTL_BE_FLAG_HAS_CONFIG,
.init = ctl_backend_ramdisk_init,
+ .shutdown = ctl_backend_ramdisk_shutdown,
.data_submit = ctl_backend_ramdisk_submit,
.data_move_done = ctl_backend_ramdisk_move_done,
.config_read = ctl_backend_ramdisk_config_read,
.config_write = ctl_backend_ramdisk_config_write,
- .ioctl = ctl_backend_ramdisk_ioctl
+ .ioctl = ctl_backend_ramdisk_ioctl,
+ .lun_attr = ctl_backend_ramdisk_lun_attr,
};
MALLOC_DEFINE(M_RAMDISK, "ramdisk", "Memory used for CTL RAMdisk");
CTL_BACKEND_DECLARE(cbr, ctl_be_ramdisk_driver);
-int
+static int
ctl_backend_ramdisk_init(void)
{
struct ctl_be_ramdisk_softc *softc = &rd_softc;
-#ifdef CTL_RAMDISK_PAGES
- int i;
-#endif
memset(softc, 0, sizeof(*softc));
mtx_init(&softc->lock, "ctlramdisk", NULL, MTX_DEF);
STAILQ_INIT(&softc->lun_list);
- softc->rd_size = 1024 * 1024;
-#ifdef CTL_RAMDISK_PAGES
- softc->num_pages = softc->rd_size / PAGE_SIZE;
- softc->ramdisk_pages = (uint8_t **)malloc(sizeof(uint8_t *) *
- softc->num_pages, M_RAMDISK,
- M_WAITOK);
- for (i = 0; i < softc->num_pages; i++)
- softc->ramdisk_pages[i] = malloc(PAGE_SIZE, M_RAMDISK,M_WAITOK);
-#else
- softc->ramdisk_buffer = (uint8_t *)malloc(softc->rd_size, M_RAMDISK,
- M_WAITOK);
-#endif
-
return (0);
}
-void
+static int
ctl_backend_ramdisk_shutdown(void)
{
struct ctl_be_ramdisk_softc *softc = &rd_softc;
struct ctl_be_ramdisk_lun *lun, *next_lun;
-#ifdef CTL_RAMDISK_PAGES
- int i;
-#endif
mtx_lock(&softc->lock);
STAILQ_FOREACH_SAFE(lun, &softc->lun_list, links, next_lun) {
@@ -192,35 +203,210 @@ ctl_backend_ramdisk_shutdown(void)
mtx_lock(&softc->lock);
}
mtx_unlock(&softc->lock);
-
-#ifdef CTL_RAMDISK_PAGES
- for (i = 0; i < softc->num_pages; i++)
- free(softc->ramdisk_pages[i], M_RAMDISK);
+ mtx_destroy(&softc->lock);
+ return (0);
+}
- free(softc->ramdisk_pages, M_RAMDISK);
-#else
- free(softc->ramdisk_buffer, M_RAMDISK);
-#endif
+static uint8_t *
+ctl_backend_ramdisk_getpage(struct ctl_be_ramdisk_lun *be_lun, off_t pn,
+ getpage_op_t op)
+{
+ uint8_t **p, ***pp;
+ off_t i;
+ int s;
+
+ if (be_lun->cap_bytes == 0) {
+ switch (op) {
+ case GP_READ:
+ return (be_lun->zero_page);
+ case GP_WRITE:
+ return ((uint8_t *)be_lun->pages);
+ case GP_ANCHOR:
+ return (P_ANCHORED);
+ default:
+ return (P_UNMAPPED);
+ }
+ }
+ if (op == GP_WRITE || op == GP_ANCHOR) {
+ sx_xlock(&be_lun->page_lock);
+ pp = &be_lun->pages;
+ for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) {
+ if (*pp == NULL) {
+ *pp = malloc(PAGE_SIZE, M_RAMDISK,
+ M_WAITOK|M_ZERO);
+ }
+ i = pn >> s;
+ pp = (uint8_t ***)&(*pp)[i];
+ pn -= i << s;
+ }
+ if (*pp == P_UNMAPPED && be_lun->cap_used < be_lun->cap_bytes) {
+ if (op == GP_WRITE) {
+ *pp = malloc(be_lun->pblocksize, M_RAMDISK,
+ M_WAITOK|M_ZERO);
+ } else
+ *pp = P_ANCHORED;
+ be_lun->cap_used += be_lun->pblocksize;
+ } else if (*pp == P_ANCHORED && op == GP_WRITE) {
+ *pp = malloc(be_lun->pblocksize, M_RAMDISK,
+ M_WAITOK|M_ZERO);
+ }
+ sx_xunlock(&be_lun->page_lock);
+ return ((uint8_t *)*pp);
+ } else {
+ sx_slock(&be_lun->page_lock);
+ p = be_lun->pages;
+ for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) {
+ if (p == NULL)
+ break;
+ i = pn >> s;
+ p = (uint8_t **)p[i];
+ pn -= i << s;
+ }
+ sx_sunlock(&be_lun->page_lock);
+ if ((p == P_UNMAPPED || p == P_ANCHORED) && op == GP_READ)
+ return (be_lun->zero_page);
+ return ((uint8_t *)p);
+ }
+}
- if (ctl_backend_deregister(&ctl_be_ramdisk_driver) != 0) {
- printf("ctl_backend_ramdisk_shutdown: "
- "ctl_backend_deregister() failed!\n");
+static void
+ctl_backend_ramdisk_unmappage(struct ctl_be_ramdisk_lun *be_lun, off_t pn)
+{
+ uint8_t ***pp;
+ off_t i;
+ int s;
+
+ if (be_lun->cap_bytes == 0)
+ return;
+ sx_xlock(&be_lun->page_lock);
+ pp = &be_lun->pages;
+ for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) {
+ if (*pp == NULL)
+ goto noindir;
+ i = pn >> s;
+ pp = (uint8_t ***)&(*pp)[i];
+ pn -= i << s;
}
+ if (*pp == P_ANCHORED) {
+ be_lun->cap_used -= be_lun->pblocksize;
+ *pp = P_UNMAPPED;
+ } else if (*pp != P_UNMAPPED) {
+ free(*pp, M_RAMDISK);
+ be_lun->cap_used -= be_lun->pblocksize;
+ *pp = P_UNMAPPED;
+ }
+noindir:
+ sx_xunlock(&be_lun->page_lock);
+}
+
+static void
+ctl_backend_ramdisk_anchorpage(struct ctl_be_ramdisk_lun *be_lun, off_t pn)
+{
+ uint8_t ***pp;
+ off_t i;
+ int s;
+
+ if (be_lun->cap_bytes == 0)
+ return;
+ sx_xlock(&be_lun->page_lock);
+ pp = &be_lun->pages;
+ for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) {
+ if (*pp == NULL)
+ goto noindir;
+ i = pn >> s;
+ pp = (uint8_t ***)&(*pp)[i];
+ pn -= i << s;
+ }
+ if (*pp == P_UNMAPPED && be_lun->cap_used < be_lun->cap_bytes) {
+ be_lun->cap_used += be_lun->pblocksize;
+ *pp = P_ANCHORED;
+ } else if (*pp != P_ANCHORED) {
+ free(*pp, M_RAMDISK);
+ *pp = P_ANCHORED;
+ }
+noindir:
+ sx_xunlock(&be_lun->page_lock);
+}
+
+static void
+ctl_backend_ramdisk_freeallpages(uint8_t **p, int indir)
+{
+ int i;
+
+ if (p == NULL)
+ return;
+ if (indir == 0) {
+ free(p, M_RAMDISK);
+ return;
+ }
+ for (i = 0; i < PPP; i++) {
+ if (p[i] == NULL)
+ continue;
+ ctl_backend_ramdisk_freeallpages((uint8_t **)p[i], indir - 1);
+ }
+ free(p, M_RAMDISK);
+}
+
+static size_t
+cmp(uint8_t *a, uint8_t *b, size_t size)
+{
+ size_t i;
+
+ for (i = 0; i < size; i++) {
+ if (a[i] != b[i])
+ break;
+ }
+ return (i);
+}
+
+static int
+ctl_backend_ramdisk_cmp(union ctl_io *io)
+{
+ struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
+ struct ctl_be_ramdisk_lun *be_lun = cbe_lun->be_lun;
+ uint8_t *page;
+ uint8_t info[8];
+ uint64_t lba;
+ u_int lbaoff, lbas, res, off;
+
+ lbas = io->scsiio.kern_data_len / cbe_lun->blocksize;
+ lba = ARGS(io)->lba + PRIV(io)->len - lbas;
+ off = 0;
+ for (; lbas > 0; lbas--, lba++) {
+ page = ctl_backend_ramdisk_getpage(be_lun,
+ lba >> cbe_lun->pblockexp, GP_READ);
+ lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp);
+ page += lbaoff * cbe_lun->blocksize;
+ res = cmp(io->scsiio.kern_data_ptr + off, page,
+ cbe_lun->blocksize);
+ off += res;
+ if (res < cbe_lun->blocksize)
+ break;
+ }
+ if (lbas > 0) {
+ off += io->scsiio.kern_rel_offset - io->scsiio.kern_data_len;
+ scsi_u64to8b(off, info);
+ ctl_set_sense(&io->scsiio, /*current_error*/ 1,
+ /*sense_key*/ SSD_KEY_MISCOMPARE,
+ /*asc*/ 0x1D, /*ascq*/ 0x00,
+ /*type*/ SSD_ELEM_INFO,
+ /*size*/ sizeof(info), /*data*/ &info,
+ /*type*/ SSD_ELEM_NONE);
+ return (1);
+ }
+ return (0);
}
static int
ctl_backend_ramdisk_move_done(union ctl_io *io)
{
- struct ctl_be_lun *cbe_lun;
- struct ctl_be_ramdisk_lun *be_lun;
+ struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
+ struct ctl_be_ramdisk_lun *be_lun = cbe_lun->be_lun;
#ifdef CTL_TIME_IO
struct bintime cur_bt;
#endif
CTL_DEBUG_PRINT(("ctl_backend_ramdisk_move_done\n"));
- cbe_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
- CTL_PRIV_BACKEND_LUN].ptr;
- be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun->be_lun;
#ifdef CTL_TIME_IO
getbinuptime(&cur_bt);
bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt);
@@ -232,9 +418,24 @@ ctl_backend_ramdisk_move_done(union ctl_io *io)
io->scsiio.kern_rel_offset += io->scsiio.kern_data_len;
if (io->io_hdr.flags & CTL_FLAG_ABORT) {
;
+ } else if (io->io_hdr.port_status != 0 &&
+ ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
+ (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
+ ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1,
+ /*retry_count*/ io->io_hdr.port_status);
+ } else if (io->scsiio.kern_data_resid != 0 &&
+ (io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT &&
+ ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
+ (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
+ ctl_set_invalid_field_ciu(&io->scsiio);
} else if ((io->io_hdr.port_status == 0) &&
((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) {
- if (io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer > 0) {
+ if (ARGS(io)->flags & CTL_LLF_COMPARE) {
+ /* We have data block ready for comparison. */
+ if (ctl_backend_ramdisk_cmp(io))
+ goto done;
+ }
+ if (ARGS(io)->len > PRIV(io)->len) {
mtx_lock(&be_lun->queue_lock);
STAILQ_INSERT_TAIL(&be_lun->cont_queue,
&io->io_hdr, links);
@@ -244,93 +445,110 @@ ctl_backend_ramdisk_move_done(union ctl_io *io)
return (0);
}
ctl_set_success(&io->scsiio);
- } else if ((io->io_hdr.port_status != 0) &&
- ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
- (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
- /*
- * For hardware error sense keys, the sense key
- * specific value is defined to be a retry count,
- * but we use it to pass back an internal FETD
- * error code. XXX KDM Hopefully the FETD is only
- * using 16 bits for an error code, since that's
- * all the space we have in the sks field.
- */
- ctl_set_internal_failure(&io->scsiio,
- /*sks_valid*/ 1,
- /*retry_count*/
- io->io_hdr.port_status);
}
+done:
ctl_data_submit_done(io);
return(0);
}
-static int
-ctl_backend_ramdisk_submit(union ctl_io *io)
+static void
+ctl_backend_ramdisk_compare(union ctl_io *io)
{
- struct ctl_be_lun *cbe_lun;
- struct ctl_lba_len_flags *lbalen;
+ struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
+ u_int lbas, len;
- cbe_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
- CTL_PRIV_BACKEND_LUN].ptr;
- lbalen = (struct ctl_lba_len_flags *)&io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
- if (lbalen->flags & CTL_LLF_VERIFY) {
- ctl_set_success(&io->scsiio);
- ctl_data_submit_done(io);
- return (CTL_RETVAL_COMPLETE);
- }
- io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer =
- lbalen->len * cbe_lun->blocksize;
- ctl_backend_ramdisk_continue(io);
- return (CTL_RETVAL_COMPLETE);
+ lbas = ARGS(io)->len - PRIV(io)->len;
+ lbas = MIN(lbas, 131072 / cbe_lun->blocksize);
+ len = lbas * cbe_lun->blocksize;
+
+ io->scsiio.be_move_done = ctl_backend_ramdisk_move_done;
+ io->scsiio.kern_data_ptr = malloc(len, M_RAMDISK, M_WAITOK);
+ io->scsiio.kern_data_len = len;
+ io->scsiio.kern_sg_entries = 0;
+ io->io_hdr.flags |= CTL_FLAG_ALLOCATED;
+ PRIV(io)->len += lbas;
+#ifdef CTL_TIME_IO
+ getbinuptime(&io->io_hdr.dma_start_bt);
+#endif
+ ctl_datamove(io);
}
static void
-ctl_backend_ramdisk_continue(union ctl_io *io)
+ctl_backend_ramdisk_rw(union ctl_io *io)
{
- struct ctl_be_ramdisk_softc *softc;
- int len, len_filled, sg_filled;
-#ifdef CTL_RAMDISK_PAGES
+ struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
+ struct ctl_be_ramdisk_lun *be_lun = cbe_lun->be_lun;
struct ctl_sg_entry *sg_entries;
- int i;
-#endif
-
- softc = &rd_softc;
- len = io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer;
-#ifdef CTL_RAMDISK_PAGES
- sg_filled = min(btoc(len), softc->num_pages);
- if (sg_filled > 1) {
+ uint8_t *page;
+ uint64_t lba;
+ u_int i, len, lbaoff, lbas, sgs, off;
+ getpage_op_t op;
+
+ lba = ARGS(io)->lba + PRIV(io)->len;
+ lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp);
+ lbas = ARGS(io)->len - PRIV(io)->len;
+ lbas = MIN(lbas, (SGPP << cbe_lun->pblockexp) - lbaoff);
+ sgs = (lbas + lbaoff + be_lun->pblockmul - 1) >> cbe_lun->pblockexp;
+ off = lbaoff * cbe_lun->blocksize;
+ op = (ARGS(io)->flags & CTL_LLF_WRITE) ? GP_WRITE : GP_READ;
+ if (sgs > 1) {
io->scsiio.kern_data_ptr = malloc(sizeof(struct ctl_sg_entry) *
- sg_filled, M_RAMDISK,
- M_WAITOK);
+ sgs, M_RAMDISK, M_WAITOK);
sg_entries = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
- for (i = 0, len_filled = 0; i < sg_filled; i++) {
- sg_entries[i].addr = softc->ramdisk_pages[i];
- sg_entries[i].len = MIN(PAGE_SIZE, len - len_filled);
- len_filled += sg_entries[i].len;
+ len = lbas * cbe_lun->blocksize;
+ for (i = 0; i < sgs; i++) {
+ page = ctl_backend_ramdisk_getpage(be_lun,
+ (lba >> cbe_lun->pblockexp) + i, op);
+ if (page == P_UNMAPPED || page == P_ANCHORED) {
+ free(io->scsiio.kern_data_ptr, M_RAMDISK);
+nospc:
+ ctl_set_space_alloc_fail(&io->scsiio);
+ ctl_data_submit_done(io);
+ return;
+ }
+ sg_entries[i].addr = page + off;
+ sg_entries[i].len = MIN(len, be_lun->pblocksize - off);
+ len -= sg_entries[i].len;
+ off = 0;
}
} else {
- sg_filled = 0;
- len_filled = len;
- io->scsiio.kern_data_ptr = softc->ramdisk_pages[0];
+ page = ctl_backend_ramdisk_getpage(be_lun,
+ lba >> cbe_lun->pblockexp, op);
+ if (page == P_UNMAPPED || page == P_ANCHORED)
+ goto nospc;
+ sgs = 0;
+ io->scsiio.kern_data_ptr = page + off;
}
-#else
- sg_filled = 0;
- len_filled = min(len, softc->rd_size);
- io->scsiio.kern_data_ptr = softc->ramdisk_buffer;
-#endif /* CTL_RAMDISK_PAGES */
io->scsiio.be_move_done = ctl_backend_ramdisk_move_done;
- io->scsiio.kern_data_resid = 0;
- io->scsiio.kern_data_len = len_filled;
- io->scsiio.kern_sg_entries = sg_filled;
+ io->scsiio.kern_data_len = lbas * cbe_lun->blocksize;
+ io->scsiio.kern_sg_entries = sgs;
io->io_hdr.flags |= CTL_FLAG_ALLOCATED;
- io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer -= len_filled;
+ PRIV(io)->len += lbas;
#ifdef CTL_TIME_IO
getbinuptime(&io->io_hdr.dma_start_bt);
#endif
ctl_datamove(io);
}
+static int
+ctl_backend_ramdisk_submit(union ctl_io *io)
+{
+ struct ctl_lba_len_flags *lbalen = ARGS(io);
+
+ if (lbalen->flags & CTL_LLF_VERIFY) {
+ ctl_set_success(&io->scsiio);
+ ctl_data_submit_done(io);
+ return (CTL_RETVAL_COMPLETE);
+ }
+ PRIV(io)->len = 0;
+ if (lbalen->flags & CTL_LLF_COMPARE)
+ ctl_backend_ramdisk_compare(io);
+ else
+ ctl_backend_ramdisk_rw(io);
+ return (CTL_RETVAL_COMPLETE);
+}
+
static void
ctl_backend_ramdisk_worker(void *context, int pending)
{
@@ -338,7 +556,6 @@ ctl_backend_ramdisk_worker(void *context, int pending)
union ctl_io *io;
be_lun = (struct ctl_be_ramdisk_lun *)context;
-
mtx_lock(&be_lun->queue_lock);
for (;;) {
io = (union ctl_io *)STAILQ_FIRST(&be_lun->cont_queue);
@@ -346,7 +563,10 @@ ctl_backend_ramdisk_worker(void *context, int pending)
STAILQ_REMOVE(&be_lun->cont_queue, &io->io_hdr,
ctl_io_hdr, links);
mtx_unlock(&be_lun->queue_lock);
- ctl_backend_ramdisk_continue(io);
+ if (ARGS(io)->flags & CTL_LLF_COMPARE)
+ ctl_backend_ramdisk_compare(io);
+ else
+ ctl_backend_ramdisk_rw(io);
mtx_lock(&be_lun->queue_lock);
continue;
}
@@ -361,6 +581,259 @@ ctl_backend_ramdisk_worker(void *context, int pending)
}
static int
+ctl_backend_ramdisk_gls(union ctl_io *io)
+{
+ struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
+ struct ctl_be_ramdisk_lun *be_lun = cbe_lun->be_lun;
+ struct scsi_get_lba_status_data *data;
+ uint8_t *page;
+ u_int lbaoff;
+
+ data = (struct scsi_get_lba_status_data *)io->scsiio.kern_data_ptr;
+ scsi_u64to8b(ARGS(io)->lba, data->descr[0].addr);
+ lbaoff = ARGS(io)->lba & ~(UINT_MAX << cbe_lun->pblockexp);
+ scsi_ulto4b(be_lun->pblockmul - lbaoff, data->descr[0].length);
+ page = ctl_backend_ramdisk_getpage(be_lun,
+ ARGS(io)->lba >> cbe_lun->pblockexp, GP_OTHER);
+ if (page == P_UNMAPPED)
+ data->descr[0].status = 1;
+ else if (page == P_ANCHORED)
+ data->descr[0].status = 2;
+ else
+ data->descr[0].status = 0;
+ ctl_config_read_done(io);
+ return (CTL_RETVAL_COMPLETE);
+}
+
+static int
+ctl_backend_ramdisk_config_read(union ctl_io *io)
+{
+ int retval = 0;
+
+ switch (io->scsiio.cdb[0]) {
+ case SERVICE_ACTION_IN:
+ if (io->scsiio.cdb[1] == SGLS_SERVICE_ACTION) {
+ retval = ctl_backend_ramdisk_gls(io);
+ break;
+ }
+ ctl_set_invalid_field(&io->scsiio,
+ /*sks_valid*/ 1,
+ /*command*/ 1,
+ /*field*/ 1,
+ /*bit_valid*/ 1,
+ /*bit*/ 4);
+ ctl_config_read_done(io);
+ retval = CTL_RETVAL_COMPLETE;
+ break;
+ default:
+ ctl_set_invalid_opcode(&io->scsiio);
+ ctl_config_read_done(io);
+ retval = CTL_RETVAL_COMPLETE;
+ break;
+ }
+ return (retval);
+}
+
+static void
+ctl_backend_ramdisk_delete(struct ctl_be_lun *cbe_lun, off_t lba, off_t len,
+ int anchor)
+{
+ struct ctl_be_ramdisk_lun *be_lun = cbe_lun->be_lun;
+ uint8_t *page;
+ uint64_t p, lp;
+ u_int lbaoff;
+ getpage_op_t op = anchor ? GP_ANCHOR : GP_OTHER;
+
+ /* Partially zero first partial page. */
+ p = lba >> cbe_lun->pblockexp;
+ lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp);
+ if (lbaoff != 0) {
+ page = ctl_backend_ramdisk_getpage(be_lun, p, op);
+ if (page != P_UNMAPPED && page != P_ANCHORED) {
+ memset(page + lbaoff * cbe_lun->blocksize, 0,
+ min(len, be_lun->pblockmul - lbaoff) *
+ cbe_lun->blocksize);
+ }
+ p++;
+ }
+
+ /* Partially zero last partial page. */
+ lp = (lba + len) >> cbe_lun->pblockexp;
+ lbaoff = (lba + len) & ~(UINT_MAX << cbe_lun->pblockexp);
+ if (p <= lp && lbaoff != 0) {
+ page = ctl_backend_ramdisk_getpage(be_lun, lp, op);
+ if (page != P_UNMAPPED && page != P_ANCHORED)
+ memset(page, 0, lbaoff * cbe_lun->blocksize);
+ }
+
+ /* Delete remaining full pages. */
+ if (anchor) {
+ for (; p < lp; p++)
+ ctl_backend_ramdisk_anchorpage(be_lun, p);
+ } else {
+ for (; p < lp; p++)
+ ctl_backend_ramdisk_unmappage(be_lun, p);
+ }
+}
+
+static void
+ctl_backend_ramdisk_ws(union ctl_io *io)
+{
+ struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
+ struct ctl_be_ramdisk_lun *be_lun = cbe_lun->be_lun;
+ struct ctl_lba_len_flags *lbalen = ARGS(io);
+ uint8_t *page;
+ uint64_t lba;
+ u_int lbaoff, lbas;
+
+ if (lbalen->flags & ~(SWS_LBDATA | SWS_UNMAP | SWS_ANCHOR | SWS_NDOB)) {
+ ctl_set_invalid_field(&io->scsiio,
+ /*sks_valid*/ 1,
+ /*command*/ 1,
+ /*field*/ 1,
+ /*bit_valid*/ 0,
+ /*bit*/ 0);
+ ctl_config_write_done(io);
+ return;
+ }
+ if (lbalen->flags & SWS_UNMAP) {
+ ctl_backend_ramdisk_delete(cbe_lun, lbalen->lba, lbalen->len,
+ (lbalen->flags & SWS_ANCHOR) != 0);
+ ctl_set_success(&io->scsiio);
+ ctl_config_write_done(io);
+ return;
+ }
+
+ for (lba = lbalen->lba, lbas = lbalen->len; lbas > 0; lba++, lbas--) {
+ page = ctl_backend_ramdisk_getpage(be_lun,
+ lba >> cbe_lun->pblockexp, GP_WRITE);
+ if (page == P_UNMAPPED || page == P_ANCHORED) {
+ ctl_set_space_alloc_fail(&io->scsiio);
+ ctl_data_submit_done(io);
+ return;
+ }
+ lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp);
+ page += lbaoff * cbe_lun->blocksize;
+ if (lbalen->flags & SWS_NDOB) {
+ memset(page, 0, cbe_lun->blocksize);
+ } else {
+ memcpy(page, io->scsiio.kern_data_ptr,
+ cbe_lun->blocksize);
+ }
+ if (lbalen->flags & SWS_LBDATA)
+ scsi_ulto4b(lba, page);
+ }
+ ctl_set_success(&io->scsiio);
+ ctl_config_write_done(io);
+}
+
+static void
+ctl_backend_ramdisk_unmap(union ctl_io *io)
+{
+ struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
+ struct ctl_ptr_len_flags *ptrlen = (struct ctl_ptr_len_flags *)ARGS(io);
+ struct scsi_unmap_desc *buf, *end;
+
+ if ((ptrlen->flags & ~SU_ANCHOR) != 0) {
+ ctl_set_invalid_field(&io->scsiio,
+ /*sks_valid*/ 0,
+ /*command*/ 0,
+ /*field*/ 0,
+ /*bit_valid*/ 0,
+ /*bit*/ 0);
+ ctl_config_write_done(io);
+ return;
+ }
+
+ buf = (struct scsi_unmap_desc *)ptrlen->ptr;
+ end = buf + ptrlen->len / sizeof(*buf);
+ for (; buf < end; buf++) {
+ ctl_backend_ramdisk_delete(cbe_lun,
+ scsi_8btou64(buf->lba), scsi_4btoul(buf->length),
+ (ptrlen->flags & SU_ANCHOR) != 0);
+ }
+
+ ctl_set_success(&io->scsiio);
+ ctl_config_write_done(io);
+}
+
+static int
+ctl_backend_ramdisk_config_write(union ctl_io *io)
+{
+ struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
+ int retval = 0;
+
+ switch (io->scsiio.cdb[0]) {
+ case SYNCHRONIZE_CACHE:
+ case SYNCHRONIZE_CACHE_16:
+ /* We have no cache to flush. */
+ ctl_set_success(&io->scsiio);
+ ctl_config_write_done(io);
+ break;
+ case START_STOP_UNIT: {
+ struct scsi_start_stop_unit *cdb;
+
+ cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb;
+ if ((cdb->how & SSS_PC_MASK) != 0) {
+ ctl_set_success(&io->scsiio);
+ ctl_config_write_done(io);
+ break;
+ }
+ if (cdb->how & SSS_START) {
+ if (cdb->how & SSS_LOEJ)
+ ctl_lun_has_media(cbe_lun);
+ ctl_start_lun(cbe_lun);
+ } else {
+ ctl_stop_lun(cbe_lun);
+ if (cdb->how & SSS_LOEJ)
+ ctl_lun_ejected(cbe_lun);
+ }
+ ctl_set_success(&io->scsiio);
+ ctl_config_write_done(io);
+ break;
+ }
+ case PREVENT_ALLOW:
+ ctl_set_success(&io->scsiio);
+ ctl_config_write_done(io);
+ break;
+ case WRITE_SAME_10:
+ case WRITE_SAME_16:
+ ctl_backend_ramdisk_ws(io);
+ break;
+ case UNMAP:
+ ctl_backend_ramdisk_unmap(io);
+ break;
+ default:
+ ctl_set_invalid_opcode(&io->scsiio);
+ ctl_config_write_done(io);
+ retval = CTL_RETVAL_COMPLETE;
+ break;
+ }
+
+ return (retval);
+}
+
+static uint64_t
+ctl_backend_ramdisk_lun_attr(void *arg, const char *attrname)
+{
+ struct ctl_be_ramdisk_lun *be_lun = arg;
+ uint64_t val;
+
+ val = UINT64_MAX;
+ if (be_lun->cap_bytes == 0)
+ return (val);
+ sx_slock(&be_lun->page_lock);
+ if (strcmp(attrname, "blocksused") == 0) {
+ val = be_lun->cap_used / be_lun->cbe_lun.blocksize;
+ } else if (strcmp(attrname, "blocksavail") == 0) {
+ val = (be_lun->cap_bytes - be_lun->cap_used) /
+ be_lun->cbe_lun.blocksize;
+ }
+ sx_sunlock(&be_lun->page_lock);
+ return (val);
+}
+
+static int
ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
int flag, struct thread *td)
{
@@ -477,6 +950,9 @@ ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
taskqueue_drain_all(be_lun->io_taskqueue);
taskqueue_free(be_lun->io_taskqueue);
ctl_free_opts(&be_lun->cbe_lun.options);
+ free(be_lun->zero_page, M_RAMDISK);
+ ctl_backend_ramdisk_freeallpages(be_lun->pages, be_lun->indir);
+ sx_destroy(&be_lun->page_lock);
mtx_destroy(&be_lun->queue_lock);
free(be_lun, M_RAMDISK);
}
@@ -498,6 +974,7 @@ ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
struct ctl_lun_create_params *params;
char *value;
char tmpstr[32];
+ uint64_t t;
int retval;
retval = 0;
@@ -524,6 +1001,19 @@ ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
} else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF)
cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
+ be_lun->pblocksize = PAGE_SIZE;
+ value = ctl_get_opt(&cbe_lun->options, "pblocksize");
+ if (value != NULL) {
+ ctl_expand_number(value, &t);
+ be_lun->pblocksize = t;
+ }
+ if (be_lun->pblocksize < 512 || be_lun->pblocksize > 131072) {
+ snprintf(req->error_str, sizeof(req->error_str),
+ "%s: unsupported pblocksize %u", __func__,
+ be_lun->pblocksize);
+ goto bailout_error;
+ }
+
if (cbe_lun->lun_type == T_DIRECT ||
cbe_lun->lun_type == T_CDROM) {
if (params->blocksize_bytes != 0)
@@ -532,6 +1022,14 @@ ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
cbe_lun->blocksize = 2048;
else
cbe_lun->blocksize = 512;
+ be_lun->pblockmul = be_lun->pblocksize / cbe_lun->blocksize;
+ if (be_lun->pblockmul < 1 || !powerof2(be_lun->pblockmul)) {
+ snprintf(req->error_str, sizeof(req->error_str),
+ "%s: pblocksize %u not exp2 of blocksize %u",
+ __func__,
+ be_lun->pblocksize, cbe_lun->blocksize);
+ goto bailout_error;
+ }
if (params->lun_size_bytes < cbe_lun->blocksize) {
snprintf(req->error_str, sizeof(req->error_str),
"%s: LUN size %ju < blocksize %u", __func__,
@@ -540,9 +1038,25 @@ ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
}
be_lun->size_blocks = params->lun_size_bytes / cbe_lun->blocksize;
be_lun->size_bytes = be_lun->size_blocks * cbe_lun->blocksize;
+ be_lun->indir = 0;
+ t = be_lun->size_bytes / be_lun->pblocksize;
+ while (t > 1) {
+ t /= PPP;
+ be_lun->indir++;
+ }
cbe_lun->maxlba = be_lun->size_blocks - 1;
- cbe_lun->atomicblock = UINT32_MAX;
- cbe_lun->opttxferlen = softc->rd_size / cbe_lun->blocksize;
+ cbe_lun->pblockexp = fls(be_lun->pblockmul) - 1;
+ cbe_lun->pblockoff = 0;
+ cbe_lun->ublockexp = cbe_lun->pblockexp;
+ cbe_lun->ublockoff = 0;
+ cbe_lun->atomicblock = be_lun->pblocksize;
+ cbe_lun->opttxferlen = SGPP * be_lun->pblocksize;
+ value = ctl_get_opt(&cbe_lun->options, "capacity");
+ if (value != NULL)
+ ctl_expand_number(value, &be_lun->cap_bytes);
+ } else {
+ be_lun->pblockmul = 1;
+ cbe_lun->pblockexp = 0;
}
/* Tell the user the blocksize we ended up using */
@@ -550,7 +1064,7 @@ ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
params->lun_size_bytes = be_lun->size_bytes;
value = ctl_get_opt(&cbe_lun->options, "unmap");
- if (value != NULL && strcmp(value, "on") == 0)
+ if (value == NULL || strcmp(value, "off") != 0)
cbe_lun->flags |= CTL_LUN_FLAG_UNMAP;
value = ctl_get_opt(&cbe_lun->options, "readonly");
if (value != NULL) {
@@ -605,6 +1119,11 @@ ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
}
STAILQ_INIT(&be_lun->cont_queue);
+ sx_init(&be_lun->page_lock, "cram page lock");
+ if (be_lun->cap_bytes == 0)
+ be_lun->pages = malloc(be_lun->pblocksize, M_RAMDISK, M_WAITOK);
+ be_lun->zero_page = malloc(be_lun->pblocksize, M_RAMDISK,
+ M_WAITOK|M_ZERO);
mtx_init(&be_lun->queue_lock, "cram queue lock", NULL, MTX_DEF);
TASK_INIT(&be_lun->io_task, /*priority*/0, ctl_backend_ramdisk_worker,
be_lun);
@@ -679,10 +1198,12 @@ ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
bailout_error:
req->status = CTL_LUN_ERROR;
if (be_lun != NULL) {
- if (be_lun->io_taskqueue != NULL) {
+ if (be_lun->io_taskqueue != NULL)
taskqueue_free(be_lun->io_taskqueue);
- }
ctl_free_opts(&cbe_lun->options);
+ free(be_lun->zero_page, M_RAMDISK);
+ ctl_backend_ramdisk_freeallpages(be_lun->pages, be_lun->indir);
+ sx_destroy(&be_lun->page_lock);
mtx_destroy(&be_lun->queue_lock);
free(be_lun, M_RAMDISK);
}
@@ -838,104 +1359,3 @@ ctl_backend_ramdisk_lun_config_status(void *be_lun,
}
mtx_unlock(&softc->lock);
}
-
-static int
-ctl_backend_ramdisk_config_write(union ctl_io *io)
-{
- struct ctl_be_lun *cbe_lun;
- int retval;
-
- cbe_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
- CTL_PRIV_BACKEND_LUN].ptr;
- retval = 0;
- switch (io->scsiio.cdb[0]) {
- case SYNCHRONIZE_CACHE:
- case SYNCHRONIZE_CACHE_16:
- /*
- * The upper level CTL code will filter out any CDBs with
- * the immediate bit set and return the proper error. It
- * will also not allow a sync cache command to go to a LUN
- * that is powered down.
- *
- * We don't really need to worry about what LBA range the
- * user asked to be synced out. When they issue a sync
- * cache command, we'll sync out the whole thing.
- *
- * This is obviously just a stubbed out implementation.
- * The real implementation will be in the RAIDCore/CTL
- * interface, and can only really happen when RAIDCore
- * implements a per-array cache sync.
- */
- ctl_set_success(&io->scsiio);
- ctl_config_write_done(io);
- break;
- case START_STOP_UNIT: {
- struct scsi_start_stop_unit *cdb;
-
- cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb;
- if ((cdb->how & SSS_PC_MASK) != 0) {
- ctl_set_success(&io->scsiio);
- ctl_config_write_done(io);
- break;
- }
- if (cdb->how & SSS_START) {
- if (cdb->how & SSS_LOEJ)
- ctl_lun_has_media(cbe_lun);
- ctl_start_lun(cbe_lun);
- } else {
- ctl_stop_lun(cbe_lun);
- if (cdb->how & SSS_LOEJ)
- ctl_lun_ejected(cbe_lun);
- }
- ctl_set_success(&io->scsiio);
- ctl_config_write_done(io);
- break;
- }
- case PREVENT_ALLOW:
- case WRITE_SAME_10:
- case WRITE_SAME_16:
- case UNMAP:
- ctl_set_success(&io->scsiio);
- ctl_config_write_done(io);
- break;
- default:
- ctl_set_invalid_opcode(&io->scsiio);
- ctl_config_write_done(io);
- retval = CTL_RETVAL_COMPLETE;
- break;
- }
-
- return (retval);
-}
-
-static int
-ctl_backend_ramdisk_config_read(union ctl_io *io)
-{
- int retval = 0;
-
- switch (io->scsiio.cdb[0]) {
- case SERVICE_ACTION_IN:
- if (io->scsiio.cdb[1] == SGLS_SERVICE_ACTION) {
- /* We have nothing to tell, leave default data. */
- ctl_config_read_done(io);
- retval = CTL_RETVAL_COMPLETE;
- break;
- }
- ctl_set_invalid_field(&io->scsiio,
- /*sks_valid*/ 1,
- /*command*/ 1,
- /*field*/ 1,
- /*bit_valid*/ 1,
- /*bit*/ 4);
- ctl_config_read_done(io);
- retval = CTL_RETVAL_COMPLETE;
- break;
- default:
- ctl_set_invalid_opcode(&io->scsiio);
- ctl_config_read_done(io);
- retval = CTL_RETVAL_COMPLETE;
- break;
- }
-
- return (retval);
-}
diff --git a/sys/cam/ctl/ctl_cmd_table.c b/sys/cam/ctl/ctl_cmd_table.c
index eaedea6..e42df74 100644
--- a/sys/cam/ctl/ctl_cmd_table.c
+++ b/sys/cam/ctl/ctl_cmd_table.c
@@ -990,7 +990,7 @@ const struct ctl_cmd_entry ctl_cmd_table[256] =
CTL_CMD_FLAG_OK_ON_NO_MEDIA |
CTL_CMD_FLAG_OK_ON_STANDBY |
CTL_FLAG_DATA_OUT,
- CTL_LUN_PAT_NONE, 6, {0x11, 0, 0, 0xff, 0x07}},
+ CTL_LUN_PAT_NONE, 6, {0x13, 0, 0, 0xff, 0x07}},
/* 16 RESERVE(6) */
{ctl_scsi_reserve, CTL_SERIDX_RES, CTL_CMD_FLAG_ALLOW_ON_RESV |
@@ -1260,7 +1260,7 @@ const struct ctl_cmd_entry ctl_cmd_table[256] =
CTL_CMD_FLAG_OK_ON_NO_MEDIA |
CTL_CMD_FLAG_OK_ON_STANDBY |
CTL_FLAG_DATA_OUT,
- CTL_LUN_PAT_NONE, 10, {0x11, 0, 0, 0, 0, 0, 0xff, 0xff, 0x07} },
+ CTL_LUN_PAT_NONE, 10, {0x13, 0, 0, 0, 0, 0, 0xff, 0xff, 0x07} },
/* 56 RESERVE(10) */
{ctl_scsi_reserve, CTL_SERIDX_RES, CTL_CMD_FLAG_ALLOW_ON_RESV |
diff --git a/sys/cam/ctl/ctl_error.c b/sys/cam/ctl/ctl_error.c
index 02330a1..9c222fa 100644
--- a/sys/cam/ctl/ctl_error.c
+++ b/sys/cam/ctl/ctl_error.c
@@ -129,7 +129,7 @@ ctl_set_sense(struct ctl_scsiio *ctsio, int current_error, int sense_key,
* completed. Therefore we can safely access the LUN structure and
* flags without the lock.
*/
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+ lun = CTL_LUN(ctsio);
va_start(ap, ascq);
sense_len = 0;
@@ -641,6 +641,18 @@ ctl_set_invalid_field(struct ctl_scsiio *ctsio, int sks_valid, int command,
/*data*/ sks,
SSD_ELEM_NONE);
}
+void
+ctl_set_invalid_field_ciu(struct ctl_scsiio *ctsio)
+{
+
+ /* "Invalid field in command information unit" */
+ ctl_set_sense(ctsio,
+ /*current_error*/ 1,
+ /*sense_key*/ SSD_KEY_ABORTED_COMMAND,
+ /*asc*/ 0x0E,
+ /*ascq*/ 0x03,
+ SSD_ELEM_NONE);
+}
void
ctl_set_invalid_opcode(struct ctl_scsiio *ctsio)
diff --git a/sys/cam/ctl/ctl_error.h b/sys/cam/ctl/ctl_error.h
index d4cdbb3..75c948c 100644
--- a/sys/cam/ctl/ctl_error.h
+++ b/sys/cam/ctl/ctl_error.h
@@ -66,6 +66,7 @@ void ctl_set_overlapped_cmd(struct ctl_scsiio *ctsio);
void ctl_set_overlapped_tag(struct ctl_scsiio *ctsio, uint8_t tag);
void ctl_set_invalid_field(struct ctl_scsiio *ctsio, int sks_valid, int command,
int field, int bit_valid, int bit);
+void ctl_set_invalid_field_ciu(struct ctl_scsiio *ctsio);
void ctl_set_invalid_opcode(struct ctl_scsiio *ctsio);
void ctl_set_param_len_error(struct ctl_scsiio *ctsio);
void ctl_set_already_locked(struct ctl_scsiio *ctsio);
diff --git a/sys/cam/ctl/ctl_frontend.c b/sys/cam/ctl/ctl_frontend.c
index 75837b5..765a31d 100644
--- a/sys/cam/ctl/ctl_frontend.c
+++ b/sys/cam/ctl/ctl_frontend.c
@@ -1,5 +1,6 @@
/*-
* Copyright (c) 2003 Silicon Graphics International Corp.
+ * Copyright (c) 2014-2017 Alexander Motin <mav@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -69,12 +70,11 @@ ctl_frontend_register(struct ctl_frontend *fe)
{
struct ctl_softc *softc = control_softc;
struct ctl_frontend *fe_tmp;
+ int error;
KASSERT(softc != NULL, ("CTL is not initialized"));
- /*
- * Sanity check, make sure this isn't a duplicate registration.
- */
+ /* Sanity check, make sure this isn't a duplicate registration. */
mtx_lock(&softc->ctl_lock);
STAILQ_FOREACH(fe_tmp, &softc->fe_list, links) {
if (strcmp(fe_tmp->name, fe->name) == 0) {
@@ -85,11 +85,14 @@ ctl_frontend_register(struct ctl_frontend *fe)
mtx_unlock(&softc->ctl_lock);
STAILQ_INIT(&fe->port_list);
- /*
- * Call the frontend's initialization routine.
- */
- if (fe->init != NULL)
- fe->init();
+ /* Call the frontend's initialization routine. */
+ if (fe->init != NULL) {
+ if ((error = fe->init()) != 0) {
+ printf("%s frontend init error: %d\n",
+ fe->name, error);
+ return (error);
+ }
+ }
mtx_lock(&softc->ctl_lock);
softc->num_frontends++;
@@ -102,20 +105,21 @@ int
ctl_frontend_deregister(struct ctl_frontend *fe)
{
struct ctl_softc *softc = control_softc;
-
- if (!STAILQ_EMPTY(&fe->port_list))
- return (-1);
+ int error;
+
+ /* Call the frontend's shutdown routine.*/
+ if (fe->shutdown != NULL) {
+ if ((error = fe->shutdown()) != 0) {
+ printf("%s frontend shutdown error: %d\n",
+ fe->name, error);
+ return (error);
+ }
+ }
mtx_lock(&softc->ctl_lock);
STAILQ_REMOVE(&softc->fe_list, fe, ctl_frontend, links);
softc->num_frontends--;
mtx_unlock(&softc->ctl_lock);
-
- /*
- * Call the frontend's shutdown routine.
- */
- if (fe->shutdown != NULL)
- fe->shutdown();
return (0);
}
@@ -192,13 +196,14 @@ error:
mtx_unlock(&softc->ctl_lock);
return (retval);
}
+ port->targ_port = port_num;
port->ctl_pool_ref = pool;
-
if (port->options.stqh_first == NULL)
STAILQ_INIT(&port->options);
+ port->stats.item = port_num;
+ mtx_init(&port->port_lock, "CTL port", NULL, MTX_DEF);
mtx_lock(&softc->ctl_lock);
- port->targ_port = port_num;
STAILQ_INSERT_TAIL(&port->frontend->port_list, port, fe_links);
for (tport = NULL, nport = STAILQ_FIRST(&softc->port_list);
nport != NULL && nport->targ_port < port_num;
@@ -218,17 +223,11 @@ int
ctl_port_deregister(struct ctl_port *port)
{
struct ctl_softc *softc = port->ctl_softc;
- struct ctl_io_pool *pool;
- int retval, i;
-
- retval = 0;
+ struct ctl_io_pool *pool = (struct ctl_io_pool *)port->ctl_pool_ref;
+ int i;
- pool = (struct ctl_io_pool *)port->ctl_pool_ref;
-
- if (port->targ_port == -1) {
- retval = 1;
- goto bailout;
- }
+ if (port->targ_port == -1)
+ return (1);
mtx_lock(&softc->ctl_lock);
STAILQ_REMOVE(&softc->port_list, port, ctl_port, links);
@@ -251,9 +250,9 @@ ctl_port_deregister(struct ctl_port *port)
for (i = 0; i < port->max_initiators; i++)
free(port->wwpn_iid[i].name, M_CTL);
free(port->wwpn_iid, M_CTL);
+ mtx_destroy(&port->port_lock);
-bailout:
- return (retval);
+ return (0);
}
void
@@ -315,9 +314,9 @@ ctl_port_online(struct ctl_port *port)
if (port->lun_enable != NULL) {
if (port->lun_map) {
- for (l = 0; l < CTL_MAX_LUNS; l++) {
- if (ctl_lun_map_from_port(port, l) >=
- CTL_MAX_LUNS)
+ for (l = 0; l < port->lun_map_size; l++) {
+ if (ctl_lun_map_from_port(port, l) ==
+ UINT32_MAX)
continue;
port->lun_enable(port->targ_lun_arg, l);
}
@@ -338,7 +337,7 @@ ctl_port_online(struct ctl_port *port)
}
port->status |= CTL_PORT_STATUS_ONLINE;
STAILQ_FOREACH(lun, &softc->lun_list, links) {
- if (ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS)
+ if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX)
continue;
mtx_lock(&lun->lun_lock);
ctl_est_ua_all(lun, -1, CTL_UA_INQ_CHANGE);
@@ -359,9 +358,9 @@ ctl_port_offline(struct ctl_port *port)
port->port_offline(port->onoff_arg);
if (port->lun_disable != NULL) {
if (port->lun_map) {
- for (l = 0; l < CTL_MAX_LUNS; l++) {
- if (ctl_lun_map_from_port(port, l) >=
- CTL_MAX_LUNS)
+ for (l = 0; l < port->lun_map_size; l++) {
+ if (ctl_lun_map_from_port(port, l) ==
+ UINT32_MAX)
continue;
port->lun_disable(port->targ_lun_arg, l);
}
@@ -373,7 +372,7 @@ ctl_port_offline(struct ctl_port *port)
mtx_lock(&softc->ctl_lock);
port->status &= ~CTL_PORT_STATUS_ONLINE;
STAILQ_FOREACH(lun, &softc->lun_list, links) {
- if (ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS)
+ if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX)
continue;
mtx_lock(&lun->lun_lock);
ctl_est_ua_all(lun, -1, CTL_UA_INQ_CHANGE);
diff --git a/sys/cam/ctl/ctl_frontend.h b/sys/cam/ctl/ctl_frontend.h
index 1dd970a..38eb863 100644
--- a/sys/cam/ctl/ctl_frontend.h
+++ b/sys/cam/ctl/ctl_frontend.h
@@ -1,5 +1,6 @@
/*-
* Copyright (c) 2003 Silicon Graphics International Corp.
+ * Copyright (c) 2014-2017 Alexander Motin <mav@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -39,6 +40,8 @@
#ifndef _CTL_FRONTEND_H_
#define _CTL_FRONTEND_H_
+#include <cam/ctl/ctl_ioctl.h>
+
typedef enum {
CTL_PORT_STATUS_NONE = 0x00,
CTL_PORT_STATUS_ONLINE = 0x01,
@@ -46,7 +49,7 @@ typedef enum {
} ctl_port_status;
typedef int (*fe_init_t)(void);
-typedef void (*fe_shutdown_t)(void);
+typedef int (*fe_shutdown_t)(void);
typedef void (*port_func_t)(void *onoff_arg);
typedef int (*port_info_func_t)(void *onoff_arg, struct sbuf *sb);
typedef int (*lun_func_t)(void *arg, int lun_id);
@@ -58,12 +61,13 @@ typedef int (*fe_ioctl_t)(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
{ \
switch (type) { \
case MOD_LOAD: \
- ctl_frontend_register( \
- (struct ctl_frontend *)data); \
+ return (ctl_frontend_register( \
+ (struct ctl_frontend *)data)); \
break; \
case MOD_UNLOAD: \
- printf(#name " module unload - not possible for this module type\n"); \
- return EINVAL; \
+ return (ctl_frontend_deregister( \
+ (struct ctl_frontend *)data)); \
+ break; \
default: \
return EOPNOTSUPP; \
} \
@@ -225,6 +229,7 @@ struct ctl_port {
void *onoff_arg; /* passed to CTL */
lun_func_t lun_enable; /* passed to CTL */
lun_func_t lun_disable; /* passed to CTL */
+ int lun_map_size; /* passed to CTL */
uint32_t *lun_map; /* passed to CTL */
void *targ_lun_arg; /* passed to CTL */
void (*fe_datamove)(union ctl_io *io); /* passed to CTL */
@@ -242,6 +247,8 @@ struct ctl_port {
struct ctl_devid *port_devid; /* passed to CTL */
struct ctl_devid *target_devid; /* passed to CTL */
struct ctl_devid *init_devid; /* passed to CTL */
+ struct ctl_io_stats stats; /* used by CTL */
+ struct mtx port_lock; /* used by CTL */
STAILQ_ENTRY(ctl_port) fe_links; /* used by CTL */
STAILQ_ENTRY(ctl_port) links; /* used by CTL */
};
diff --git a/sys/cam/ctl/ctl_frontend_cam_sim.c b/sys/cam/ctl/ctl_frontend_cam_sim.c
index 2166f45..9a0ca9a 100644
--- a/sys/cam/ctl/ctl_frontend_cam_sim.c
+++ b/sys/cam/ctl/ctl_frontend_cam_sim.c
@@ -94,15 +94,14 @@ struct cfcs_softc {
CAM_SNS_BUF_PHYS | CAM_CDB_PHYS | CAM_SENSE_PTR | \
CAM_SENSE_PHYS)
-int cfcs_init(void);
+static int cfcs_init(void);
+static int cfcs_shutdown(void);
static void cfcs_poll(struct cam_sim *sim);
static void cfcs_online(void *arg);
static void cfcs_offline(void *arg);
static void cfcs_datamove(union ctl_io *io);
static void cfcs_done(union ctl_io *io);
void cfcs_action(struct cam_sim *sim, union ccb *ccb);
-static void cfcs_async(void *callback_arg, uint32_t code,
- struct cam_path *path, void *arg);
struct cfcs_softc cfcs_softc;
/*
@@ -121,14 +120,14 @@ static struct ctl_frontend cfcs_frontend =
{
.name = "camsim",
.init = cfcs_init,
+ .shutdown = cfcs_shutdown,
};
CTL_FRONTEND_DECLARE(ctlcfcs, cfcs_frontend);
-int
+static int
cfcs_init(void)
{
struct cfcs_softc *softc;
- struct ccb_setasync csa;
struct ctl_port *port;
int retval;
@@ -214,13 +213,6 @@ cfcs_init(void)
goto bailout;
}
- xpt_setup_ccb(&csa.ccb_h, softc->path, CAM_PRIORITY_NONE);
- csa.ccb_h.func_code = XPT_SASYNC_CB;
- csa.event_enable = AC_LOST_DEVICE;
- csa.callback = cfcs_async;
- csa.callback_arg = softc->sim;
- xpt_action((union ccb *)&csa);
-
mtx_unlock(&softc->lock);
return (retval);
@@ -236,6 +228,27 @@ bailout:
return (retval);
}
+static int
+cfcs_shutdown(void)
+{
+ struct cfcs_softc *softc = &cfcs_softc;
+ struct ctl_port *port = &softc->port;
+ int error;
+
+ ctl_port_offline(port);
+
+ mtx_lock(&softc->lock);
+ xpt_free_path(softc->path);
+ xpt_bus_deregister(cam_sim_path(softc->sim));
+ cam_sim_free(softc->sim, /*free_devq*/ TRUE);
+ mtx_unlock(&softc->lock);
+ mtx_destroy(&softc->lock);
+
+ if ((error = ctl_port_deregister(port)) != 0)
+ printf("%s: cam_sim port deregistration failed\n", __func__);
+ return (error);
+}
+
static void
cfcs_poll(struct cam_sim *sim)
{
@@ -300,14 +313,10 @@ cfcs_datamove(union ctl_io *io)
struct ctl_sg_entry ctl_sg_entry, *ctl_sglist;
int cam_sg_count, ctl_sg_count, cam_sg_start;
int cam_sg_offset;
- int len_to_copy, len_copied;
+ int len_to_copy;
int ctl_watermark, cam_watermark;
int i, j;
-
- cam_sg_offset = 0;
- cam_sg_start = 0;
-
ccb = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
/*
@@ -330,6 +339,8 @@ cfcs_datamove(union ctl_io *io)
cam_sglist = (bus_dma_segment_t *)ccb->csio.data_ptr;
cam_sg_count = ccb->csio.sglist_cnt;
+ cam_sg_start = cam_sg_count;
+ cam_sg_offset = 0;
for (i = 0, len_seen = 0; i < cam_sg_count; i++) {
if ((len_seen + cam_sglist[i].ds_len) >=
@@ -367,7 +378,6 @@ cfcs_datamove(union ctl_io *io)
ctl_watermark = 0;
cam_watermark = cam_sg_offset;
- len_copied = 0;
for (i = cam_sg_start, j = 0;
i < cam_sg_count && j < ctl_sg_count;) {
uint8_t *cam_ptr, *ctl_ptr;
@@ -389,9 +399,6 @@ cfcs_datamove(union ctl_io *io)
ctl_ptr = (uint8_t *)ctl_sglist[j].addr;
ctl_ptr = ctl_ptr + ctl_watermark;
- ctl_watermark += len_to_copy;
- cam_watermark += len_to_copy;
-
if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
CTL_FLAG_DATA_IN) {
CTL_DEBUG_PRINT(("%s: copying %d bytes to CAM\n",
@@ -407,24 +414,27 @@ cfcs_datamove(union ctl_io *io)
bcopy(cam_ptr, ctl_ptr, len_to_copy);
}
- len_copied += len_to_copy;
+ io->scsiio.ext_data_filled += len_to_copy;
+ io->scsiio.kern_data_resid -= len_to_copy;
+ cam_watermark += len_to_copy;
if (cam_sglist[i].ds_len == cam_watermark) {
i++;
cam_watermark = 0;
}
+ ctl_watermark += len_to_copy;
if (ctl_sglist[j].len == ctl_watermark) {
j++;
ctl_watermark = 0;
}
}
- io->scsiio.ext_data_filled += len_copied;
-
if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) {
io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = NULL;
io->io_hdr.flags |= CTL_FLAG_STATUS_SENT;
+ ccb->csio.resid = ccb->csio.dxfer_len -
+ io->scsiio.ext_data_filled;
ccb->ccb_h.status &= ~CAM_STATUS_MASK;
ccb->ccb_h.status |= CAM_REQ_CMP;
xpt_done(ccb);
@@ -453,6 +463,10 @@ cfcs_done(union ctl_io *io)
/*
* Translate CTL status to CAM status.
*/
+ if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
+ ccb->csio.resid = ccb->csio.dxfer_len -
+ io->scsiio.ext_data_filled;
+ }
ccb->ccb_h.status &= ~CAM_STATUS_MASK;
switch (io->io_hdr.status & CTL_STATUS_MASK) {
case CTL_SUCCESS:
@@ -587,8 +601,7 @@ cfcs_action(struct cam_sim *sim, union ccb *ccb)
__func__, csio->cdb_len, sizeof(io->scsiio.cdb));
}
io->scsiio.cdb_len = min(csio->cdb_len, sizeof(io->scsiio.cdb));
- bcopy(csio->cdb_io.cdb_bytes, io->scsiio.cdb,
- io->scsiio.cdb_len);
+ bcopy(scsiio_cdb_ptr(csio), io->scsiio.cdb, io->scsiio.cdb_len);
ccb->ccb_h.status |= CAM_SIM_QUEUED;
err = ctl_queue(io);
@@ -801,9 +814,3 @@ cfcs_action(struct cam_sim *sim, union ccb *ccb)
break;
}
}
-
-static void
-cfcs_async(void *callback_arg, uint32_t code, struct cam_path *path, void *arg)
-{
-
-}
diff --git a/sys/cam/ctl/ctl_frontend_ioctl.c b/sys/cam/ctl/ctl_frontend_ioctl.c
index 97f29f1..4063c97 100644
--- a/sys/cam/ctl/ctl_frontend_ioctl.c
+++ b/sys/cam/ctl/ctl_frontend_ioctl.c
@@ -76,7 +76,7 @@ struct cfi_softc {
static struct cfi_softc cfi_softc;
static int cfi_init(void);
-static void cfi_shutdown(void);
+static int cfi_shutdown(void);
static void cfi_datamove(union ctl_io *io);
static void cfi_done(union ctl_io *io);
@@ -93,6 +93,7 @@ cfi_init(void)
{
struct cfi_softc *isoftc = &cfi_softc;
struct ctl_port *port;
+ int error = 0;
memset(isoftc, 0, sizeof(*isoftc));
@@ -108,24 +109,25 @@ cfi_init(void)
port->targ_port = -1;
port->max_initiators = 1;
- if (ctl_port_register(port) != 0) {
+ if ((error = ctl_port_register(port)) != 0) {
printf("%s: ioctl port registration failed\n", __func__);
- return (0);
+ return (error);
}
ctl_port_online(port);
return (0);
}
-void
+static int
cfi_shutdown(void)
{
struct cfi_softc *isoftc = &cfi_softc;
- struct ctl_port *port;
+ struct ctl_port *port = &isoftc->port;
+ int error = 0;
- port = &isoftc->port;
ctl_port_offline(port);
- if (ctl_port_deregister(&isoftc->port) != 0)
- printf("%s: ctl_frontend_deregister() failed\n", __func__);
+ if ((error = ctl_port_deregister(port)) != 0)
+ printf("%s: ioctl port deregistration failed\n", __func__);
+ return (error);
}
/*
@@ -138,22 +140,20 @@ ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio)
struct ctl_sg_entry ext_entry, kern_entry;
int ext_sglen, ext_sg_entries, kern_sg_entries;
int ext_sg_start, ext_offset;
- int len_to_copy, len_copied;
+ int len_to_copy;
int kern_watermark, ext_watermark;
int ext_sglist_malloced;
int i, j;
- ext_sglist_malloced = 0;
- ext_sg_start = 0;
- ext_offset = 0;
-
CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove\n"));
/*
* If this flag is set, fake the data transfer.
*/
if (ctsio->io_hdr.flags & CTL_FLAG_NO_DATAMOVE) {
- ctsio->ext_data_filled = ctsio->ext_data_len;
+ ext_sglist_malloced = 0;
+ ctsio->ext_data_filled += ctsio->kern_data_len;
+ ctsio->kern_data_resid = 0;
goto bailout;
}
@@ -165,7 +165,6 @@ ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio)
int len_seen;
ext_sglen = ctsio->ext_sg_entries * sizeof(*ext_sglist);
-
ext_sglist = (struct ctl_sg_entry *)malloc(ext_sglen, M_CTL,
M_WAITOK);
ext_sglist_malloced = 1;
@@ -174,6 +173,8 @@ ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio)
goto bailout;
}
ext_sg_entries = ctsio->ext_sg_entries;
+ ext_sg_start = ext_sg_entries;
+ ext_offset = 0;
len_seen = 0;
for (i = 0; i < ext_sg_entries; i++) {
if ((len_seen + ext_sglist[i].len) >=
@@ -186,6 +187,7 @@ ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio)
}
} else {
ext_sglist = &ext_entry;
+ ext_sglist_malloced = 0;
ext_sglist->addr = ctsio->ext_data_ptr;
ext_sglist->len = ctsio->ext_data_len;
ext_sg_entries = 1;
@@ -203,10 +205,8 @@ ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio)
kern_sg_entries = 1;
}
-
kern_watermark = 0;
ext_watermark = ext_offset;
- len_copied = 0;
for (i = ext_sg_start, j = 0;
i < ext_sg_entries && j < kern_sg_entries;) {
uint8_t *ext_ptr, *kern_ptr;
@@ -228,9 +228,6 @@ ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio)
kern_ptr = (uint8_t *)kern_sglist[j].addr;
kern_ptr = kern_ptr + kern_watermark;
- kern_watermark += len_to_copy;
- ext_watermark += len_to_copy;
-
if ((ctsio->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
CTL_FLAG_DATA_IN) {
CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: copying %d "
@@ -252,21 +249,22 @@ ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio)
}
}
- len_copied += len_to_copy;
+ ctsio->ext_data_filled += len_to_copy;
+ ctsio->kern_data_resid -= len_to_copy;
+ ext_watermark += len_to_copy;
if (ext_sglist[i].len == ext_watermark) {
i++;
ext_watermark = 0;
}
+ kern_watermark += len_to_copy;
if (kern_sglist[j].len == kern_watermark) {
j++;
kern_watermark = 0;
}
}
- ctsio->ext_data_filled += len_copied;
-
CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: ext_sg_entries: %d, "
"kern_sg_entries: %d\n", ext_sg_entries,
kern_sg_entries));
@@ -274,10 +272,7 @@ ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio)
"kern_data_len = %d\n", ctsio->ext_data_len,
ctsio->kern_data_len));
-
- /* XXX KDM set residual?? */
bailout:
-
if (ext_sglist_malloced != 0)
free(ext_sglist, M_CTL);
@@ -397,7 +392,7 @@ ctl_ioctl_io(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
struct thread *td)
{
union ctl_io *io;
- void *pool_tmp;
+ void *pool_tmp, *sc_tmp;
int retval = 0;
/*
@@ -414,8 +409,10 @@ ctl_ioctl_io(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
* spammed by the user's ctl_io.
*/
pool_tmp = io->io_hdr.pool;
+ sc_tmp = CTL_SOFTC(io);
memcpy(io, (void *)addr, sizeof(*io));
io->io_hdr.pool = pool_tmp;
+ CTL_SOFTC(io) = sc_tmp;
/*
* No status yet, so make sure the status is set properly.
diff --git a/sys/cam/ctl/ctl_frontend_iscsi.c b/sys/cam/ctl/ctl_frontend_iscsi.c
index b844be6..8a47513 100644
--- a/sys/cam/ctl/ctl_frontend_iscsi.c
+++ b/sys/cam/ctl/ctl_frontend_iscsi.c
@@ -146,7 +146,8 @@ SYSCTL_INT(_kern_cam_ctl_iscsi, OID_AUTO, maxcmdsn_delta, CTLFLAG_RWTUN,
#define PDU_TOTAL_TRANSFER_LEN(X) (X)->ip_prv1
#define PDU_R2TSN(X) (X)->ip_prv2
-int cfiscsi_init(void);
+static int cfiscsi_init(void);
+static int cfiscsi_shutdown(void);
static void cfiscsi_online(void *arg);
static void cfiscsi_offline(void *arg);
static int cfiscsi_info(void *arg, struct sbuf *sb);
@@ -178,6 +179,7 @@ static struct ctl_frontend cfiscsi_frontend =
.name = "iscsi",
.init = cfiscsi_init,
.ioctl = cfiscsi_ioctl,
+ .shutdown = cfiscsi_shutdown,
};
CTL_FRONTEND_DECLARE(ctlcfiscsi, cfiscsi_frontend);
MODULE_DEPEND(ctlcfiscsi, icl, 1, 1, 1);
@@ -765,6 +767,7 @@ cfiscsi_handle_data_segment(struct icl_pdu *request, struct cfiscsi_data_wait *c
cdw->cdw_sg_len -= copy_len;
off += copy_len;
io->scsiio.ext_data_filled += copy_len;
+ io->scsiio.kern_data_resid -= copy_len;
if (cdw->cdw_sg_len == 0) {
/*
@@ -1273,7 +1276,7 @@ cfiscsi_session_delete(struct cfiscsi_session *cs)
free(cs, M_CFISCSI);
}
-int
+static int
cfiscsi_init(void)
{
struct cfiscsi_softc *softc;
@@ -1296,6 +1299,23 @@ cfiscsi_init(void)
return (0);
}
+static int
+cfiscsi_shutdown(void)
+{
+ struct cfiscsi_softc *softc = &cfiscsi_softc;
+
+ if (!TAILQ_EMPTY(&softc->sessions) || !TAILQ_EMPTY(&softc->targets))
+ return (EBUSY);
+
+ uma_zdestroy(cfiscsi_data_wait_zone);
+#ifdef ICL_KERNEL_PROXY
+ cv_destroy(&softc->accept_cv);
+#endif
+ cv_destroy(&softc->sessions_cv);
+ mtx_destroy(&softc->lock);
+ return (0);
+}
+
#ifdef ICL_KERNEL_PROXY
static void
cfiscsi_accept(struct socket *so, struct sockaddr *sa, int portal_id)
@@ -1998,7 +2018,8 @@ cfiscsi_ioctl_port_create(struct ctl_req *req)
if (ct->ct_state == CFISCSI_TARGET_STATE_ACTIVE) {
req->status = CTL_LUN_ERROR;
snprintf(req->error_str, sizeof(req->error_str),
- "target \"%s\" already exists", target);
+ "target \"%s\" for portal group tag %u already exists",
+ target, tag);
cfiscsi_target_release(ct);
ctl_free_opts(&opts);
return;
@@ -2425,6 +2446,7 @@ cfiscsi_datamove_in(union ctl_io *io)
}
sg_addr += len;
sg_len -= len;
+ io->scsiio.kern_data_resid -= len;
KASSERT(buffer_offset + response->ip_data_len <= expected_len,
("buffer_offset %zd + ip_data_len %zd > expected_len %zd",
@@ -2510,7 +2532,7 @@ cfiscsi_datamove_out(union ctl_io *io)
struct iscsi_bhs_r2t *bhsr2t;
struct cfiscsi_data_wait *cdw;
struct ctl_sg_entry ctl_sg_entry, *ctl_sglist;
- uint32_t expected_len, r2t_off, r2t_len;
+ uint32_t expected_len, datamove_len, r2t_off, r2t_len;
uint32_t target_transfer_tag;
bool done;
@@ -2529,16 +2551,15 @@ cfiscsi_datamove_out(union ctl_io *io)
PDU_TOTAL_TRANSFER_LEN(request) = io->scsiio.kern_total_len;
/*
- * Report write underflow as error since CTL and backends don't
- * really support it, and SCSI does not tell how to do it right.
+ * Complete write underflow. Not a single byte to read. Return.
*/
expected_len = ntohl(bhssc->bhssc_expected_data_transfer_length);
- if (io->scsiio.kern_rel_offset + io->scsiio.kern_data_len >
- expected_len) {
- io->scsiio.io_hdr.port_status = 43;
+ if (io->scsiio.kern_rel_offset >= expected_len) {
io->scsiio.be_move_done(io);
return;
}
+ datamove_len = MIN(io->scsiio.kern_data_len,
+ expected_len - io->scsiio.kern_rel_offset);
target_transfer_tag =
atomic_fetchadd_32(&cs->cs_target_transfer_tag, 1);
@@ -2560,7 +2581,7 @@ cfiscsi_datamove_out(union ctl_io *io)
cdw->cdw_ctl_io = io;
cdw->cdw_target_transfer_tag = target_transfer_tag;
cdw->cdw_initiator_task_tag = bhssc->bhssc_initiator_task_tag;
- cdw->cdw_r2t_end = io->scsiio.kern_data_len;
+ cdw->cdw_r2t_end = datamove_len;
cdw->cdw_datasn = 0;
/* Set initial data pointer for the CDW respecting ext_data_filled. */
@@ -2569,7 +2590,7 @@ cfiscsi_datamove_out(union ctl_io *io)
} else {
ctl_sglist = &ctl_sg_entry;
ctl_sglist->addr = io->scsiio.kern_data_ptr;
- ctl_sglist->len = io->scsiio.kern_data_len;
+ ctl_sglist->len = datamove_len;
}
cdw->cdw_sg_index = 0;
cdw->cdw_sg_addr = ctl_sglist[cdw->cdw_sg_index].addr;
@@ -2600,7 +2621,7 @@ cfiscsi_datamove_out(union ctl_io *io)
}
r2t_off = io->scsiio.kern_rel_offset + io->scsiio.ext_data_filled;
- r2t_len = MIN(io->scsiio.kern_data_len - io->scsiio.ext_data_filled,
+ r2t_len = MIN(datamove_len - io->scsiio.ext_data_filled,
cs->cs_max_burst_length);
cdw->cdw_r2t_end = io->scsiio.ext_data_filled + r2t_len;
diff --git a/sys/cam/ctl/ctl_ha.c b/sys/cam/ctl/ctl_ha.c
index 1d85847..9a63240 100644
--- a/sys/cam/ctl/ctl_ha.c
+++ b/sys/cam/ctl/ctl_ha.c
@@ -1001,7 +1001,7 @@ ctl_ha_msg_shutdown(struct ctl_softc *ctl_softc)
softc->ha_shutdown = 1;
softc->ha_wakeup = 1;
wakeup(&softc->ha_wakeup);
- while (softc->ha_shutdown < 2) {
+ while (softc->ha_shutdown < 2 && !SCHEDULER_STOPPED()) {
msleep(&softc->ha_wakeup, &softc->ha_lock, 0,
"shutdown", hz);
}
diff --git a/sys/cam/ctl/ctl_io.h b/sys/cam/ctl/ctl_io.h
index bad030f..9c472f5 100644
--- a/sys/cam/ctl/ctl_io.h
+++ b/sys/cam/ctl/ctl_io.h
@@ -145,7 +145,9 @@ struct ctl_ptr_len_flags {
union ctl_priv {
uint8_t bytes[sizeof(uint64_t) * 2];
uint64_t integer;
+ uint64_t integers[2];
void *ptr;
+ void *ptrs[2];
};
/*
@@ -164,6 +166,12 @@ union ctl_priv {
#define CTL_PRIV_FRONTEND 4 /* Frontend storage */
#define CTL_PRIV_FRONTEND2 5 /* Another frontend storage */
+#define CTL_LUN(io) ((io)->io_hdr.ctl_private[CTL_PRIV_LUN].ptrs[0])
+#define CTL_SOFTC(io) ((io)->io_hdr.ctl_private[CTL_PRIV_LUN].ptrs[1])
+#define CTL_BACKEND_LUN(io) ((io)->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptrs[0])
+#define CTL_PORT(io) (((struct ctl_softc *)CTL_SOFTC(io))-> \
+ ctl_ports[(io)->io_hdr.nexus.targ_port])
+
#define CTL_INVALID_PORTNAME 0xFF
#define CTL_UNMAPPED_IID 0xFF
@@ -312,7 +320,7 @@ struct ctl_scsiio {
uint8_t sense_len; /* Returned sense length */
uint8_t scsi_status; /* SCSI status byte */
uint8_t sense_residual; /* Unused. */
- uint32_t residual; /* data residual length */
+ uint32_t residual; /* Unused */
uint32_t tag_num; /* tag number */
ctl_tag_type tag_type; /* simple, ordered, head of queue,etc.*/
uint8_t cdb_len; /* CDB length */
@@ -365,7 +373,7 @@ struct ctl_taskio {
/*
* HA link messages.
*/
-#define CTL_HA_VERSION 1
+#define CTL_HA_VERSION 3
/*
* Used for CTL_MSG_LOGIN.
@@ -461,7 +469,8 @@ struct ctl_ha_msg_dt {
};
/*
- * Used for CTL_MSG_SERIALIZE, CTL_MSG_FINISH_IO, CTL_MSG_BAD_JUJU.
+ * Used for CTL_MSG_SERIALIZE, CTL_MSG_FINISH_IO, CTL_MSG_BAD_JUJU,
+ * and CTL_MSG_DATAMOVE_DONE.
*/
struct ctl_ha_msg_scsi {
struct ctl_ha_msg_hdr hdr;
@@ -471,10 +480,9 @@ struct ctl_ha_msg_scsi {
uint8_t cdb_len; /* CDB length */
uint8_t scsi_status; /* SCSI status byte */
uint8_t sense_len; /* Returned sense length */
- uint8_t sense_residual; /* sense residual length */
- uint32_t residual; /* data residual length */
- uint32_t fetd_status; /* trans status, set by FETD,
+ uint32_t port_status; /* trans status, set by FETD,
0 = good*/
+ uint32_t kern_data_resid; /* for DATAMOVE_DONE */
struct scsi_sense_data sense_data; /* sense data */
};
diff --git a/sys/cam/ctl/ctl_ioctl.h b/sys/cam/ctl/ctl_ioctl.h
index e8f6b87..ee4ce18 100644
--- a/sys/cam/ctl/ctl_ioctl.h
+++ b/sys/cam/ctl/ctl_ioctl.h
@@ -1,6 +1,7 @@
/*-
* Copyright (c) 2003 Silicon Graphics International Corp.
* Copyright (c) 2011 Spectra Logic Corporation
+ * Copyright (c) 2014-2017 Alexander Motin <mav@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -80,6 +81,9 @@
/* Hopefully this won't conflict with new misc devices that pop up */
#define CTL_MINOR 225
+/* Legacy statistics accumulated for every port for every LU. */
+#define CTL_LEGACY_STATS 1
+
typedef enum {
CTL_DELAY_TYPE_NONE,
CTL_DELAY_TYPE_CONT,
@@ -117,6 +121,18 @@ typedef enum {
#define CTL_STATS_NUM_TYPES 3
typedef enum {
+ CTL_SS_OK,
+ CTL_SS_NEED_MORE_SPACE,
+ CTL_SS_ERROR
+} ctl_stats_status;
+
+typedef enum {
+ CTL_STATS_FLAG_NONE = 0x00,
+ CTL_STATS_FLAG_TIME_VALID = 0x01
+} ctl_stats_flags;
+
+#ifdef CTL_LEGACY_STATS
+typedef enum {
CTL_LUN_STATS_NO_BLOCKSIZE = 0x01
} ctl_lun_stats_flags;
@@ -137,17 +153,6 @@ struct ctl_lun_io_stats {
struct ctl_lun_io_port_stats ports[CTL_MAX_PORTS];
};
-typedef enum {
- CTL_SS_OK,
- CTL_SS_NEED_MORE_SPACE,
- CTL_SS_ERROR
-} ctl_stats_status;
-
-typedef enum {
- CTL_STATS_FLAG_NONE = 0x00,
- CTL_STATS_FLAG_TIME_VALID = 0x01
-} ctl_stats_flags;
-
struct ctl_stats {
int alloc_len; /* passed to kernel */
struct ctl_lun_io_stats *lun_stats; /* passed to/from kernel */
@@ -157,6 +162,27 @@ struct ctl_stats {
ctl_stats_flags flags; /* passed to userland */
struct timespec timestamp; /* passed to userland */
};
+#endif /* CTL_LEGACY_STATS */
+
+struct ctl_io_stats {
+ uint32_t item;
+ uint64_t bytes[CTL_STATS_NUM_TYPES];
+ uint64_t operations[CTL_STATS_NUM_TYPES];
+ uint64_t dmas[CTL_STATS_NUM_TYPES];
+ struct bintime time[CTL_STATS_NUM_TYPES];
+ struct bintime dma_time[CTL_STATS_NUM_TYPES];
+};
+
+struct ctl_get_io_stats {
+ struct ctl_io_stats *stats; /* passed to/from kernel */
+ size_t alloc_len; /* passed to kernel */
+ size_t fill_len; /* passed to userland */
+ int first_item; /* passed to kernel */
+ int num_items; /* passed to userland */
+ ctl_stats_status status; /* passed to userland */
+ ctl_stats_flags flags; /* passed to userland */
+ struct timespec timestamp; /* passed to userland */
+};
/*
* The types of errors that can be injected:
@@ -342,12 +368,54 @@ typedef enum {
CTL_LUNREQ_MODIFY,
} ctl_lunreq_type;
+/*
+ * The ID_REQ flag is used to say that the caller has requested a
+ * particular LUN ID in the req_lun_id field. If we cannot allocate that
+ * LUN ID, the ctl_add_lun() call will fail.
+ *
+ * The STOPPED flag tells us that the LUN should default to the powered
+ * off state. It will return 0x04,0x02 until it is powered up. ("Logical
+ * unit not ready, initializing command required.")
+ *
+ * The NO_MEDIA flag tells us that the LUN has no media inserted.
+ *
+ * The PRIMARY flag tells us that this LUN is registered as a Primary LUN
+ * which is accessible via the Master shelf controller in an HA. This flag
+ * being set indicates a Primary LUN. This flag being reset represents a
+ * Secondary LUN controlled by the Secondary controller in an HA
+ * configuration. Flag is applicable at this time to T_DIRECT types.
+ *
+ * The SERIAL_NUM flag tells us that the serial_num field is filled in and
+ * valid for use in SCSI INQUIRY VPD page 0x80.
+ *
+ * The DEVID flag tells us that the device_id field is filled in and
+ * valid for use in SCSI INQUIRY VPD page 0x83.
+ *
+ * The DEV_TYPE flag tells us that the device_type field is filled in.
+ *
+ * The EJECTED flag tells us that the removable LUN has tray open.
+ *
+ * The UNMAP flag tells us that this LUN supports UNMAP.
+ *
+ * The OFFLINE flag tells us that this LUN can not access backing store.
+ */
+typedef enum {
+ CTL_LUN_FLAG_ID_REQ = 0x01,
+ CTL_LUN_FLAG_STOPPED = 0x02,
+ CTL_LUN_FLAG_NO_MEDIA = 0x04,
+ CTL_LUN_FLAG_PRIMARY = 0x08,
+ CTL_LUN_FLAG_SERIAL_NUM = 0x10,
+ CTL_LUN_FLAG_DEVID = 0x20,
+ CTL_LUN_FLAG_DEV_TYPE = 0x40,
+ CTL_LUN_FLAG_UNMAP = 0x80,
+ CTL_LUN_FLAG_EJECTED = 0x100,
+ CTL_LUN_FLAG_READONLY = 0x200
+} ctl_backend_lun_flags;
/*
* LUN creation parameters:
*
- * flags: Various LUN flags, see ctl_backend.h for a
- * description of the flag values and meanings.
+ * flags: Various LUN flags, see above.
*
* device_type: The SCSI device type. e.g. 0 for Direct Access,
* 3 for Processor, etc. Only certain backends may
@@ -465,6 +533,7 @@ union ctl_lunreq_data {
* kern_be_args: For kernel use only.
*/
struct ctl_lun_req {
+#define CTL_BE_NAME_LEN 32
char backend[CTL_BE_NAME_LEN];
ctl_lunreq_type reqtype;
union ctl_lunreq_data reqdata;
@@ -761,6 +830,8 @@ struct ctl_lun_map {
#define CTL_PORT_REQ _IOWR(CTL_MINOR, 0x26, struct ctl_req)
#define CTL_PORT_LIST _IOWR(CTL_MINOR, 0x27, struct ctl_lun_list)
#define CTL_LUN_MAP _IOW(CTL_MINOR, 0x28, struct ctl_lun_map)
+#define CTL_GET_LUN_STATS _IOWR(CTL_MINOR, 0x29, struct ctl_get_io_stats)
+#define CTL_GET_PORT_STATS _IOWR(CTL_MINOR, 0x2a, struct ctl_get_io_stats)
#endif /* _CTL_IOCTL_H_ */
diff --git a/sys/cam/ctl/ctl_private.h b/sys/cam/ctl/ctl_private.h
index e118343..40f0e61 100644
--- a/sys/cam/ctl/ctl_private.h
+++ b/sys/cam/ctl/ctl_private.h
@@ -1,6 +1,6 @@
/*-
* Copyright (c) 2003, 2004, 2005, 2008 Silicon Graphics International Corp.
- * Copyright (c) 2014-2015 Alexander Motin <mav@FreeBSD.org>
+ * Copyright (c) 2014-2017 Alexander Motin <mav@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -404,7 +404,10 @@ struct ctl_lun {
struct callout ie_callout; /* INTERVAL TIMER */
struct ctl_mode_pages mode_pages;
struct ctl_log_pages log_pages;
- struct ctl_lun_io_stats stats;
+#ifdef CTL_LEGACY_STATS
+ struct ctl_lun_io_stats legacy_stats;
+#endif /* CTL_LEGACY_STATS */
+ struct ctl_io_stats stats;
uint32_t res_idx;
uint32_t pr_generation;
uint64_t *pr_keys[CTL_MAX_PORTS];
@@ -412,7 +415,7 @@ struct ctl_lun {
uint32_t pr_res_idx;
uint8_t pr_res_type;
int prevent_count;
- uint32_t prevent[(CTL_MAX_INITIATORS+31)/32];
+ uint32_t *prevent;
uint8_t *write_buffer;
struct ctl_devid *lun_devid;
TAILQ_HEAD(tpc_lists, tpc_list) tpc_lists;
@@ -467,7 +470,10 @@ struct ctl_softc {
STAILQ_HEAD(, ctl_backend_driver) be_list;
struct uma_zone *io_zone;
uint32_t cur_pool_id;
+ int shutdown;
struct ctl_thread threads[CTL_MAX_THREADS];
+ struct thread *lun_thread;
+ struct thread *thresh_thread;
TAILQ_HEAD(tpc_tokens, tpc_token) tpc_tokens;
struct callout tpc_timeout;
struct mtx tpc_lock;
diff --git a/sys/cam/ctl/ctl_tpc.c b/sys/cam/ctl/ctl_tpc.c
index 484293b..c8d60ca 100644
--- a/sys/cam/ctl/ctl_tpc.c
+++ b/sys/cam/ctl/ctl_tpc.c
@@ -251,6 +251,7 @@ ctl_tpc_lun_shutdown(struct ctl_lun *lun)
int
ctl_inquiry_evpd_tpc(struct ctl_scsiio *ctsio, int alloc_len)
{
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_vpd_tpc *tpc_ptr;
struct scsi_vpd_tpc_descriptor *d_ptr;
struct scsi_vpd_tpc_descriptor_bdrl *bdrl_ptr;
@@ -264,11 +265,8 @@ ctl_inquiry_evpd_tpc(struct ctl_scsiio *ctsio, int alloc_len)
struct scsi_vpd_tpc_descriptor_srt *srt_ptr;
struct scsi_vpd_tpc_descriptor_srtd *srtd_ptr;
struct scsi_vpd_tpc_descriptor_gco *gco_ptr;
- struct ctl_lun *lun;
int data_len;
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
data_len = sizeof(struct scsi_vpd_tpc) +
sizeof(struct scsi_vpd_tpc_descriptor_bdrl) +
roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sc) +
@@ -284,20 +282,10 @@ ctl_inquiry_evpd_tpc(struct ctl_scsiio *ctsio, int alloc_len)
ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
tpc_ptr = (struct scsi_vpd_tpc *)ctsio->kern_data_ptr;
- ctsio->kern_sg_entries = 0;
-
- if (data_len < alloc_len) {
- ctsio->residual = alloc_len - data_len;
- ctsio->kern_data_len = data_len;
- ctsio->kern_total_len = data_len;
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
+ ctsio->kern_data_len = min(data_len, alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
/*
* The control device is always connected. The disk device, on the
@@ -460,20 +448,10 @@ ctl_receive_copy_operating_parameters(struct ctl_scsiio *ctsio)
alloc_len = scsi_4btoul(cdb->length);
ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
-
ctsio->kern_sg_entries = 0;
-
- if (total_len < alloc_len) {
- ctsio->residual = alloc_len - total_len;
- ctsio->kern_data_len = total_len;
- ctsio->kern_total_len = total_len;
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
+ ctsio->kern_data_len = min(total_len, alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
data = (struct scsi_receive_copy_operating_parameters_data *)ctsio->kern_data_ptr;
scsi_ulto4b(sizeof(*data) - 4 + 4, data->length);
@@ -521,7 +499,7 @@ tpc_find_list(struct ctl_lun *lun, uint32_t list_id, uint32_t init_idx)
int
ctl_receive_copy_status_lid1(struct ctl_scsiio *ctsio)
{
- struct ctl_lun *lun;
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_receive_copy_status_lid1 *cdb;
struct scsi_receive_copy_status_lid1_data *data;
struct tpc_list *list;
@@ -533,8 +511,6 @@ ctl_receive_copy_status_lid1(struct ctl_scsiio *ctsio)
CTL_DEBUG_PRINT(("ctl_receive_copy_status_lid1\n"));
cdb = (struct scsi_receive_copy_status_lid1 *)ctsio->cdb;
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
retval = CTL_RETVAL_COMPLETE;
list_id = cdb->list_identifier;
@@ -560,20 +536,10 @@ ctl_receive_copy_status_lid1(struct ctl_scsiio *ctsio)
alloc_len = scsi_4btoul(cdb->length);
ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
-
ctsio->kern_sg_entries = 0;
-
- if (total_len < alloc_len) {
- ctsio->residual = alloc_len - total_len;
- ctsio->kern_data_len = total_len;
- ctsio->kern_total_len = total_len;
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
+ ctsio->kern_data_len = min(total_len, alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
data = (struct scsi_receive_copy_status_lid1_data *)ctsio->kern_data_ptr;
scsi_ulto4b(sizeof(*data) - 4, data->available_data);
@@ -603,7 +569,7 @@ ctl_receive_copy_status_lid1(struct ctl_scsiio *ctsio)
int
ctl_receive_copy_failure_details(struct ctl_scsiio *ctsio)
{
- struct ctl_lun *lun;
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_receive_copy_failure_details *cdb;
struct scsi_receive_copy_failure_details_data *data;
struct tpc_list *list;
@@ -615,8 +581,6 @@ ctl_receive_copy_failure_details(struct ctl_scsiio *ctsio)
CTL_DEBUG_PRINT(("ctl_receive_copy_failure_details\n"));
cdb = (struct scsi_receive_copy_failure_details *)ctsio->cdb;
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
retval = CTL_RETVAL_COMPLETE;
list_id = cdb->list_identifier;
@@ -640,20 +604,10 @@ ctl_receive_copy_failure_details(struct ctl_scsiio *ctsio)
alloc_len = scsi_4btoul(cdb->length);
ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
-
ctsio->kern_sg_entries = 0;
-
- if (total_len < alloc_len) {
- ctsio->residual = alloc_len - total_len;
- ctsio->kern_data_len = total_len;
- ctsio->kern_total_len = total_len;
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
+ ctsio->kern_data_len = min(total_len, alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
data = (struct scsi_receive_copy_failure_details_data *)ctsio->kern_data_ptr;
if (list_copy.completed && (list_copy.error || list_copy.abort)) {
@@ -675,7 +629,7 @@ ctl_receive_copy_failure_details(struct ctl_scsiio *ctsio)
int
ctl_receive_copy_status_lid4(struct ctl_scsiio *ctsio)
{
- struct ctl_lun *lun;
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_receive_copy_status_lid4 *cdb;
struct scsi_receive_copy_status_lid4_data *data;
struct tpc_list *list;
@@ -687,8 +641,6 @@ ctl_receive_copy_status_lid4(struct ctl_scsiio *ctsio)
CTL_DEBUG_PRINT(("ctl_receive_copy_status_lid4\n"));
cdb = (struct scsi_receive_copy_status_lid4 *)ctsio->cdb;
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
retval = CTL_RETVAL_COMPLETE;
list_id = scsi_4btoul(cdb->list_identifier);
@@ -714,20 +666,10 @@ ctl_receive_copy_status_lid4(struct ctl_scsiio *ctsio)
alloc_len = scsi_4btoul(cdb->length);
ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
-
ctsio->kern_sg_entries = 0;
-
- if (total_len < alloc_len) {
- ctsio->residual = alloc_len - total_len;
- ctsio->kern_data_len = total_len;
- ctsio->kern_total_len = total_len;
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
+ ctsio->kern_data_len = min(total_len, alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
data = (struct scsi_receive_copy_status_lid4_data *)ctsio->kern_data_ptr;
scsi_ulto4b(sizeof(*data) - 4 + list_copy.sense_len,
@@ -761,7 +703,7 @@ ctl_receive_copy_status_lid4(struct ctl_scsiio *ctsio)
int
ctl_copy_operation_abort(struct ctl_scsiio *ctsio)
{
- struct ctl_lun *lun;
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_copy_operation_abort *cdb;
struct tpc_list *list;
int retval;
@@ -770,8 +712,6 @@ ctl_copy_operation_abort(struct ctl_scsiio *ctsio)
CTL_DEBUG_PRINT(("ctl_copy_operation_abort\n"));
cdb = (struct scsi_copy_operation_abort *)ctsio->cdb;
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
retval = CTL_RETVAL_COMPLETE;
list_id = scsi_4btoul(cdb->list_identifier);
@@ -894,7 +834,7 @@ tpc_process_b2b(struct tpc_list *list)
dcscd = scsi_2btoul(seg->dst_cscd);
sl = tpc_resolve(list, scscd, &srcblock, NULL, NULL);
dl = tpc_resolve(list, dcscd, &dstblock, &pb, &pbo);
- if (sl >= CTL_MAX_LUNS || dl >= CTL_MAX_LUNS) {
+ if (sl == UINT64_MAX || dl == UINT64_MAX) {
ctl_set_sense(list->ctsio, /*current_error*/ 1,
/*sense_key*/ SSD_KEY_COPY_ABORTED,
/*asc*/ 0x08, /*ascq*/ 0x04,
@@ -1042,7 +982,7 @@ tpc_process_verify(struct tpc_list *list)
seg = (struct scsi_ec_segment_verify *)list->seg[list->curseg];
cscd = scsi_2btoul(seg->src_cscd);
sl = tpc_resolve(list, cscd, NULL, NULL, NULL);
- if (sl >= CTL_MAX_LUNS) {
+ if (sl == UINT64_MAX) {
ctl_set_sense(list->ctsio, /*current_error*/ 1,
/*sense_key*/ SSD_KEY_COPY_ABORTED,
/*asc*/ 0x08, /*ascq*/ 0x04,
@@ -1106,7 +1046,7 @@ tpc_process_register_key(struct tpc_list *list)
seg = (struct scsi_ec_segment_register_key *)list->seg[list->curseg];
cscd = scsi_2btoul(seg->dst_cscd);
dl = tpc_resolve(list, cscd, NULL, NULL, NULL);
- if (dl >= CTL_MAX_LUNS) {
+ if (dl == UINT64_MAX) {
ctl_set_sense(list->ctsio, /*current_error*/ 1,
/*sense_key*/ SSD_KEY_COPY_ABORTED,
/*asc*/ 0x08, /*ascq*/ 0x04,
@@ -1705,11 +1645,11 @@ tpc_done(union ctl_io *io)
int
ctl_extended_copy_lid1(struct ctl_scsiio *ctsio)
{
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_extended_copy *cdb;
struct scsi_extended_copy_lid1_data *data;
struct scsi_ec_cscd *cscd;
struct scsi_ec_segment *seg;
- struct ctl_lun *lun;
struct tpc_list *list, *tlist;
uint8_t *ptr;
char *value;
@@ -1717,7 +1657,6 @@ ctl_extended_copy_lid1(struct ctl_scsiio *ctsio)
CTL_DEBUG_PRINT(("ctl_extended_copy_lid1\n"));
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
cdb = (struct scsi_extended_copy *)ctsio->cdb;
len = scsi_4btoul(cdb->length);
@@ -1741,7 +1680,6 @@ ctl_extended_copy_lid1(struct ctl_scsiio *ctsio)
ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
ctsio->kern_data_len = len;
ctsio->kern_total_len = len;
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
@@ -1861,11 +1799,11 @@ done:
int
ctl_extended_copy_lid4(struct ctl_scsiio *ctsio)
{
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_extended_copy *cdb;
struct scsi_extended_copy_lid4_data *data;
struct scsi_ec_cscd *cscd;
struct scsi_ec_segment *seg;
- struct ctl_lun *lun;
struct tpc_list *list, *tlist;
uint8_t *ptr;
char *value;
@@ -1873,7 +1811,6 @@ ctl_extended_copy_lid4(struct ctl_scsiio *ctsio)
CTL_DEBUG_PRINT(("ctl_extended_copy_lid4\n"));
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
cdb = (struct scsi_extended_copy *)ctsio->cdb;
len = scsi_4btoul(cdb->length);
@@ -1897,7 +1834,6 @@ ctl_extended_copy_lid4(struct ctl_scsiio *ctsio)
ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
ctsio->kern_data_len = len;
ctsio->kern_total_len = len;
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
@@ -2064,11 +2000,11 @@ tpc_create_token(struct ctl_lun *lun, struct ctl_port *port, off_t len,
int
ctl_populate_token(struct ctl_scsiio *ctsio)
{
+ struct ctl_softc *softc = CTL_SOFTC(ctsio);
+ struct ctl_port *port = CTL_PORT(ctsio);
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_populate_token *cdb;
struct scsi_populate_token_data *data;
- struct ctl_softc *softc;
- struct ctl_lun *lun;
- struct ctl_port *port;
struct tpc_list *list, *tlist;
struct tpc_token *token;
uint64_t lba;
@@ -2076,9 +2012,6 @@ ctl_populate_token(struct ctl_scsiio *ctsio)
CTL_DEBUG_PRINT(("ctl_populate_token\n"));
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
- softc = lun->ctl_softc;
- port = softc->ctl_ports[ctsio->io_hdr.nexus.targ_port];
cdb = (struct scsi_populate_token *)ctsio->cdb;
len = scsi_4btoul(cdb->length);
@@ -2098,7 +2031,6 @@ ctl_populate_token(struct ctl_scsiio *ctsio)
ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
ctsio->kern_data_len = len;
ctsio->kern_total_len = len;
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
@@ -2232,10 +2164,10 @@ done:
int
ctl_write_using_token(struct ctl_scsiio *ctsio)
{
+ struct ctl_softc *softc = CTL_SOFTC(ctsio);
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_write_using_token *cdb;
struct scsi_write_using_token_data *data;
- struct ctl_softc *softc;
- struct ctl_lun *lun;
struct tpc_list *list, *tlist;
struct tpc_token *token;
uint64_t lba;
@@ -2243,8 +2175,6 @@ ctl_write_using_token(struct ctl_scsiio *ctsio)
CTL_DEBUG_PRINT(("ctl_write_using_token\n"));
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
- softc = lun->ctl_softc;
cdb = (struct scsi_write_using_token *)ctsio->cdb;
len = scsi_4btoul(cdb->length);
@@ -2264,7 +2194,6 @@ ctl_write_using_token(struct ctl_scsiio *ctsio)
ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
ctsio->kern_data_len = len;
ctsio->kern_total_len = len;
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
@@ -2389,7 +2318,7 @@ done:
int
ctl_receive_rod_token_information(struct ctl_scsiio *ctsio)
{
- struct ctl_lun *lun;
+ struct ctl_lun *lun = CTL_LUN(ctsio);
struct scsi_receive_rod_token_information *cdb;
struct scsi_receive_copy_status_lid4_data *data;
struct tpc_list *list;
@@ -2402,8 +2331,6 @@ ctl_receive_rod_token_information(struct ctl_scsiio *ctsio)
CTL_DEBUG_PRINT(("ctl_receive_rod_token_information\n"));
cdb = (struct scsi_receive_rod_token_information *)ctsio->cdb;
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
retval = CTL_RETVAL_COMPLETE;
list_id = scsi_4btoul(cdb->list_identifier);
@@ -2430,20 +2357,10 @@ ctl_receive_rod_token_information(struct ctl_scsiio *ctsio)
alloc_len = scsi_4btoul(cdb->length);
ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
-
ctsio->kern_sg_entries = 0;
-
- if (total_len < alloc_len) {
- ctsio->residual = alloc_len - total_len;
- ctsio->kern_data_len = total_len;
- ctsio->kern_total_len = total_len;
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
+ ctsio->kern_data_len = min(total_len, alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
data = (struct scsi_receive_copy_status_lid4_data *)ctsio->kern_data_ptr;
scsi_ulto4b(sizeof(*data) - 4 + list_copy.sense_len +
@@ -2487,8 +2404,7 @@ ctl_receive_rod_token_information(struct ctl_scsiio *ctsio)
int
ctl_report_all_rod_tokens(struct ctl_scsiio *ctsio)
{
- struct ctl_softc *softc;
- struct ctl_lun *lun;
+ struct ctl_softc *softc = CTL_SOFTC(ctsio);
struct scsi_report_all_rod_tokens *cdb;
struct scsi_report_all_rod_tokens_data *data;
struct tpc_token *token;
@@ -2498,9 +2414,6 @@ ctl_report_all_rod_tokens(struct ctl_scsiio *ctsio)
CTL_DEBUG_PRINT(("ctl_receive_rod_token_information\n"));
cdb = (struct scsi_report_all_rod_tokens *)ctsio->cdb;
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
- softc = lun->ctl_softc;
-
retval = CTL_RETVAL_COMPLETE;
tokens = 0;
@@ -2515,20 +2428,10 @@ ctl_report_all_rod_tokens(struct ctl_scsiio *ctsio)
alloc_len = scsi_4btoul(cdb->length);
ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
-
ctsio->kern_sg_entries = 0;
-
- if (total_len < alloc_len) {
- ctsio->residual = alloc_len - total_len;
- ctsio->kern_data_len = total_len;
- ctsio->kern_total_len = total_len;
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
- ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
+ ctsio->kern_data_len = min(total_len, alloc_len);
+ ctsio->kern_total_len = ctsio->kern_data_len;
data = (struct scsi_report_all_rod_tokens_data *)ctsio->kern_data_ptr;
i = 0;
diff --git a/sys/cam/ctl/ctl_tpc_local.c b/sys/cam/ctl/ctl_tpc_local.c
index 4f368f9..e5e77b4 100644
--- a/sys/cam/ctl/ctl_tpc_local.c
+++ b/sys/cam/ctl/ctl_tpc_local.c
@@ -65,7 +65,7 @@ struct tpcl_softc {
static struct tpcl_softc tpcl_softc;
static int tpcl_init(void);
-static void tpcl_shutdown(void);
+static int tpcl_shutdown(void);
static void tpcl_datamove(union ctl_io *io);
static void tpcl_done(union ctl_io *io);
@@ -84,7 +84,7 @@ tpcl_init(void)
struct tpcl_softc *tsoftc = &tpcl_softc;
struct ctl_port *port;
struct scsi_transportid_spi *tid;
- int len;
+ int error, len;
memset(tsoftc, 0, sizeof(*tsoftc));
@@ -100,9 +100,9 @@ tpcl_init(void)
port->targ_port = -1;
port->max_initiators = 1;
- if (ctl_port_register(port) != 0) {
- printf("%s: ctl_port_register() failed with error\n", __func__);
- return (0);
+ if ((error = ctl_port_register(port)) != 0) {
+ printf("%s: tpc port registration failed\n", __func__);
+ return (error);
}
len = sizeof(struct scsi_transportid_spi);
@@ -118,16 +118,17 @@ tpcl_init(void)
return (0);
}
-void
+static int
tpcl_shutdown(void)
{
struct tpcl_softc *tsoftc = &tpcl_softc;
- struct ctl_port *port;
+ struct ctl_port *port = &tsoftc->port;
+ int error;
- port = &tsoftc->port;
ctl_port_offline(port);
- if (ctl_port_deregister(&tsoftc->port) != 0)
- printf("%s: ctl_frontend_deregister() failed\n", __func__);
+ if ((error = ctl_port_deregister(port)) != 0)
+ printf("%s: tpc port deregistration failed\n", __func__);
+ return (error);
}
static void
@@ -137,7 +138,7 @@ tpcl_datamove(union ctl_io *io)
struct ctl_sg_entry ext_entry, kern_entry;
int ext_sg_entries, kern_sg_entries;
int ext_sg_start, ext_offset;
- int len_to_copy, len_copied;
+ int len_to_copy;
int kern_watermark, ext_watermark;
struct ctl_scsiio *ctsio;
int i, j;
@@ -196,7 +197,6 @@ tpcl_datamove(union ctl_io *io)
kern_watermark = 0;
ext_watermark = ext_offset;
- len_copied = 0;
for (i = ext_sg_start, j = 0;
i < ext_sg_entries && j < kern_sg_entries;) {
uint8_t *ext_ptr, *kern_ptr;
@@ -218,9 +218,6 @@ tpcl_datamove(union ctl_io *io)
kern_ptr = (uint8_t *)kern_sglist[j].addr;
kern_ptr = kern_ptr + kern_watermark;
- kern_watermark += len_to_copy;
- ext_watermark += len_to_copy;
-
if ((ctsio->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
CTL_FLAG_DATA_IN) {
CTL_DEBUG_PRINT(("%s: copying %d bytes to user\n",
@@ -236,27 +233,27 @@ tpcl_datamove(union ctl_io *io)
memcpy(kern_ptr, ext_ptr, len_to_copy);
}
- len_copied += len_to_copy;
+ ctsio->ext_data_filled += len_to_copy;
+ ctsio->kern_data_resid -= len_to_copy;
+ ext_watermark += len_to_copy;
if (ext_sglist[i].len == ext_watermark) {
i++;
ext_watermark = 0;
}
+ kern_watermark += len_to_copy;
if (kern_sglist[j].len == kern_watermark) {
j++;
kern_watermark = 0;
}
}
- ctsio->ext_data_filled += len_copied;
-
CTL_DEBUG_PRINT(("%s: ext_sg_entries: %d, kern_sg_entries: %d\n",
__func__, ext_sg_entries, kern_sg_entries));
CTL_DEBUG_PRINT(("%s: ext_data_len = %d, kern_data_len = %d\n",
__func__, ctsio->ext_data_len, ctsio->kern_data_len));
- /* XXX KDM set residual?? */
bailout:
io->scsiio.be_move_done(io);
}
@@ -290,7 +287,7 @@ tpcl_resolve(struct ctl_softc *softc, int init_port,
port = NULL;
STAILQ_FOREACH(lun, &softc->lun_list, links) {
if (port != NULL &&
- ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS)
+ ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX)
continue;
if (lun->lun_devid == NULL)
continue;
diff --git a/sys/cam/ctl/ctl_util.c b/sys/cam/ctl/ctl_util.c
index 6fcec03..33f0899 100644
--- a/sys/cam/ctl/ctl_util.c
+++ b/sys/cam/ctl/ctl_util.c
@@ -697,7 +697,6 @@ ctl_scsi_free_io(union ctl_io *io)
free(io);
}
-#endif /* !_KERNEL */
void
ctl_scsi_zero_io(union ctl_io *io)
{
@@ -707,11 +706,10 @@ ctl_scsi_zero_io(union ctl_io *io)
return;
pool_ref = io->io_hdr.pool;
-
memset(io, 0, sizeof(*io));
-
io->io_hdr.pool = pool_ref;
}
+#endif /* !_KERNEL */
const char *
ctl_scsi_task_string(struct ctl_taskio *taskio)
diff --git a/sys/cam/ctl/ctl_util.h b/sys/cam/ctl/ctl_util.h
index 2966b49..67c0915 100644
--- a/sys/cam/ctl/ctl_util.h
+++ b/sys/cam/ctl/ctl_util.h
@@ -96,8 +96,10 @@ void ctl_scsi_maintenance_in(union ctl_io *io, uint8_t *data_ptr,
#ifndef _KERNEL
union ctl_io *ctl_scsi_alloc_io(uint32_t initid);
void ctl_scsi_free_io(union ctl_io *io);
-#endif /* !_KERNEL */
void ctl_scsi_zero_io(union ctl_io *io);
+#else
+#define ctl_scsi_zero_io(io) ctl_zero_io(io)
+#endif /* !_KERNEL */
const char *ctl_scsi_task_string(struct ctl_taskio *taskio);
void ctl_io_sbuf(union ctl_io *io, struct sbuf *sb);
void ctl_io_error_sbuf(union ctl_io *io,
diff --git a/sys/cam/ctl/scsi_ctl.c b/sys/cam/ctl/scsi_ctl.c
index abdbdcd..dc82e91 100644
--- a/sys/cam/ctl/scsi_ctl.c
+++ b/sys/cam/ctl/scsi_ctl.c
@@ -185,8 +185,11 @@ MALLOC_DEFINE(M_CTLFE, "CAM CTL FE", "CAM CTL FE interface");
/* This is only used in the CTIO */
#define ccb_atio ppriv_ptr1
-int ctlfeinitialize(void);
-void ctlfeshutdown(void);
+#define PRIV_CCB(io) ((io)->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptrs[0])
+#define PRIV_INFO(io) ((io)->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptrs[1])
+
+static int ctlfeinitialize(void);
+static int ctlfeshutdown(void);
static periph_init_t ctlfeperiphinit;
static void ctlfeasync(void *callback_arg, uint32_t code,
struct cam_path *path, void *arg);
@@ -224,13 +227,15 @@ static struct ctl_frontend ctlfe_frontend =
};
CTL_FRONTEND_DECLARE(ctlfe, ctlfe_frontend);
-void
+static int
ctlfeshutdown(void)
{
- return;
+
+ /* CAM does not support periph driver unregister now. */
+ return (EBUSY);
}
-int
+static int
ctlfeinitialize(void)
{
@@ -240,7 +245,7 @@ ctlfeinitialize(void)
return (0);
}
-void
+static void
ctlfeperiphinit(void)
{
cam_status status;
@@ -554,7 +559,7 @@ ctlferegister(struct cam_periph *periph, void *arg)
status = CAM_RESRC_UNAVAIL;
break;
}
- new_io->io_hdr.ctl_private[CTL_PRIV_FRONTEND2].ptr = cmd_info;
+ PRIV_INFO(new_io) = cmd_info;
softc->atios_alloced++;
new_ccb->ccb_h.io_ptr = new_io;
@@ -702,7 +707,7 @@ ctlfedata(struct ctlfe_lun_softc *softc, union ctl_io *io,
size_t off;
int i, idx;
- cmd_info = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND2].ptr;
+ cmd_info = PRIV_INFO(io);
bus_softc = softc->parent_softc;
/*
@@ -718,15 +723,18 @@ ctlfedata(struct ctlfe_lun_softc *softc, union ctl_io *io,
idx = cmd_info->cur_transfer_index;
off = cmd_info->cur_transfer_off;
cmd_info->flags &= ~CTLFE_CMD_PIECEWISE;
- if (io->scsiio.kern_sg_entries == 0) {
- /* No S/G list. */
+ if (io->scsiio.kern_sg_entries == 0) { /* No S/G list. */
+
+ /* One time shift for SRR offset. */
+ off += io->scsiio.ext_data_filled;
+ io->scsiio.ext_data_filled = 0;
+
*data_ptr = io->scsiio.kern_data_ptr + off;
if (io->scsiio.kern_data_len - off <= bus_softc->maxio) {
*dxfer_len = io->scsiio.kern_data_len - off;
} else {
*dxfer_len = bus_softc->maxio;
- cmd_info->cur_transfer_index = -1;
- cmd_info->cur_transfer_off = bus_softc->maxio;
+ cmd_info->cur_transfer_off += bus_softc->maxio;
cmd_info->flags |= CTLFE_CMD_PIECEWISE;
}
*sglist_cnt = 0;
@@ -735,9 +743,18 @@ ctlfedata(struct ctlfe_lun_softc *softc, union ctl_io *io,
*flags |= CAM_DATA_PADDR;
else
*flags |= CAM_DATA_VADDR;
- } else {
- /* S/G list with physical or virtual pointers. */
+ } else { /* S/G list with physical or virtual pointers. */
ctl_sglist = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
+
+ /* One time shift for SRR offset. */
+ while (io->scsiio.ext_data_filled >= ctl_sglist[idx].len - off) {
+ io->scsiio.ext_data_filled -= ctl_sglist[idx].len - off;
+ idx++;
+ off = 0;
+ }
+ off += io->scsiio.ext_data_filled;
+ io->scsiio.ext_data_filled = 0;
+
cam_sglist = cmd_info->cam_sglist;
*dxfer_len = 0;
for (i = 0; i < io->scsiio.kern_sg_entries - idx; i++) {
@@ -806,7 +823,7 @@ ctlfestart(struct cam_periph *periph, union ccb *start_ccb)
flags = atio->ccb_h.flags &
(CAM_DIS_DISCONNECT|CAM_TAG_ACTION_VALID|CAM_DIR_MASK);
- cmd_info = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND2].ptr;
+ cmd_info = PRIV_INFO(io);
cmd_info->cur_transfer_index = 0;
cmd_info->cur_transfer_off = 0;
cmd_info->flags = 0;
@@ -815,18 +832,8 @@ ctlfestart(struct cam_periph *periph, union ccb *start_ccb)
/*
* Datamove call, we need to setup the S/G list.
*/
- scsi_status = 0;
- csio->cdb_len = atio->cdb_len;
ctlfedata(softc, io, &flags, &data_ptr, &dxfer_len,
&csio->sglist_cnt);
- io->scsiio.ext_data_filled += dxfer_len;
- if (io->scsiio.ext_data_filled > io->scsiio.kern_total_len) {
- xpt_print(periph->path, "%s: tag 0x%04x "
- "fill len %u > total %u\n",
- __func__, io->scsiio.tag_num,
- io->scsiio.ext_data_filled,
- io->scsiio.kern_total_len);
- }
} else {
/*
* We're done, send status back.
@@ -888,8 +895,8 @@ ctlfestart(struct cam_periph *periph, union ccb *start_ccb)
data_ptr = NULL;
dxfer_len = 0;
csio->sglist_cnt = 0;
- scsi_status = 0;
}
+ scsi_status = 0;
if ((io->io_hdr.flags & CTL_FLAG_STATUS_QUEUED) &&
(cmd_info->flags & CTLFE_CMD_PIECEWISE) == 0 &&
((io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) == 0 ||
@@ -938,7 +945,7 @@ ctlfestart(struct cam_periph *periph, union ccb *start_ccb)
&& (csio->sglist_cnt != 0))) {
printf("%s: tag %04x cdb %02x flags %#x dxfer_len "
"%d sg %u\n", __func__, atio->tag_id,
- atio->cdb_io.cdb_bytes[0], flags, dxfer_len,
+ atio_cdb_ptr(atio)[0], flags, dxfer_len,
csio->sglist_cnt);
printf("%s: tag %04x io status %#x\n", __func__,
atio->tag_id, io->io_hdr.status);
@@ -987,7 +994,7 @@ ctlfe_free_ccb(struct cam_periph *periph, union ccb *ccb)
switch (ccb->ccb_h.func_code) {
case XPT_ACCEPT_TARGET_IO:
softc->atios_freed++;
- cmd_info = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND2].ptr;
+ cmd_info = PRIV_INFO(io);
free(cmd_info, M_CTLFE);
break;
case XPT_IMMEDIATE_NOTIFY:
@@ -1024,8 +1031,7 @@ ctlfe_adjust_cdb(struct ccb_accept_tio *atio, uint32_t offset)
{
uint64_t lba;
uint32_t num_blocks, nbc;
- uint8_t *cmdbyt = (atio->ccb_h.flags & CAM_CDB_POINTER)?
- atio->cdb_io.cdb_ptr : atio->cdb_io.cdb_bytes;
+ uint8_t *cmdbyt = atio_cdb_ptr(atio);
nbc = offset >> 9; /* ASSUMING 512 BYTE BLOCKS */
@@ -1154,12 +1160,12 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
*/
mtx_unlock(mtx);
io = done_ccb->ccb_h.io_ptr;
- cmd_info = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND2].ptr;
+ cmd_info = PRIV_INFO(io);
ctl_zero_io(io);
/* Save pointers on both sides */
- io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = done_ccb;
- io->io_hdr.ctl_private[CTL_PRIV_FRONTEND2].ptr = cmd_info;
+ PRIV_CCB(io) = done_ccb;
+ PRIV_INFO(io) = cmd_info;
done_ccb->ccb_h.io_ptr = io;
/*
@@ -1203,8 +1209,7 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
__func__, atio->cdb_len, sizeof(io->scsiio.cdb));
}
io->scsiio.cdb_len = min(atio->cdb_len, sizeof(io->scsiio.cdb));
- bcopy(atio->cdb_io.cdb_bytes, io->scsiio.cdb,
- io->scsiio.cdb_len);
+ bcopy(atio_cdb_ptr(atio), io->scsiio.cdb, io->scsiio.cdb_len);
#ifdef CTLFEDEBUG
printf("%s: %u:%u:%u: tag %04x CDB %02x\n", __func__,
@@ -1245,13 +1250,36 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
| (done_ccb->csio.msg_ptr[6]);
}
+ /*
+ * If we have an SRR and we're still sending data, we
+ * should be able to adjust offsets and cycle again.
+ * It is possible only if offset is from this datamove.
+ */
+ if (srr && (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) &&
+ srr_off >= io->scsiio.kern_rel_offset &&
+ srr_off < io->scsiio.kern_rel_offset +
+ io->scsiio.kern_data_len) {
+ io->scsiio.kern_data_resid =
+ io->scsiio.kern_rel_offset +
+ io->scsiio.kern_data_len - srr_off;
+ io->scsiio.ext_data_filled = srr_off;
+ io->scsiio.io_hdr.status = CTL_STATUS_NONE;
+ io->io_hdr.flags |= CTL_FLAG_DMA_QUEUED;
+ softc->ccbs_freed++;
+ xpt_release_ccb(done_ccb);
+ TAILQ_INSERT_HEAD(&softc->work_queue, &atio->ccb_h,
+ periph_links.tqe);
+ xpt_schedule(periph, /*priority*/ 1);
+ break;
+ }
+
+ /*
+ * If status was being sent, the back end data is now history.
+ * Hack it up and resubmit a new command with the CDB adjusted.
+ * If the SIM does the right thing, all of the resid math
+ * should work.
+ */
if (srr && (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) == 0) {
- /*
- * If status was being sent, the back end data is now
- * history. Hack it up and resubmit a new command with
- * the CDB adjusted. If the SIM does the right thing,
- * all of the resid math should work.
- */
softc->ccbs_freed++;
xpt_release_ccb(done_ccb);
if (ctlfe_adjust_cdb(atio, srr_off) == 0) {
@@ -1261,22 +1289,6 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
/*
* Fall through to doom....
*/
- } else if (srr) {
- /*
- * If we have an srr and we're still sending data, we
- * should be able to adjust offsets and cycle again.
- */
- io->scsiio.kern_rel_offset =
- io->scsiio.ext_data_filled = srr_off;
- io->scsiio.ext_data_len = io->scsiio.kern_total_len -
- io->scsiio.kern_rel_offset;
- softc->ccbs_freed++;
- io->scsiio.io_hdr.status = CTL_STATUS_NONE;
- xpt_release_ccb(done_ccb);
- TAILQ_INSERT_HEAD(&softc->work_queue, &atio->ccb_h,
- periph_links.tqe);
- xpt_schedule(periph, /*priority*/ 1);
- break;
}
if ((done_ccb->ccb_h.flags & CAM_SEND_STATUS) &&
@@ -1315,20 +1327,10 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
struct ccb_scsiio *csio;
csio = &done_ccb->csio;
- cmd_info = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND2].ptr;
+ cmd_info = PRIV_INFO(io);
io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG;
- io->scsiio.ext_data_len += csio->dxfer_len;
- if (io->scsiio.ext_data_len >
- io->scsiio.kern_total_len) {
- xpt_print(periph->path, "%s: tag 0x%04x "
- "done len %u > total %u sent %u\n",
- __func__, io->scsiio.tag_num,
- io->scsiio.ext_data_len,
- io->scsiio.kern_total_len,
- io->scsiio.ext_data_filled);
- }
/*
* Translate CAM status to CTL status. Success
* does not change the overall, ctl_io status. In
@@ -1338,6 +1340,7 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
*/
switch (done_ccb->ccb_h.status & CAM_STATUS_MASK) {
case CAM_REQ_CMP:
+ io->scsiio.kern_data_resid -= csio->dxfer_len;
io->io_hdr.port_status = 0;
break;
default:
@@ -1367,7 +1370,6 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
if ((cmd_info->flags & CTLFE_CMD_PIECEWISE)
&& (io->io_hdr.port_status == 0)) {
ccb_flags flags;
- uint8_t scsi_status;
uint8_t *data_ptr;
uint32_t dxfer_len;
@@ -1378,14 +1380,12 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
ctlfedata(softc, io, &flags, &data_ptr,
&dxfer_len, &csio->sglist_cnt);
- scsi_status = 0;
-
if (((flags & CAM_SEND_STATUS) == 0)
&& (dxfer_len == 0)) {
printf("%s: tag %04x no status or "
"len cdb = %02x\n", __func__,
atio->tag_id,
- atio->cdb_io.cdb_bytes[0]);
+ atio_cdb_ptr(atio)[0]);
printf("%s: tag %04x io status %#x\n",
__func__, atio->tag_id,
io->io_hdr.status);
@@ -1399,7 +1399,7 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
MSG_SIMPLE_Q_TAG : 0,
atio->tag_id,
atio->init_id,
- scsi_status,
+ 0,
/*data_ptr*/ data_ptr,
/*dxfer_len*/ dxfer_len,
/*timeout*/ 5 * 1000);
@@ -1444,7 +1444,7 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
send_ctl_io = 1;
io->io_hdr.io_type = CTL_IO_TASK;
- io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr =done_ccb;
+ PRIV_CCB(io) = done_ccb;
inot->ccb_h.io_ptr = io;
io->io_hdr.nexus.initid = inot->initiator_id;
io->io_hdr.nexus.targ_port = bus_softc->port.targ_port;
@@ -2001,7 +2001,8 @@ ctlfe_datamove(union ctl_io *io)
KASSERT(io->io_hdr.io_type == CTL_IO_SCSI,
("Unexpected io_type (%d) in ctlfe_datamove", io->io_hdr.io_type));
- ccb = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
+ io->scsiio.ext_data_filled = 0;
+ ccb = PRIV_CCB(io);
periph = xpt_path_periph(ccb->ccb_h.path);
cam_periph_lock(periph);
softc = (struct ctlfe_lun_softc *)periph->softc;
@@ -2021,7 +2022,7 @@ ctlfe_done(union ctl_io *io)
struct cam_periph *periph;
struct ctlfe_lun_softc *softc;
- ccb = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
+ ccb = PRIV_CCB(io);
periph = xpt_path_periph(ccb->ccb_h.path);
cam_periph_lock(periph);
softc = (struct ctlfe_lun_softc *)periph->softc;
diff --git a/sys/cam/scsi/scsi_all.c b/sys/cam/scsi/scsi_all.c
index 3ebce7a..f28f8dc 100644
--- a/sys/cam/scsi/scsi_all.c
+++ b/sys/cam/scsi/scsi_all.c
@@ -1370,7 +1370,7 @@ static struct asc_table_entry asc_table[] = {
{ SST(0x0E, 0x02, SS_RDEF, /* XXX TBD */
"Information unit too long") },
/* DT P R MAEBK F */
- { SST(0x0E, 0x03, SS_RDEF, /* XXX TBD */
+ { SST(0x0E, 0x03, SS_FATAL | EINVAL,
"Invalid field in command information unit") },
/* D W O BK */
{ SST(0x10, 0x00, SS_RDEF,
@@ -3616,15 +3616,9 @@ scsi_command_string(struct cam_device *device, struct ccb_scsiio *csio,
#endif /* _KERNEL/!_KERNEL */
- if ((csio->ccb_h.flags & CAM_CDB_POINTER) != 0) {
- sbuf_printf(sb, "%s. CDB: ",
- scsi_op_desc(csio->cdb_io.cdb_ptr[0], inq_data));
- scsi_cdb_sbuf(csio->cdb_io.cdb_ptr, sb);
- } else {
- sbuf_printf(sb, "%s. CDB: ",
- scsi_op_desc(csio->cdb_io.cdb_bytes[0], inq_data));
- scsi_cdb_sbuf(csio->cdb_io.cdb_bytes, sb);
- }
+ sbuf_printf(sb, "%s. CDB: ",
+ scsi_op_desc(scsiio_cdb_ptr(csio)[0], inq_data));
+ scsi_cdb_sbuf(scsiio_cdb_ptr(csio), sb);
#ifdef _KERNEL
xpt_free_ccb((union ccb *)cgd);
@@ -5029,7 +5023,6 @@ scsi_sense_sbuf(struct cam_device *device, struct ccb_scsiio *csio,
struct ccb_getdev *cgd;
#endif /* _KERNEL */
char path_str[64];
- uint8_t *cdb;
#ifndef _KERNEL
if (device == NULL)
@@ -5127,14 +5120,9 @@ scsi_sense_sbuf(struct cam_device *device, struct ccb_scsiio *csio,
sense = &csio->sense_data;
}
- if (csio->ccb_h.flags & CAM_CDB_POINTER)
- cdb = csio->cdb_io.cdb_ptr;
- else
- cdb = csio->cdb_io.cdb_bytes;
-
scsi_sense_only_sbuf(sense, csio->sense_len - csio->sense_resid, sb,
- path_str, inq_data, cdb, csio->cdb_len);
-
+ path_str, inq_data, scsiio_cdb_ptr(csio), csio->cdb_len);
+
#ifdef _KERNEL
xpt_free_ccb((union ccb*)cgd);
#endif /* _KERNEL/!_KERNEL */
@@ -7622,24 +7610,34 @@ scsi_inquiry(struct ccb_scsiio *csio, u_int32_t retries,
}
void
-scsi_mode_sense(struct ccb_scsiio *csio, u_int32_t retries,
- void (*cbfcnp)(struct cam_periph *, union ccb *),
- u_int8_t tag_action, int dbd, u_int8_t page_code,
- u_int8_t page, u_int8_t *param_buf, u_int32_t param_len,
- u_int8_t sense_len, u_int32_t timeout)
+scsi_mode_sense(struct ccb_scsiio *csio, uint32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action,
+ int dbd, uint8_t pc, uint8_t page, uint8_t *param_buf, uint32_t param_len,
+ uint8_t sense_len, uint32_t timeout)
{
- scsi_mode_sense_len(csio, retries, cbfcnp, tag_action, dbd,
- page_code, page, param_buf, param_len, 0,
- sense_len, timeout);
+ scsi_mode_sense_subpage(csio, retries, cbfcnp, tag_action, dbd,
+ pc, page, 0, param_buf, param_len, 0, sense_len, timeout);
}
void
-scsi_mode_sense_len(struct ccb_scsiio *csio, u_int32_t retries,
- void (*cbfcnp)(struct cam_periph *, union ccb *),
- u_int8_t tag_action, int dbd, u_int8_t page_code,
- u_int8_t page, u_int8_t *param_buf, u_int32_t param_len,
- int minimum_cmd_size, u_int8_t sense_len, u_int32_t timeout)
+scsi_mode_sense_len(struct ccb_scsiio *csio, uint32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action,
+ int dbd, uint8_t pc, uint8_t page, uint8_t *param_buf, uint32_t param_len,
+ int minimum_cmd_size, uint8_t sense_len, uint32_t timeout)
+{
+
+ scsi_mode_sense_subpage(csio, retries, cbfcnp, tag_action, dbd,
+ pc, page, 0, param_buf, param_len, minimum_cmd_size,
+ sense_len, timeout);
+}
+
+void
+scsi_mode_sense_subpage(struct ccb_scsiio *csio, uint32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action,
+ int dbd, uint8_t pc, uint8_t page, uint8_t subpage, uint8_t *param_buf,
+ uint32_t param_len, int minimum_cmd_size, uint8_t sense_len,
+ uint32_t timeout)
{
u_int8_t cdb_len;
@@ -7658,7 +7656,8 @@ scsi_mode_sense_len(struct ccb_scsiio *csio, u_int32_t retries,
scsi_cmd->opcode = MODE_SENSE_6;
if (dbd != 0)
scsi_cmd->byte2 |= SMS_DBD;
- scsi_cmd->page = page_code | page;
+ scsi_cmd->page = pc | page;
+ scsi_cmd->subpage = subpage;
scsi_cmd->length = param_len;
cdb_len = sizeof(*scsi_cmd);
} else {
@@ -7672,7 +7671,8 @@ scsi_mode_sense_len(struct ccb_scsiio *csio, u_int32_t retries,
scsi_cmd->opcode = MODE_SENSE_10;
if (dbd != 0)
scsi_cmd->byte2 |= SMS_DBD;
- scsi_cmd->page = page_code | page;
+ scsi_cmd->page = pc | page;
+ scsi_cmd->subpage = subpage;
scsi_ulto2b(param_len, scsi_cmd->length);
cdb_len = sizeof(*scsi_cmd);
}
diff --git a/sys/cam/scsi/scsi_all.h b/sys/cam/scsi/scsi_all.h
index 5dc496e..19ee882 100644
--- a/sys/cam/scsi/scsi_all.h
+++ b/sys/cam/scsi/scsi_all.h
@@ -228,6 +228,7 @@ struct scsi_mode_select_6
u_int8_t opcode;
u_int8_t byte2;
#define SMS_SP 0x01
+#define SMS_RTD 0x02
#define SMS_PF 0x10
u_int8_t unused[2];
u_int8_t length;
@@ -3895,21 +3896,24 @@ void scsi_inquiry(struct ccb_scsiio *csio, u_int32_t retries,
u_int8_t sense_len, u_int32_t timeout);
void scsi_mode_sense(struct ccb_scsiio *csio, u_int32_t retries,
- void (*cbfcnp)(struct cam_periph *,
- union ccb *),
- u_int8_t tag_action, int dbd,
- u_int8_t page_code, u_int8_t page,
- u_int8_t *param_buf, u_int32_t param_len,
- u_int8_t sense_len, u_int32_t timeout);
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ uint8_t tag_action, int dbd, uint8_t pc, uint8_t page,
+ uint8_t *param_buf, uint32_t param_len,
+ uint8_t sense_len, uint32_t timeout);
void scsi_mode_sense_len(struct ccb_scsiio *csio, u_int32_t retries,
- void (*cbfcnp)(struct cam_periph *,
- union ccb *),
- u_int8_t tag_action, int dbd,
- u_int8_t page_code, u_int8_t page,
- u_int8_t *param_buf, u_int32_t param_len,
- int minimum_cmd_size, u_int8_t sense_len,
- u_int32_t timeout);
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ uint8_t tag_action, int dbd, uint8_t pc, uint8_t page,
+ uint8_t *param_buf, uint32_t param_len,
+ int minimum_cmd_size, uint8_t sense_len, uint32_t timeout);
+
+void scsi_mode_sense_subpage(struct ccb_scsiio *csio,
+ uint32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ uint8_t tag_action, int dbd, uint8_t pc,
+ uint8_t page, uint8_t subpage,
+ uint8_t *param_buf, uint32_t param_len,
+ int minimum_cmd_size, uint8_t sense_len, uint32_t timeout);
void scsi_mode_select(struct ccb_scsiio *csio, u_int32_t retries,
void (*cbfcnp)(struct cam_periph *,
diff --git a/sys/cam/scsi/scsi_ch.c b/sys/cam/scsi/scsi_ch.c
index 077371d..24ce964 100644
--- a/sys/cam/scsi/scsi_ch.c
+++ b/sys/cam/scsi/scsi_ch.c
@@ -586,7 +586,7 @@ chstart(struct cam_periph *periph, union ccb *start_ccb)
/* tag_action */ MSG_SIMPLE_Q_TAG,
/* dbd */ (softc->quirks & CH_Q_NO_DBD) ?
FALSE : TRUE,
- /* page_code */ SMS_PAGE_CTRL_CURRENT,
+ /* pc */ SMS_PAGE_CTRL_CURRENT,
/* page */ CH_ELEMENT_ADDR_ASSIGN_PAGE,
/* param_buf */ (u_int8_t *)mode_buffer,
/* param_len */ mode_buffer_len,
@@ -1587,7 +1587,7 @@ chgetparams(struct cam_periph *periph)
/* cbfcnp */ chdone,
/* tag_action */ MSG_SIMPLE_Q_TAG,
/* dbd */ dbd,
- /* page_code */ SMS_PAGE_CTRL_CURRENT,
+ /* pc */ SMS_PAGE_CTRL_CURRENT,
/* page */ CH_ELEMENT_ADDR_ASSIGN_PAGE,
/* param_buf */ (u_int8_t *)mode_buffer,
/* param_len */ mode_buffer_len,
@@ -1650,7 +1650,7 @@ chgetparams(struct cam_periph *periph)
/* cbfcnp */ chdone,
/* tag_action */ MSG_SIMPLE_Q_TAG,
/* dbd */ dbd,
- /* page_code */ SMS_PAGE_CTRL_CURRENT,
+ /* pc */ SMS_PAGE_CTRL_CURRENT,
/* page */ CH_DEVICE_CAP_PAGE,
/* param_buf */ (u_int8_t *)mode_buffer,
/* param_len */ mode_buffer_len,
diff --git a/sys/cam/scsi/scsi_pass.c b/sys/cam/scsi/scsi_pass.c
index 09694c5..d09d7d7 100644
--- a/sys/cam/scsi/scsi_pass.c
+++ b/sys/cam/scsi/scsi_pass.c
@@ -28,8 +28,6 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
-#include "opt_kdtrace.h"
-
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
diff --git a/sys/compat/freebsd32/freebsd32_misc.c b/sys/compat/freebsd32/freebsd32_misc.c
index 2f60c13..db5e45d 100644
--- a/sys/compat/freebsd32/freebsd32_misc.c
+++ b/sys/compat/freebsd32/freebsd32_misc.c
@@ -244,7 +244,8 @@ copy_statfs(struct statfs *in, struct statfs32 *out)
#ifdef COMPAT_FREEBSD4
int
-freebsd4_freebsd32_getfsstat(struct thread *td, struct freebsd4_freebsd32_getfsstat_args *uap)
+freebsd4_freebsd32_getfsstat(struct thread *td,
+ struct freebsd4_freebsd32_getfsstat_args *uap)
{
struct statfs *buf, *sp;
struct statfs32 stat32;
diff --git a/sys/conf/NOTES b/sys/conf/NOTES
index 58b71f6..e5ab44a 100644
--- a/sys/conf/NOTES
+++ b/sys/conf/NOTES
@@ -747,8 +747,7 @@ options NETGRAPH_IPFW
options NETGRAPH_KSOCKET
options NETGRAPH_L2TP
options NETGRAPH_LMI
-# MPPC compression requires proprietary files (not included)
-#options NETGRAPH_MPPC_COMPRESSION
+options NETGRAPH_MPPC_COMPRESSION
options NETGRAPH_MPPC_ENCRYPTION
options NETGRAPH_NETFLOW
options NETGRAPH_NAT
@@ -1922,8 +1921,9 @@ device xmphy # XaQti XMAC II
# cm: Arcnet SMC COM90c26 / SMC COM90c56
# (and SMC COM90c66 in '56 compatibility mode) adapters.
# cxgb: Chelsio T3 based 1GbE/10GbE PCIe Ethernet adapters.
-# cxgbe:Chelsio T4 and T5 based 1GbE/10GbE/40GbE PCIe Ethernet adapters.
-# cxgbev: Chelsio T4 and T5 based PCIe Virtual Functions.
+# cxgbe:Chelsio T4, T5, and T6-based 1/10/25/40/100GbE PCIe Ethernet
+# adapters.
+# cxgbev: Chelsio T4, T5, and T6-based PCIe Virtual Functions.
# dc: Support for PCI fast ethernet adapters based on the DEC/Intel 21143
# and various workalikes including:
# the ADMtek AL981 Comet and AN985 Centaur, the ASIX Electronics
@@ -2072,9 +2072,6 @@ device bce # Broadcom BCM5706/BCM5708 Gigabit Ethernet
device bfe # Broadcom BCM440x 10/100 Ethernet
device bge # Broadcom BCM570xx Gigabit Ethernet
device cas # Sun Cassini/Cassini+ and NS DP83065 Saturn
-device cxgb # Chelsio T3 10 Gigabit Ethernet
-device cxgb_t3fw # Chelsio T3 10 Gigabit Ethernet firmware
-device cxgbe # Chelsio T4 and T5 1GbE/10GbE/40GbE
device dc # DEC/Intel 21143 and various workalikes
device et # Agere ET1310 10/100/Gigabit Ethernet
device fxp # Intel EtherExpress PRO/100B (82557, 82558)
@@ -2105,7 +2102,10 @@ device wb # Winbond W89C840F
device xl # 3Com 3c90x (``Boomerang'', ``Cyclone'')
# PCI Ethernet NICs.
-device cxgbev # Chelsio T4 and T5 1GbE/10GbE/40GbE VF
+device cxgb # Chelsio T3 10 Gigabit Ethernet
+device cxgb_t3fw # Chelsio T3 10 Gigabit Ethernet firmware
+device cxgbe # Chelsio T4-T6 1/10/25/40/100 Gigabit Ethernet
+device cxgbev # Chelsio T4-T6 Virtual Functions
device de # DEC/Intel DC21x4x (``Tulip'')
device em # Intel Pro/1000 Gigabit Ethernet
device igb # Intel Pro/1000 PCIE Gigabit Ethernet
diff --git a/sys/conf/options b/sys/conf/options
index acd31c4..3892268 100644
--- a/sys/conf/options
+++ b/sys/conf/options
@@ -512,7 +512,6 @@ NETGRAPH_IPFW opt_netgraph.h
NETGRAPH_KSOCKET opt_netgraph.h
NETGRAPH_L2TP opt_netgraph.h
NETGRAPH_LMI opt_netgraph.h
-# MPPC compression requires proprietary files (not included)
NETGRAPH_MPPC_COMPRESSION opt_netgraph.h
NETGRAPH_MPPC_ENCRYPTION opt_netgraph.h
NETGRAPH_NAT opt_netgraph.h
diff --git a/sys/dev/age/if_age.c b/sys/dev/age/if_age.c
index 7239121..51edec3 100644
--- a/sys/dev/age/if_age.c
+++ b/sys/dev/age/if_age.c
@@ -587,7 +587,7 @@ age_attach(device_t dev)
/* Create device sysctl node. */
age_sysctl_node(sc);
- if ((error = age_dma_alloc(sc) != 0))
+ if ((error = age_dma_alloc(sc)) != 0)
goto fail;
/* Load station address. */
diff --git a/sys/dev/ahci/ahci.c b/sys/dev/ahci/ahci.c
index 5fabdaf..a9061bc 100644
--- a/sys/dev/ahci/ahci.c
+++ b/sys/dev/ahci/ahci.c
@@ -711,7 +711,7 @@ ahci_ch_attach(device_t dev)
/* Construct SIM entry */
ch->sim = cam_sim_alloc(ahciaction, ahcipoll, "ahcich", ch,
device_get_unit(dev), (struct mtx *)&ch->mtx,
- min(2, ch->numslots),
+ (ch->quirks & AHCI_Q_NOCCS) ? 1 : min(2, ch->numslots),
(ch->caps & AHCI_CAP_SNCQ) ? ch->numslots : 0,
devq);
if (ch->sim == NULL) {
@@ -1122,8 +1122,6 @@ ahci_ch_intr(void *arg)
/* Read interrupt statuses. */
istatus = ATA_INL(ch->r_mem, AHCI_P_IS);
- if (istatus == 0)
- return;
mtx_lock(&ch->mtx);
ahci_ch_intr_main(ch, istatus);
@@ -1140,8 +1138,6 @@ ahci_ch_intr_direct(void *arg)
/* Read interrupt statuses. */
istatus = ATA_INL(ch->r_mem, AHCI_P_IS);
- if (istatus == 0)
- return;
mtx_lock(&ch->mtx);
ch->batch = 1;
@@ -1228,8 +1224,19 @@ ahci_ch_intr_main(struct ahci_channel *ch, uint32_t istatus)
/* Process command errors */
if (istatus & (AHCI_P_IX_OF | AHCI_P_IX_IF |
AHCI_P_IX_HBD | AHCI_P_IX_HBF | AHCI_P_IX_TFE)) {
- ccs = (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_CCS_MASK)
- >> AHCI_P_CMD_CCS_SHIFT;
+ if (ch->quirks & AHCI_Q_NOCCS) {
+ /*
+ * ASMedia chips sometimes report failed commands as
+ * completed. Count all running commands as failed.
+ */
+ cstatus |= ch->rslots;
+
+ /* They also report wrong CCS, so try to guess one. */
+ ccs = powerof2(cstatus) ? ffs(cstatus) - 1 : -1;
+ } else {
+ ccs = (ATA_INL(ch->r_mem, AHCI_P_CMD) &
+ AHCI_P_CMD_CCS_MASK) >> AHCI_P_CMD_CCS_SHIFT;
+ }
//device_printf(dev, "%s ERROR is %08x cs %08x ss %08x rs %08x tfd %02x serr %08x fbs %08x ccs %d\n",
// __func__, istatus, cstatus, sstatus, ch->rslots, ATA_INL(ch->r_mem, AHCI_P_TFD),
// serr, ATA_INL(ch->r_mem, AHCI_P_FBS), ccs);
diff --git a/sys/dev/ahci/ahci.h b/sys/dev/ahci/ahci.h
index 61643c1..ca7631e 100644
--- a/sys/dev/ahci/ahci.h
+++ b/sys/dev/ahci/ahci.h
@@ -574,6 +574,7 @@ enum ahci_err_type {
#define AHCI_Q_SATA1_UNIT0 0x00008000 /* need better method for this */
#define AHCI_Q_ABAR0 0x00010000
#define AHCI_Q_1MSI 0x00020000
+#define AHCI_Q_NOCCS 0x00400000
#define AHCI_Q_BIT_STRING \
"\020" \
@@ -594,7 +595,8 @@ enum ahci_err_type {
"\017MAXIO_64K" \
"\020SATA1_UNIT0" \
"\021ABAR0" \
- "\0221MSI"
+ "\0221MSI" \
+ "\027NOCCS"
int ahci_attach(device_t dev);
int ahci_detach(device_t dev);
diff --git a/sys/dev/ahci/ahci_pci.c b/sys/dev/ahci/ahci_pci.c
index bb14ed6..7f4a1b6 100644
--- a/sys/dev/ahci/ahci_pci.c
+++ b/sys/dev/ahci/ahci_pci.c
@@ -73,8 +73,15 @@ static const struct {
{0x78021022, 0x00, "AMD Hudson-2", 0},
{0x78031022, 0x00, "AMD Hudson-2", 0},
{0x78041022, 0x00, "AMD Hudson-2", 0},
- {0x06111b21, 0x00, "ASMedia ASM2106", 0},
- {0x06121b21, 0x00, "ASMedia ASM1061", 0},
+ {0x06011b21, 0x00, "ASMedia ASM1060", AHCI_Q_NOCCS},
+ {0x06021b21, 0x00, "ASMedia ASM1060", AHCI_Q_NOCCS},
+ {0x06111b21, 0x00, "ASMedia ASM1061", AHCI_Q_NOCCS},
+ {0x06121b21, 0x00, "ASMedia ASM1062", AHCI_Q_NOCCS},
+ {0x06201b21, 0x00, "ASMedia ASM106x", AHCI_Q_NOCCS},
+ {0x06211b21, 0x00, "ASMedia ASM106x", AHCI_Q_NOCCS},
+ {0x06221b21, 0x00, "ASMedia ASM106x", AHCI_Q_NOCCS},
+ {0x06241b21, 0x00, "ASMedia ASM106x", AHCI_Q_NOCCS},
+ {0x06251b21, 0x00, "ASMedia ASM106x", AHCI_Q_NOCCS},
{0x26528086, 0x00, "Intel ICH6", AHCI_Q_NOFORCE},
{0x26538086, 0x00, "Intel ICH6M", AHCI_Q_NOFORCE},
{0x26818086, 0x00, "Intel ESB2", 0},
diff --git a/sys/dev/alc/if_alc.c b/sys/dev/alc/if_alc.c
index ba37b5f..76e4614 100644
--- a/sys/dev/alc/if_alc.c
+++ b/sys/dev/alc/if_alc.c
@@ -120,6 +120,8 @@ static struct alc_ident alc_ident_table[] = {
"Atheros AR8172 PCIe Fast Ethernet" },
{ VENDORID_ATHEROS, DEVICEID_ATHEROS_E2200, 9 * 1024,
"Killer E2200 Gigabit Ethernet" },
+ { VENDORID_ATHEROS, DEVICEID_ATHEROS_E2400, 9 * 1024,
+ "Killer E2400 Gigabit Ethernet" },
{ 0, 0, 0, NULL}
};
@@ -254,7 +256,7 @@ static struct resource_spec alc_irq_spec_msix[] = {
{ -1, 0, 0 }
};
-static uint32_t alc_dma_burst[] = { 128, 256, 512, 1024, 2048, 4096, 0 };
+static uint32_t alc_dma_burst[] = { 128, 256, 512, 1024, 2048, 4096, 0, 0 };
static int
alc_miibus_readreg(device_t dev, int phy, int reg)
@@ -1079,6 +1081,7 @@ alc_phy_down(struct alc_softc *sc)
switch (sc->alc_ident->deviceid) {
case DEVICEID_ATHEROS_AR8161:
case DEVICEID_ATHEROS_E2200:
+ case DEVICEID_ATHEROS_E2400:
case DEVICEID_ATHEROS_AR8162:
case DEVICEID_ATHEROS_AR8171:
case DEVICEID_ATHEROS_AR8172:
@@ -1396,12 +1399,15 @@ alc_attach(device_t dev)
* shows the same PHY model/revision number of AR8131.
*/
switch (sc->alc_ident->deviceid) {
+ case DEVICEID_ATHEROS_E2200:
+ case DEVICEID_ATHEROS_E2400:
+ sc->alc_flags |= ALC_FLAG_E2X00;
+ /* FALLTHROUGH */
case DEVICEID_ATHEROS_AR8161:
if (pci_get_subvendor(dev) == VENDORID_ATHEROS &&
pci_get_subdevice(dev) == 0x0091 && sc->alc_rev == 0)
sc->alc_flags |= ALC_FLAG_LINK_WAR;
/* FALLTHROUGH */
- case DEVICEID_ATHEROS_E2200:
case DEVICEID_ATHEROS_AR8171:
sc->alc_flags |= ALC_FLAG_AR816X_FAMILY;
break;
@@ -1472,6 +1478,12 @@ alc_attach(device_t dev)
sc->alc_dma_rd_burst = 3;
if (alc_dma_burst[sc->alc_dma_wr_burst] > 1024)
sc->alc_dma_wr_burst = 3;
+ /*
+ * Force maximum payload size to 128 bytes for E2200/E2400.
+ * Otherwise it triggers DMA write error.
+ */
+ if ((sc->alc_flags & ALC_FLAG_E2X00) != 0)
+ sc->alc_dma_wr_burst = 0;
alc_init_pcie(sc);
}
@@ -1531,7 +1543,7 @@ alc_attach(device_t dev)
/* Create device sysctl node. */
alc_sysctl_node(sc);
- if ((error = alc_dma_alloc(sc) != 0))
+ if ((error = alc_dma_alloc(sc)) != 0)
goto fail;
/* Load station address. */
@@ -4194,13 +4206,17 @@ alc_init_locked(struct alc_softc *sc)
reg = (RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) &
RXQ_CFG_RD_BURST_MASK;
reg |= RXQ_CFG_RSS_MODE_DIS;
- if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
+ if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
reg |= (RXQ_CFG_816X_IDT_TBL_SIZE_DEFAULT <<
RXQ_CFG_816X_IDT_TBL_SIZE_SHIFT) &
RXQ_CFG_816X_IDT_TBL_SIZE_MASK;
- if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0 &&
- sc->alc_ident->deviceid != DEVICEID_ATHEROS_AR8151_V2)
- reg |= RXQ_CFG_ASPM_THROUGHPUT_LIMIT_1M;
+ if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0)
+ reg |= RXQ_CFG_ASPM_THROUGHPUT_LIMIT_100M;
+ } else {
+ if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0 &&
+ sc->alc_ident->deviceid != DEVICEID_ATHEROS_AR8151_V2)
+ reg |= RXQ_CFG_ASPM_THROUGHPUT_LIMIT_100M;
+ }
CSR_WRITE_4(sc, ALC_RXQ_CFG, reg);
/* Configure DMA parameters. */
@@ -4224,12 +4240,12 @@ alc_init_locked(struct alc_softc *sc)
switch (AR816X_REV(sc->alc_rev)) {
case AR816X_REV_A0:
case AR816X_REV_A1:
- reg |= DMA_CFG_RD_CHNL_SEL_1;
+ reg |= DMA_CFG_RD_CHNL_SEL_2;
break;
case AR816X_REV_B0:
/* FALLTHROUGH */
default:
- reg |= DMA_CFG_RD_CHNL_SEL_3;
+ reg |= DMA_CFG_RD_CHNL_SEL_4;
break;
}
}
diff --git a/sys/dev/alc/if_alcreg.h b/sys/dev/alc/if_alcreg.h
index 1ad75a3..ae63084 100644
--- a/sys/dev/alc/if_alcreg.h
+++ b/sys/dev/alc/if_alcreg.h
@@ -45,10 +45,11 @@
#define DEVICEID_ATHEROS_AR8152_B 0x2060 /* L2C V1.1 */
#define DEVICEID_ATHEROS_AR8152_B2 0x2062 /* L2C V2.0 */
#define DEVICEID_ATHEROS_AR8161 0x1091
-#define DEVICEID_ATHEROS_E2200 0xE091
#define DEVICEID_ATHEROS_AR8162 0x1090
#define DEVICEID_ATHEROS_AR8171 0x10A1
#define DEVICEID_ATHEROS_AR8172 0x10A0
+#define DEVICEID_ATHEROS_E2200 0xE091
+#define DEVICEID_ATHEROS_E2400 0xE0A1
#define ATHEROS_AR8152_B_V10 0xC0
#define ATHEROS_AR8152_B_V11 0xC1
diff --git a/sys/dev/alc/if_alcvar.h b/sys/dev/alc/if_alcvar.h
index 9a73ef4..a1c3382 100644
--- a/sys/dev/alc/if_alcvar.h
+++ b/sys/dev/alc/if_alcvar.h
@@ -235,7 +235,8 @@ struct alc_softc {
#define ALC_FLAG_APS 0x1000
#define ALC_FLAG_AR816X_FAMILY 0x2000
#define ALC_FLAG_LINK_WAR 0x4000
-#define ALC_FLAG_LINK 0x8000
+#define ALC_FLAG_E2X00 0x8000
+#define ALC_FLAG_LINK 0x10000
struct callout alc_tick_ch;
struct alc_hw_stats alc_stats;
diff --git a/sys/dev/ale/if_ale.c b/sys/dev/ale/if_ale.c
index 6f0f83e..4eb5835 100644
--- a/sys/dev/ale/if_ale.c
+++ b/sys/dev/ale/if_ale.c
@@ -602,7 +602,7 @@ ale_attach(device_t dev)
/* Create device sysctl node. */
ale_sysctl_node(sc);
- if ((error = ale_dma_alloc(sc) != 0))
+ if ((error = ale_dma_alloc(sc)) != 0)
goto fail;
/* Load station address. */
diff --git a/sys/dev/amdsbwd/amdsbwd.c b/sys/dev/amdsbwd/amdsbwd.c
index f8824e5..628aef2 100644
--- a/sys/dev/amdsbwd/amdsbwd.c
+++ b/sys/dev/amdsbwd/amdsbwd.c
@@ -416,8 +416,8 @@ amdsbwd_probe(device_t dev)
return (ENXIO);
}
rid = 0;
- res = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid, 0ul, ~0ul,
- AMDSB_PMIO_WIDTH, RF_ACTIVE | RF_SHAREABLE);
+ res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
+ RF_ACTIVE | RF_SHAREABLE);
if (res == NULL) {
device_printf(dev, "bus_alloc_resource for IO failed\n");
return (ENXIO);
diff --git a/sys/dev/arcmsr/arcmsr.c b/sys/dev/arcmsr/arcmsr.c
index b4e6ce5..6bcadb7 100644
--- a/sys/dev/arcmsr/arcmsr.c
+++ b/sys/dev/arcmsr/arcmsr.c
@@ -872,7 +872,7 @@ static void arcmsr_srb_timeout(void *arg)
ARCMSR_LOCK_ACQUIRE(&acb->isr_lock);
if(srb->srb_state == ARCMSR_SRB_START)
{
- cmd = srb->pccb->csio.cdb_io.cdb_bytes[0];
+ cmd = scsiio_cdb_ptr(&srb->pccb->csio)[0];
srb->srb_state = ARCMSR_SRB_TIMEOUT;
srb->pccb->ccb_h.status |= CAM_CMD_TIMEOUT;
arcmsr_srb_complete(srb, 1);
@@ -997,7 +997,7 @@ static void arcmsr_build_srb(struct CommandControlBlock *srb,
arcmsr_cdb->LUN = pccb->ccb_h.target_lun;
arcmsr_cdb->Function = 1;
arcmsr_cdb->CdbLength = (u_int8_t)pcsio->cdb_len;
- bcopy(pcsio->cdb_io.cdb_bytes, arcmsr_cdb->Cdb, pcsio->cdb_len);
+ bcopy(scsiio_cdb_ptr(pcsio), arcmsr_cdb->Cdb, pcsio->cdb_len);
if(nseg != 0) {
struct AdapterControlBlock *acb = srb->acb;
bus_dmasync_op_t op;
@@ -2453,10 +2453,11 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb *p
struct CMD_MESSAGE_FIELD *pcmdmessagefld;
int retvalue = 0, transfer_len = 0;
char *buffer;
- u_int32_t controlcode = (u_int32_t ) pccb->csio.cdb_io.cdb_bytes[5] << 24 |
- (u_int32_t ) pccb->csio.cdb_io.cdb_bytes[6] << 16 |
- (u_int32_t ) pccb->csio.cdb_io.cdb_bytes[7] << 8 |
- (u_int32_t ) pccb->csio.cdb_io.cdb_bytes[8];
+ uint8_t *ptr = scsiio_cdb_ptr(&pccb->csio);
+ u_int32_t controlcode = (u_int32_t ) ptr[5] << 24 |
+ (u_int32_t ) ptr[6] << 16 |
+ (u_int32_t ) ptr[7] << 8 |
+ (u_int32_t ) ptr[8];
/* 4 bytes: Areca io control code */
if ((pccb->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
buffer = pccb->csio.data_ptr;
@@ -2683,7 +2684,7 @@ static void arcmsr_execute_srb(void *arg, bus_dma_segment_t *dm_segs, int nseg,
if(acb->devstate[target][lun] == ARECA_RAID_GONE) {
u_int8_t block_cmd, cmd;
- cmd = pccb->csio.cdb_io.cdb_bytes[0];
+ cmd = scsiio_cdb_ptr(&pccb->csio)[0];
block_cmd = cmd & 0x0f;
if(block_cmd == 0x08 || block_cmd == 0x0a) {
printf("arcmsr%d:block 'read/write' command "
@@ -2800,7 +2801,7 @@ static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
return;
}
pccb->ccb_h.status |= CAM_REQ_CMP;
- switch (pccb->csio.cdb_io.cdb_bytes[0]) {
+ switch (scsiio_cdb_ptr(&pccb->csio)[0]) {
case INQUIRY: {
unsigned char inqdata[36];
char *buffer = pccb->csio.data_ptr;
@@ -2853,6 +2854,12 @@ static void arcmsr_action(struct cam_sim *psim, union ccb *pccb)
int target = pccb->ccb_h.target_id;
int error;
+ if (pccb->ccb_h.flags & CAM_CDB_PHYS) {
+ pccb->ccb_h.status = CAM_REQ_INVALID;
+ xpt_done(pccb);
+ return;
+ }
+
if(target == 16) {
/* virtual device for iop message transfer */
arcmsr_handle_virtual_command(acb, pccb);
@@ -4143,7 +4150,7 @@ static u_int32_t arcmsr_initialize(device_t dev)
u_int32_t rid0 = PCIR_BAR(0);
vm_offset_t mem_base0;
- acb->sys_res_arcmsr[0] = bus_alloc_resource(dev,SYS_RES_MEMORY, &rid0, 0ul, ~0ul, 0x1000, RF_ACTIVE);
+ acb->sys_res_arcmsr[0] = bus_alloc_resource_any(dev,SYS_RES_MEMORY, &rid0, RF_ACTIVE);
if(acb->sys_res_arcmsr[0] == NULL) {
arcmsr_free_resource(acb);
printf("arcmsr%d: bus_alloc_resource failure!\n", device_get_unit(dev));
@@ -4177,11 +4184,11 @@ static u_int32_t arcmsr_initialize(device_t dev)
size = sizeof(struct HBB_DOORBELL);
for(i=0; i < 2; i++) {
if(i == 0) {
- acb->sys_res_arcmsr[i] = bus_alloc_resource(dev,SYS_RES_MEMORY, &rid[i],
- 0ul, ~0ul, size, RF_ACTIVE);
+ acb->sys_res_arcmsr[i] = bus_alloc_resource_any(dev,SYS_RES_MEMORY, &rid[i],
+ RF_ACTIVE);
} else {
- acb->sys_res_arcmsr[i] = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid[i],
- 0ul, ~0ul, sizeof(struct HBB_RWBUFFER), RF_ACTIVE);
+ acb->sys_res_arcmsr[i] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid[i],
+ RF_ACTIVE);
}
if(acb->sys_res_arcmsr[i] == NULL) {
arcmsr_free_resource(acb);
@@ -4224,7 +4231,7 @@ static u_int32_t arcmsr_initialize(device_t dev)
u_int32_t rid0 = PCIR_BAR(1);
vm_offset_t mem_base0;
- acb->sys_res_arcmsr[0] = bus_alloc_resource(dev,SYS_RES_MEMORY, &rid0, 0ul, ~0ul, sizeof(struct HBC_MessageUnit), RF_ACTIVE);
+ acb->sys_res_arcmsr[0] = bus_alloc_resource_any(dev,SYS_RES_MEMORY, &rid0, RF_ACTIVE);
if(acb->sys_res_arcmsr[0] == NULL) {
arcmsr_free_resource(acb);
printf("arcmsr%d: bus_alloc_resource failure!\n", device_get_unit(dev));
@@ -4251,7 +4258,7 @@ static u_int32_t arcmsr_initialize(device_t dev)
u_int32_t rid0 = PCIR_BAR(0);
vm_offset_t mem_base0;
- acb->sys_res_arcmsr[0] = bus_alloc_resource(dev,SYS_RES_MEMORY, &rid0, 0ul, ~0ul, sizeof(struct HBD_MessageUnit), RF_ACTIVE);
+ acb->sys_res_arcmsr[0] = bus_alloc_resource_any(dev,SYS_RES_MEMORY, &rid0, RF_ACTIVE);
if(acb->sys_res_arcmsr[0] == NULL) {
arcmsr_free_resource(acb);
printf("arcmsr%d: bus_alloc_resource failure!\n", device_get_unit(dev));
diff --git a/sys/dev/cxgbe/t4_main.c b/sys/dev/cxgbe/t4_main.c
index d976313..82d1dec 100644
--- a/sys/dev/cxgbe/t4_main.c
+++ b/sys/dev/cxgbe/t4_main.c
@@ -611,6 +611,7 @@ struct {
#endif
}, t6_pciids[] = {
{0xc006, "Chelsio Terminator 6 FPGA"}, /* T6 PE10K6 FPGA (PF0) */
+ {0x6400, "Chelsio T6225-DBG"}, /* 2 x 10/25G, debug */
{0x6401, "Chelsio T6225-CR"}, /* 2 x 10/25G */
{0x6402, "Chelsio T6225-SO-CR"}, /* 2 x 10/25G, nomem */
{0x6407, "Chelsio T62100-LP-CR"}, /* 2 x 40/50/100G */
diff --git a/sys/dev/cxgbe/t4_sge.c b/sys/dev/cxgbe/t4_sge.c
index 9152d9f..d831f94 100644
--- a/sys/dev/cxgbe/t4_sge.c
+++ b/sys/dev/cxgbe/t4_sge.c
@@ -2289,7 +2289,7 @@ slowpath:
w = &eq->desc[eq->pidx];
IDXINCR(eq->pidx, ndesc, eq->sidx);
- if (__predict_false(eq->pidx < ndesc - 1)) {
+ if (__predict_false(cookie->pidx + ndesc > eq->sidx)) {
w = &wrq->ss[0];
wrq->ss_pidx = cookie->pidx;
wrq->ss_len = len16 * 16;
@@ -3296,12 +3296,13 @@ ctrl_eq_alloc(struct adapter *sc, struct sge_eq *eq)
c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_CMPLIQID(eq->iqid));
c.physeqid_pkd = htobe32(0);
c.fetchszm_to_iqid =
- htobe32(V_FW_EQ_CTRL_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |
+ htobe32(V_FW_EQ_CTRL_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) |
V_FW_EQ_CTRL_CMD_PCIECHN(eq->tx_chan) |
F_FW_EQ_CTRL_CMD_FETCHRO | V_FW_EQ_CTRL_CMD_IQID(eq->iqid));
c.dcaen_to_eqsize =
htobe32(V_FW_EQ_CTRL_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
V_FW_EQ_CTRL_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
+ V_FW_EQ_CTRL_CMD_CIDXFTHRESH(X_CIDXFLUSHTHRESH_32) |
V_FW_EQ_CTRL_CMD_EQSIZE(qsize));
c.eqaddr = htobe64(eq->ba);
diff --git a/sys/dev/cxgbe/tom/t4_connect.c b/sys/dev/cxgbe/tom/t4_connect.c
index 5ec627c..02f01ef 100644
--- a/sys/dev/cxgbe/tom/t4_connect.c
+++ b/sys/dev/cxgbe/tom/t4_connect.c
@@ -107,7 +107,7 @@ free_atid(struct adapter *sc, int atid)
}
/*
- * Active open failed.
+ * Active open succeeded.
*/
static int
do_act_establish(struct sge_iq *iq, const struct rss_header *rss,
@@ -126,9 +126,10 @@ do_act_establish(struct sge_iq *iq, const struct rss_header *rss,
CTR3(KTR_CXGBE, "%s: atid %u, tid %u", __func__, atid, tid);
free_atid(sc, atid);
+ CURVNET_SET(toep->vnet);
INP_WLOCK(inp);
toep->tid = tid;
- insert_tid(sc, tid, toep);
+ insert_tid(sc, tid, toep, inp->inp_vflag & INP_IPV6 ? 2 : 1);
if (inp->inp_flags & INP_DROPPED) {
/* socket closed by the kernel before hw told us it connected */
@@ -141,6 +142,7 @@ do_act_establish(struct sge_iq *iq, const struct rss_header *rss,
make_established(toep, cpl->snd_isn, cpl->rcv_isn, cpl->tcp_opt);
done:
INP_WUNLOCK(inp);
+ CURVNET_RESTORE();
return (0);
}
@@ -178,6 +180,7 @@ act_open_failure_cleanup(struct adapter *sc, u_int atid, u_int status)
free_atid(sc, atid);
toep->tid = -1;
+ CURVNET_SET(toep->vnet);
if (status != EAGAIN)
INP_INFO_RLOCK(&V_tcbinfo);
INP_WLOCK(inp);
@@ -185,8 +188,12 @@ act_open_failure_cleanup(struct adapter *sc, u_int atid, u_int status)
final_cpl_received(toep); /* unlocks inp */
if (status != EAGAIN)
INP_INFO_RUNLOCK(&V_tcbinfo);
+ CURVNET_RESTORE();
}
+/*
+ * Active open failed.
+ */
static int
do_act_open_rpl(struct sge_iq *iq, const struct rss_header *rss,
struct mbuf *m)
@@ -268,6 +275,14 @@ t4_init_connect_cpl_handlers(void)
t4_register_cpl_handler(CPL_ACT_OPEN_RPL, do_act_open_rpl);
}
+void
+t4_uninit_connect_cpl_handlers(void)
+{
+
+ t4_register_cpl_handler(CPL_ACT_ESTABLISH, NULL);
+ t4_register_cpl_handler(CPL_ACT_OPEN_RPL, NULL);
+}
+
#define DONT_OFFLOAD_ACTIVE_OPEN(x) do { \
reason = __LINE__; \
rc = (x); \
@@ -357,6 +372,7 @@ t4_connect(struct toedev *tod, struct socket *so, struct rtentry *rt,
if (wr == NULL)
DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM);
+ toep->vnet = so->so_vnet;
if (sc->tt.ddp && (so->so_options & SO_NO_DDP) == 0)
set_tcpddp_ulp_mode(toep);
else
diff --git a/sys/dev/cxgbe/tom/t4_cpl_io.c b/sys/dev/cxgbe/tom/t4_cpl_io.c
index f446432..6e26c2b 100644
--- a/sys/dev/cxgbe/tom/t4_cpl_io.c
+++ b/sys/dev/cxgbe/tom/t4_cpl_io.c
@@ -302,7 +302,6 @@ make_established(struct toepcb *toep, uint32_t snd_isn, uint32_t rcv_isn,
uint16_t tcpopt = be16toh(opt);
struct flowc_tx_params ftxp;
- CURVNET_SET(so->so_vnet);
INP_WLOCK_ASSERT(inp);
KASSERT(tp->t_state == TCPS_SYN_SENT ||
tp->t_state == TCPS_SYN_RECEIVED,
@@ -353,7 +352,6 @@ make_established(struct toepcb *toep, uint32_t snd_isn, uint32_t rcv_isn,
send_flowc_wr(toep, &ftxp);
soisconnected(so);
- CURVNET_RESTORE();
}
static int
@@ -1107,6 +1105,7 @@ do_peer_close(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));
+ CURVNET_SET(toep->vnet);
INP_INFO_RLOCK(&V_tcbinfo);
INP_WLOCK(inp);
tp = intotcpcb(inp);
@@ -1150,6 +1149,7 @@ do_peer_close(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
tcp_twstart(tp);
INP_UNLOCK_ASSERT(inp); /* safe, we have a ref on the inp */
INP_INFO_RUNLOCK(&V_tcbinfo);
+ CURVNET_RESTORE();
INP_WLOCK(inp);
final_cpl_received(toep);
@@ -1162,6 +1162,7 @@ do_peer_close(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
done:
INP_WUNLOCK(inp);
INP_INFO_RUNLOCK(&V_tcbinfo);
+ CURVNET_RESTORE();
return (0);
}
@@ -1188,6 +1189,7 @@ do_close_con_rpl(struct sge_iq *iq, const struct rss_header *rss,
KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));
+ CURVNET_SET(toep->vnet);
INP_INFO_RLOCK(&V_tcbinfo);
INP_WLOCK(inp);
tp = intotcpcb(inp);
@@ -1207,6 +1209,7 @@ do_close_con_rpl(struct sge_iq *iq, const struct rss_header *rss,
release:
INP_UNLOCK_ASSERT(inp); /* safe, we have a ref on the inp */
INP_INFO_RUNLOCK(&V_tcbinfo);
+ CURVNET_RESTORE();
INP_WLOCK(inp);
final_cpl_received(toep); /* no more CPLs expected */
@@ -1231,6 +1234,7 @@ release:
done:
INP_WUNLOCK(inp);
INP_INFO_RUNLOCK(&V_tcbinfo);
+ CURVNET_RESTORE();
return (0);
}
@@ -1304,6 +1308,7 @@ do_abort_req(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
}
inp = toep->inp;
+ CURVNET_SET(toep->vnet);
INP_INFO_RLOCK(&V_tcbinfo); /* for tcp_close */
INP_WLOCK(inp);
@@ -1339,6 +1344,7 @@ do_abort_req(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
final_cpl_received(toep);
done:
INP_INFO_RUNLOCK(&V_tcbinfo);
+ CURVNET_RESTORE();
send_abort_rpl(sc, ofld_txq, tid, CPL_ABORT_NO_RST);
return (0);
}
@@ -1456,18 +1462,21 @@ do_rx_data(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
SOCKBUF_UNLOCK(sb);
INP_WUNLOCK(inp);
+ CURVNET_SET(toep->vnet);
INP_INFO_RLOCK(&V_tcbinfo);
INP_WLOCK(inp);
tp = tcp_drop(tp, ECONNRESET);
if (tp)
INP_WUNLOCK(inp);
INP_INFO_RUNLOCK(&V_tcbinfo);
+ CURVNET_RESTORE();
return (0);
}
/* receive buffer autosize */
- CURVNET_SET(so->so_vnet);
+ MPASS(toep->vnet == so->so_vnet);
+ CURVNET_SET(toep->vnet);
if (sb->sb_flags & SB_AUTOSIZE &&
V_tcp_do_autorcvbuf &&
sb->sb_hiwat < V_tcp_autorcvbuf_max &&
@@ -1674,10 +1683,12 @@ do_fw4_ack(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
if (toep->flags & TPF_TX_SUSPENDED &&
toep->tx_credits >= toep->tx_total / 4) {
toep->flags &= ~TPF_TX_SUSPENDED;
+ CURVNET_SET(toep->vnet);
if (toep->ulp_mode == ULP_MODE_ISCSI)
t4_push_pdus(sc, toep, plen);
else
t4_push_frames(sc, toep, plen);
+ CURVNET_RESTORE();
} else if (plen > 0) {
struct sockbuf *sb = &so->so_snd;
int sbu;
@@ -1787,11 +1798,11 @@ void
t4_uninit_cpl_io_handlers(void)
{
- t4_register_cpl_handler(CPL_PEER_CLOSE, do_peer_close);
- t4_register_cpl_handler(CPL_CLOSE_CON_RPL, do_close_con_rpl);
- t4_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req);
- t4_register_cpl_handler(CPL_ABORT_RPL_RSS, do_abort_rpl);
- t4_register_cpl_handler(CPL_RX_DATA, do_rx_data);
- t4_register_cpl_handler(CPL_FW4_ACK, do_fw4_ack);
+ t4_register_cpl_handler(CPL_PEER_CLOSE, NULL);
+ t4_register_cpl_handler(CPL_CLOSE_CON_RPL, NULL);
+ t4_register_cpl_handler(CPL_ABORT_REQ_RSS, NULL);
+ t4_register_cpl_handler(CPL_ABORT_RPL_RSS, NULL);
+ t4_register_cpl_handler(CPL_RX_DATA, NULL);
+ t4_register_cpl_handler(CPL_FW4_ACK, NULL);
}
#endif
diff --git a/sys/dev/cxgbe/tom/t4_ddp.c b/sys/dev/cxgbe/tom/t4_ddp.c
index f5f08c8..8dad045 100644
--- a/sys/dev/cxgbe/tom/t4_ddp.c
+++ b/sys/dev/cxgbe/tom/t4_ddp.c
@@ -392,6 +392,8 @@ handle_ddp_data(struct toepcb *toep, __be32 ddp_report, __be32 rcv_nxt, int len)
discourage_ddp(toep);
/* receive buffer autosize */
+ MPASS(toep->vnet == so->so_vnet);
+ CURVNET_SET(toep->vnet);
if (sb->sb_flags & SB_AUTOSIZE &&
V_tcp_do_autorcvbuf &&
sb->sb_hiwat < V_tcp_autorcvbuf_max &&
@@ -405,6 +407,7 @@ handle_ddp_data(struct toepcb *toep, __be32 ddp_report, __be32 rcv_nxt, int len)
else
toep->rx_credits += newsize - hiwat;
}
+ CURVNET_RESTORE();
KASSERT(toep->sb_cc >= sb->sb_cc,
("%s: sb %p has more data (%d) than last time (%d).",
diff --git a/sys/dev/cxgbe/tom/t4_listen.c b/sys/dev/cxgbe/tom/t4_listen.c
index 2c2b427..844168d 100644
--- a/sys/dev/cxgbe/tom/t4_listen.c
+++ b/sys/dev/cxgbe/tom/t4_listen.c
@@ -220,6 +220,7 @@ alloc_lctx(struct adapter *sc, struct inpcb *inp, struct vi_info *vi)
TAILQ_INIT(&lctx->synq);
lctx->inp = inp;
+ lctx->vnet = inp->inp_socket->so_vnet;
in_pcbref(inp);
return (lctx);
@@ -825,14 +826,16 @@ done_with_synqe(struct adapter *sc, struct synq_entry *synqe)
struct inpcb *inp = lctx->inp;
struct vi_info *vi = synqe->syn->m_pkthdr.rcvif->if_softc;
struct l2t_entry *e = &sc->l2t->l2tab[synqe->l2e_idx];
+ int ntids;
INP_WLOCK_ASSERT(inp);
+ ntids = inp->inp_vflag & INP_IPV6 ? 2 : 1;
TAILQ_REMOVE(&lctx->synq, synqe, link);
inp = release_lctx(sc, lctx);
if (inp)
INP_WUNLOCK(inp);
- remove_tid(sc, synqe->tid);
+ remove_tid(sc, synqe->tid, ntids);
release_tid(sc, synqe->tid, &sc->sge.ctrlq[vi->pi->port_id]);
t4_l2t_release(e);
release_synqe(synqe); /* removed from synq list */
@@ -1235,7 +1238,7 @@ do_pass_accept_req(struct sge_iq *iq, const struct rss_header *rss,
struct l2t_entry *e = NULL;
int rscale, mtu_idx, rx_credits, rxqid, ulp_mode;
struct synq_entry *synqe = NULL;
- int reject_reason, v;
+ int reject_reason, v, ntids;
uint16_t vid;
#ifdef INVARIANTS
unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl)));
@@ -1253,6 +1256,8 @@ do_pass_accept_req(struct sge_iq *iq, const struct rss_header *rss,
pi = sc->port[G_SYN_INTF(be16toh(cpl->l2info))];
+ CURVNET_SET(lctx->vnet);
+
/*
* Use the MAC index to lookup the associated VI. If this SYN
* didn't match a perfect MAC filter, punt.
@@ -1309,6 +1314,8 @@ found:
*/
if (!ifnet_has_ip6(ifp, &inc.inc6_laddr))
REJECT_PASS_ACCEPT();
+
+ ntids = 2;
} else {
/* Don't offload if the ifcap isn't enabled */
@@ -1321,8 +1328,17 @@ found:
*/
if (!ifnet_has_ip(ifp, inc.inc_laddr))
REJECT_PASS_ACCEPT();
+
+ ntids = 1;
}
+ /*
+ * Don't offload if the ifnet that the SYN came in on is not in the same
+ * vnet as the listening socket.
+ */
+ if (lctx->vnet != ifp->if_vnet)
+ REJECT_PASS_ACCEPT();
+
e = get_l2te_for_nexthop(pi, ifp, &inc);
if (e == NULL)
REJECT_PASS_ACCEPT();
@@ -1362,7 +1378,6 @@ found:
REJECT_PASS_ACCEPT();
}
so = inp->inp_socket;
- CURVNET_SET(so->so_vnet);
mtu_idx = find_best_mtu_idx(sc, &inc, be16toh(cpl->tcpopt.mss));
rscale = cpl->tcpopt.wsf && V_tcp_do_rfc1323 ? select_rcv_wscale() : 0;
@@ -1398,7 +1413,7 @@ found:
synqe->rcv_bufsize = rx_credits;
atomic_store_rel_ptr(&synqe->wr, (uintptr_t)wr);
- insert_tid(sc, tid, synqe);
+ insert_tid(sc, tid, synqe, ntids);
TAILQ_INSERT_TAIL(&lctx->synq, synqe, link);
hold_synqe(synqe); /* hold for the duration it's in the synq */
hold_lctx(lctx); /* A synqe on the list has a ref on its lctx */
@@ -1409,7 +1424,6 @@ found:
*/
toe_syncache_add(&inc, &to, &th, inp, tod, synqe);
INP_UNLOCK_ASSERT(inp); /* ok to assert, we have a ref on the inp */
- CURVNET_RESTORE();
/*
* If we replied during syncache_add (synqe->wr has been consumed),
@@ -1427,7 +1441,7 @@ found:
if (m)
m->m_pkthdr.rcvif = hw_ifp;
- remove_tid(sc, synqe->tid);
+ remove_tid(sc, synqe->tid, ntids);
free(wr, M_CXGBE);
/* Yank the synqe out of the lctx synq. */
@@ -1459,15 +1473,18 @@ found:
if (!(synqe->flags & TPF_SYNQE_EXPANDED))
send_reset_synqe(tod, synqe);
INP_WUNLOCK(inp);
+ CURVNET_RESTORE();
release_synqe(synqe); /* extra hold */
return (__LINE__);
}
INP_WUNLOCK(inp);
+ CURVNET_RESTORE();
release_synqe(synqe); /* extra hold */
return (0);
reject:
+ CURVNET_RESTORE();
CTR4(KTR_CXGBE, "%s: stid %u, tid %u, REJECT (%d)", __func__, stid, tid,
reject_reason);
@@ -1539,6 +1556,7 @@ do_pass_establish(struct sge_iq *iq, const struct rss_header *rss,
KASSERT(synqe->flags & TPF_SYNQE,
("%s: tid %u (ctx %p) not a synqe", __func__, tid, synqe));
+ CURVNET_SET(lctx->vnet);
INP_INFO_RLOCK(&V_tcbinfo); /* for syncache_expand */
INP_WLOCK(inp);
@@ -1556,6 +1574,7 @@ do_pass_establish(struct sge_iq *iq, const struct rss_header *rss,
INP_WUNLOCK(inp);
INP_INFO_RUNLOCK(&V_tcbinfo);
+ CURVNET_RESTORE();
return (0);
}
@@ -1581,6 +1600,7 @@ reset:
send_reset_synqe(TOEDEV(ifp), synqe);
INP_WUNLOCK(inp);
INP_INFO_RUNLOCK(&V_tcbinfo);
+ CURVNET_RESTORE();
return (0);
}
toep->tid = tid;
@@ -1617,6 +1637,8 @@ reset:
/* New connection inpcb is already locked by syncache_expand(). */
new_inp = sotoinpcb(so);
INP_WLOCK_ASSERT(new_inp);
+ MPASS(so->so_vnet == lctx->vnet);
+ toep->vnet = lctx->vnet;
/*
* This is for the unlikely case where the syncache entry that we added
@@ -1640,6 +1662,7 @@ reset:
if (inp != NULL)
INP_WUNLOCK(inp);
INP_INFO_RUNLOCK(&V_tcbinfo);
+ CURVNET_RESTORE();
release_synqe(synqe);
return (0);
@@ -1654,4 +1677,14 @@ t4_init_listen_cpl_handlers(void)
t4_register_cpl_handler(CPL_PASS_ACCEPT_REQ, do_pass_accept_req);
t4_register_cpl_handler(CPL_PASS_ESTABLISH, do_pass_establish);
}
+
+void
+t4_uninit_listen_cpl_handlers(void)
+{
+
+ t4_register_cpl_handler(CPL_PASS_OPEN_RPL, NULL);
+ t4_register_cpl_handler(CPL_CLOSE_LISTSRV_RPL, NULL);
+ t4_register_cpl_handler(CPL_PASS_ACCEPT_REQ, NULL);
+ t4_register_cpl_handler(CPL_PASS_ESTABLISH, NULL);
+}
#endif
diff --git a/sys/dev/cxgbe/tom/t4_tom.c b/sys/dev/cxgbe/tom/t4_tom.c
index f62fcac..4c58cf9 100644
--- a/sys/dev/cxgbe/tom/t4_tom.c
+++ b/sys/dev/cxgbe/tom/t4_tom.c
@@ -321,7 +321,7 @@ release_offload_resources(struct toepcb *toep)
t4_l2t_release(toep->l2te);
if (tid >= 0) {
- remove_tid(sc, tid);
+ remove_tid(sc, tid, toep->ce ? 2 : 1);
release_tid(sc, tid, toep->ctrlq);
}
@@ -432,12 +432,12 @@ final_cpl_received(struct toepcb *toep)
}
void
-insert_tid(struct adapter *sc, int tid, void *ctx)
+insert_tid(struct adapter *sc, int tid, void *ctx, int ntids)
{
struct tid_info *t = &sc->tids;
t->tid_tab[tid] = ctx;
- atomic_add_int(&t->tids_in_use, 1);
+ atomic_add_int(&t->tids_in_use, ntids);
}
void *
@@ -457,12 +457,12 @@ update_tid(struct adapter *sc, int tid, void *ctx)
}
void
-remove_tid(struct adapter *sc, int tid)
+remove_tid(struct adapter *sc, int tid, int ntids)
{
struct tid_info *t = &sc->tids;
t->tid_tab[tid] = NULL;
- atomic_subtract_int(&t->tids_in_use, 1);
+ atomic_subtract_int(&t->tids_in_use, ntids);
}
void
@@ -811,74 +811,96 @@ update_clip_table(struct adapter *sc, struct tom_data *td)
struct in6_addr *lip, tlip;
struct clip_head stale;
struct clip_entry *ce, *ce_temp;
- int rc, gen = atomic_load_acq_int(&in6_ifaddr_gen);
+ struct vi_info *vi;
+ int rc, gen, i, j;
+ uintptr_t last_vnet;
ASSERT_SYNCHRONIZED_OP(sc);
IN6_IFADDR_RLOCK();
mtx_lock(&td->clip_table_lock);
+ gen = atomic_load_acq_int(&in6_ifaddr_gen);
if (gen == td->clip_gen)
goto done;
TAILQ_INIT(&stale);
TAILQ_CONCAT(&stale, &td->clip_table, link);
- TAILQ_FOREACH(ia, &V_in6_ifaddrhead, ia_link) {
- lip = &ia->ia_addr.sin6_addr;
+ /*
+ * last_vnet optimizes the common cases where all if_vnet = NULL (no
+ * VIMAGE) or all if_vnet = vnet0.
+ */
+ last_vnet = (uintptr_t)(-1);
+ for_each_port(sc, i)
+ for_each_vi(sc->port[i], j, vi) {
+ if (last_vnet == (uintptr_t)vi->ifp->if_vnet)
+ continue;
- KASSERT(!IN6_IS_ADDR_MULTICAST(lip),
- ("%s: mcast address in in6_ifaddr list", __func__));
+ /* XXX: races with if_vmove */
+ CURVNET_SET(vi->ifp->if_vnet);
+ TAILQ_FOREACH(ia, &V_in6_ifaddrhead, ia_link) {
+ lip = &ia->ia_addr.sin6_addr;
+
+ KASSERT(!IN6_IS_ADDR_MULTICAST(lip),
+ ("%s: mcast address in in6_ifaddr list", __func__));
+
+ if (IN6_IS_ADDR_LOOPBACK(lip))
+ continue;
+ if (IN6_IS_SCOPE_EMBED(lip)) {
+ /* Remove the embedded scope */
+ tlip = *lip;
+ lip = &tlip;
+ in6_clearscope(lip);
+ }
+ /*
+ * XXX: how to weed out the link local address for the
+ * loopback interface? It's fe80::1 usually (always?).
+ */
+
+ /*
+ * If it's in the main list then we already know it's
+ * not stale.
+ */
+ TAILQ_FOREACH(ce, &td->clip_table, link) {
+ if (IN6_ARE_ADDR_EQUAL(&ce->lip, lip))
+ goto next;
+ }
- if (IN6_IS_ADDR_LOOPBACK(lip))
- continue;
- if (IN6_IS_SCOPE_EMBED(lip)) {
- /* Remove the embedded scope */
- tlip = *lip;
- lip = &tlip;
- in6_clearscope(lip);
- }
- /*
- * XXX: how to weed out the link local address for the loopback
- * interface? It's fe80::1 usually (always?).
- */
-
- /*
- * If it's in the main list then we already know it's not stale.
- */
- TAILQ_FOREACH(ce, &td->clip_table, link) {
- if (IN6_ARE_ADDR_EQUAL(&ce->lip, lip))
- goto next;
- }
+ /*
+ * If it's in the stale list we should move it to the
+ * main list.
+ */
+ TAILQ_FOREACH(ce, &stale, link) {
+ if (IN6_ARE_ADDR_EQUAL(&ce->lip, lip)) {
+ TAILQ_REMOVE(&stale, ce, link);
+ TAILQ_INSERT_TAIL(&td->clip_table, ce,
+ link);
+ goto next;
+ }
+ }
- /*
- * If it's in the stale list we should move it to the main list.
- */
- TAILQ_FOREACH(ce, &stale, link) {
- if (IN6_ARE_ADDR_EQUAL(&ce->lip, lip)) {
- TAILQ_REMOVE(&stale, ce, link);
+ /* A new IP6 address; add it to the CLIP table */
+ ce = malloc(sizeof(*ce), M_CXGBE, M_NOWAIT);
+ memcpy(&ce->lip, lip, sizeof(ce->lip));
+ ce->refcount = 0;
+ rc = add_lip(sc, lip);
+ if (rc == 0)
TAILQ_INSERT_TAIL(&td->clip_table, ce, link);
- goto next;
- }
- }
+ else {
+ char ip[INET6_ADDRSTRLEN];
- /* A new IP6 address; add it to the CLIP table */
- ce = malloc(sizeof(*ce), M_CXGBE, M_NOWAIT);
- memcpy(&ce->lip, lip, sizeof(ce->lip));
- ce->refcount = 0;
- rc = add_lip(sc, lip);
- if (rc == 0)
- TAILQ_INSERT_TAIL(&td->clip_table, ce, link);
- else {
- char ip[INET6_ADDRSTRLEN];
-
- inet_ntop(AF_INET6, &ce->lip, &ip[0], sizeof(ip));
- log(LOG_ERR, "%s: could not add %s (%d)\n",
- __func__, ip, rc);
- free(ce, M_CXGBE);
- }
+ inet_ntop(AF_INET6, &ce->lip, &ip[0],
+ sizeof(ip));
+ log(LOG_ERR, "%s: could not add %s (%d)\n",
+ __func__, ip, rc);
+ free(ce, M_CXGBE);
+ }
next:
- continue;
+ continue;
+ }
+ CURVNET_RESTORE();
+ last_vnet = (uintptr_t)vi->ifp->if_vnet;
}
/*
@@ -1203,6 +1225,10 @@ t4_tom_mod_unload(void)
t4_ddp_mod_unload();
+ t4_uninit_connect_cpl_handlers();
+ t4_uninit_listen_cpl_handlers();
+ t4_uninit_cpl_io_handlers();
+
return (0);
}
#endif /* TCP_OFFLOAD */
diff --git a/sys/dev/cxgbe/tom/t4_tom.h b/sys/dev/cxgbe/tom/t4_tom.h
index a1d723d..f5ca0cf 100644
--- a/sys/dev/cxgbe/tom/t4_tom.h
+++ b/sys/dev/cxgbe/tom/t4_tom.h
@@ -123,6 +123,7 @@ struct toepcb {
u_int flags; /* miscellaneous flags */
struct tom_data *td;
struct inpcb *inp; /* backpointer to host stack's PCB */
+ struct vnet *vnet;
struct vi_info *vi; /* virtual interface */
struct sge_wrq *ofld_txq;
struct sge_ofld_rxq *ofld_rxq;
@@ -199,6 +200,7 @@ struct listen_ctx {
struct stid_region stid_region;
int flags;
struct inpcb *inp; /* listening socket's inp */
+ struct vnet *vnet;
struct sge_wrq *ctrlq;
struct sge_ofld_rxq *ofld_rxq;
struct clip_entry *ce;
@@ -278,10 +280,10 @@ void free_toepcb(struct toepcb *);
void offload_socket(struct socket *, struct toepcb *);
void undo_offload_socket(struct socket *);
void final_cpl_received(struct toepcb *);
-void insert_tid(struct adapter *, int, void *);
+void insert_tid(struct adapter *, int, void *, int);
void *lookup_tid(struct adapter *, int);
void update_tid(struct adapter *, int, void *);
-void remove_tid(struct adapter *, int);
+void remove_tid(struct adapter *, int, int);
void release_tid(struct adapter *, int, struct sge_wrq *);
int find_best_mtu_idx(struct adapter *, struct in_conninfo *, int);
u_long select_rcv_wnd(struct socket *);
@@ -296,12 +298,14 @@ void release_lip(struct tom_data *, struct clip_entry *);
/* t4_connect.c */
void t4_init_connect_cpl_handlers(void);
+void t4_uninit_connect_cpl_handlers(void);
int t4_connect(struct toedev *, struct socket *, struct rtentry *,
struct sockaddr *);
void act_open_failure_cleanup(struct adapter *, u_int, u_int);
/* t4_listen.c */
void t4_init_listen_cpl_handlers(void);
+void t4_uninit_listen_cpl_handlers(void);
int t4_listen_start(struct toedev *, struct tcpcb *);
int t4_listen_stop(struct toedev *, struct tcpcb *);
void t4_syncache_added(struct toedev *, void *);
diff --git a/sys/dev/hpt27xx/hpt27xx_os_bsd.c b/sys/dev/hpt27xx/hpt27xx_os_bsd.c
index 7970e35..12e0efa 100644
--- a/sys/dev/hpt27xx/hpt27xx_os_bsd.c
+++ b/sys/dev/hpt27xx/hpt27xx_os_bsd.c
@@ -120,13 +120,13 @@ void *os_map_pci_bar(
if (base & 1) {
hba->pcibar[index].type = SYS_RES_IOPORT;
- hba->pcibar[index].res = bus_alloc_resource(hba->pcidev,
- hba->pcibar[index].type, &hba->pcibar[index].rid, 0, ~0, length, RF_ACTIVE);
+ hba->pcibar[index].res = bus_alloc_resource_any(hba->pcidev,
+ hba->pcibar[index].type, &hba->pcibar[index].rid, RF_ACTIVE);
hba->pcibar[index].base = (void *)(unsigned long)(base & ~0x1);
} else {
hba->pcibar[index].type = SYS_RES_MEMORY;
- hba->pcibar[index].res = bus_alloc_resource(hba->pcidev,
- hba->pcibar[index].type, &hba->pcibar[index].rid, 0, ~0, length, RF_ACTIVE);
+ hba->pcibar[index].res = bus_alloc_resource_any(hba->pcidev,
+ hba->pcibar[index].type, &hba->pcibar[index].rid, RF_ACTIVE);
hba->pcibar[index].base = (char *)rman_get_virtual(hba->pcibar[index].res) + offset;
}
diff --git a/sys/dev/hptmv/entry.c b/sys/dev/hptmv/entry.c
index 37ff6c2..f4a3dd9 100644
--- a/sys/dev/hptmv/entry.c
+++ b/sys/dev/hptmv/entry.c
@@ -1356,8 +1356,8 @@ init_adapter(IAL_ADAPTER_T *pAdapter)
/* also map EPROM address */
rid = 0x10;
- if (!(pAdapter->mem_res = bus_alloc_resource(pAdapter->hpt_dev, SYS_RES_MEMORY, &rid,
- 0, ~0, MV_SATA_PCI_BAR0_SPACE_SIZE+0x40000, RF_ACTIVE))
+ if (!(pAdapter->mem_res = bus_alloc_resource_any(pAdapter->hpt_dev,
+ SYS_RES_MEMORY, &rid, RF_ACTIVE))
||
!(pMvSataAdapter->adapterIoBaseAddress = rman_get_virtual(pAdapter->mem_res)))
{
diff --git a/sys/dev/hptnr/hptnr_os_bsd.c b/sys/dev/hptnr/hptnr_os_bsd.c
index b019a7c..36f0c3c 100644
--- a/sys/dev/hptnr/hptnr_os_bsd.c
+++ b/sys/dev/hptnr/hptnr_os_bsd.c
@@ -106,13 +106,13 @@ void *os_map_pci_bar(
if (base & 1) {
hba->pcibar[index].type = SYS_RES_IOPORT;
- hba->pcibar[index].res = bus_alloc_resource(hba->pcidev,
- hba->pcibar[index].type, &hba->pcibar[index].rid, 0, ~0, length, RF_ACTIVE);
+ hba->pcibar[index].res = bus_alloc_resource_any(hba->pcidev,
+ hba->pcibar[index].type, &hba->pcibar[index].rid, RF_ACTIVE);
hba->pcibar[index].base = (void *)(unsigned long)(base & ~0x1);
} else {
hba->pcibar[index].type = SYS_RES_MEMORY;
- hba->pcibar[index].res = bus_alloc_resource(hba->pcidev,
- hba->pcibar[index].type, &hba->pcibar[index].rid, 0, ~0, length, RF_ACTIVE);
+ hba->pcibar[index].res = bus_alloc_resource_any(hba->pcidev,
+ hba->pcibar[index].type, &hba->pcibar[index].rid, RF_ACTIVE);
hba->pcibar[index].base = (char *)rman_get_virtual(hba->pcibar[index].res) + offset;
}
diff --git a/sys/dev/hptrr/hptrr_os_bsd.c b/sys/dev/hptrr/hptrr_os_bsd.c
index 8600a8c..6cd01e5 100644
--- a/sys/dev/hptrr/hptrr_os_bsd.c
+++ b/sys/dev/hptrr/hptrr_os_bsd.c
@@ -98,8 +98,8 @@ void *os_map_pci_bar(
else
hba->pcibar[index].type = SYS_RES_MEMORY;
- hba->pcibar[index].res = bus_alloc_resource(hba->pcidev,
- hba->pcibar[index].type, &hba->pcibar[index].rid, 0, ~0, length, RF_ACTIVE);
+ hba->pcibar[index].res = bus_alloc_resource_any(hba->pcidev,
+ hba->pcibar[index].type, &hba->pcibar[index].rid, RF_ACTIVE);
hba->pcibar[index].base = (char *)rman_get_virtual(hba->pcibar[index].res) + offset;
return hba->pcibar[index].base;
diff --git a/sys/dev/iir/iir.c b/sys/dev/iir/iir.c
index bf5cec5..e74698e 100644
--- a/sys/dev/iir/iir.c
+++ b/sys/dev/iir/iir.c
@@ -744,9 +744,9 @@ gdt_next(struct gdt_softc *gdt)
ccb->ccb_h.flags));
csio = &ccb->csio;
ccbh = &ccb->ccb_h;
- cmd = csio->cdb_io.cdb_bytes[0];
- /* Max CDB length is 12 bytes */
- if (csio->cdb_len > 12) {
+ cmd = scsiio_cdb_ptr(csio)[0];
+ /* Max CDB length is 12 bytes, can't be phys addr */
+ if (csio->cdb_len > 12 || (ccbh->flags & CAM_CDB_PHYS)) {
ccbh->status = CAM_REQ_INVALID;
--gdt_stat.io_count_act;
xpt_done(ccb);
diff --git a/sys/dev/isci/isci_controller.c b/sys/dev/isci/isci_controller.c
index b0f4285..d3ec045 100644
--- a/sys/dev/isci/isci_controller.c
+++ b/sys/dev/isci/isci_controller.c
@@ -740,6 +740,11 @@ void isci_action(struct cam_sim *sim, union ccb *ccb)
}
break;
case XPT_SCSI_IO:
+ if (ccb->ccb_h.flags & CAM_CDB_PHYS) {
+ ccb->ccb_h.status = CAM_REQ_INVALID;
+ xpt_done(ccb);
+ break;
+ }
isci_io_request_execute_scsi_io(ccb, controller);
break;
#if __FreeBSD_version >= 900026
@@ -802,6 +807,7 @@ isci_controller_release_queued_ccbs(struct ISCI_CONTROLLER *controller)
{
struct ISCI_REMOTE_DEVICE *dev;
struct ccb_hdr *ccb_h;
+ uint8_t *ptr;
int dev_idx;
KASSERT(mtx_owned(&controller->lock), ("controller lock not owned"));
@@ -821,8 +827,8 @@ isci_controller_release_queued_ccbs(struct ISCI_CONTROLLER *controller)
if (ccb_h == NULL)
continue;
- isci_log_message(1, "ISCI", "release %p %x\n", ccb_h,
- ((union ccb *)ccb_h)->csio.cdb_io.cdb_bytes[0]);
+ ptr = scsiio_cdb_ptr(&((union ccb *)ccb_h)->csio);
+ isci_log_message(1, "ISCI", "release %p %x\n", ccb_h, *ptr);
dev->queued_ccb_in_progress = (union ccb *)ccb_h;
isci_io_request_execute_scsi_io(
diff --git a/sys/dev/isci/isci_io_request.c b/sys/dev/isci/isci_io_request.c
index 4151900..066e0d9 100644
--- a/sys/dev/isci/isci_io_request.c
+++ b/sys/dev/isci/isci_io_request.c
@@ -86,6 +86,7 @@ isci_io_request_complete(SCI_CONTROLLER_HANDLE_T scif_controller,
struct ISCI_REMOTE_DEVICE *isci_remote_device;
union ccb *ccb;
BOOL complete_ccb;
+ struct ccb_scsiio *csio;
complete_ccb = TRUE;
isci_controller = (struct ISCI_CONTROLLER *) sci_object_get_association(scif_controller);
@@ -93,7 +94,7 @@ isci_io_request_complete(SCI_CONTROLLER_HANDLE_T scif_controller,
(struct ISCI_REMOTE_DEVICE *) sci_object_get_association(remote_device);
ccb = isci_request->ccb;
-
+ csio = &ccb->csio;
ccb->ccb_h.status &= ~CAM_STATUS_MASK;
switch (completion_status) {
@@ -124,7 +125,6 @@ isci_io_request_complete(SCI_CONTROLLER_HANDLE_T scif_controller,
SCI_SSP_RESPONSE_IU_T * response_buffer;
uint32_t sense_length;
int error_code, sense_key, asc, ascq;
- struct ccb_scsiio *csio = &ccb->csio;
response_buffer = (SCI_SSP_RESPONSE_IU_T *)
scif_io_request_get_response_iu_address(
@@ -146,7 +146,7 @@ isci_io_request_complete(SCI_CONTROLLER_HANDLE_T scif_controller,
isci_log_message(1, "ISCI",
"isci: bus=%x target=%x lun=%x cdb[0]=%x status=%x key=%x asc=%x ascq=%x\n",
ccb->ccb_h.path_id, ccb->ccb_h.target_id,
- ccb->ccb_h.target_lun, csio->cdb_io.cdb_bytes[0],
+ ccb->ccb_h.target_lun, scsiio_cdb_ptr(csio),
csio->scsi_status, sense_key, asc, ascq);
break;
}
@@ -157,7 +157,7 @@ isci_io_request_complete(SCI_CONTROLLER_HANDLE_T scif_controller,
isci_log_message(0, "ISCI",
"isci: bus=%x target=%x lun=%x cdb[0]=%x remote device reset required\n",
ccb->ccb_h.path_id, ccb->ccb_h.target_id,
- ccb->ccb_h.target_lun, ccb->csio.cdb_io.cdb_bytes[0]);
+ ccb->ccb_h.target_lun, scsiio_cdb_ptr(csio));
break;
case SCI_IO_FAILURE_TERMINATED:
@@ -165,7 +165,7 @@ isci_io_request_complete(SCI_CONTROLLER_HANDLE_T scif_controller,
isci_log_message(0, "ISCI",
"isci: bus=%x target=%x lun=%x cdb[0]=%x terminated\n",
ccb->ccb_h.path_id, ccb->ccb_h.target_id,
- ccb->ccb_h.target_lun, ccb->csio.cdb_io.cdb_bytes[0]);
+ ccb->ccb_h.target_lun, scsiio_cdb_ptr(csio));
break;
case SCI_IO_FAILURE_INVALID_STATE:
@@ -208,7 +208,7 @@ isci_io_request_complete(SCI_CONTROLLER_HANDLE_T scif_controller,
isci_log_message(1, "ISCI",
"isci: bus=%x target=%x lun=%x cdb[0]=%x completion status=%x\n",
ccb->ccb_h.path_id, ccb->ccb_h.target_id,
- ccb->ccb_h.target_lun, ccb->csio.cdb_io.cdb_bytes[0],
+ ccb->ccb_h.target_lun, scsiio_cdb_ptr(csio),
completion_status);
ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
break;
@@ -285,13 +285,13 @@ isci_io_request_complete(SCI_CONTROLLER_HANDLE_T scif_controller,
* get a ready notification for this device.
*/
isci_log_message(1, "ISCI", "already queued %p %x\n",
- ccb, ccb->csio.cdb_io.cdb_bytes[0]);
+ ccb, scsiio_cdb_ptr(csio));
isci_remote_device->queued_ccb_in_progress = NULL;
} else {
isci_log_message(1, "ISCI", "queue %p %x\n", ccb,
- ccb->csio.cdb_io.cdb_bytes[0]);
+ scsiio_cdb_ptr(csio));
ccb->ccb_h.status |= CAM_SIM_QUEUED;
TAILQ_INSERT_TAIL(&isci_remote_device->queued_ccbs,
@@ -373,7 +373,7 @@ scif_cb_io_request_get_cdb_address(void * scif_user_io_request)
struct ISCI_IO_REQUEST *isci_request =
(struct ISCI_IO_REQUEST *)scif_user_io_request;
- return (isci_request->ccb->csio.cdb_io.cdb_bytes);
+ return (scsiio_cdb_ptr(&isci_request->ccb->csio));
}
/**
diff --git a/sys/dev/isci/isci_task_request.c b/sys/dev/isci/isci_task_request.c
index 6c8be45..2ed0afe 100644
--- a/sys/dev/isci/isci_task_request.c
+++ b/sys/dev/isci/isci_task_request.c
@@ -210,8 +210,9 @@ isci_task_request_complete(SCI_CONTROLLER_HANDLE_T scif_controller,
retry_task = FALSE;
isci_log_message(0, "ISCI",
"task timeout - not retrying\n");
- scif_cb_domain_device_removed(isci_controller,
- isci_remote_device->domain, isci_remote_device);
+ scif_cb_domain_device_removed(scif_controller,
+ isci_remote_device->domain->sci_object,
+ remote_device);
} else {
retry_task = TRUE;
isci_log_message(0, "ISCI",
diff --git a/sys/dev/ixgbe/if_ix.c b/sys/dev/ixgbe/if_ix.c
index 427f545..f059b2b 100644
--- a/sys/dev/ixgbe/if_ix.c
+++ b/sys/dev/ixgbe/if_ix.c
@@ -3800,6 +3800,7 @@ ixgbe_handle_msf(void *context, int pending)
/* Adjust media types shown in ifconfig */
ifmedia_removeall(&adapter->media);
ixgbe_add_media_types(adapter);
+ ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
return;
}
diff --git a/sys/dev/jme/if_jme.c b/sys/dev/jme/if_jme.c
index 9bdb229..781d337 100644
--- a/sys/dev/jme/if_jme.c
+++ b/sys/dev/jme/if_jme.c
@@ -803,7 +803,7 @@ jme_attach(device_t dev)
}
/* Create coalescing sysctl node. */
jme_sysctl_node(sc);
- if ((error = jme_dma_alloc(sc) != 0))
+ if ((error = jme_dma_alloc(sc)) != 0)
goto fail;
ifp = sc->jme_ifp = if_alloc(IFT_ETHER);
diff --git a/sys/dev/kbd/kbd.c b/sys/dev/kbd/kbd.c
index ea84d46..3f076b0 100644
--- a/sys/dev/kbd/kbd.c
+++ b/sys/dev/kbd/kbd.c
@@ -884,7 +884,7 @@ genkbd_commonioctl(keyboard_t *kbd, u_long cmd, caddr_t arg)
omapp->key[i].spcl = mapp->key[i].spcl;
omapp->key[i].flgs = mapp->key[i].flgs;
}
- return (0);
+ break;
case PIO_KEYMAP: /* set keyboard translation table */
case OPIO_KEYMAP: /* set keyboard translation table (compat) */
#ifndef KBD_DISABLE_KEYMAP_LOAD
diff --git a/sys/dev/mlx5/mlx5_en/mlx5_en_main.c b/sys/dev/mlx5/mlx5_en/mlx5_en_main.c
index e84e05a..bb6ba93 100644
--- a/sys/dev/mlx5/mlx5_en/mlx5_en_main.c
+++ b/sys/dev/mlx5/mlx5_en/mlx5_en_main.c
@@ -306,6 +306,12 @@ mlx5e_update_carrier_work(struct work_struct *work)
PRIV_UNLOCK(priv);
}
+/*
+ * This function reads the physical port counters from the firmware
+ * using a pre-defined layout defined by various MLX5E_PPORT_XXX()
+ * macros. The output is converted from big-endian 64-bit values into
+ * host endian ones and stored in the "priv->stats.pport" structure.
+ */
static void
mlx5e_update_pport_counters(struct mlx5e_priv *priv)
{
@@ -314,25 +320,32 @@ mlx5e_update_pport_counters(struct mlx5e_priv *priv)
struct mlx5e_port_stats_debug *s_debug = &priv->stats.port_stats_debug;
u32 *in;
u32 *out;
- u64 *ptr;
+ const u64 *ptr;
unsigned sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
unsigned x;
unsigned y;
+ /* allocate firmware request structures */
in = mlx5_vzalloc(sz);
out = mlx5_vzalloc(sz);
if (in == NULL || out == NULL)
goto free_out;
- ptr = (uint64_t *)MLX5_ADDR_OF(ppcnt_reg, out, counter_set);
+ /*
+ * Get pointer to the 64-bit counter set which is located at a
+ * fixed offset in the output firmware request structure:
+ */
+ ptr = (const uint64_t *)MLX5_ADDR_OF(ppcnt_reg, out, counter_set);
MLX5_SET(ppcnt_reg, in, local_port, 1);
+ /* read IEEE802_3 counter group using predefined counter layout */
MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
for (x = y = 0; x != MLX5E_PPORT_IEEE802_3_STATS_NUM; x++, y++)
s->arg[y] = be64toh(ptr[x]);
+ /* read RFC2819 counter group using predefined counter layout */
MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
for (x = 0; x != MLX5E_PPORT_RFC2819_STATS_NUM; x++, y++)
@@ -341,20 +354,29 @@ mlx5e_update_pport_counters(struct mlx5e_priv *priv)
MLX5E_PPORT_RFC2819_STATS_DEBUG_NUM; x++, y++)
s_debug->arg[y] = be64toh(ptr[x]);
+ /* read RFC2863 counter group using predefined counter layout */
MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
for (x = 0; x != MLX5E_PPORT_RFC2863_STATS_DEBUG_NUM; x++, y++)
s_debug->arg[y] = be64toh(ptr[x]);
+ /* read physical layer stats counter group using predefined counter layout */
MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
for (x = 0; x != MLX5E_PPORT_PHYSICAL_LAYER_STATS_DEBUG_NUM; x++, y++)
s_debug->arg[y] = be64toh(ptr[x]);
free_out:
+ /* free firmware request structures */
kvfree(in);
kvfree(out);
}
+/*
+ * This function is called regularly to collect all statistics
+ * counters from the firmware. The values can be viewed through the
+ * sysctl interface. Execution is serialized using the priv's global
+ * configuration lock.
+ */
static void
mlx5e_update_stats_work(struct work_struct *work)
{
diff --git a/sys/dev/mmc/mmc.c b/sys/dev/mmc/mmc.c
index a41d807..3c70a99 100644
--- a/sys/dev/mmc/mmc.c
+++ b/sys/dev/mmc/mmc.c
@@ -401,7 +401,7 @@ mmc_wait_for_req(struct mmc_softc *sc, struct mmc_request *req)
msleep(req, &sc->sc_mtx, 0, "mmcreq", 0);
MMC_UNLOCK(sc);
if (mmc_debug > 2 || (mmc_debug > 0 && req->cmd->error != MMC_ERR_NONE))
- device_printf(sc->dev, "CMD%d RESULT: %d\n",
+ device_printf(sc->dev, "CMD%d RESULT: %d\n",
req->cmd->opcode, req->cmd->error);
return (0);
}
@@ -511,7 +511,7 @@ mmc_idle_cards(struct mmc_softc *sc)
{
device_t dev;
struct mmc_command cmd;
-
+
dev = sc->dev;
mmcbr_set_chip_select(dev, cs_high);
mmcbr_update_ios(dev);
@@ -795,7 +795,7 @@ mmc_test_bus_width(struct mmc_softc *sc)
data.len = 8;
data.flags = MMC_DATA_WRITE;
mmc_wait_for_cmd(sc, &cmd, 0);
-
+
memset(&cmd, 0, sizeof(cmd));
memset(&data, 0, sizeof(data));
cmd.opcode = MMC_BUSTEST_R;
@@ -808,7 +808,7 @@ mmc_test_bus_width(struct mmc_softc *sc)
data.flags = MMC_DATA_READ;
err = mmc_wait_for_cmd(sc, &cmd, 0);
sc->squelched--;
-
+
mmcbr_set_bus_width(sc->dev, bus_width_1);
mmcbr_update_ios(sc->dev);
@@ -832,7 +832,7 @@ mmc_test_bus_width(struct mmc_softc *sc)
data.len = 4;
data.flags = MMC_DATA_WRITE;
mmc_wait_for_cmd(sc, &cmd, 0);
-
+
memset(&cmd, 0, sizeof(cmd));
memset(&data, 0, sizeof(data));
cmd.opcode = MMC_BUSTEST_R;
@@ -1017,7 +1017,7 @@ mmc_decode_csd_sd(uint32_t *raw_csd, struct mmc_csd *csd)
csd->r2w_factor = 1 << mmc_get_bits(raw_csd, 128, 26, 3);
csd->write_bl_len = 1 << mmc_get_bits(raw_csd, 128, 22, 4);
csd->write_bl_partial = mmc_get_bits(raw_csd, 128, 21, 1);
- } else
+ } else
panic("unknown SD CSD version");
}
@@ -1349,9 +1349,9 @@ mmc_discover_cards(struct mmc_softc *sc)
if (ivar->csd.csd_structure > 0)
ivar->high_cap = 1;
ivar->tran_speed = ivar->csd.tran_speed;
- ivar->erase_sector = ivar->csd.erase_sector *
+ ivar->erase_sector = ivar->csd.erase_sector *
ivar->csd.write_bl_len / MMC_SECTOR_SIZE;
-
+
err = mmc_send_status(sc, ivar->rca, &status);
if (err != MMC_ERR_NONE) {
device_printf(sc->dev,
@@ -1446,7 +1446,7 @@ mmc_discover_cards(struct mmc_softc *sc)
mmc_decode_csd_mmc(ivar->raw_csd, &ivar->csd);
ivar->sec_count = ivar->csd.capacity / MMC_SECTOR_SIZE;
ivar->tran_speed = ivar->csd.tran_speed;
- ivar->erase_sector = ivar->csd.erase_sector *
+ ivar->erase_sector = ivar->csd.erase_sector *
ivar->csd.write_bl_len / MMC_SECTOR_SIZE;
err = mmc_send_status(sc, ivar->rca, &status);
@@ -1655,7 +1655,7 @@ mmc_calculate_clock(struct mmc_softc *sc)
int nkid, i, f_max;
device_t *kids;
struct mmc_ivars *ivar;
-
+
f_max = mmcbr_get_f_max(sc->dev);
max_dtr = max_hs_dtr = f_max;
if ((mmcbr_get_caps(sc->dev) & MMC_CAP_HSPEED))
@@ -1770,7 +1770,7 @@ static void
mmc_delayed_attach(void *xsc)
{
struct mmc_softc *sc = xsc;
-
+
mmc_scan(sc);
config_intrhook_disestablish(&sc->config_intrhook);
}
diff --git a/sys/dev/mmc/mmcreg.h b/sys/dev/mmc/mmcreg.h
index f454ddb..080c14e 100644
--- a/sys/dev/mmc/mmcreg.h
+++ b/sys/dev/mmc/mmcreg.h
@@ -85,8 +85,11 @@ struct mmc_command {
#define MMC_RSP_R1B (MMC_RSP_PRESENT | MMC_RSP_CRC | MMC_RSP_OPCODE | MMC_RSP_BUSY)
#define MMC_RSP_R2 (MMC_RSP_PRESENT | MMC_RSP_136 | MMC_RSP_CRC)
#define MMC_RSP_R3 (MMC_RSP_PRESENT)
-#define MMC_RSP_R6 (MMC_RSP_PRESENT | MMC_RSP_CRC)
-#define MMC_RSP_R7 (MMC_RSP_PRESENT | MMC_RSP_CRC)
+#define MMC_RSP_R4 (MMC_RSP_PRESENT)
+#define MMC_RSP_R5 (MMC_RSP_PRESENT | MMC_RSP_CRC | MMC_RSP_OPCODE)
+#define MMC_RSP_R5B (MMC_RSP_PRESENT | MMC_RSP_CRC | MMC_RSP_OPCODE | MMC_RSP_BUSY)
+#define MMC_RSP_R6 (MMC_RSP_PRESENT | MMC_RSP_CRC | MMC_RSP_OPCODE)
+#define MMC_RSP_R7 (MMC_RSP_PRESENT | MMC_RSP_CRC | MMC_RSP_OPCODE)
#define MMC_RSP(x) ((x) & MMC_RSP_MASK)
uint32_t retries;
uint32_t error;
@@ -352,8 +355,8 @@ struct mmc_request {
*/
#define MMC_OCR_VOLTAGE 0x3fffffffU /* Vdd Voltage mask */
#define MMC_OCR_LOW_VOLTAGE (1u << 7) /* Low Voltage Range -- tbd */
+#define MMC_OCR_MIN_VOLTAGE_SHIFT 7
#define MMC_OCR_200_210 (1U << 8) /* Vdd voltage 2.00 ~ 2.10 */
-#define MMC_OCR_MIN_VOLTAGE_SHIFT 8
#define MMC_OCR_210_220 (1U << 9) /* Vdd voltage 2.10 ~ 2.20 */
#define MMC_OCR_220_230 (1U << 10) /* Vdd voltage 2.20 ~ 2.30 */
#define MMC_OCR_230_240 (1U << 11) /* Vdd voltage 2.30 ~ 2.40 */
diff --git a/sys/dev/mmc/mmcsd.c b/sys/dev/mmc/mmcsd.c
index 72094ed..1931f7b 100644
--- a/sys/dev/mmc/mmcsd.c
+++ b/sys/dev/mmc/mmcsd.c
@@ -160,16 +160,16 @@ mmcsd_attach(device_t dev)
d->d_dump = mmcsd_dump;
d->d_name = "mmcsd";
d->d_drv1 = sc;
- d->d_maxsize = 4*1024*1024; /* Maximum defined SD card AU size. */
d->d_sectorsize = mmc_get_sector_size(dev);
+ d->d_maxsize = mmc_get_max_data(dev) * d->d_sectorsize;
d->d_mediasize = (off_t)mmc_get_media_size(dev) * d->d_sectorsize;
- d->d_stripeoffset = 0;
d->d_stripesize = mmc_get_erase_sector(dev) * d->d_sectorsize;
d->d_unit = device_get_unit(dev);
d->d_flags = DISKFLAG_CANDELETE;
- d->d_delmaxsize = mmc_get_erase_sector(dev) * d->d_sectorsize * 1; /* conservative */
+ d->d_delmaxsize = mmc_get_erase_sector(dev) * d->d_sectorsize;
strlcpy(d->d_ident, mmc_get_card_sn_string(dev), sizeof(d->d_ident));
strlcpy(d->d_descr, mmc_get_card_id_string(dev), sizeof(d->d_descr));
+ d->d_rotation_rate = DISK_RR_NON_ROTATING;
/*
* Display in most natural units. There's no cards < 1MB. The SD
@@ -545,6 +545,8 @@ mmcsd_task(void *arg)
bp->bio_error = EIO;
bp->bio_resid = (end - block) * sz;
bp->bio_flags |= BIO_ERROR;
+ } else {
+ bp->bio_resid = 0;
}
biodone(bp);
}
diff --git a/sys/dev/mpr/mpr_sas.c b/sys/dev/mpr/mpr_sas.c
index d44e502..c9d83d8 100644
--- a/sys/dev/mpr/mpr_sas.c
+++ b/sys/dev/mpr/mpr_sas.c
@@ -1846,8 +1846,12 @@ mprsas_action_scsiio(struct mprsas_softc *sassc, union ccb *ccb)
if (csio->ccb_h.flags & CAM_CDB_POINTER)
bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
- else
+ else {
+ KASSERT(csio->cdb_len <= IOCDBLEN,
+ ("cdb_len %d is greater than IOCDBLEN but CAM_CDB_POINTER is not set",
+ csio->cdb_len));
bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
+ }
req->IoFlags = htole16(csio->cdb_len);
/*
@@ -2429,6 +2433,7 @@ mprsas_scsiio_complete(struct mpr_softc *sc, struct mpr_command *cm)
* driver is being shutdown.
*/
if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
+ (csio->data_ptr != NULL) &&
((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
(sc->mapping_table[target_id].device_info &
MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
diff --git a/sys/dev/mpr/mpr_sas_lsi.c b/sys/dev/mpr/mpr_sas_lsi.c
index 73e2f54..1eb78d1 100644
--- a/sys/dev/mpr/mpr_sas_lsi.c
+++ b/sys/dev/mpr/mpr_sas_lsi.c
@@ -1056,6 +1056,7 @@ out:
mpr_free_command(sc, cm);
else if (error == 0)
error = EWOULDBLOCK;
+ cm->cm_data = NULL;
free(buffer, M_MPR);
return (error);
}
@@ -1196,18 +1197,18 @@ mprsas_SSU_to_SATA_devices(struct mpr_softc *sc)
continue;
}
- ccb = xpt_alloc_ccb_nowait();
- if (ccb == NULL) {
- mpr_dprint(sc, MPR_FAULT, "Unable to alloc CCB to stop "
- "unit.\n");
- return;
- }
-
/*
* The stop_at_shutdown flag will be set if this device is
* a SATA direct-access end device.
*/
if (target->stop_at_shutdown) {
+ ccb = xpt_alloc_ccb_nowait();
+ if (ccb == NULL) {
+ mpr_dprint(sc, MPR_FAULT, "Unable to alloc CCB to stop "
+ "unit.\n");
+ return;
+ }
+
if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
pathid, targetid, CAM_LUN_WILDCARD) !=
CAM_REQ_CMP) {
diff --git a/sys/dev/msk/if_msk.c b/sys/dev/msk/if_msk.c
index 024f937..c464628 100644
--- a/sys/dev/msk/if_msk.c
+++ b/sys/dev/msk/if_msk.c
@@ -1626,7 +1626,7 @@ msk_attach(device_t dev)
callout_init_mtx(&sc_if->msk_tick_ch, &sc_if->msk_softc->msk_mtx, 0);
msk_sysctl_node(sc_if);
- if ((error = msk_txrx_dma_alloc(sc_if) != 0))
+ if ((error = msk_txrx_dma_alloc(sc_if)) != 0)
goto fail;
msk_rx_dma_jalloc(sc_if);
diff --git a/sys/dev/nand/nand_geom.c b/sys/dev/nand/nand_geom.c
index 8786a0a..98373aa 100644
--- a/sys/dev/nand/nand_geom.c
+++ b/sys/dev/nand/nand_geom.c
@@ -394,6 +394,7 @@ create_geom_disk(struct nand_chip *chip)
snprintf(ndisk->d_ident, sizeof(ndisk->d_ident),
"nand: Man:0x%02x Dev:0x%02x", chip->id.man_id, chip->id.dev_id);
+ ndisk->d_rotation_rate = DISK_RR_NON_ROTATING;
disk_create(ndisk, DISK_VERSION);
@@ -415,6 +416,7 @@ create_geom_disk(struct nand_chip *chip)
snprintf(rdisk->d_ident, sizeof(rdisk->d_ident),
"nand_raw: Man:0x%02x Dev:0x%02x", chip->id.man_id,
chip->id.dev_id);
+ disk->d_rotation_rate = DISK_RR_NON_ROTATING;
disk_create(rdisk, DISK_VERSION);
diff --git a/sys/dev/ntb/if_ntb/if_ntb.c b/sys/dev/ntb/if_ntb/if_ntb.c
index 33645c4..47d6dd8 100644
--- a/sys/dev/ntb/if_ntb/if_ntb.c
+++ b/sys/dev/ntb/if_ntb/if_ntb.c
@@ -237,6 +237,11 @@ ntb_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
int error = 0;
switch (command) {
+ case SIOCSIFFLAGS:
+ case SIOCADDMULTI:
+ case SIOCDELMULTI:
+ break;
+
case SIOCSIFMTU:
{
if (ifr->ifr_mtu > sc->mtu - ETHER_HDR_LEN) {
diff --git a/sys/dev/nvd/nvd.c b/sys/dev/nvd/nvd.c
index 989ed92..11e4f58 100644
--- a/sys/dev/nvd/nvd.c
+++ b/sys/dev/nvd/nvd.c
@@ -338,13 +338,11 @@ nvd_new_disk(struct nvme_namespace *ns, void *ctrlr_arg)
*/
nvme_strvis(disk->d_ident, nvme_ns_get_serial_number(ns),
sizeof(disk->d_ident), NVME_SERIAL_NUMBER_LENGTH);
-
nvme_strvis(descr, nvme_ns_get_model_number(ns), sizeof(descr),
NVME_MODEL_NUMBER_LENGTH);
-
-#if __FreeBSD_version >= 900034
strlcpy(disk->d_descr, descr, sizeof(descr));
-#endif
+
+ disk->d_rotation_rate = DISK_RR_NON_ROTATING;
ndisk->ns = ns;
ndisk->disk = disk;
diff --git a/sys/dev/pci/pci.c b/sys/dev/pci/pci.c
index 792469f..26cf526 100644
--- a/sys/dev/pci/pci.c
+++ b/sys/dev/pci/pci.c
@@ -269,12 +269,13 @@ static const struct pci_quirk pci_quirks[] = {
{ 0x43851002, PCI_QUIRK_UNMAP_REG, 0x14, 0 },
/*
- * Atheros AR8161/AR8162/E2200 Ethernet controllers have a bug that
- * MSI interrupt does not assert if PCIM_CMD_INTxDIS bit of the
- * command register is set.
+ * Atheros AR8161/AR8162/E2200/E2400 Ethernet controllers have a
+ * bug that MSI interrupt does not assert if PCIM_CMD_INTxDIS bit
+ * of the command register is set.
*/
{ 0x10911969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 },
{ 0xE0911969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 },
+ { 0xE0A11969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 },
{ 0x10901969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 },
/*
diff --git a/sys/dev/ppbus/vpo.c b/sys/dev/ppbus/vpo.c
index 9c9054f..a4f5226 100644
--- a/sys/dev/ppbus/vpo.c
+++ b/sys/dev/ppbus/vpo.c
@@ -187,17 +187,19 @@ vpo_intr(struct vpo_data *vpo, struct ccb_scsiio *csio)
#ifdef VP0_DEBUG
int i;
#endif
+ uint8_t *ptr;
+ ptr = scsiio_cdb_ptr(csio);
if (vpo->vpo_isplus) {
errno = imm_do_scsi(&vpo->vpo_io, VP0_INITIATOR,
csio->ccb_h.target_id,
- (char *)&csio->cdb_io.cdb_bytes, csio->cdb_len,
+ ptr, csio->cdb_len,
(char *)csio->data_ptr, csio->dxfer_len,
&vpo->vpo_stat, &vpo->vpo_count, &vpo->vpo_error);
} else {
errno = vpoio_do_scsi(&vpo->vpo_io, VP0_INITIATOR,
csio->ccb_h.target_id,
- (char *)&csio->cdb_io.cdb_bytes, csio->cdb_len,
+ ptr, csio->cdb_len,
(char *)csio->data_ptr, csio->dxfer_len,
&vpo->vpo_stat, &vpo->vpo_count, &vpo->vpo_error);
}
@@ -208,7 +210,7 @@ vpo_intr(struct vpo_data *vpo, struct ccb_scsiio *csio)
/* dump of command */
for (i=0; i<csio->cdb_len; i++)
- printf("%x ", ((char *)&csio->cdb_io.cdb_bytes)[i]);
+ printf("%x ", ((char *)ptr)[i]);
printf("\n");
#endif
@@ -307,11 +309,15 @@ vpo_action(struct cam_sim *sim, union ccb *ccb)
csio = &ccb->csio;
+ if (ccb->ccb_h.flags & CAM_CDB_PHYS) {
+ ccb->ccb_h.status = CAM_REQ_INVALID;
+ xpt_done(ccb);
+ break;
+ }
#ifdef VP0_DEBUG
device_printf(vpo->vpo_dev, "XPT_SCSI_IO (0x%x) request\n",
- csio->cdb_io.cdb_bytes[0]);
+ scsiio_cdb_ptr(csio));
#endif
-
vpo_intr(vpo, csio);
xpt_done(ccb);
diff --git a/sys/dev/qlxgbe/ql_def.h b/sys/dev/qlxgbe/ql_def.h
index ecc238c..3ffc94c 100644
--- a/sys/dev/qlxgbe/ql_def.h
+++ b/sys/dev/qlxgbe/ql_def.h
@@ -112,6 +112,16 @@ typedef struct _qla_tx_ring {
uint64_t count;
} qla_tx_ring_t;
+typedef struct _qla_tx_fp {
+ struct mtx tx_mtx;
+ char tx_mtx_name[32];
+ struct buf_ring *tx_br;
+ struct task fp_task;
+ struct taskqueue *fp_taskqueue;
+ void *ha;
+ uint32_t txr_idx;
+} qla_tx_fp_t;
+
/*
* Adapter structure contains the hardware independant information of the
* pci function.
@@ -178,10 +188,9 @@ struct qla_host {
qla_tx_ring_t tx_ring[NUM_TX_RINGS];
bus_dma_tag_t tx_tag;
- struct task tx_task;
- struct taskqueue *tx_tq;
struct callout tx_callout;
- struct mtx tx_lock;
+
+ qla_tx_fp_t tx_fp[MAX_SDS_RINGS];
qla_rx_ring_t rx_ring[MAX_RDS_RINGS];
bus_dma_tag_t rx_tag;
diff --git a/sys/dev/qlxgbe/ql_glbl.h b/sys/dev/qlxgbe/ql_glbl.h
index 8f92b0e..beafb41 100644
--- a/sys/dev/qlxgbe/ql_glbl.h
+++ b/sys/dev/qlxgbe/ql_glbl.h
@@ -39,6 +39,7 @@
*/
extern void ql_mbx_isr(void *arg);
extern void ql_isr(void *arg);
+extern uint32_t ql_rcv_isr(qla_host_t *ha, uint32_t sds_idx, uint32_t count);
/*
* from ql_os.c
@@ -66,7 +67,7 @@ extern void qla_reset_promisc(qla_host_t *ha);
extern int ql_set_allmulti(qla_host_t *ha);
extern void qla_reset_allmulti(qla_host_t *ha);
extern void ql_update_link_state(qla_host_t *ha);
-extern void ql_hw_tx_done(qla_host_t *ha);
+extern void ql_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx);
extern int ql_set_max_mtu(qla_host_t *ha, uint32_t mtu, uint16_t cntxt_id);
extern void ql_hw_stop_rcv(qla_host_t *ha);
extern void ql_get_stats(qla_host_t *ha);
@@ -76,7 +77,7 @@ extern void qla_hw_async_event(qla_host_t *ha);
extern int qla_get_nic_partition(qla_host_t *ha, uint32_t *supports_9kb,
uint32_t *num_rcvq);
-extern int qla_iscsi_pdu(qla_host_t *ha, struct mbuf *mp);
+extern int ql_iscsi_pdu(qla_host_t *ha, struct mbuf *mp);
extern void ql_minidump(qla_host_t *ha);
extern int ql_minidump_init(qla_host_t *ha);
diff --git a/sys/dev/qlxgbe/ql_hw.c b/sys/dev/qlxgbe/ql_hw.c
index 8c40aba..cc4b042a 100644
--- a/sys/dev/qlxgbe/ql_hw.c
+++ b/sys/dev/qlxgbe/ql_hw.c
@@ -51,7 +51,6 @@ static void qla_del_rcv_cntxt(qla_host_t *ha);
static int qla_init_rcv_cntxt(qla_host_t *ha);
static void qla_del_xmt_cntxt(qla_host_t *ha);
static int qla_init_xmt_cntxt(qla_host_t *ha);
-static void qla_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx);
static int qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause);
static int qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx,
@@ -2047,7 +2046,7 @@ ql_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
ha->hw.iscsi_pkt_count++;
if (hw->tx_cntxt[txr_idx].txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) {
- qla_hw_tx_done_locked(ha, txr_idx);
+ ql_hw_tx_done_locked(ha, txr_idx);
if (hw->tx_cntxt[txr_idx].txr_free <=
(num_tx_cmds + QLA_TX_MIN_FREE)) {
QL_DPRINT8(ha, (dev, "%s: (hw->txr_free <= "
@@ -2552,15 +2551,8 @@ qla_init_rcv_cntxt(qla_host_t *ha)
qla_host_to_le64(hw->dma_buf.sds_ring[i].dma_addr);
rcntxt->sds[i].size =
qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
- if (ha->msix_count == 2) {
- rcntxt->sds[i].intr_id =
- qla_host_to_le16(hw->intr_id[0]);
- rcntxt->sds[i].intr_src_bit = qla_host_to_le16((i));
- } else {
- rcntxt->sds[i].intr_id =
- qla_host_to_le16(hw->intr_id[i]);
- rcntxt->sds[i].intr_src_bit = qla_host_to_le16(0);
- }
+ rcntxt->sds[i].intr_id = qla_host_to_le16(hw->intr_id[i]);
+ rcntxt->sds[i].intr_src_bit = qla_host_to_le16(0);
}
for (i = 0; i < rcntxt_rds_rings; i++) {
@@ -2672,17 +2664,11 @@ qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds)
add_rcv->sds[i].size =
qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
- if (ha->msix_count == 2) {
- add_rcv->sds[i].intr_id =
- qla_host_to_le16(hw->intr_id[0]);
- add_rcv->sds[i].intr_src_bit = qla_host_to_le16(j);
- } else {
- add_rcv->sds[i].intr_id =
- qla_host_to_le16(hw->intr_id[j]);
- add_rcv->sds[i].intr_src_bit = qla_host_to_le16(0);
- }
+ add_rcv->sds[i].intr_id = qla_host_to_le16(hw->intr_id[j]);
+ add_rcv->sds[i].intr_src_bit = qla_host_to_le16(0);
}
+
for (i = 0; (i < nsds); i++) {
j = i + sds_idx;
@@ -2803,6 +2789,7 @@ qla_init_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
q80_rsp_tx_cntxt_t *tcntxt_rsp;
uint32_t err;
qla_hw_tx_cntxt_t *hw_tx_cntxt;
+ uint32_t intr_idx;
hw_tx_cntxt = &hw->tx_cntxt[txr_idx];
@@ -2818,6 +2805,8 @@ qla_init_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
tcntxt->count_version = (sizeof (q80_rq_tx_cntxt_t) >> 2);
tcntxt->count_version |= Q8_MBX_CMD_VERSION;
+ intr_idx = txr_idx;
+
#ifdef QL_ENABLE_ISCSI_TLV
tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO |
@@ -2827,8 +2816,9 @@ qla_init_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
tcntxt->traffic_class = 1;
}
-#else
+ intr_idx = txr_idx % (ha->hw.num_tx_rings >> 1);
+#else
tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO;
#endif /* #ifdef QL_ENABLE_ISCSI_TLV */
@@ -2841,10 +2831,9 @@ qla_init_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
qla_host_to_le64(hw_tx_cntxt->tx_cons_paddr);
tcntxt->tx_ring[0].nentries = qla_host_to_le16(NUM_TX_DESCRIPTORS);
- tcntxt->tx_ring[0].intr_id = qla_host_to_le16(hw->intr_id[0]);
+ tcntxt->tx_ring[0].intr_id = qla_host_to_le16(hw->intr_id[intr_idx]);
tcntxt->tx_ring[0].intr_src_bit = qla_host_to_le16(0);
-
hw_tx_cntxt->txr_free = NUM_TX_DESCRIPTORS;
hw_tx_cntxt->txr_next = hw_tx_cntxt->txr_comp = 0;
@@ -3166,11 +3155,11 @@ ql_hw_set_multi(qla_host_t *ha, uint8_t *mcast_addr, uint32_t mcnt,
}
/*
- * Name: qla_hw_tx_done_locked
+ * Name: ql_hw_tx_done_locked
* Function: Handle Transmit Completions
*/
-static void
-qla_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx)
+void
+ql_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx)
{
qla_tx_buf_t *txb;
qla_hw_t *hw = &ha->hw;
@@ -3208,34 +3197,6 @@ qla_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx)
return;
}
-/*
- * Name: ql_hw_tx_done
- * Function: Handle Transmit Completions
- */
-void
-ql_hw_tx_done(qla_host_t *ha)
-{
- int i;
- uint32_t flag = 0;
-
- if (!mtx_trylock(&ha->tx_lock)) {
- QL_DPRINT8(ha, (ha->pci_dev,
- "%s: !mtx_trylock(&ha->tx_lock)\n", __func__));
- return;
- }
- for (i = 0; i < ha->hw.num_tx_rings; i++) {
- qla_hw_tx_done_locked(ha, i);
- if (ha->hw.tx_cntxt[i].txr_free <= (NUM_TX_DESCRIPTORS >> 1))
- flag = 1;
- }
-
- if (!flag)
- ha->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
-
- QLA_TX_UNLOCK(ha);
- return;
-}
-
void
ql_update_link_state(qla_host_t *ha)
{
@@ -3655,7 +3616,7 @@ qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits)
}
int
-qla_iscsi_pdu(qla_host_t *ha, struct mbuf *mp)
+ql_iscsi_pdu(qla_host_t *ha, struct mbuf *mp)
{
struct ether_vlan_header *eh;
uint16_t etype;
diff --git a/sys/dev/qlxgbe/ql_hw.h b/sys/dev/qlxgbe/ql_hw.h
index e50bc5e..37090ff 100644
--- a/sys/dev/qlxgbe/ql_hw.h
+++ b/sys/dev/qlxgbe/ql_hw.h
@@ -1543,7 +1543,6 @@ typedef struct _qla_hw_tx_cntxt {
uint32_t tx_prod_reg;
uint16_t tx_cntxt_id;
- uint8_t frame_hdr[QL_FRAME_HDR_SIZE];
} qla_hw_tx_cntxt_t;
diff --git a/sys/dev/qlxgbe/ql_isr.c b/sys/dev/qlxgbe/ql_isr.c
index ab7f908..19f2d71 100644
--- a/sys/dev/qlxgbe/ql_isr.c
+++ b/sys/dev/qlxgbe/ql_isr.c
@@ -159,7 +159,12 @@ qla_rx_intr(qla_host_t *ha, qla_sgl_rcv_t *sgc, uint32_t sds_idx)
ifp->if_ipackets++;
mpf->m_pkthdr.flowid = sgc->rss_hash;
- M_HASHTYPE_SET(mpf, M_HASHTYPE_OPAQUE);
+
+#if __FreeBSD_version >= 1100000
+ M_HASHTYPE_SET(mpf, M_HASHTYPE_OPAQUE_HASH);
+#else
+ M_HASHTYPE_SET(mpf, M_HASHTYPE_NONE);
+#endif /* #if __FreeBSD_version >= 1100000 */
(*ifp->if_input)(ifp, mpf);
@@ -450,11 +455,11 @@ qla_rcv_cont_sds(qla_host_t *ha, uint32_t sds_idx, uint32_t comp_idx,
}
/*
- * Name: qla_rcv_isr
+ * Name: ql_rcv_isr
* Function: Main Interrupt Service Routine
*/
-static uint32_t
-qla_rcv_isr(qla_host_t *ha, uint32_t sds_idx, uint32_t count)
+uint32_t
+ql_rcv_isr(qla_host_t *ha, uint32_t sds_idx, uint32_t count)
{
device_t dev;
qla_hw_t *hw;
@@ -704,7 +709,7 @@ qla_rcv_isr(qla_host_t *ha, uint32_t sds_idx, uint32_t count)
}
if (ha->flags.stop_rcv)
- goto qla_rcv_isr_exit;
+ goto ql_rcv_isr_exit;
if (hw->sds[sds_idx].sdsr_next != comp_idx) {
QL_UPDATE_SDS_CONSUMER_INDEX(ha, sds_idx, comp_idx);
@@ -727,7 +732,7 @@ qla_rcv_isr(qla_host_t *ha, uint32_t sds_idx, uint32_t count)
if (opcode)
ret = -1;
-qla_rcv_isr_exit:
+ql_rcv_isr_exit:
hw->sds[sds_idx].rcv_active = 0;
return (ret);
@@ -931,7 +936,7 @@ ql_isr(void *arg)
int idx;
qla_hw_t *hw;
struct ifnet *ifp;
- uint32_t ret = 0;
+ qla_tx_fp_t *fp;
ha = ivec->ha;
hw = &ha->hw;
@@ -940,17 +945,12 @@ ql_isr(void *arg)
if ((idx = ivec->sds_idx) >= ha->hw.num_sds_rings)
return;
- if (idx == 0)
- taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
-
- ret = qla_rcv_isr(ha, idx, -1);
- if (idx == 0)
- taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
+ fp = &ha->tx_fp[idx];
+
+ if (fp->fp_taskqueue != NULL)
+ taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
- if (!ha->flags.stop_rcv) {
- QL_ENABLE_INTERRUPTS(ha, idx);
- }
return;
}
diff --git a/sys/dev/qlxgbe/ql_os.c b/sys/dev/qlxgbe/ql_os.c
index ecccdb81..94ddf7a 100644
--- a/sys/dev/qlxgbe/ql_os.c
+++ b/sys/dev/qlxgbe/ql_os.c
@@ -76,11 +76,11 @@ static void qla_release(qla_host_t *ha);
static void qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs,
int error);
static void qla_stop(qla_host_t *ha);
-static int qla_send(qla_host_t *ha, struct mbuf **m_headp);
-static void qla_tx_done(void *context, int pending);
static void qla_get_peer(qla_host_t *ha);
static void qla_error_recovery(void *context, int pending);
static void qla_async_event(void *context, int pending);
+static int qla_send(qla_host_t *ha, struct mbuf **m_headp, uint32_t txr_idx,
+ uint32_t iscsi_pdu);
/*
* Hooks to the Operating Systems
@@ -93,7 +93,14 @@ static void qla_init(void *arg);
static int qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static int qla_media_change(struct ifnet *ifp);
static void qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr);
-static void qla_start(struct ifnet *ifp);
+
+static int qla_transmit(struct ifnet *ifp, struct mbuf *mp);
+static void qla_qflush(struct ifnet *ifp);
+static int qla_alloc_tx_br(qla_host_t *ha, qla_tx_fp_t *tx_fp);
+static void qla_free_tx_br(qla_host_t *ha, qla_tx_fp_t *tx_fp);
+static int qla_create_fp_taskqueues(qla_host_t *ha);
+static void qla_destroy_fp_taskqueues(qla_host_t *ha);
+static void qla_drain_fp_taskqueues(qla_host_t *ha);
static device_method_t qla_pci_methods[] = {
/* Device interface */
@@ -225,7 +232,6 @@ qla_watchdog(void *arg)
qla_hw_t *hw;
struct ifnet *ifp;
uint32_t i;
- qla_hw_tx_cntxt_t *hw_tx_cntxt;
hw = &ha->hw;
ifp = ha->ifp;
@@ -254,19 +260,14 @@ qla_watchdog(void *arg)
&ha->async_event_task);
}
- for (i = 0; i < ha->hw.num_tx_rings; i++) {
- hw_tx_cntxt = &hw->tx_cntxt[i];
- if (qla_le32_to_host(*(hw_tx_cntxt->tx_cons)) !=
- hw_tx_cntxt->txr_comp) {
- taskqueue_enqueue(ha->tx_tq,
- &ha->tx_task);
- break;
- }
- }
+ for (i = 0; i < ha->hw.num_sds_rings; i++) {
+ qla_tx_fp_t *fp = &ha->tx_fp[i];
- if ((ifp->if_snd.ifq_head != NULL) && QL_RUNNING(ifp)) {
- taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
+ if (fp->fp_taskqueue != NULL)
+ taskqueue_enqueue(fp->fp_taskqueue,
+ &fp->fp_task);
}
+
ha->qla_watchdog_paused = 0;
} else {
ha->qla_watchdog_paused = 0;
@@ -322,9 +323,7 @@ qla_pci_attach(device_t dev)
rsrc_len = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
ha->reg_rid);
- mtx_init(&ha->hw_lock, "qla83xx_hw_lock", MTX_NETWORK_LOCK, MTX_SPIN);
-
- mtx_init(&ha->tx_lock, "qla83xx_tx_lock", MTX_NETWORK_LOCK, MTX_DEF);
+ mtx_init(&ha->hw_lock, "qla83xx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF);
qla_add_sysctls(ha);
ql_hw_add_sysctls(ha);
@@ -344,8 +343,9 @@ qla_pci_attach(device_t dev)
}
QL_DPRINT2(ha, (dev, "%s: ha %p pci_func 0x%x rsrc_count 0x%08x"
- " msix_count 0x%x pci_reg %p\n", __func__, ha,
- ha->pci_func, rsrc_len, ha->msix_count, ha->pci_reg));
+ " msix_count 0x%x pci_reg %p pci_reg1 %p\n", __func__, ha,
+ ha->pci_func, rsrc_len, ha->msix_count, ha->pci_reg,
+ ha->pci_reg1));
/* initialize hardware */
if (ql_init_hw(ha)) {
@@ -366,14 +366,15 @@ qla_pci_attach(device_t dev)
goto qla_pci_attach_err;
}
device_printf(dev, "%s: ha %p pci_func 0x%x rsrc_count 0x%08x"
- " msix_count 0x%x pci_reg %p num_rcvq = %d\n", __func__, ha,
- ha->pci_func, rsrc_len, ha->msix_count, ha->pci_reg, num_rcvq);
+ " msix_count 0x%x pci_reg %p pci_reg1 %p num_rcvq = %d\n",
+ __func__, ha, ha->pci_func, rsrc_len, ha->msix_count,
+ ha->pci_reg, ha->pci_reg1, num_rcvq);
#ifdef QL_ENABLE_ISCSI_TLV
if ((ha->msix_count < 64) || (num_rcvq != 32)) {
ha->hw.num_sds_rings = 15;
- ha->hw.num_tx_rings = 32;
+ ha->hw.num_tx_rings = ha->hw.num_sds_rings * 2;
}
#endif /* #ifdef QL_ENABLE_ISCSI_TLV */
ha->hw.num_rds_rings = ha->hw.num_sds_rings;
@@ -421,8 +422,20 @@ qla_pci_attach(device_t dev)
device_printf(dev, "could not setup interrupt\n");
goto qla_pci_attach_err;
}
+
+ ha->tx_fp[i].ha = ha;
+ ha->tx_fp[i].txr_idx = i;
+
+ if (qla_alloc_tx_br(ha, &ha->tx_fp[i])) {
+ device_printf(dev, "%s: could not allocate tx_br[%d]\n",
+ __func__, i);
+ goto qla_pci_attach_err;
+ }
}
+ if (qla_create_fp_taskqueues(ha) != 0)
+ goto qla_pci_attach_err;
+
printf("%s: mp__ncpus %d sds %d rds %d msi-x %d\n", __func__, mp_ncpus,
ha->hw.num_sds_rings, ha->hw.num_rds_rings, ha->msix_count);
@@ -452,13 +465,6 @@ qla_pci_attach(device_t dev)
ha->flags.qla_watchdog_active = 1;
ha->flags.qla_watchdog_pause = 0;
-
- TASK_INIT(&ha->tx_task, 0, qla_tx_done, ha);
- ha->tx_tq = taskqueue_create("qla_txq", M_NOWAIT,
- taskqueue_thread_enqueue, &ha->tx_tq);
- taskqueue_start_threads(&ha->tx_tq, 1, PI_NET, "%s txq",
- device_get_nameunit(ha->pci_dev));
-
callout_init(&ha->tx_callout, TRUE);
ha->flags.qla_callout_init = 1;
@@ -584,11 +590,6 @@ qla_release(qla_host_t *ha)
taskqueue_free(ha->err_tq);
}
- if (ha->tx_tq) {
- taskqueue_drain(ha->tx_tq, &ha->tx_task);
- taskqueue_free(ha->tx_tq);
- }
-
ql_del_cdev(ha);
if (ha->flags.qla_watchdog_active) {
@@ -626,13 +627,15 @@ qla_release(qla_host_t *ha)
ha->irq_vec[i].irq_rid,
ha->irq_vec[i].irq);
}
+
+ qla_free_tx_br(ha, &ha->tx_fp[i]);
}
+ qla_destroy_fp_taskqueues(ha);
if (ha->msix_count)
pci_release_msi(dev);
if (ha->flags.lock_init) {
- mtx_destroy(&ha->tx_lock);
mtx_destroy(&ha->hw_lock);
}
@@ -812,7 +815,9 @@ qla_init_ifnet(device_t dev, qla_host_t *ha)
ifp->if_softc = ha;
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
ifp->if_ioctl = qla_ioctl;
- ifp->if_start = qla_start;
+
+ ifp->if_transmit = qla_transmit;
+ ifp->if_qflush = qla_qflush;
IFQ_SET_MAXLEN(&ifp->if_snd, qla_get_ifq_snd_maxlen(ha));
ifp->if_snd.ifq_drv_maxlen = qla_get_ifq_snd_maxlen(ha);
@@ -822,12 +827,13 @@ qla_init_ifnet(device_t dev, qla_host_t *ha)
ether_ifattach(ifp, qla_get_mac_addr(ha));
- ifp->if_capabilities = IFCAP_HWCSUM |
+ ifp->if_capabilities |= IFCAP_HWCSUM |
IFCAP_TSO4 |
- IFCAP_JUMBO_MTU;
-
- ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
- ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
+ IFCAP_JUMBO_MTU |
+ IFCAP_VLAN_HWTAGGING |
+ IFCAP_VLAN_MTU |
+ IFCAP_VLAN_HWTSO |
+ IFCAP_LRO;
ifp->if_capenable = ifp->if_capabilities;
@@ -922,10 +928,13 @@ qla_set_multi(qla_host_t *ha, uint32_t add_multi)
if_maddr_runlock(ifp);
- if (QLA_LOCK(ha, __func__, 1) == 0) {
- ret = ql_hw_set_multi(ha, mta, mcnt, add_multi);
- QLA_UNLOCK(ha, __func__);
- }
+ //if (QLA_LOCK(ha, __func__, 1) == 0) {
+ // ret = ql_hw_set_multi(ha, mta, mcnt, add_multi);
+ // QLA_UNLOCK(ha, __func__);
+ //}
+ QLA_LOCK(ha, __func__, 1);
+ ret = ql_hw_set_multi(ha, mta, mcnt, add_multi);
+ QLA_UNLOCK(ha, __func__);
return (ret);
}
@@ -1130,64 +1139,10 @@ qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
return;
}
-static void
-qla_start(struct ifnet *ifp)
-{
- struct mbuf *m_head;
- qla_host_t *ha = (qla_host_t *)ifp->if_softc;
-
- QL_DPRINT8(ha, (ha->pci_dev, "%s: enter\n", __func__));
-
- if (!mtx_trylock(&ha->tx_lock)) {
- QL_DPRINT8(ha, (ha->pci_dev,
- "%s: mtx_trylock(&ha->tx_lock) failed\n", __func__));
- return;
- }
-
- if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
- IFF_DRV_RUNNING) {
- QL_DPRINT8(ha,
- (ha->pci_dev, "%s: !IFF_DRV_RUNNING\n", __func__));
- QLA_TX_UNLOCK(ha);
- return;
- }
-
- if (!ha->hw.link_up || !ha->watchdog_ticks)
- ql_update_link_state(ha);
-
- if (!ha->hw.link_up) {
- QL_DPRINT8(ha, (ha->pci_dev, "%s: link down\n", __func__));
- QLA_TX_UNLOCK(ha);
- return;
- }
-
- while (ifp->if_snd.ifq_head != NULL) {
- IF_DEQUEUE(&ifp->if_snd, m_head);
-
- if (m_head == NULL) {
- QL_DPRINT8(ha, (ha->pci_dev, "%s: m_head == NULL\n",
- __func__));
- break;
- }
-
- if (qla_send(ha, &m_head)) {
- if (m_head == NULL)
- break;
- QL_DPRINT8(ha, (ha->pci_dev, "%s: PREPEND\n", __func__));
- ifp->if_drv_flags |= IFF_DRV_OACTIVE;
- IF_PREPEND(&ifp->if_snd, m_head);
- break;
- }
- /* Send a copy of the frame to the BPF listener */
- ETHER_BPF_MTAP(ifp, m_head);
- }
- QLA_TX_UNLOCK(ha);
- QL_DPRINT8(ha, (ha->pci_dev, "%s: exit\n", __func__));
- return;
-}
static int
-qla_send(qla_host_t *ha, struct mbuf **m_headp)
+qla_send(qla_host_t *ha, struct mbuf **m_headp, uint32_t txr_idx,
+ uint32_t iscsi_pdu)
{
bus_dma_segment_t segs[QLA_MAX_SEGMENTS];
bus_dmamap_t map;
@@ -1195,29 +1150,9 @@ qla_send(qla_host_t *ha, struct mbuf **m_headp)
int ret = -1;
uint32_t tx_idx;
struct mbuf *m_head = *m_headp;
- uint32_t txr_idx = ha->txr_idx;
- uint32_t iscsi_pdu = 0;
QL_DPRINT8(ha, (ha->pci_dev, "%s: enter\n", __func__));
- /* check if flowid is set */
-
- if (M_HASHTYPE_GET(m_head) != M_HASHTYPE_NONE) {
-#ifdef QL_ENABLE_ISCSI_TLV
- if (qla_iscsi_pdu(ha, m_head) == 0) {
- iscsi_pdu = 1;
- txr_idx = m_head->m_pkthdr.flowid &
- ((ha->hw.num_tx_rings >> 1) - 1);
- } else {
- txr_idx = m_head->m_pkthdr.flowid &
- (ha->hw.num_tx_rings - 1);
- }
-#else
- txr_idx = m_head->m_pkthdr.flowid & (ha->hw.num_tx_rings - 1);
-#endif /* #ifdef QL_ENABLE_ISCSI_TLV */
- }
-
-
tx_idx = ha->hw.tx_cntxt[txr_idx].txr_next;
map = ha->tx_ring[txr_idx].tx_buf[tx_idx].map;
@@ -1295,16 +1230,302 @@ qla_send(qla_host_t *ha, struct mbuf **m_headp)
return (ret);
}
+static int
+qla_alloc_tx_br(qla_host_t *ha, qla_tx_fp_t *fp)
+{
+ snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
+ "qla%d_fp%d_tx_mq_lock", ha->pci_func, fp->txr_idx);
+
+ mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);
+
+ fp->tx_br = buf_ring_alloc(NUM_TX_DESCRIPTORS, M_DEVBUF,
+ M_NOWAIT, &fp->tx_mtx);
+ if (fp->tx_br == NULL) {
+ QL_DPRINT1(ha, (ha->pci_dev, "buf_ring_alloc failed for "
+ " fp[%d, %d]\n", ha->pci_func, fp->txr_idx));
+ return (-ENOMEM);
+ }
+ return 0;
+}
+
+static void
+qla_free_tx_br(qla_host_t *ha, qla_tx_fp_t *fp)
+{
+ struct mbuf *mp;
+ struct ifnet *ifp = ha->ifp;
+
+ if (mtx_initialized(&fp->tx_mtx)) {
+
+ if (fp->tx_br != NULL) {
+
+ mtx_lock(&fp->tx_mtx);
+
+ while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
+ m_freem(mp);
+ }
+
+ mtx_unlock(&fp->tx_mtx);
+
+ buf_ring_free(fp->tx_br, M_DEVBUF);
+ fp->tx_br = NULL;
+ }
+ mtx_destroy(&fp->tx_mtx);
+ }
+ return;
+}
+
+static void
+qla_fp_taskqueue(void *context, int pending)
+{
+ qla_tx_fp_t *fp;
+ qla_host_t *ha;
+ struct ifnet *ifp;
+ struct mbuf *mp;
+ int ret;
+ uint32_t txr_idx;
+ uint32_t iscsi_pdu = 0;
+ uint32_t rx_pkts_left;
+
+ fp = context;
+
+ if (fp == NULL)
+ return;
+
+ ha = (qla_host_t *)fp->ha;
+
+ ifp = ha->ifp;
+
+ txr_idx = fp->txr_idx;
+
+ mtx_lock(&fp->tx_mtx);
+
+ if (((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
+ IFF_DRV_RUNNING) || (!ha->hw.link_up)) {
+ mtx_unlock(&fp->tx_mtx);
+ goto qla_fp_taskqueue_exit;
+ }
+
+ rx_pkts_left = ql_rcv_isr(ha, fp->txr_idx, 64);
+
+#ifdef QL_ENABLE_ISCSI_TLV
+ ql_hw_tx_done_locked(ha, fp->txr_idx);
+ ql_hw_tx_done_locked(ha, (fp->txr_idx + (ha->hw.num_tx_rings >> 1)));
+ txr_idx = txr_idx + (ha->hw.num_tx_rings >> 1);
+#else
+ ql_hw_tx_done_locked(ha, fp->txr_idx);
+#endif /* #ifdef QL_ENABLE_ISCSI_TLV */
+
+ mp = drbr_peek(ifp, fp->tx_br);
+
+ while (mp != NULL) {
+
+ if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE) {
+#ifdef QL_ENABLE_ISCSI_TLV
+ if (ql_iscsi_pdu(ha, mp) == 0) {
+ iscsi_pdu = 1;
+ }
+#endif /* #ifdef QL_ENABLE_ISCSI_TLV */
+ }
+
+ ret = qla_send(ha, &mp, txr_idx, iscsi_pdu);
+
+ if (ret) {
+ if (mp != NULL)
+ drbr_putback(ifp, fp->tx_br, mp);
+ else {
+ drbr_advance(ifp, fp->tx_br);
+ }
+
+ mtx_unlock(&fp->tx_mtx);
+
+ goto qla_fp_taskqueue_exit0;
+ } else {
+ drbr_advance(ifp, fp->tx_br);
+ }
+
+ mp = drbr_peek(ifp, fp->tx_br);
+ }
+
+ mtx_unlock(&fp->tx_mtx);
+
+qla_fp_taskqueue_exit0:
+
+ if (rx_pkts_left || ((mp != NULL) && ret)) {
+ taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
+ } else {
+ if (!ha->flags.stop_rcv) {
+ QL_ENABLE_INTERRUPTS(ha, fp->txr_idx);
+ }
+ }
+
+qla_fp_taskqueue_exit:
+
+ QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = %d\n", __func__, ret));
+ return;
+}
+
+static int
+qla_create_fp_taskqueues(qla_host_t *ha)
+{
+ int i;
+ uint8_t tq_name[32];
+
+ for (i = 0; i < ha->hw.num_sds_rings; i++) {
+
+ qla_tx_fp_t *fp = &ha->tx_fp[i];
+
+ bzero(tq_name, sizeof (tq_name));
+ snprintf(tq_name, sizeof (tq_name), "ql_fp_tq_%d", i);
+
+ TASK_INIT(&fp->fp_task, 0, qla_fp_taskqueue, fp);
+
+ fp->fp_taskqueue = taskqueue_create_fast(tq_name, M_NOWAIT,
+ taskqueue_thread_enqueue,
+ &fp->fp_taskqueue);
+
+ if (fp->fp_taskqueue == NULL)
+ return (-1);
+
+ taskqueue_start_threads(&fp->fp_taskqueue, 1, PI_NET, "%s",
+ tq_name);
+
+ QL_DPRINT1(ha, (ha->pci_dev, "%s: %p\n", __func__,
+ fp->fp_taskqueue));
+ }
+
+ return (0);
+}
+
+static void
+qla_destroy_fp_taskqueues(qla_host_t *ha)
+{
+ int i;
+
+ for (i = 0; i < ha->hw.num_sds_rings; i++) {
+
+ qla_tx_fp_t *fp = &ha->tx_fp[i];
+
+ if (fp->fp_taskqueue != NULL) {
+ taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
+ taskqueue_free(fp->fp_taskqueue);
+ fp->fp_taskqueue = NULL;
+ }
+ }
+ return;
+}
+
+static void
+qla_drain_fp_taskqueues(qla_host_t *ha)
+{
+ int i;
+
+ for (i = 0; i < ha->hw.num_sds_rings; i++) {
+ qla_tx_fp_t *fp = &ha->tx_fp[i];
+
+ if (fp->fp_taskqueue != NULL) {
+ taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
+ }
+ }
+ return;
+}
+
+static int
+qla_transmit(struct ifnet *ifp, struct mbuf *mp)
+{
+ qla_host_t *ha = (qla_host_t *)ifp->if_softc;
+ qla_tx_fp_t *fp;
+ int rss_id = 0;
+ int ret = 0;
+
+ QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
+
+#if __FreeBSD_version >= 1100000
+ if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE)
+#else
+ if (mp->m_flags & M_FLOWID)
+#endif
+ rss_id = (mp->m_pkthdr.flowid & Q8_RSS_IND_TBL_MAX_IDX) %
+ ha->hw.num_sds_rings;
+ fp = &ha->tx_fp[rss_id];
+
+ if (fp->tx_br == NULL) {
+ ret = EINVAL;
+ goto qla_transmit_exit;
+ }
+
+ if (mp != NULL) {
+ ret = drbr_enqueue(ifp, fp->tx_br, mp);
+ }
+
+ if (fp->fp_taskqueue != NULL)
+ taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
+
+ ret = 0;
+
+qla_transmit_exit:
+
+ QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = %d\n", __func__, ret));
+ return ret;
+}
+
+static void
+qla_qflush(struct ifnet *ifp)
+{
+ int i;
+ qla_tx_fp_t *fp;
+ struct mbuf *mp;
+ qla_host_t *ha;
+
+ ha = (qla_host_t *)ifp->if_softc;
+
+ QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
+
+ for (i = 0; i < ha->hw.num_sds_rings; i++) {
+
+ fp = &ha->tx_fp[i];
+
+ if (fp == NULL)
+ continue;
+
+ if (fp->tx_br) {
+ mtx_lock(&fp->tx_mtx);
+
+ while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
+ m_freem(mp);
+ }
+ mtx_unlock(&fp->tx_mtx);
+ }
+ }
+ QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
+
+ return;
+}
+
+
static void
qla_stop(qla_host_t *ha)
{
struct ifnet *ifp = ha->ifp;
device_t dev;
+ int i = 0;
dev = ha->pci_dev;
ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);
- QLA_TX_LOCK(ha); QLA_TX_UNLOCK(ha);
+
+ for (i = 0; i < ha->hw.num_sds_rings; i++) {
+ qla_tx_fp_t *fp;
+
+ fp = &ha->tx_fp[i];
+
+ if (fp == NULL)
+ continue;
+
+ if (fp->tx_br != NULL) {
+ mtx_lock(&fp->tx_mtx);
+ mtx_unlock(&fp->tx_mtx);
+ }
+ }
ha->flags.qla_watchdog_pause = 1;
@@ -1313,6 +1534,8 @@ qla_stop(qla_host_t *ha)
ha->flags.qla_interface_up = 0;
+ qla_drain_fp_taskqueues(ha);
+
ql_hw_stop_rcv(ha);
ql_del_hw_if(ha);
@@ -1653,25 +1876,6 @@ exit_ql_get_mbuf:
return (ret);
}
-static void
-qla_tx_done(void *context, int pending)
-{
- qla_host_t *ha = context;
- struct ifnet *ifp;
-
- ifp = ha->ifp;
-
- if (!ifp)
- return;
-
- if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
- QL_DPRINT8(ha, (ha->pci_dev, "%s: !IFF_DRV_RUNNING\n", __func__));
- return;
- }
- ql_hw_tx_done(ha);
-
- qla_start(ha->ifp);
-}
static void
qla_get_peer(qla_host_t *ha)
@@ -1714,18 +1918,32 @@ qla_error_recovery(void *context, int pending)
qla_host_t *ha = context;
uint32_t msecs_100 = 100;
struct ifnet *ifp = ha->ifp;
+ int i = 0;
(void)QLA_LOCK(ha, __func__, 0);
if (ha->flags.qla_interface_up) {
- ha->hw.imd_compl = 1;
- qla_mdelay(__func__, 300);
+ ha->hw.imd_compl = 1;
+ qla_mdelay(__func__, 300);
- ql_hw_stop_rcv(ha);
+ ql_hw_stop_rcv(ha);
- ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);
- QLA_TX_LOCK(ha); QLA_TX_UNLOCK(ha);
+ ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);
+
+ for (i = 0; i < ha->hw.num_sds_rings; i++) {
+ qla_tx_fp_t *fp;
+
+ fp = &ha->tx_fp[i];
+
+ if (fp == NULL)
+ continue;
+
+ if (fp->tx_br != NULL) {
+ mtx_lock(&fp->tx_mtx);
+ mtx_unlock(&fp->tx_mtx);
+ }
+ }
}
QLA_UNLOCK(ha, __func__);
diff --git a/sys/dev/qlxgbe/ql_os.h b/sys/dev/qlxgbe/ql_os.h
index 3ba6c3f..db71772 100644
--- a/sys/dev/qlxgbe/ql_os.h
+++ b/sys/dev/qlxgbe/ql_os.h
@@ -147,8 +147,8 @@ MALLOC_DECLARE(M_QLA83XXBUF);
/*
* Locks
*/
-#define QLA_LOCK(ha, str, no_delay) qla_lock(ha, str, no_delay)
-#define QLA_UNLOCK(ha, str) qla_unlock(ha, str)
+#define QLA_LOCK(ha, str, no_delay) mtx_lock(&ha->hw_lock)
+#define QLA_UNLOCK(ha, str) mtx_unlock(&ha->hw_lock)
#define QLA_TX_LOCK(ha) mtx_lock(&ha->tx_lock);
#define QLA_TX_UNLOCK(ha) mtx_unlock(&ha->tx_lock);
diff --git a/sys/dev/qlxgbe/ql_ver.h b/sys/dev/qlxgbe/ql_ver.h
index 182fa32..90d61d2 100644
--- a/sys/dev/qlxgbe/ql_ver.h
+++ b/sys/dev/qlxgbe/ql_ver.h
@@ -36,6 +36,6 @@
#define QLA_VERSION_MAJOR 3
#define QLA_VERSION_MINOR 10
-#define QLA_VERSION_BUILD 31
+#define QLA_VERSION_BUILD 33
#endif /* #ifndef _QL_VER_H_ */
diff --git a/sys/dev/rl/if_rl.c b/sys/dev/rl/if_rl.c
index 2ff1310..2f877c9 100644
--- a/sys/dev/rl/if_rl.c
+++ b/sys/dev/rl/if_rl.c
@@ -1938,15 +1938,13 @@ rl_stop(struct rl_softc *sc)
*/
for (i = 0; i < RL_TX_LIST_CNT; i++) {
if (sc->rl_cdata.rl_tx_chain[i] != NULL) {
- if (sc->rl_cdata.rl_tx_chain[i] != NULL) {
- bus_dmamap_sync(sc->rl_cdata.rl_tx_tag,
- sc->rl_cdata.rl_tx_dmamap[i],
- BUS_DMASYNC_POSTWRITE);
- bus_dmamap_unload(sc->rl_cdata.rl_tx_tag,
- sc->rl_cdata.rl_tx_dmamap[i]);
- m_freem(sc->rl_cdata.rl_tx_chain[i]);
- sc->rl_cdata.rl_tx_chain[i] = NULL;
- }
+ bus_dmamap_sync(sc->rl_cdata.rl_tx_tag,
+ sc->rl_cdata.rl_tx_dmamap[i],
+ BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(sc->rl_cdata.rl_tx_tag,
+ sc->rl_cdata.rl_tx_dmamap[i]);
+ m_freem(sc->rl_cdata.rl_tx_chain[i]);
+ sc->rl_cdata.rl_tx_chain[i] = NULL;
CSR_WRITE_4(sc, RL_TXADDR0 + (i * sizeof(uint32_t)),
0x0000000);
}
diff --git a/sys/dev/sdhci/sdhci.c b/sys/dev/sdhci/sdhci.c
index 47d6340..d1b0ecf 100644
--- a/sys/dev/sdhci/sdhci.c
+++ b/sys/dev/sdhci/sdhci.c
@@ -74,6 +74,7 @@ static void sdhci_set_clock(struct sdhci_slot *slot, uint32_t clock);
static void sdhci_start(struct sdhci_slot *slot);
static void sdhci_start_data(struct sdhci_slot *slot, struct mmc_data *data);
+static void sdhci_card_poll(void *);
static void sdhci_card_task(void *, int);
/* helper routines */
@@ -90,6 +91,22 @@ static void sdhci_card_task(void *, int);
#define SDHCI_200_MAX_DIVIDER 256
#define SDHCI_300_MAX_DIVIDER 2046
+#define SDHCI_CARD_PRESENT_TICKS (hz / 5)
+#define SDHCI_INSERT_DELAY_TICKS (hz / 2)
+
+/*
+ * Broadcom BCM577xx Controller Constants
+ */
+#define BCM577XX_DEFAULT_MAX_DIVIDER 256 /* Maximum divider supported by the default clock source. */
+#define BCM577XX_ALT_CLOCK_BASE 63000000 /* Alternative clock's base frequency. */
+
+#define BCM577XX_HOST_CONTROL 0x198
+#define BCM577XX_CTRL_CLKSEL_MASK 0xFFFFCFFF
+#define BCM577XX_CTRL_CLKSEL_SHIFT 12
+#define BCM577XX_CTRL_CLKSEL_DEFAULT 0x0
+#define BCM577XX_CTRL_CLKSEL_64MHZ 0x3
+
+
static void
sdhci_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
@@ -152,8 +169,7 @@ sdhci_reset(struct sdhci_slot *slot, uint8_t mask)
int timeout;
if (slot->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
- if (!(RD4(slot, SDHCI_PRESENT_STATE) &
- SDHCI_CARD_PRESENT))
+ if (!SDHCI_GET_CARD_PRESENT(slot->bus, slot))
return;
}
@@ -218,10 +234,15 @@ sdhci_init(struct sdhci_slot *slot)
slot->intmask = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_INDEX |
SDHCI_INT_END_BIT | SDHCI_INT_CRC | SDHCI_INT_TIMEOUT |
- SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT |
SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL |
SDHCI_INT_DMA_END | SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE |
SDHCI_INT_ACMD12ERR;
+
+ if (!(slot->quirks & SDHCI_QUIRK_POLL_CARD_PRESENT) &&
+ !(slot->opt & SDHCI_NON_REMOVABLE)) {
+ slot->intmask |= SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT;
+ }
+
WR4(slot, SDHCI_INT_ENABLE, slot->intmask);
WR4(slot, SDHCI_SIGNAL_ENABLE, slot->intmask);
}
@@ -229,6 +250,8 @@ sdhci_init(struct sdhci_slot *slot)
static void
sdhci_set_clock(struct sdhci_slot *slot, uint32_t clock)
{
+ uint32_t clk_base;
+ uint32_t clk_sel;
uint32_t res;
uint16_t clk;
uint16_t div;
@@ -244,6 +267,22 @@ sdhci_set_clock(struct sdhci_slot *slot, uint32_t clock)
/* If no clock requested - left it so. */
if (clock == 0)
return;
+
+ /* Determine the clock base frequency */
+ clk_base = slot->max_clk;
+ if (slot->quirks & SDHCI_QUIRK_BCM577XX_400KHZ_CLKSRC) {
+ clk_sel = RD2(slot, BCM577XX_HOST_CONTROL) & BCM577XX_CTRL_CLKSEL_MASK;
+
+ /* Select clock source appropriate for the requested frequency. */
+ if ((clk_base / BCM577XX_DEFAULT_MAX_DIVIDER) > clock) {
+ clk_base = BCM577XX_ALT_CLOCK_BASE;
+ clk_sel |= (BCM577XX_CTRL_CLKSEL_64MHZ << BCM577XX_CTRL_CLKSEL_SHIFT);
+ } else {
+ clk_sel |= (BCM577XX_CTRL_CLKSEL_DEFAULT << BCM577XX_CTRL_CLKSEL_SHIFT);
+ }
+
+ WR2(slot, BCM577XX_HOST_CONTROL, clk_sel);
+ }
/* Recalculate timeout clock frequency based on the new sd clock. */
if (slot->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)
@@ -251,7 +290,7 @@ sdhci_set_clock(struct sdhci_slot *slot, uint32_t clock)
if (slot->version < SDHCI_SPEC_300) {
/* Looking for highest freq <= clock. */
- res = slot->max_clk;
+ res = clk_base;
for (div = 1; div < SDHCI_200_MAX_DIVIDER; div <<= 1) {
if (res <= clock)
break;
@@ -262,11 +301,11 @@ sdhci_set_clock(struct sdhci_slot *slot, uint32_t clock)
}
else {
/* Version 3.0 divisors are multiples of two up to 1023*2 */
- if (clock >= slot->max_clk)
+ if (clock >= clk_base)
div = 0;
else {
for (div = 2; div < SDHCI_300_MAX_DIVIDER; div += 2) {
- if ((slot->max_clk / div) <= clock)
+ if ((clk_base / div) <= clock)
break;
}
}
@@ -274,8 +313,8 @@ sdhci_set_clock(struct sdhci_slot *slot, uint32_t clock)
}
if (bootverbose || sdhci_debug)
- slot_printf(slot, "Divider %d for freq %d (max %d)\n",
- div, clock, slot->max_clk);
+ slot_printf(slot, "Divider %d for freq %d (base %d)\n",
+ div, clock, clk_base);
/* Now we have got divider, set it. */
clk = (div & SDHCI_DIVIDER_MASK) << SDHCI_DIVIDER_SHIFT;
@@ -338,6 +377,13 @@ sdhci_set_power(struct sdhci_slot *slot, u_char power)
/* Turn on the power. */
pwr |= SDHCI_POWER_ON;
WR1(slot, SDHCI_POWER_CONTROL, pwr);
+
+ if (slot->quirks & SDHCI_QUIRK_INTEL_POWER_UP_RESET) {
+ WR1(slot, SDHCI_POWER_CONTROL, pwr | 0x10);
+ DELAY(10);
+ WR1(slot, SDHCI_POWER_CONTROL, pwr);
+ DELAY(300);
+ }
}
static void
@@ -445,23 +491,17 @@ sdhci_transfer_pio(struct sdhci_slot *slot)
}
}
-static void
-sdhci_card_delay(void *arg)
-{
- struct sdhci_slot *slot = arg;
-
- taskqueue_enqueue(taskqueue_swi_giant, &slot->card_task);
-}
-
static void
sdhci_card_task(void *arg, int pending)
{
struct sdhci_slot *slot = arg;
SDHCI_LOCK(slot);
- if (RD4(slot, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT) {
+ if (SDHCI_GET_CARD_PRESENT(slot->bus, slot)) {
if (slot->dev == NULL) {
/* If card is present - attach mmc bus. */
+ if (bootverbose || sdhci_debug)
+ slot_printf(slot, "Card inserted\n");
slot->dev = device_add_child(slot->bus, "mmc", -1);
device_set_ivars(slot->dev, slot);
SDHCI_UNLOCK(slot);
@@ -471,6 +511,8 @@ sdhci_card_task(void *arg, int pending)
} else {
if (slot->dev != NULL) {
/* If no card present - detach mmc bus. */
+ if (bootverbose || sdhci_debug)
+ slot_printf(slot, "Card removed\n");
device_t d = slot->dev;
slot->dev = NULL;
SDHCI_UNLOCK(slot);
@@ -480,6 +522,51 @@ sdhci_card_task(void *arg, int pending)
}
}
+static void
+sdhci_handle_card_present_locked(struct sdhci_slot *slot, bool is_present)
+{
+ bool was_present;
+
+ /*
+ * If there was no card and now there is one, schedule the task to
+ * create the child device after a short delay. The delay is to
+ * debounce the card insert (sometimes the card detect pin stabilizes
+ * before the other pins have made good contact).
+ *
+ * If there was a card present and now it's gone, immediately schedule
+ * the task to delete the child device. No debouncing -- gone is gone,
+ * because once power is removed, a full card re-init is needed, and
+ * that happens by deleting and recreating the child device.
+ */
+ was_present = slot->dev != NULL;
+ if (!was_present && is_present) {
+ taskqueue_enqueue_timeout(taskqueue_swi_giant,
+ &slot->card_delayed_task, -SDHCI_INSERT_DELAY_TICKS);
+ } else if (was_present && !is_present) {
+ taskqueue_enqueue(taskqueue_swi_giant, &slot->card_task);
+ }
+}
+
+void
+sdhci_handle_card_present(struct sdhci_slot *slot, bool is_present)
+{
+
+ SDHCI_LOCK(slot);
+ sdhci_handle_card_present_locked(slot, is_present);
+ SDHCI_UNLOCK(slot);
+}
+
+static void
+sdhci_card_poll(void *arg)
+{
+ struct sdhci_slot *slot = arg;
+
+ sdhci_handle_card_present(slot,
+ SDHCI_GET_CARD_PRESENT(slot->bus, slot));
+ callout_reset(&slot->card_poll_callout, SDHCI_CARD_PRESENT_TICKS,
+ sdhci_card_poll, slot);
+}
+
int
sdhci_init_slot(device_t dev, struct sdhci_slot *slot, int num)
{
@@ -550,9 +637,11 @@ sdhci_init_slot(device_t dev, struct sdhci_slot *slot, int num)
device_printf(dev, "Hardware doesn't specify base clock "
"frequency, using %dMHz as default.\n", SDHCI_DEFAULT_MAX_FREQ);
}
- /* Calculate timeout clock frequency. */
+ /* Calculate/set timeout clock frequency. */
if (slot->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK) {
slot->timeout_clk = slot->max_clk / 1000;
+ } else if (slot->quirks & SDHCI_QUIRK_DATA_TIMEOUT_1MHZ) {
+ slot->timeout_clk = 1000;
} else {
slot->timeout_clk =
(caps & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT;
@@ -596,6 +685,8 @@ sdhci_init_slot(device_t dev, struct sdhci_slot *slot, int num)
slot->opt &= ~SDHCI_HAVE_DMA;
if (slot->quirks & SDHCI_QUIRK_FORCE_DMA)
slot->opt |= SDHCI_HAVE_DMA;
+ if (slot->quirks & SDHCI_QUIRK_ALL_SLOTS_NON_REMOVABLE)
+ slot->opt |= SDHCI_NON_REMOVABLE;
/*
* Use platform-provided transfer backend
@@ -608,18 +699,33 @@ sdhci_init_slot(device_t dev, struct sdhci_slot *slot, int num)
slot_printf(slot, "%uMHz%s %s%s%s%s %s\n",
slot->max_clk / 1000000,
(caps & SDHCI_CAN_DO_HISPD) ? " HS" : "",
- (caps & MMC_CAP_8_BIT_DATA) ? "8bits" :
- ((caps & MMC_CAP_4_BIT_DATA) ? "4bits" : "1bit"),
+ (slot->host.caps & MMC_CAP_8_BIT_DATA) ? "8bits" :
+ ((slot->host.caps & MMC_CAP_4_BIT_DATA) ? "4bits" :
+ "1bit"),
(caps & SDHCI_CAN_VDD_330) ? " 3.3V" : "",
(caps & SDHCI_CAN_VDD_300) ? " 3.0V" : "",
(caps & SDHCI_CAN_VDD_180) ? " 1.8V" : "",
(slot->opt & SDHCI_HAVE_DMA) ? "DMA" : "PIO");
sdhci_dumpregs(slot);
}
-
+
+ slot->timeout = 10;
+ SYSCTL_ADD_INT(device_get_sysctl_ctx(slot->bus),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(slot->bus)), OID_AUTO,
+ "timeout", CTLFLAG_RW, &slot->timeout, 0,
+ "Maximum timeout for SDHCI transfers (in secs)");
TASK_INIT(&slot->card_task, 0, sdhci_card_task, slot);
- callout_init(&slot->card_callout, 1);
+ TIMEOUT_TASK_INIT(taskqueue_swi_giant, &slot->card_delayed_task, 0,
+ sdhci_card_task, slot);
+ callout_init(&slot->card_poll_callout, 1);
callout_init_mtx(&slot->timeout_callout, &slot->mtx, 0);
+
+ if ((slot->quirks & SDHCI_QUIRK_POLL_CARD_PRESENT) &&
+ !(slot->opt & SDHCI_NON_REMOVABLE)) {
+ callout_reset(&slot->card_poll_callout,
+ SDHCI_CARD_PRESENT_TICKS, sdhci_card_poll, slot);
+ }
+
return (0);
}
@@ -635,8 +741,9 @@ sdhci_cleanup_slot(struct sdhci_slot *slot)
device_t d;
callout_drain(&slot->timeout_callout);
- callout_drain(&slot->card_callout);
+ callout_drain(&slot->card_poll_callout);
taskqueue_drain(taskqueue_swi_giant, &slot->card_task);
+ taskqueue_drain_timeout(taskqueue_swi_giant, &slot->card_delayed_task);
SDHCI_LOCK(slot);
d = slot->dev;
@@ -682,6 +789,16 @@ sdhci_generic_min_freq(device_t brdev, struct sdhci_slot *slot)
return (slot->max_clk / SDHCI_200_MAX_DIVIDER);
}
+bool
+sdhci_generic_get_card_present(device_t brdev, struct sdhci_slot *slot)
+{
+
+ if (slot->opt & SDHCI_NON_REMOVABLE)
+ return true;
+
+ return (RD4(slot, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
+}
+
int
sdhci_generic_update_ios(device_t brdev, device_t reqdev)
{
@@ -779,7 +896,7 @@ static void
sdhci_start_command(struct sdhci_slot *slot, struct mmc_command *cmd)
{
int flags, timeout;
- uint32_t mask, state;
+ uint32_t mask;
slot->curcmd = cmd;
slot->cmd_done = 0;
@@ -794,11 +911,9 @@ sdhci_start_command(struct sdhci_slot *slot, struct mmc_command *cmd)
return;
}
- /* Read controller present state. */
- state = RD4(slot, SDHCI_PRESENT_STATE);
/* Do not issue command if there is no card, clock or power.
* Controller will not detect timeout without clock active. */
- if ((state & SDHCI_CARD_PRESENT) == 0 ||
+ if (!SDHCI_GET_CARD_PRESENT(slot->bus, slot) ||
slot->power == 0 ||
slot->clock == 0) {
cmd->error = MMC_ERR_FAILED;
@@ -824,7 +939,7 @@ sdhci_start_command(struct sdhci_slot *slot, struct mmc_command *cmd)
* (It's usually more like 20-30ms in the real world.)
*/
timeout = 250;
- while (state & mask) {
+ while (mask & RD4(slot, SDHCI_PRESENT_STATE)) {
if (timeout == 0) {
slot_printf(slot, "Controller never released "
"inhibit bit(s).\n");
@@ -835,7 +950,6 @@ sdhci_start_command(struct sdhci_slot *slot, struct mmc_command *cmd)
}
timeout--;
DELAY(1000);
- state = RD4(slot, SDHCI_PRESENT_STATE);
}
/* Prepare command flags. */
@@ -873,7 +987,8 @@ sdhci_start_command(struct sdhci_slot *slot, struct mmc_command *cmd)
/* Start command. */
WR2(slot, SDHCI_COMMAND_FLAGS, (cmd->opcode << 8) | (flags & 0xff));
/* Start timeout callout. */
- callout_reset(&slot->timeout_callout, 2*hz, sdhci_timeout, slot);
+ callout_reset(&slot->timeout_callout, slot->timeout * hz,
+ sdhci_timeout, slot);
}
static void
@@ -1272,7 +1387,7 @@ sdhci_acmd_irq(struct sdhci_slot *slot)
void
sdhci_generic_intr(struct sdhci_slot *slot)
{
- uint32_t intmask;
+ uint32_t intmask, present;
SDHCI_LOCK(slot);
/* Read slot interrupt status. */
@@ -1286,22 +1401,16 @@ sdhci_generic_intr(struct sdhci_slot *slot)
/* Handle card presence interrupts. */
if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
+ present = (intmask & SDHCI_INT_CARD_INSERT) != 0;
+ slot->intmask &=
+ ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
+ slot->intmask |= present ? SDHCI_INT_CARD_REMOVE :
+ SDHCI_INT_CARD_INSERT;
+ WR4(slot, SDHCI_INT_ENABLE, slot->intmask);
+ WR4(slot, SDHCI_SIGNAL_ENABLE, slot->intmask);
WR4(slot, SDHCI_INT_STATUS, intmask &
(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE));
-
- if (intmask & SDHCI_INT_CARD_REMOVE) {
- if (bootverbose || sdhci_debug)
- slot_printf(slot, "Card removed\n");
- callout_stop(&slot->card_callout);
- taskqueue_enqueue(taskqueue_swi_giant,
- &slot->card_task);
- }
- if (intmask & SDHCI_INT_CARD_INSERT) {
- if (bootverbose || sdhci_debug)
- slot_printf(slot, "Card inserted\n");
- callout_reset(&slot->card_callout, hz / 2,
- sdhci_card_delay, slot);
- }
+ sdhci_handle_card_present_locked(slot, present);
intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
}
/* Handle command interrupts. */
diff --git a/sys/dev/sdhci/sdhci.h b/sys/dev/sdhci/sdhci.h
index b7d1960..4626816 100644
--- a/sys/dev/sdhci/sdhci.h
+++ b/sys/dev/sdhci/sdhci.h
@@ -63,6 +63,16 @@
#define SDHCI_QUIRK_WAITFOR_RESET_ASSERTED (1<<14)
/* Leave controller in standard mode when putting card in HS mode. */
#define SDHCI_QUIRK_DONT_SET_HISPD_BIT (1<<15)
+/* Alternate clock source is required when supplying a 400 KHz clock. */
+#define SDHCI_QUIRK_BCM577XX_400KHZ_CLKSRC (1<<16)
+/* Card insert/remove interrupts don't work, polling required. */
+#define SDHCI_QUIRK_POLL_CARD_PRESENT (1<<17)
+/* All controller slots are non-removable. */
+#define SDHCI_QUIRK_ALL_SLOTS_NON_REMOVABLE (1<<18)
+/* Issue custom Intel controller reset sequence after power-up. */
+#define SDHCI_QUIRK_INTEL_POWER_UP_RESET (1<<19)
+/* Data timeout is invalid, use 1 MHz clock instead. */
+#define SDHCI_QUIRK_DATA_TIMEOUT_1MHZ (1<<20)
/*
* Controller registers
@@ -151,6 +161,9 @@
#define SDHCI_CLOCK_CARD_EN 0x0004
#define SDHCI_CLOCK_INT_STABLE 0x0002
#define SDHCI_CLOCK_INT_EN 0x0001
+#define SDHCI_DIVIDERS_MASK \
+ ((SDHCI_DIVIDER_MASK << SDHCI_DIVIDER_SHIFT) | \
+ (SDHCI_DIVIDER_HI_MASK << SDHCI_DIVIDER_HI_SHIFT))
#define SDHCI_TIMEOUT_CONTROL 0x2E
@@ -268,9 +281,11 @@ struct sdhci_slot {
device_t dev; /* Slot device */
u_char num; /* Slot number */
u_char opt; /* Slot options */
-#define SDHCI_HAVE_DMA 1
-#define SDHCI_PLATFORM_TRANSFER 2
+#define SDHCI_HAVE_DMA 0x01
+#define SDHCI_PLATFORM_TRANSFER 0x02
+#define SDHCI_NON_REMOVABLE 0x04
u_char version;
+ int timeout; /* Transfer timeout */
uint32_t max_clk; /* Max possible freq */
uint32_t timeout_clk; /* Timeout freq */
bus_dma_tag_t dmatag;
@@ -278,7 +293,9 @@ struct sdhci_slot {
u_char *dmamem;
bus_addr_t paddr; /* DMA buffer address */
struct task card_task; /* Card presence check task */
- struct callout card_callout; /* Card insert delay callout */
+ struct timeout_task
+ card_delayed_task;/* Card insert delayed task */
+ struct callout card_poll_callout;/* Card present polling callout */
struct callout timeout_callout;/* Card command/data response timeout */
struct mmc_host host; /* Host parameters */
struct mmc_request *req; /* Current request */
@@ -316,5 +333,7 @@ int sdhci_generic_acquire_host(device_t brdev, device_t reqdev);
int sdhci_generic_release_host(device_t brdev, device_t reqdev);
void sdhci_generic_intr(struct sdhci_slot *slot);
uint32_t sdhci_generic_min_freq(device_t brdev, struct sdhci_slot *slot);
+bool sdhci_generic_get_card_present(device_t brdev, struct sdhci_slot *slot);
+void sdhci_handle_card_present(struct sdhci_slot *slot, bool is_present);
#endif /* __SDHCI_H__ */
diff --git a/sys/dev/sdhci/sdhci_if.m b/sys/dev/sdhci/sdhci_if.m
index b33cdcf..da02d31 100644
--- a/sys/dev/sdhci/sdhci_if.m
+++ b/sys/dev/sdhci/sdhci_if.m
@@ -152,3 +152,9 @@ METHOD uint32_t min_freq {
device_t brdev;
struct sdhci_slot *slot;
} DEFAULT sdhci_generic_min_freq;
+
+METHOD bool get_card_present {
+ device_t brdev;
+ struct sdhci_slot *slot;
+} DEFAULT sdhci_generic_get_card_present;
+
diff --git a/sys/dev/sdhci/sdhci_pci.c b/sys/dev/sdhci/sdhci_pci.c
index b13a0c9..d8e8897 100644
--- a/sys/dev/sdhci/sdhci_pci.c
+++ b/sys/dev/sdhci/sdhci_pci.c
@@ -63,15 +63,15 @@ __FBSDID("$FreeBSD$");
#define PCI_SDHCI_IFVENDOR 0x02
#define PCI_SLOT_INFO 0x40 /* 8 bits */
-#define PCI_SLOT_INFO_SLOTS(x) (((x >> 4) & 7) + 1)
-#define PCI_SLOT_INFO_FIRST_BAR(x) ((x) & 7)
+#define PCI_SLOT_INFO_SLOTS(x) (((x >> 4) & 7) + 1)
+#define PCI_SLOT_INFO_FIRST_BAR(x) ((x) & 7)
/*
* RICOH specific PCI registers
*/
#define SDHC_PCI_MODE_KEY 0xf9
#define SDHC_PCI_MODE 0x150
-#define SDHC_PCI_MODE_SD20 0x10
+#define SDHC_PCI_MODE_SD20 0x10
#define SDHC_PCI_BASE_FREQ_KEY 0xfc
#define SDHC_PCI_BASE_FREQ 0xe1
@@ -105,6 +105,21 @@ static const struct sdhci_device {
{ 0x2381197B, 0xffff, "JMicron JMB38X SD",
SDHCI_QUIRK_32BIT_DMA_SIZE |
SDHCI_QUIRK_RESET_AFTER_REQUEST },
+ { 0x16bc14e4, 0xffff, "Broadcom BCM577xx SDXC/MMC Card Reader",
+ SDHCI_QUIRK_BCM577XX_400KHZ_CLKSRC },
+ { 0x0f148086, 0xffff, "Intel Bay Trail eMMC 4.5 Controller",
+ SDHCI_QUIRK_ALL_SLOTS_NON_REMOVABLE |
+ SDHCI_QUIRK_INTEL_POWER_UP_RESET },
+ { 0x0f508086, 0xffff, "Intel Bay Trail eMMC 4.5 Controller",
+ SDHCI_QUIRK_ALL_SLOTS_NON_REMOVABLE |
+ SDHCI_QUIRK_INTEL_POWER_UP_RESET },
+ { 0x22948086, 0xffff, "Intel Braswell eMMC 4.5.1 Controller",
+ SDHCI_QUIRK_ALL_SLOTS_NON_REMOVABLE |
+ SDHCI_QUIRK_DATA_TIMEOUT_1MHZ |
+ SDHCI_QUIRK_INTEL_POWER_UP_RESET },
+ { 0x5acc8086, 0xffff, "Intel Apollo Lake eMMC 5.0 Controller",
+ SDHCI_QUIRK_ALL_SLOTS_NON_REMOVABLE |
+ SDHCI_QUIRK_INTEL_POWER_UP_RESET },
{ 0, 0xffff, NULL,
0 }
};
@@ -117,8 +132,8 @@ struct sdhci_pci_softc {
int num_slots; /* Number of slots on this controller */
struct sdhci_slot slots[6];
struct resource *mem_res[6]; /* Memory resource */
- uint8_t cfg_freq; /* Saved mode */
- uint8_t cfg_mode; /* Saved frequency */
+ uint8_t cfg_freq; /* Saved frequency */
+ uint8_t cfg_mode; /* Saved mode */
};
static int sdhci_enable_msi = 1;
@@ -329,12 +344,14 @@ sdhci_pci_attach(device_t dev)
/* Allocate memory. */
rid = PCIR_BAR(bar + i);
- sc->mem_res[i] = bus_alloc_resource(dev, SYS_RES_MEMORY,
- &rid, 0ul, ~0ul, 0x100, RF_ACTIVE);
+ sc->mem_res[i] = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
+ &rid, RF_ACTIVE);
if (sc->mem_res[i] == NULL) {
device_printf(dev, "Can't allocate memory for slot %d\n", i);
continue;
}
+
+ slot->quirks = sc->quirks;
if (sdhci_init_slot(dev, slot, i) != 0)
continue;
@@ -409,11 +426,16 @@ static int
sdhci_pci_resume(device_t dev)
{
struct sdhci_pci_softc *sc = device_get_softc(dev);
- int i;
+ int i, err;
for (i = 0; i < sc->num_slots; i++)
sdhci_generic_resume(&sc->slots[i]);
- return (bus_generic_resume(dev));
+ err = bus_generic_resume(dev);
+ if (err)
+ return (err);
+ if (sc->quirks & SDHCI_QUIRK_LOWER_FREQUENCY)
+ sdhci_lower_frequency(dev);
+ return (0);
}
static void
@@ -442,11 +464,11 @@ static device_method_t sdhci_methods[] = {
DEVMETHOD(bus_write_ivar, sdhci_generic_write_ivar),
/* mmcbr_if */
- DEVMETHOD(mmcbr_update_ios, sdhci_generic_update_ios),
- DEVMETHOD(mmcbr_request, sdhci_generic_request),
- DEVMETHOD(mmcbr_get_ro, sdhci_generic_get_ro),
- DEVMETHOD(mmcbr_acquire_host, sdhci_generic_acquire_host),
- DEVMETHOD(mmcbr_release_host, sdhci_generic_release_host),
+ DEVMETHOD(mmcbr_update_ios, sdhci_generic_update_ios),
+ DEVMETHOD(mmcbr_request, sdhci_generic_request),
+ DEVMETHOD(mmcbr_get_ro, sdhci_generic_get_ro),
+ DEVMETHOD(mmcbr_acquire_host, sdhci_generic_acquire_host),
+ DEVMETHOD(mmcbr_release_host, sdhci_generic_release_host),
/* SDHCI registers accessors */
DEVMETHOD(sdhci_read_1, sdhci_pci_read_1),
diff --git a/sys/dev/sfxge/common/efx_mcdi.c b/sys/dev/sfxge/common/efx_mcdi.c
index e4a918a..4e7ff53 100644
--- a/sys/dev/sfxge/common/efx_mcdi.c
+++ b/sys/dev/sfxge/common/efx_mcdi.c
@@ -1725,7 +1725,8 @@ static __checkReturn efx_rc_t
efx_mcdi_mac_stats(
__in efx_nic_t *enp,
__in_opt efsys_mem_t *esmp,
- __in efx_stats_action_t action)
+ __in efx_stats_action_t action,
+ __in uint16_t period_ms)
{
efx_mcdi_req_t req;
uint8_t payload[MAX(MC_CMD_MAC_STATS_IN_LEN,
@@ -1750,7 +1751,7 @@ efx_mcdi_mac_stats(
MAC_STATS_IN_PERIODIC_CHANGE, enable | events | disable,
MAC_STATS_IN_PERIODIC_ENABLE, enable | events,
MAC_STATS_IN_PERIODIC_NOEVENT, !events,
- MAC_STATS_IN_PERIOD_MS, (enable | events) ? 1000 : 0);
+ MAC_STATS_IN_PERIOD_MS, (enable | events) ? period_ms : 0);
if (esmp != NULL) {
int bytes = MC_CMD_MAC_NSTATS * sizeof (uint64_t);
@@ -1800,7 +1801,7 @@ efx_mcdi_mac_stats_clear(
{
efx_rc_t rc;
- if ((rc = efx_mcdi_mac_stats(enp, NULL, EFX_STATS_CLEAR)) != 0)
+ if ((rc = efx_mcdi_mac_stats(enp, NULL, EFX_STATS_CLEAR, 0)) != 0)
goto fail1;
return (0);
@@ -1823,7 +1824,7 @@ efx_mcdi_mac_stats_upload(
* avoid having to pull the statistics buffer into the cache to
* maintain cumulative statistics.
*/
- if ((rc = efx_mcdi_mac_stats(enp, esmp, EFX_STATS_UPLOAD)) != 0)
+ if ((rc = efx_mcdi_mac_stats(enp, esmp, EFX_STATS_UPLOAD, 0)) != 0)
goto fail1;
return (0);
@@ -1838,7 +1839,7 @@ fail1:
efx_mcdi_mac_stats_periodic(
__in efx_nic_t *enp,
__in efsys_mem_t *esmp,
- __in uint16_t period,
+ __in uint16_t period_ms,
__in boolean_t events)
{
efx_rc_t rc;
@@ -1847,14 +1848,17 @@ efx_mcdi_mac_stats_periodic(
* The MC DMAs aggregate statistics for our convenience, so we can
* avoid having to pull the statistics buffer into the cache to
* maintain cumulative statistics.
- * Huntington uses a fixed 1sec period, so use that on Siena too.
+ * Huntington uses a fixed 1sec period.
+ * Medford uses a fixed 1sec period before v6.2.1.1033 firmware.
*/
- if (period == 0)
- rc = efx_mcdi_mac_stats(enp, NULL, EFX_STATS_DISABLE);
+ if (period_ms == 0)
+ rc = efx_mcdi_mac_stats(enp, NULL, EFX_STATS_DISABLE, 0);
else if (events)
- rc = efx_mcdi_mac_stats(enp, esmp, EFX_STATS_ENABLE_EVENTS);
+ rc = efx_mcdi_mac_stats(enp, esmp, EFX_STATS_ENABLE_EVENTS,
+ period_ms);
else
- rc = efx_mcdi_mac_stats(enp, esmp, EFX_STATS_ENABLE_NOEVENTS);
+ rc = efx_mcdi_mac_stats(enp, esmp, EFX_STATS_ENABLE_NOEVENTS,
+ period_ms);
if (rc != 0)
goto fail1;
diff --git a/sys/dev/sfxge/common/efx_mcdi.h b/sys/dev/sfxge/common/efx_mcdi.h
index ffa50f1..ee11789 100644
--- a/sys/dev/sfxge/common/efx_mcdi.h
+++ b/sys/dev/sfxge/common/efx_mcdi.h
@@ -218,7 +218,7 @@ extern __checkReturn efx_rc_t
efx_mcdi_mac_stats_periodic(
__in efx_nic_t *enp,
__in efsys_mem_t *esmp,
- __in uint16_t period,
+ __in uint16_t period_ms,
__in boolean_t events);
diff --git a/sys/dev/sfxge/sfxge.c b/sys/dev/sfxge/sfxge.c
index 8b9edb0..f91275e 100644
--- a/sys/dev/sfxge/sfxge.c
+++ b/sys/dev/sfxge/sfxge.c
@@ -94,14 +94,6 @@ SYSCTL_INT(_hw_sfxge, OID_AUTO, tx_ring, CTLFLAG_RDTUN,
&sfxge_tx_ring_entries, 0,
"Maximum number of descriptors in a transmit ring");
-#define SFXGE_PARAM_STATS_UPDATE_PERIOD SFXGE_PARAM(stats_update_period)
-static int sfxge_stats_update_period = SFXGE_CALLOUT_TICKS;
-TUNABLE_INT(SFXGE_PARAM_STATS_UPDATE_PERIOD,
- &sfxge_stats_update_period);
-SYSCTL_INT(_hw_sfxge, OID_AUTO, stats_update_period, CTLFLAG_RDTUN,
- &sfxge_stats_update_period, 0,
- "netstat interface statistics update period in ticks");
-
#define SFXGE_PARAM_RESTART_ATTEMPTS SFXGE_PARAM(restart_attempts)
static int sfxge_restart_attempts = 3;
TUNABLE_INT(SFXGE_PARAM_RESTART_ATTEMPTS, &sfxge_restart_attempts);
@@ -558,7 +550,8 @@ sfxge_tick(void *arg)
sfxge_port_update_stats(sc);
sfxge_tx_update_stats(sc);
- callout_reset(&sc->tick_callout, sfxge_stats_update_period,
+ callout_reset(&sc->tick_callout,
+ hz * sc->port.stats_update_period_ms / 1000,
sfxge_tick, sc);
}
@@ -623,7 +616,8 @@ sfxge_ifnet_init(struct ifnet *ifp, struct sfxge_softc *sc)
if ((rc = sfxge_port_ifmedia_init(sc)) != 0)
goto fail;
- callout_reset(&sc->tick_callout, sfxge_stats_update_period,
+ callout_reset(&sc->tick_callout,
+ hz * sc->port.stats_update_period_ms / 1000,
sfxge_tick, sc);
return (0);
diff --git a/sys/dev/sfxge/sfxge.h b/sys/dev/sfxge/sfxge.h
index 62b1dc3..1929318 100644
--- a/sys/dev/sfxge/sfxge.h
+++ b/sys/dev/sfxge/sfxge.h
@@ -158,7 +158,7 @@ enum sfxge_evq_state {
#define SFXGE_EV_BATCH 16384
-#define SFXGE_CALLOUT_TICKS 100
+#define SFXGE_STATS_UPDATE_PERIOD_MS 1000
struct sfxge_evq {
/* Structure members below are sorted by usage order */
@@ -247,6 +247,7 @@ struct sfxge_port {
#endif
struct sfxge_hw_stats phy_stats;
struct sfxge_hw_stats mac_stats;
+ uint16_t stats_update_period_ms;
efx_link_mode_t link_mode;
uint8_t mcast_addrs[EFX_MAC_MULTICAST_LIST_MAX *
EFX_MAC_ADDR_LEN];
diff --git a/sys/dev/sfxge/sfxge_port.c b/sys/dev/sfxge/sfxge_port.c
index 6e753a2..edc88ec 100644
--- a/sys/dev/sfxge/sfxge_port.c
+++ b/sys/dev/sfxge/sfxge_port.c
@@ -43,6 +43,15 @@ __FBSDID("$FreeBSD$");
#include "sfxge.h"
+#define SFXGE_PARAM_STATS_UPDATE_PERIOD_MS \
+ SFXGE_PARAM(stats_update_period_ms)
+static int sfxge_stats_update_period_ms = SFXGE_STATS_UPDATE_PERIOD_MS;
+TUNABLE_INT(SFXGE_PARAM_STATS_UPDATE_PERIOD_MS,
+ &sfxge_stats_update_period_ms);
+SYSCTL_INT(_hw_sfxge, OID_AUTO, stats_update_period_ms, CTLFLAG_RDTUN,
+ &sfxge_stats_update_period_ms, 0,
+ "netstat interface statistics update period in milliseconds");
+
static int sfxge_phy_cap_mask(struct sfxge_softc *, int, uint32_t *);
static int
@@ -51,6 +60,7 @@ sfxge_mac_stat_update(struct sfxge_softc *sc)
struct sfxge_port *port = &sc->port;
efsys_mem_t *esmp = &(port->mac_stats.dma_buf);
clock_t now;
+ unsigned int min_ticks;
unsigned int count;
int rc;
@@ -61,8 +71,10 @@ sfxge_mac_stat_update(struct sfxge_softc *sc)
goto out;
}
+ min_ticks = (unsigned int)hz * port->stats_update_period_ms / 1000;
+
now = ticks;
- if ((unsigned int)(now - port->mac_stats.update_time) < (unsigned int)hz) {
+ if ((unsigned int)(now - port->mac_stats.update_time) < min_ticks) {
rc = 0;
goto out;
}
@@ -483,9 +495,10 @@ sfxge_port_start(struct sfxge_softc *sc)
sfxge_mac_filter_set_locked(sc);
- /* Update MAC stats by DMA every second */
+ /* Update MAC stats by DMA every period */
if ((rc = efx_mac_stats_periodic(enp, &port->mac_stats.dma_buf,
- 1000, B_FALSE)) != 0)
+ port->stats_update_period_ms,
+ B_FALSE)) != 0)
goto fail6;
if ((rc = efx_mac_drain(enp, B_FALSE)) != 0)
@@ -642,6 +655,68 @@ sfxge_port_fini(struct sfxge_softc *sc)
port->sc = NULL;
}
+static uint16_t
+sfxge_port_stats_update_period_ms(struct sfxge_softc *sc)
+{
+ int period_ms = sfxge_stats_update_period_ms;
+
+ if (period_ms < 0) {
+ device_printf(sc->dev,
+ "treat negative stats update period %d as 0 (disable)\n",
+ period_ms);
+ period_ms = 0;
+ } else if (period_ms > UINT16_MAX) {
+ device_printf(sc->dev,
+ "treat too big stats update period %d as %u\n",
+ period_ms, UINT16_MAX);
+ period_ms = UINT16_MAX;
+ }
+
+ return period_ms;
+}
+
+static int
+sfxge_port_stats_update_period_ms_handler(SYSCTL_HANDLER_ARGS)
+{
+ struct sfxge_softc *sc;
+ struct sfxge_port *port;
+ unsigned int period_ms;
+ int error;
+
+ sc = arg1;
+ port = &sc->port;
+
+ if (req->newptr != NULL) {
+ error = SYSCTL_IN(req, &period_ms, sizeof(period_ms));
+ if (error != 0)
+ return (error);
+
+ if (period_ms > UINT16_MAX)
+ return (EINVAL);
+
+ SFXGE_PORT_LOCK(port);
+
+ if (port->stats_update_period_ms != period_ms) {
+ if (port->init_state == SFXGE_PORT_STARTED)
+ error = efx_mac_stats_periodic(sc->enp,
+ &port->mac_stats.dma_buf,
+ period_ms, B_FALSE);
+ if (error == 0)
+ port->stats_update_period_ms = period_ms;
+ }
+
+ SFXGE_PORT_UNLOCK(port);
+ } else {
+ SFXGE_PORT_LOCK(port);
+ period_ms = port->stats_update_period_ms;
+ SFXGE_PORT_UNLOCK(port);
+
+ error = SYSCTL_OUT(req, &period_ms, sizeof(period_ms));
+ }
+
+ return (error);
+}
+
int
sfxge_port_init(struct sfxge_softc *sc)
{
@@ -690,8 +765,14 @@ sfxge_port_init(struct sfxge_softc *sc)
M_SFXGE, M_WAITOK | M_ZERO);
if ((rc = sfxge_dma_alloc(sc, EFX_MAC_STATS_SIZE, mac_stats_buf)) != 0)
goto fail2;
+ port->stats_update_period_ms = sfxge_port_stats_update_period_ms(sc);
sfxge_mac_stat_init(sc);
+ SYSCTL_ADD_PROC(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
+ "stats_update_period_ms", CTLTYPE_UINT|CTLFLAG_RW, sc, 0,
+ sfxge_port_stats_update_period_ms_handler, "IU",
+ "interface statistics refresh period");
+
port->init_state = SFXGE_PORT_INITIALIZED;
DBGPRINT(sc->dev, "success");
diff --git a/sys/dev/sfxge/sfxge_tx.c b/sys/dev/sfxge/sfxge_tx.c
index cc6fb17..d8ccbf1 100644
--- a/sys/dev/sfxge/sfxge_tx.c
+++ b/sys/dev/sfxge/sfxge_tx.c
@@ -356,8 +356,22 @@ static int sfxge_tx_queue_mbuf(struct sfxge_txq *txq, struct mbuf *mbuf)
KASSERT(!txq->blocked, ("txq->blocked"));
+#if SFXGE_TX_PARSE_EARLY
+ /*
+ * If software TSO is used, we still need to copy packet header,
+ * even if we have already parsed it early before enqueue.
+ */
+ if ((mbuf->m_pkthdr.csum_flags & CSUM_TSO) &&
+ (txq->tso_fw_assisted == 0))
+ prefetch_read_many(mbuf->m_data);
+#else
+ /*
+ * Prefetch packet header since we need to parse it and extract
+ * IP ID, TCP sequence number and flags.
+ */
if (mbuf->m_pkthdr.csum_flags & CSUM_TSO)
prefetch_read_many(mbuf->m_data);
+#endif
if (__predict_false(txq->init_state != SFXGE_TXQ_STARTED)) {
rc = EINTR;
diff --git a/sys/dev/sound/pci/als4000.c b/sys/dev/sound/pci/als4000.c
index 0cd51e3..404a58f 100644
--- a/sys/dev/sound/pci/als4000.c
+++ b/sys/dev/sound/pci/als4000.c
@@ -760,8 +760,8 @@ static int
als_resource_grab(device_t dev, struct sc_info *sc)
{
sc->regid = PCIR_BAR(0);
- sc->reg = bus_alloc_resource(dev, SYS_RES_IOPORT, &sc->regid, 0, ~0,
- ALS_CONFIG_SPACE_BYTES, RF_ACTIVE);
+ sc->reg = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &sc->regid,
+ RF_ACTIVE);
if (sc->reg == 0) {
device_printf(dev, "unable to allocate register space\n");
goto bad;
diff --git a/sys/dev/sound/pci/cs4281.c b/sys/dev/sound/pci/cs4281.c
index 6e1b17d..60e89e3 100644
--- a/sys/dev/sound/pci/cs4281.c
+++ b/sys/dev/sound/pci/cs4281.c
@@ -790,12 +790,11 @@ cs4281_pci_attach(device_t dev)
sc->regid = PCIR_BAR(0);
sc->regtype = SYS_RES_MEMORY;
- sc->reg = bus_alloc_resource(dev, sc->regtype, &sc->regid,
- 0, ~0, CS4281PCI_BA0_SIZE, RF_ACTIVE);
+ sc->reg = bus_alloc_resource_any(dev, sc->regtype, &sc->regid, RF_ACTIVE);
if (!sc->reg) {
sc->regtype = SYS_RES_IOPORT;
- sc->reg = bus_alloc_resource(dev, sc->regtype, &sc->regid,
- 0, ~0, CS4281PCI_BA0_SIZE, RF_ACTIVE);
+ sc->reg = bus_alloc_resource_any(dev, sc->regtype, &sc->regid,
+ RF_ACTIVE);
if (!sc->reg) {
device_printf(dev, "unable to allocate register space\n");
goto bad;
@@ -805,8 +804,8 @@ cs4281_pci_attach(device_t dev)
sc->sh = rman_get_bushandle(sc->reg);
sc->memid = PCIR_BAR(1);
- sc->mem = bus_alloc_resource(dev, SYS_RES_MEMORY, &sc->memid, 0,
- ~0, CS4281PCI_BA1_SIZE, RF_ACTIVE);
+ sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->memid,
+ RF_ACTIVE);
if (sc->mem == NULL) {
device_printf(dev, "unable to allocate fifo space\n");
goto bad;
diff --git a/sys/dev/sound/pci/hda/hdaa_patches.c b/sys/dev/sound/pci/hda/hdaa_patches.c
index 245815d..11d8e43 100644
--- a/sys/dev/sound/pci/hda/hdaa_patches.c
+++ b/sys/dev/sound/pci/hda/hdaa_patches.c
@@ -714,6 +714,15 @@ hdaa_patch_direct(struct hdaa_devinfo *devinfo)
hda_command(dev, HDA_CMD_12BIT(0, devinfo->nid,
0xf88, 0xc0));
break;
+ case HDA_CODEC_ALC1150:
+ if (subid == 0xd9781462) {
+ /* Too low volume on MSI H170 GAMING M3. */
+ hda_command(dev, HDA_CMD_SET_COEFF_INDEX(0, 0x20,
+ 0x07));
+ hda_command(dev, HDA_CMD_SET_PROCESSING_COEFF(0, 0x20,
+ 0x7cb));
+ }
+ break;
}
if (subid == APPLE_INTEL_MAC)
hda_command(dev, HDA_CMD_12BIT(0, devinfo->nid,
diff --git a/sys/dev/sound/pci/hda/hdac.h b/sys/dev/sound/pci/hda/hdac.h
index 912a996..55888f2 100644
--- a/sys/dev/sound/pci/hda/hdac.h
+++ b/sys/dev/sound/pci/hda/hdac.h
@@ -367,6 +367,7 @@
#define HDA_CODEC_ALC889 HDA_CODEC_CONSTRUCT(REALTEK, 0x0889)
#define HDA_CODEC_ALC892 HDA_CODEC_CONSTRUCT(REALTEK, 0x0892)
#define HDA_CODEC_ALC899 HDA_CODEC_CONSTRUCT(REALTEK, 0x0899)
+#define HDA_CODEC_ALC1150 HDA_CODEC_CONSTRUCT(REALTEK, 0x0900)
#define HDA_CODEC_ALCXXXX HDA_CODEC_CONSTRUCT(REALTEK, 0xffff)
/* Motorola */
diff --git a/sys/dev/sound/pci/hda/hdacc.c b/sys/dev/sound/pci/hda/hdacc.c
index c8d617e..d186a82 100644
--- a/sys/dev/sound/pci/hda/hdacc.c
+++ b/sys/dev/sound/pci/hda/hdacc.c
@@ -111,6 +111,7 @@ static const struct {
{ HDA_CODEC_ALC889, 0, "Realtek ALC889" },
{ HDA_CODEC_ALC892, 0, "Realtek ALC892" },
{ HDA_CODEC_ALC899, 0, "Realtek ALC899" },
+ { HDA_CODEC_ALC1150, 0, "Realtek ALC1150" },
{ HDA_CODEC_AD1882, 0, "Analog Devices AD1882" },
{ HDA_CODEC_AD1882A, 0, "Analog Devices AD1882A" },
{ HDA_CODEC_AD1883, 0, "Analog Devices AD1883" },
diff --git a/sys/dev/sound/pci/vibes.c b/sys/dev/sound/pci/vibes.c
index 733e0d8..acb0e0a 100644
--- a/sys/dev/sound/pci/vibes.c
+++ b/sys/dev/sound/pci/vibes.c
@@ -739,9 +739,8 @@ sv_attach(device_t dev) {
#endif
sc->enh_rid = SV_PCI_ENHANCED;
sc->enh_type = SYS_RES_IOPORT;
- sc->enh_reg = bus_alloc_resource(dev, sc->enh_type,
- &sc->enh_rid, 0, ~0,
- SV_PCI_ENHANCED_SIZE, RF_ACTIVE);
+ sc->enh_reg = bus_alloc_resource_any(dev, sc->enh_type,
+ &sc->enh_rid, RF_ACTIVE);
if (sc->enh_reg == NULL) {
device_printf(dev, "sv_attach: cannot allocate enh\n");
return ENXIO;
@@ -832,9 +831,8 @@ sv_attach(device_t dev) {
/* Cache resource short-cuts for dma_a */
sc->dmaa_rid = SV_PCI_DMAA;
sc->dmaa_type = SYS_RES_IOPORT;
- sc->dmaa_reg = bus_alloc_resource(dev, sc->dmaa_type,
- &sc->dmaa_rid, 0, ~0,
- SV_PCI_ENHANCED_SIZE, RF_ACTIVE);
+ sc->dmaa_reg = bus_alloc_resource_any(dev, sc->dmaa_type,
+ &sc->dmaa_rid, RF_ACTIVE);
if (sc->dmaa_reg == NULL) {
device_printf(dev, "sv_attach: cannot allocate dmaa\n");
goto fail;
@@ -851,9 +849,8 @@ sv_attach(device_t dev) {
/* Cache resource short-cuts for dma_c */
sc->dmac_rid = SV_PCI_DMAC;
sc->dmac_type = SYS_RES_IOPORT;
- sc->dmac_reg = bus_alloc_resource(dev, sc->dmac_type,
- &sc->dmac_rid, 0, ~0,
- SV_PCI_ENHANCED_SIZE, RF_ACTIVE);
+ sc->dmac_reg = bus_alloc_resource_any(dev, sc->dmac_type,
+ &sc->dmac_rid, RF_ACTIVE);
if (sc->dmac_reg == NULL) {
device_printf(dev, "sv_attach: cannot allocate dmac\n");
goto fail;
diff --git a/sys/dev/stge/if_stge.c b/sys/dev/stge/if_stge.c
index 3cc45b3..cad1a63 100644
--- a/sys/dev/stge/if_stge.c
+++ b/sys/dev/stge/if_stge.c
@@ -507,7 +507,7 @@ stge_attach(device_t dev)
}
}
- if ((error = stge_dma_alloc(sc) != 0))
+ if ((error = stge_dma_alloc(sc)) != 0)
goto fail;
/*
diff --git a/sys/dev/tws/tws.c b/sys/dev/tws/tws.c
index 6bb6dd1..73ab501 100644
--- a/sys/dev/tws/tws.c
+++ b/sys/dev/tws/tws.c
@@ -257,8 +257,8 @@ tws_attach(device_t dev)
#ifndef TWS_PULL_MODE_ENABLE
/* Allocate bus space for inbound mfa */
sc->mfa_res_id = TWS_PCI_BAR2; /* BAR2 offset */
- if ((sc->mfa_res = bus_alloc_resource(dev, SYS_RES_MEMORY,
- &(sc->mfa_res_id), 0, ~0, 0x100000, RF_ACTIVE))
+ if ((sc->mfa_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
+ &(sc->mfa_res_id), RF_ACTIVE))
== NULL) {
tws_log(sc, ALLOC_MEMORY_RES);
goto attach_fail_2;
diff --git a/sys/dev/usb/usb_hub.c b/sys/dev/usb/usb_hub.c
index 354e62d..7d74dd8 100644
--- a/sys/dev/usb/usb_hub.c
+++ b/sys/dev/usb/usb_hub.c
@@ -2272,6 +2272,11 @@ usb_needs_explore(struct usb_bus *bus, uint8_t do_probe)
DPRINTF("\n");
+ if (cold != 0) {
+ DPRINTF("Cold\n");
+ return;
+ }
+
if (bus == NULL) {
DPRINTF("No bus pointer!\n");
return;
@@ -2337,6 +2342,26 @@ usb_needs_explore_all(void)
}
/*------------------------------------------------------------------------*
+ * usb_needs_explore_init
+ *
+ * This function will ensure that the USB controllers are not enumerated
+ * until the "cold" variable is cleared.
+ *------------------------------------------------------------------------*/
+static void
+usb_needs_explore_init(void *arg)
+{
+ /*
+ * The cold variable should be cleared prior to this function
+ * being called:
+ */
+ if (cold == 0)
+ usb_needs_explore_all();
+ else
+ DPRINTFN(-1, "Cold variable is still set!\n");
+}
+SYSINIT(usb_needs_explore_init, SI_SUB_KICK_SCHEDULER, SI_ORDER_SECOND, usb_needs_explore_init, NULL);
+
+/*------------------------------------------------------------------------*
* usb_bus_power_update
*
* This function will ensure that all USB devices on the given bus are
diff --git a/sys/dev/usb/usb_process.c b/sys/dev/usb/usb_process.c
index e70166c..0f17485 100644
--- a/sys/dev/usb/usb_process.c
+++ b/sys/dev/usb/usb_process.c
@@ -455,14 +455,15 @@ usb_proc_drain(struct usb_process *up)
up->up_csleep = 0;
cv_signal(&up->up_cv);
}
+#ifndef EARLY_AP_STARTUP
/* Check if we are still cold booted */
-
if (cold) {
USB_THREAD_SUSPEND(up->up_ptr);
printf("WARNING: A USB process has "
"been left suspended\n");
break;
}
+#endif
cv_wait(&up->up_cv, up->up_mtx);
}
/* Check if someone is waiting - should not happen */
diff --git a/sys/dev/vte/if_vte.c b/sys/dev/vte/if_vte.c
index dfc06e5..5d06d4f 100644
--- a/sys/dev/vte/if_vte.c
+++ b/sys/dev/vte/if_vte.c
@@ -426,7 +426,7 @@ vte_attach(device_t dev)
/* Reset the ethernet controller. */
vte_reset(sc);
- if ((error = vte_dma_alloc(sc) != 0))
+ if ((error = vte_dma_alloc(sc)) != 0)
goto fail;
/* Create device sysctl node. */
diff --git a/sys/fs/nfsserver/nfs_nfsdport.c b/sys/fs/nfsserver/nfs_nfsdport.c
index cdbda07..a5029ae 100644
--- a/sys/fs/nfsserver/nfs_nfsdport.c
+++ b/sys/fs/nfsserver/nfs_nfsdport.c
@@ -2013,25 +2013,17 @@ again:
}
/*
- * Check to see if entries in this directory can be safely acquired
- * via VFS_VGET() or if a switch to VOP_LOOKUP() is required.
- * ZFS snapshot directories need VOP_LOOKUP(), so that any
- * automount of the snapshot directory that is required will
- * be done.
- * This needs to be done here for NFSv4, since NFSv4 never does
- * a VFS_VGET() for "." or "..".
+ * For now ZFS requires VOP_LOOKUP as a workaround. Until ino_t is changed
+ * to a 64-bit type, a ZFS filesystem with over 1 billion files in it
+ * will suffer from 64-bit -> 32-bit truncation.
*/
- if (is_zfs == 1) {
- r = VFS_VGET(mp, at.na_fileid, LK_SHARED, &nvp);
- if (r == EOPNOTSUPP) {
- usevget = 0;
- cn.cn_nameiop = LOOKUP;
- cn.cn_lkflags = LK_SHARED | LK_RETRY;
- cn.cn_cred = nd->nd_cred;
- cn.cn_thread = p;
- } else if (r == 0)
- vput(nvp);
- }
+ if (is_zfs == 1)
+ usevget = 0;
+
+ cn.cn_nameiop = LOOKUP;
+ cn.cn_lkflags = LK_SHARED | LK_RETRY;
+ cn.cn_cred = nd->nd_cred;
+ cn.cn_thread = p;
/*
* Save this position, in case there is an error before one entry
@@ -2100,16 +2092,7 @@ again:
else
r = EOPNOTSUPP;
if (r == EOPNOTSUPP) {
- if (usevget) {
- usevget = 0;
- cn.cn_nameiop = LOOKUP;
- cn.cn_lkflags =
- LK_SHARED |
- LK_RETRY;
- cn.cn_cred =
- nd->nd_cred;
- cn.cn_thread = p;
- }
+ usevget = 0;
cn.cn_nameptr = dp->d_name;
cn.cn_namelen = nlen;
cn.cn_flags = ISLASTCN |
diff --git a/sys/fs/pseudofs/pseudofs_vncache.c b/sys/fs/pseudofs/pseudofs_vncache.c
index 1bec5a4..093d805 100644
--- a/sys/fs/pseudofs/pseudofs_vncache.c
+++ b/sys/fs/pseudofs/pseudofs_vncache.c
@@ -51,6 +51,7 @@ static struct mtx pfs_vncache_mutex;
static struct pfs_vdata *pfs_vncache;
static eventhandler_tag pfs_exit_tag;
static void pfs_exit(void *arg, struct proc *p);
+static void pfs_purge_locked(struct pfs_node *pn, bool force);
static SYSCTL_NODE(_vfs_pfs, OID_AUTO, vncache, CTLFLAG_RW, 0,
"pseudofs vnode cache");
@@ -97,6 +98,9 @@ pfs_vncache_unload(void)
{
EVENTHANDLER_DEREGISTER(process_exit, pfs_exit_tag);
+ mtx_lock(&pfs_vncache_mutex);
+ pfs_purge_locked(NULL, true);
+ mtx_unlock(&pfs_vncache_mutex);
KASSERT(pfs_vncache_entries == 0,
("%d vncache entries remaining", pfs_vncache_entries));
mtx_destroy(&pfs_vncache_mutex);
@@ -272,7 +276,7 @@ pfs_vncache_free(struct vnode *vp)
* used to implement the cache.
*/
static void
-pfs_purge_locked(struct pfs_node *pn)
+pfs_purge_locked(struct pfs_node *pn, bool force)
{
struct pfs_vdata *pvd;
struct vnode *vnp;
@@ -280,7 +284,8 @@ pfs_purge_locked(struct pfs_node *pn)
mtx_assert(&pfs_vncache_mutex, MA_OWNED);
pvd = pfs_vncache;
while (pvd != NULL) {
- if (pvd->pvd_dead || (pn != NULL && pvd->pvd_pn == pn)) {
+ if (force || pvd->pvd_dead ||
+ (pn != NULL && pvd->pvd_pn == pn)) {
vnp = pvd->pvd_vnode;
vhold(vnp);
mtx_unlock(&pfs_vncache_mutex);
@@ -301,7 +306,7 @@ pfs_purge(struct pfs_node *pn)
{
mtx_lock(&pfs_vncache_mutex);
- pfs_purge_locked(pn);
+ pfs_purge_locked(pn, false);
mtx_unlock(&pfs_vncache_mutex);
}
@@ -321,6 +326,6 @@ pfs_exit(void *arg, struct proc *p)
if (pvd->pvd_pid == p->p_pid)
dead = pvd->pvd_dead = 1;
if (dead)
- pfs_purge_locked(NULL);
+ pfs_purge_locked(NULL, false);
mtx_unlock(&pfs_vncache_mutex);
}
diff --git a/sys/fs/tmpfs/tmpfs.h b/sys/fs/tmpfs/tmpfs.h
index b077489..37e5bbb 100644
--- a/sys/fs/tmpfs/tmpfs.h
+++ b/sys/fs/tmpfs/tmpfs.h
@@ -80,8 +80,10 @@ struct tmpfs_dirent {
uint32_t td_hash;
u_int td_namelen;
- /* Pointer to the node this entry refers to. In case this field
- * is NULL, the node is a whiteout. */
+ /*
+ * Pointer to the node this entry refers to. In case this field
+ * is NULL, the node is a whiteout.
+ */
struct tmpfs_node * td_node;
union {
@@ -94,21 +96,24 @@ struct tmpfs_dirent {
} ud;
};
-/* A directory in tmpfs holds a list of directory entries, which in
- * turn point to other files (which can be directories themselves).
+/*
+ * A directory in tmpfs holds a collection of directory entries, which
+ * in turn point to other files (which can be directories themselves).
*
- * In tmpfs, this list is managed by a RB-Tree, whose head is defined by
- * the struct tmpfs_dir type.
+ * In tmpfs, this collection is managed by a RB-Tree, whose head is
+ * defined by the struct tmpfs_dir type.
*
* It is important to notice that directories do not have entries for . and
* .. as other file systems do. These can be generated when requested
* based on information available by other means, such as the pointer to
* the node itself in the former case or the pointer to the parent directory
* in the latter case. This is done to simplify tmpfs's code and, more
- * importantly, to remove redundancy. */
+ * importantly, to remove redundancy.
+ */
RB_HEAD(tmpfs_dir, tmpfs_dirent);
-/* Each entry in a directory has a cookie that identifies it. Cookies
+/*
+ * Each entry in a directory has a cookie that identifies it. Cookies
* supersede offsets within directories because, given how tmpfs stores
* directories in memory, there is no such thing as an offset.
*
@@ -139,51 +144,67 @@ RB_HEAD(tmpfs_dir, tmpfs_dirent);
* a particular type. The code must be careful to only access those
* attributes that are actually allowed by the node's type.
*
- *
* Below is the key of locks used to protected the fields in the following
* structures.
- *
+ * (v) vnode lock in exclusive mode
+ * (vi) vnode lock in exclusive mode, or vnode lock in shared mode and
+ * tn_interlock
+ * (i) tn_interlock
+ * (m) tmpfs_mount tm_allnode_lock
+ * (c) stable after creation
*/
struct tmpfs_node {
- /* Doubly-linked list entry which links all existing nodes for a
- * single file system. This is provided to ease the removal of
- * all nodes during the unmount operation. */
- LIST_ENTRY(tmpfs_node) tn_entries;
+ /*
+ * Doubly-linked list entry which links all existing nodes for
+ * a single file system. This is provided to ease the removal
+ * of all nodes during the unmount operation, and to support
+ * the implementation of VOP_VNTOCNP(). tn_attached is false
+ * when the node is removed from list and unlocked.
+ */
+ LIST_ENTRY(tmpfs_node) tn_entries; /* (m) */
+ bool tn_attached; /* (m) */
- /* The node's type. Any of 'VBLK', 'VCHR', 'VDIR', 'VFIFO',
+ /*
+ * The node's type. Any of 'VBLK', 'VCHR', 'VDIR', 'VFIFO',
* 'VLNK', 'VREG' and 'VSOCK' is allowed. The usage of vnode
* types instead of a custom enumeration is to make things simpler
- * and faster, as we do not need to convert between two types. */
- enum vtype tn_type;
+ * and faster, as we do not need to convert between two types.
+ */
+ enum vtype tn_type; /* (c) */
/* Node identifier. */
- ino_t tn_id;
+ ino_t tn_id; /* (c) */
- /* Node's internal status. This is used by several file system
+ /*
+ * Node's internal status. This is used by several file system
* operations to do modifications to the node in a delayed
- * fashion. */
- int tn_status;
+ * fashion.
+ */
+ int tn_status; /* (vi) */
#define TMPFS_NODE_ACCESSED (1 << 1)
#define TMPFS_NODE_MODIFIED (1 << 2)
#define TMPFS_NODE_CHANGED (1 << 3)
- /* The node size. It does not necessarily match the real amount
- * of memory consumed by it. */
- off_t tn_size;
+ /*
+ * The node size. It does not necessarily match the real amount
+ * of memory consumed by it.
+ */
+ off_t tn_size; /* (v) */
/* Generic node attributes. */
- uid_t tn_uid;
- gid_t tn_gid;
- mode_t tn_mode;
- u_long tn_flags;
- nlink_t tn_links;
- struct timespec tn_atime;
- struct timespec tn_mtime;
- struct timespec tn_ctime;
- struct timespec tn_birthtime;
- unsigned long tn_gen;
-
- /* As there is a single vnode for each active file within the
+ uid_t tn_uid; /* (v) */
+ gid_t tn_gid; /* (v) */
+ mode_t tn_mode; /* (v) */
+ u_long tn_flags; /* (v) */
+ nlink_t tn_links; /* (v) */
+ struct timespec tn_atime; /* (vi) */
+ struct timespec tn_mtime; /* (vi) */
+ struct timespec tn_ctime; /* (vi) */
+ struct timespec tn_birthtime; /* (v) */
+ unsigned long tn_gen; /* (c) */
+
+ /*
+ * As there is a single vnode for each active file within the
* system, care has to be taken to avoid allocating more than one
* vnode per file. In order to do this, a bidirectional association
* is kept between vnodes and nodes.
@@ -196,74 +217,84 @@ struct tmpfs_node {
* tn_vnode.
*
* May be NULL when the node is unused (that is, no vnode has been
- * allocated for it or it has been reclaimed). */
- struct vnode * tn_vnode;
+ * allocated for it or it has been reclaimed).
+ */
+ struct vnode * tn_vnode; /* (i) */
- /* interlock to protect tn_vpstate */
+ /*
+ * Interlock to protect tn_vpstate, and tn_status under shared
+ * vnode lock.
+ */
struct mtx tn_interlock;
- /* Identify if current node has vnode assiocate with
+ /*
+ * Identify if current node has vnode associated with
* or allocating vnode.
*/
- int tn_vpstate;
+ int tn_vpstate; /* (i) */
+
+ /* Transient refcounter on this node. */
+ u_int tn_refcount; /* (m) + (i) */
/* misc data field for different tn_type node */
union {
/* Valid when tn_type == VBLK || tn_type == VCHR. */
- dev_t tn_rdev;
+ dev_t tn_rdev; /* (c) */
/* Valid when tn_type == VDIR. */
struct tn_dir {
- /* Pointer to the parent directory. The root
+ /*
+ * Pointer to the parent directory. The root
* directory has a pointer to itself in this field;
- * this property identifies the root node. */
+ * this property identifies the root node.
+ */
struct tmpfs_node * tn_parent;
- /* Head of a tree that links the contents of
- * the directory together. */
+ /*
+ * Head of a tree that links the contents of
+ * the directory together.
+ */
struct tmpfs_dir tn_dirhead;
- /* Head of a list the contains fake directory entries
+ /*
+ * Head of a list that contains fake directory entries
* heads, i.e. entries with TMPFS_DIRCOOKIE_DUPHEAD
- * flag. */
+ * flag.
+ */
struct tmpfs_dir_duphead tn_dupindex;
- /* Number and pointer of the first directory entry
+ /*
+ * Number and pointer of the first directory entry
* returned by the readdir operation if it were
* called again to continue reading data from the
* same directory as before. This is used to speed
* up reads of long directories, assuming that no
* more than one read is in progress at a given time.
- * Otherwise, these values are discarded. */
+ * Otherwise, these values are discarded.
+ */
off_t tn_readdir_lastn;
struct tmpfs_dirent * tn_readdir_lastp;
} tn_dir;
/* Valid when tn_type == VLNK. */
/* The link's target, allocated from a string pool. */
- char * tn_link;
+ char * tn_link; /* (c) */
/* Valid when tn_type == VREG. */
struct tn_reg {
- /* The contents of regular files stored in a tmpfs
- * file system are represented by a single anonymous
- * memory object (aobj, for short). The aobj provides
- * direct access to any position within the file,
- * because its contents are always mapped in a
- * contiguous region of virtual memory. It is a task
- * of the memory management subsystem (see uvm(9)) to
- * issue the required page ins or page outs whenever
- * a position within the file is accessed. */
- vm_object_t tn_aobj;
-
- }tn_reg;
-
- /* Valid when tn_type = VFIFO */
- struct tn_fifo {
- fo_rdwr_t *tn_fo_read;
- fo_rdwr_t *tn_fo_write;
- }tn_fifo;
- }tn_spec;
+ /*
+ * The contents of regular files stored in a
+ * tmpfs file system are represented by a
+ * single anonymous memory object (aobj, for
+ * short). The aobj provides direct access to
+ * any position within the file. It is a task
+ * of the memory management subsystem to issue
+ * the required page ins or page outs whenever
+ * a position within the file is accessed.
+ */
+ vm_object_t tn_aobj; /* (c) */
+ } tn_reg;
+ } tn_spec; /* (v) */
};
LIST_HEAD(tmpfs_node_list, tmpfs_node);
@@ -281,21 +312,12 @@ LIST_HEAD(tmpfs_node_list, tmpfs_node);
#ifdef INVARIANTS
#define TMPFS_ASSERT_LOCKED(node) do { \
- MPASS(node != NULL); \
- MPASS(node->tn_vnode != NULL); \
- if (!VOP_ISLOCKED(node->tn_vnode) && \
- !mtx_owned(TMPFS_NODE_MTX(node))) \
- panic("tmpfs: node is not locked: %p", node); \
- } while (0)
-#define TMPFS_ASSERT_ELOCKED(node) do { \
MPASS((node) != NULL); \
MPASS((node)->tn_vnode != NULL); \
- mtx_assert(TMPFS_NODE_MTX(node), MA_OWNED); \
- ASSERT_VOP_LOCKED((node)->tn_vnode, "tmpfs"); \
+ ASSERT_VOP_LOCKED((node)->tn_vnode, "tmpfs assert"); \
} while (0)
#else
#define TMPFS_ASSERT_LOCKED(node) (void)0
-#define TMPFS_ASSERT_ELOCKED(node) (void)0
#endif
#define TMPFS_VNODE_ALLOCATING 1
@@ -307,26 +329,32 @@ LIST_HEAD(tmpfs_node_list, tmpfs_node);
* Internal representation of a tmpfs mount point.
*/
struct tmpfs_mount {
- /* Maximum number of memory pages available for use by the file
+ /*
+ * Maximum number of memory pages available for use by the file
* system, set during mount time. This variable must never be
* used directly as it may be bigger than the current amount of
- * free memory; in the extreme case, it will hold the SIZE_MAX
- * value. */
- size_t tm_pages_max;
+ * free memory; in the extreme case, it will hold the ULONG_MAX
+ * value.
+ */
+ u_long tm_pages_max;
/* Number of pages in use by the file system. */
- size_t tm_pages_used;
+ u_long tm_pages_used;
- /* Pointer to the node representing the root directory of this
- * file system. */
+ /*
+ * Pointer to the node representing the root directory of this
+ * file system.
+ */
struct tmpfs_node * tm_root;
- /* Maximum number of possible nodes for this file system; set
+ /*
+ * Maximum number of possible nodes for this file system; set
* during mount time. We need a hard limit on the maximum number
* of nodes to avoid allocating too much of them; their objects
* cannot be released until the file system is unmounted.
* Otherwise, we could easily run out of memory by creating lots
- * of empty files and then simply removing them. */
+ * of empty files and then simply removing them.
+ */
ino_t tm_nodes_max;
/* unrhdr used to allocate inode numbers */
@@ -335,38 +363,33 @@ struct tmpfs_mount {
/* Number of nodes currently that are in use. */
ino_t tm_nodes_inuse;
+ /* Refcounter on this struct tmpfs_mount. */
+ uint64_t tm_refcount;
+
/* maximum representable file size */
u_int64_t tm_maxfilesize;
- /* Nodes are organized in two different lists. The used list
- * contains all nodes that are currently used by the file system;
- * i.e., they refer to existing files. The available list contains
- * all nodes that are currently available for use by new files.
- * Nodes must be kept in this list (instead of deleting them)
- * because we need to keep track of their generation number (tn_gen
- * field).
- *
- * Note that nodes are lazily allocated: if the available list is
- * empty and we have enough space to create more nodes, they will be
- * created and inserted in the used list. Once these are released,
- * they will go into the available list, remaining alive until the
- * file system is unmounted. */
+ /*
+ * The used list contains all nodes that are currently used by
+ * the file system; i.e., they refer to existing files.
+ */
struct tmpfs_node_list tm_nodes_used;
- /* All node lock to protect the node list and tmp_pages_used */
- struct mtx allnode_lock;
+ /* All node lock to protect the node list and tmp_pages_used. */
+ struct mtx tm_allnode_lock;
- /* Pools used to store file system meta data. These are not shared
- * across several instances of tmpfs for the reasons described in
- * tmpfs_pool.c. */
+ /* Zones used to store file system meta data, per tmpfs mount. */
uma_zone_t tm_dirent_pool;
uma_zone_t tm_node_pool;
/* Read-only status. */
- int tm_ronly;
+ bool tm_ronly;
+ /* Do not use namecache. */
+ bool tm_nonc;
};
-#define TMPFS_LOCK(tm) mtx_lock(&(tm)->allnode_lock)
-#define TMPFS_UNLOCK(tm) mtx_unlock(&(tm)->allnode_lock)
+#define TMPFS_LOCK(tm) mtx_lock(&(tm)->tm_allnode_lock)
+#define TMPFS_UNLOCK(tm) mtx_unlock(&(tm)->tm_allnode_lock)
+#define TMPFS_MP_ASSERT_LOCKED(tm) mtx_assert(&(tm)->tm_allnode_lock, MA_OWNED)
/*
* This structure maps a file identifier to a tmpfs node. Used by the
@@ -379,15 +402,24 @@ struct tmpfs_fid {
unsigned long tf_gen;
};
+struct tmpfs_dir_cursor {
+ struct tmpfs_dirent *tdc_current;
+ struct tmpfs_dirent *tdc_tree;
+};
+
#ifdef _KERNEL
/*
* Prototypes for tmpfs_subr.c.
*/
+void tmpfs_ref_node(struct tmpfs_node *node);
+void tmpfs_ref_node_locked(struct tmpfs_node *node);
int tmpfs_alloc_node(struct mount *mp, struct tmpfs_mount *, enum vtype,
uid_t uid, gid_t gid, mode_t mode, struct tmpfs_node *,
char *, dev_t, struct tmpfs_node **);
void tmpfs_free_node(struct tmpfs_mount *, struct tmpfs_node *);
+bool tmpfs_free_node_locked(struct tmpfs_mount *, struct tmpfs_node *, bool);
+void tmpfs_free_tmp(struct tmpfs_mount *);
int tmpfs_alloc_dirent(struct tmpfs_mount *, struct tmpfs_node *,
const char *, u_int, struct tmpfs_dirent **);
void tmpfs_free_dirent(struct tmpfs_mount *, struct tmpfs_dirent *);
@@ -420,8 +452,13 @@ int tmpfs_chtimes(struct vnode *, struct vattr *, struct ucred *cred,
void tmpfs_itimes(struct vnode *, const struct timespec *,
const struct timespec *);
+void tmpfs_set_status(struct tmpfs_node *node, int status);
void tmpfs_update(struct vnode *);
int tmpfs_truncate(struct vnode *, off_t);
+struct tmpfs_dirent *tmpfs_dir_first(struct tmpfs_node *dnode,
+ struct tmpfs_dir_cursor *dc);
+struct tmpfs_dirent *tmpfs_dir_next(struct tmpfs_node *dnode,
+ struct tmpfs_dir_cursor *dc);
/*
* Convenience macros to simplify some logical expressions.
@@ -447,10 +484,6 @@ int tmpfs_truncate(struct vnode *, off_t);
} while (0)
/*
- * Memory management stuff.
- */
-
-/*
* Amount of memory pages to reserve for the system (e.g., to not use by
* tmpfs).
*/
@@ -467,37 +500,41 @@ size_t tmpfs_pages_used(struct tmpfs_mount *tmp);
* specific ones.
*/
-static inline
-struct tmpfs_mount *
+static inline struct tmpfs_mount *
VFS_TO_TMPFS(struct mount *mp)
{
struct tmpfs_mount *tmp;
- MPASS((mp) != NULL && (mp)->mnt_data != NULL);
- tmp = (struct tmpfs_mount *)(mp)->mnt_data;
- return tmp;
+ MPASS(mp != NULL && mp->mnt_data != NULL);
+ tmp = (struct tmpfs_mount *)mp->mnt_data;
+ return (tmp);
}
-static inline
-struct tmpfs_node *
+static inline struct tmpfs_node *
VP_TO_TMPFS_NODE(struct vnode *vp)
{
struct tmpfs_node *node;
- MPASS((vp) != NULL && (vp)->v_data != NULL);
+ MPASS(vp != NULL && vp->v_data != NULL);
node = (struct tmpfs_node *)vp->v_data;
- return node;
+ return (node);
}
-static inline
-struct tmpfs_node *
+static inline struct tmpfs_node *
VP_TO_TMPFS_DIR(struct vnode *vp)
{
struct tmpfs_node *node;
node = VP_TO_TMPFS_NODE(vp);
TMPFS_VALIDATE_DIR(node);
- return node;
+ return (node);
+}
+
+static inline bool
+tmpfs_use_nc(struct vnode *vp)
+{
+
+ return (!(VFS_TO_TMPFS(vp->v_mount)->tm_nonc));
}
#endif /* _FS_TMPFS_TMPFS_H_ */
diff --git a/sys/fs/tmpfs/tmpfs_fifoops.c b/sys/fs/tmpfs/tmpfs_fifoops.c
index 89ebe85..f1743db 100644
--- a/sys/fs/tmpfs/tmpfs_fifoops.c
+++ b/sys/fs/tmpfs/tmpfs_fifoops.c
@@ -49,35 +49,14 @@
#include <fs/tmpfs/tmpfs_vnops.h>
static int
-tmpfs_fifo_kqfilter(struct vop_kqfilter_args *ap)
-{
- struct vnode *vp;
- struct tmpfs_node *node;
-
- vp = ap->a_vp;
- node = VP_TO_TMPFS_NODE(vp);
-
- switch (ap->a_kn->kn_filter){
- case EVFILT_READ:
- node->tn_status |= TMPFS_NODE_ACCESSED;
- break;
- case EVFILT_WRITE:
- node->tn_status |= TMPFS_NODE_MODIFIED;
- break;
- }
-
- return fifo_specops.vop_kqfilter(ap);
-}
-
-static int
tmpfs_fifo_close(struct vop_close_args *v)
{
struct tmpfs_node *node;
- node = VP_TO_TMPFS_NODE(v->a_vp);
- node->tn_status |= TMPFS_NODE_ACCESSED;
+ node = VP_TO_TMPFS_NODE(v->a_vp);
+ tmpfs_set_status(node, TMPFS_NODE_ACCESSED);
tmpfs_update(v->a_vp);
- return fifo_specops.vop_close(v);
+ return (fifo_specops.vop_close(v));
}
/*
@@ -90,6 +69,5 @@ struct vop_vector tmpfs_fifoop_entries = {
.vop_access = tmpfs_access,
.vop_getattr = tmpfs_getattr,
.vop_setattr = tmpfs_setattr,
- .vop_kqfilter = tmpfs_fifo_kqfilter,
};
diff --git a/sys/fs/tmpfs/tmpfs_subr.c b/sys/fs/tmpfs/tmpfs_subr.c
index 9781b2c..816b3cc 100644
--- a/sys/fs/tmpfs/tmpfs_subr.c
+++ b/sys/fs/tmpfs/tmpfs_subr.c
@@ -61,11 +61,6 @@ __FBSDID("$FreeBSD$");
#include <fs/tmpfs/tmpfs_fifoops.h>
#include <fs/tmpfs/tmpfs_vnops.h>
-struct tmpfs_dir_cursor {
- struct tmpfs_dirent *tdc_current;
- struct tmpfs_dirent *tdc_tree;
-};
-
SYSCTL_NODE(_vfs, OID_AUTO, tmpfs, CTLFLAG_RW, 0, "tmpfs file system");
static long tmpfs_pages_reserved = TMPFS_PAGES_MINRESERVED;
@@ -129,13 +124,33 @@ tmpfs_pages_check_avail(struct tmpfs_mount *tmp, size_t req_pages)
if (tmpfs_mem_avail() < req_pages)
return (0);
- if (tmp->tm_pages_max != SIZE_MAX &&
+ if (tmp->tm_pages_max != ULONG_MAX &&
tmp->tm_pages_max < req_pages + tmpfs_pages_used(tmp))
return (0);
return (1);
}
+void
+tmpfs_ref_node(struct tmpfs_node *node)
+{
+
+ TMPFS_NODE_LOCK(node);
+ tmpfs_ref_node_locked(node);
+ TMPFS_NODE_UNLOCK(node);
+}
+
+void
+tmpfs_ref_node_locked(struct tmpfs_node *node)
+{
+
+ TMPFS_NODE_ASSERT_LOCKED(node);
+ KASSERT(node->tn_refcount > 0, ("node %p zero refcount", node));
+ KASSERT(node->tn_refcount < UINT_MAX, ("node %p refcount %u", node,
+ node->tn_refcount));
+ node->tn_refcount++;
+}
+
/*
* Allocates a new node of type 'type' inside the 'tmp' mount point, with
* its owner set to 'uid', its group to 'gid' and its mode set to 'mode',
@@ -198,8 +213,8 @@ tmpfs_alloc_node(struct mount *mp, struct tmpfs_mount *tmp, enum vtype type,
return (EBUSY);
}
- nnode = (struct tmpfs_node *)uma_zalloc_arg(
- tmp->tm_node_pool, tmp, M_WAITOK);
+ nnode = (struct tmpfs_node *)uma_zalloc_arg(tmp->tm_node_pool, tmp,
+ M_WAITOK);
/* Generic initialization. */
nnode->tn_type = type;
@@ -210,6 +225,7 @@ tmpfs_alloc_node(struct mount *mp, struct tmpfs_mount *tmp, enum vtype type,
nnode->tn_gid = gid;
nnode->tn_mode = mode;
nnode->tn_id = alloc_unr(tmp->tm_ino_unr);
+ nnode->tn_refcount = 1;
/* Type-specific initialization. */
switch (nnode->tn_type) {
@@ -257,58 +273,65 @@ tmpfs_alloc_node(struct mount *mp, struct tmpfs_mount *tmp, enum vtype type,
break;
default:
- panic("tmpfs_alloc_node: type %p %d", nnode, (int)nnode->tn_type);
+ panic("tmpfs_alloc_node: type %p %d", nnode,
+ (int)nnode->tn_type);
}
TMPFS_LOCK(tmp);
LIST_INSERT_HEAD(&tmp->tm_nodes_used, nnode, tn_entries);
+ nnode->tn_attached = true;
tmp->tm_nodes_inuse++;
+ tmp->tm_refcount++;
TMPFS_UNLOCK(tmp);
*node = nnode;
- return 0;
+ return (0);
}
/*
* Destroys the node pointed to by node from the file system 'tmp'.
- * If the node does not belong to the given mount point, the results are
- * unpredicted.
- *
- * If the node references a directory; no entries are allowed because
- * their removal could need a recursive algorithm, something forbidden in
- * kernel space. Furthermore, there is not need to provide such
- * functionality (recursive removal) because the only primitives offered
- * to the user are the removal of empty directories and the deletion of
- * individual files.
- *
- * Note that nodes are not really deleted; in fact, when a node has been
- * allocated, it cannot be deleted during the whole life of the file
- * system. Instead, they are moved to the available list and remain there
- * until reused.
+ * If the node references a directory, no entries are allowed.
*/
void
tmpfs_free_node(struct tmpfs_mount *tmp, struct tmpfs_node *node)
{
+
+ TMPFS_LOCK(tmp);
+ TMPFS_NODE_LOCK(node);
+ if (!tmpfs_free_node_locked(tmp, node, false)) {
+ TMPFS_NODE_UNLOCK(node);
+ TMPFS_UNLOCK(tmp);
+ }
+}
+
+bool
+tmpfs_free_node_locked(struct tmpfs_mount *tmp, struct tmpfs_node *node,
+ bool detach)
+{
vm_object_t uobj;
+ TMPFS_MP_ASSERT_LOCKED(tmp);
+ TMPFS_NODE_ASSERT_LOCKED(node);
+ KASSERT(node->tn_refcount > 0, ("node %p refcount zero", node));
+
+ node->tn_refcount--;
+ if (node->tn_attached && (detach || node->tn_refcount == 0)) {
+ MPASS(tmp->tm_nodes_inuse > 0);
+ tmp->tm_nodes_inuse--;
+ LIST_REMOVE(node, tn_entries);
+ node->tn_attached = false;
+ }
+ if (node->tn_refcount > 0)
+ return (false);
+
#ifdef INVARIANTS
- TMPFS_NODE_LOCK(node);
MPASS(node->tn_vnode == NULL);
MPASS((node->tn_vpstate & TMPFS_VNODE_ALLOCATING) == 0);
- TMPFS_NODE_UNLOCK(node);
#endif
-
- TMPFS_LOCK(tmp);
- LIST_REMOVE(node, tn_entries);
- tmp->tm_nodes_inuse--;
+ TMPFS_NODE_UNLOCK(node);
TMPFS_UNLOCK(tmp);
switch (node->tn_type) {
- case VNON:
- /* Do not do anything. VNON is provided to let the
- * allocation routine clean itself easily by avoiding
- * duplicating code in it. */
- /* FALLTHROUGH */
case VBLK:
/* FALLTHROUGH */
case VCHR:
@@ -327,9 +350,7 @@ tmpfs_free_node(struct tmpfs_mount *tmp, struct tmpfs_node *node)
case VREG:
uobj = node->tn_reg.tn_aobj;
if (uobj != NULL) {
- TMPFS_LOCK(tmp);
- tmp->tm_pages_used -= uobj->size;
- TMPFS_UNLOCK(tmp);
+ atomic_subtract_long(&tmp->tm_pages_used, uobj->size);
KASSERT((uobj->flags & OBJ_TMPFS) == 0,
("leaked OBJ_TMPFS node %p vm_obj %p", node, uobj));
vm_object_deallocate(uobj);
@@ -342,6 +363,9 @@ tmpfs_free_node(struct tmpfs_mount *tmp, struct tmpfs_node *node)
free_unr(tmp->tm_ino_unr, node->tn_id);
uma_zfree(tmp->tm_node_pool, node);
+ TMPFS_LOCK(tmp);
+ tmpfs_free_tmp(tmp);
+ return (true);
}
static __inline uint32_t
@@ -487,13 +511,16 @@ tmpfs_alloc_vp(struct mount *mp, struct tmpfs_node *node, int lkflag,
struct vnode **vpp)
{
struct vnode *vp;
+ struct tmpfs_mount *tm;
vm_object_t object;
int error;
error = 0;
-loop:
+ tm = VFS_TO_TMPFS(mp);
TMPFS_NODE_LOCK(node);
-loop1:
+ tmpfs_ref_node_locked(node);
+loop:
+ TMPFS_NODE_ASSERT_LOCKED(node);
if ((vp = node->tn_vnode) != NULL) {
MPASS((node->tn_vpstate & TMPFS_VNODE_DOOMED) == 0);
VI_LOCK(vp);
@@ -513,12 +540,14 @@ loop1:
msleep(&node->tn_vnode, TMPFS_NODE_MTX(node),
0, "tmpfsE", 0);
}
- goto loop1;
+ goto loop;
}
TMPFS_NODE_UNLOCK(node);
error = vget(vp, lkflag | LK_INTERLOCK, curthread);
- if (error == ENOENT)
+ if (error == ENOENT) {
+ TMPFS_NODE_LOCK(node);
goto loop;
+ }
if (error != 0) {
vp = NULL;
goto out;
@@ -530,6 +559,7 @@ loop1:
*/
if (node->tn_vnode == NULL || node->tn_vnode != vp) {
vput(vp);
+ TMPFS_NODE_LOCK(node);
goto loop;
}
@@ -551,11 +581,9 @@ loop1:
if (node->tn_vpstate & TMPFS_VNODE_ALLOCATING) {
node->tn_vpstate |= TMPFS_VNODE_WANT;
error = msleep((caddr_t) &node->tn_vpstate,
- TMPFS_NODE_MTX(node), PDROP | PCATCH,
- "tmpfs_alloc_vp", 0);
- if (error)
- return error;
-
+ TMPFS_NODE_MTX(node), 0, "tmpfs_alloc_vp", 0);
+ if (error != 0)
+ goto out;
goto loop;
} else
node->tn_vpstate |= TMPFS_VNODE_ALLOCATING;
@@ -563,7 +591,8 @@ loop1:
TMPFS_NODE_UNLOCK(node);
/* Get a new vnode and associate it with our node. */
- error = getnewvnode("tmpfs", mp, &tmpfs_vnodeop_entries, &vp);
+ error = getnewvnode("tmpfs", mp, VFS_TO_TMPFS(mp)->tm_nonc ?
+ &tmpfs_vnodeop_nonc_entries : &tmpfs_vnodeop_entries, &vp);
if (error != 0)
goto unlock;
MPASS(vp != NULL);
@@ -611,7 +640,7 @@ loop1:
VN_LOCK_ASHARE(vp);
error = insmntque1(vp, mp, tmpfs_insmntque_dtr, NULL);
- if (error)
+ if (error != 0)
vp = NULL;
unlock:
@@ -629,18 +658,19 @@ unlock:
TMPFS_NODE_UNLOCK(node);
out:
- *vpp = vp;
+ if (error == 0) {
+ *vpp = vp;
#ifdef INVARIANTS
- if (error == 0) {
MPASS(*vpp != NULL && VOP_ISLOCKED(*vpp));
TMPFS_NODE_LOCK(node);
MPASS(*vpp == node->tn_vnode);
TMPFS_NODE_UNLOCK(node);
- }
#endif
+ }
+ tmpfs_free_node(tm, node);
- return error;
+ return (error);
}
/*
@@ -683,7 +713,7 @@ tmpfs_alloc_file(struct vnode *dvp, struct vnode **vpp, struct vattr *vap,
struct tmpfs_node *node;
struct tmpfs_node *parent;
- MPASS(VOP_ISLOCKED(dvp));
+ ASSERT_VOP_ELOCKED(dvp, "tmpfs_alloc_file");
MPASS(cnp->cn_flags & HASBUF);
tmp = VFS_TO_TMPFS(dvp->v_mount);
@@ -708,8 +738,8 @@ tmpfs_alloc_file(struct vnode *dvp, struct vnode **vpp, struct vattr *vap,
/* Allocate a node that represents the new file. */
error = tmpfs_alloc_node(dvp->v_mount, tmp, vap->va_type,
- cnp->cn_cred->cr_uid,
- dnode->tn_gid, vap->va_mode, parent, target, vap->va_rdev, &node);
+ cnp->cn_cred->cr_uid, dnode->tn_gid, vap->va_mode, parent,
+ target, vap->va_rdev, &node);
if (error != 0)
return (error);
@@ -738,7 +768,7 @@ tmpfs_alloc_file(struct vnode *dvp, struct vnode **vpp, struct vattr *vap,
return (0);
}
-static struct tmpfs_dirent *
+struct tmpfs_dirent *
tmpfs_dir_first(struct tmpfs_node *dnode, struct tmpfs_dir_cursor *dc)
{
struct tmpfs_dirent *de;
@@ -752,7 +782,7 @@ tmpfs_dir_first(struct tmpfs_node *dnode, struct tmpfs_dir_cursor *dc)
return (dc->tdc_current);
}
-static struct tmpfs_dirent *
+struct tmpfs_dirent *
tmpfs_dir_next(struct tmpfs_node *dnode, struct tmpfs_dir_cursor *dc)
{
struct tmpfs_dirent *de;
@@ -1092,9 +1122,9 @@ tmpfs_dir_getdotdent(struct tmpfs_node *node, struct uio *uio)
else
error = uiomove(&dent, dent.d_reclen, uio);
- node->tn_status |= TMPFS_NODE_ACCESSED;
+ tmpfs_set_status(node, TMPFS_NODE_ACCESSED);
- return error;
+ return (error);
}
/*
@@ -1117,9 +1147,8 @@ tmpfs_dir_getdotdotdent(struct tmpfs_node *node, struct uio *uio)
* Return ENOENT if the current node is already removed.
*/
TMPFS_ASSERT_LOCKED(node);
- if (node->tn_dir.tn_parent == NULL) {
+ if (node->tn_dir.tn_parent == NULL)
return (ENOENT);
- }
TMPFS_NODE_LOCK(node->tn_dir.tn_parent);
dent.d_fileno = node->tn_dir.tn_parent->tn_id;
@@ -1137,9 +1166,9 @@ tmpfs_dir_getdotdotdent(struct tmpfs_node *node, struct uio *uio)
else
error = uiomove(&dent, dent.d_reclen, uio);
- node->tn_status |= TMPFS_NODE_ACCESSED;
+ tmpfs_set_status(node, TMPFS_NODE_ACCESSED);
- return error;
+ return (error);
}
/*
@@ -1282,7 +1311,7 @@ tmpfs_dir_getdents(struct tmpfs_node *node, struct uio *uio, int maxcookies,
node->tn_dir.tn_readdir_lastn = off;
node->tn_dir.tn_readdir_lastp = de;
- node->tn_status |= TMPFS_NODE_ACCESSED;
+ tmpfs_set_status(node, TMPFS_NODE_ACCESSED);
return error;
}
@@ -1413,9 +1442,7 @@ retry:
uobj->size = newpages;
VM_OBJECT_WUNLOCK(uobj);
- TMPFS_LOCK(tmp);
- tmp->tm_pages_used += (newpages - oldpages);
- TMPFS_UNLOCK(tmp);
+ atomic_add_long(&tmp->tm_pages_used, newpages - oldpages);
node->tn_size = newsize;
return (0);
@@ -1458,7 +1485,7 @@ tmpfs_chflags(struct vnode *vp, u_long flags, struct ucred *cred,
int error;
struct tmpfs_node *node;
- MPASS(VOP_ISLOCKED(vp));
+ ASSERT_VOP_ELOCKED(vp, "chflags");
node = VP_TO_TMPFS_NODE(vp);
@@ -1498,9 +1525,9 @@ tmpfs_chflags(struct vnode *vp, u_long flags, struct ucred *cred,
node->tn_flags = flags;
node->tn_status |= TMPFS_NODE_CHANGED;
- MPASS(VOP_ISLOCKED(vp));
+ ASSERT_VOP_ELOCKED(vp, "chflags2");
- return 0;
+ return (0);
}
/*
@@ -1514,7 +1541,7 @@ tmpfs_chmod(struct vnode *vp, mode_t mode, struct ucred *cred, struct thread *p)
int error;
struct tmpfs_node *node;
- MPASS(VOP_ISLOCKED(vp));
+ ASSERT_VOP_ELOCKED(vp, "chmod");
node = VP_TO_TMPFS_NODE(vp);
@@ -1554,9 +1581,9 @@ tmpfs_chmod(struct vnode *vp, mode_t mode, struct ucred *cred, struct thread *p)
node->tn_status |= TMPFS_NODE_CHANGED;
- MPASS(VOP_ISLOCKED(vp));
+ ASSERT_VOP_ELOCKED(vp, "chmod2");
- return 0;
+ return (0);
}
/*
@@ -1575,7 +1602,7 @@ tmpfs_chown(struct vnode *vp, uid_t uid, gid_t gid, struct ucred *cred,
uid_t ouid;
gid_t ogid;
- MPASS(VOP_ISLOCKED(vp));
+ ASSERT_VOP_ELOCKED(vp, "chown");
node = VP_TO_TMPFS_NODE(vp);
@@ -1625,9 +1652,9 @@ tmpfs_chown(struct vnode *vp, uid_t uid, gid_t gid, struct ucred *cred,
node->tn_mode &= ~(S_ISUID | S_ISGID);
}
- MPASS(VOP_ISLOCKED(vp));
+ ASSERT_VOP_ELOCKED(vp, "chown2");
- return 0;
+ return (0);
}
/*
@@ -1642,7 +1669,7 @@ tmpfs_chsize(struct vnode *vp, u_quad_t size, struct ucred *cred,
int error;
struct tmpfs_node *node;
- MPASS(VOP_ISLOCKED(vp));
+ ASSERT_VOP_ELOCKED(vp, "chsize");
node = VP_TO_TMPFS_NODE(vp);
@@ -1680,9 +1707,9 @@ tmpfs_chsize(struct vnode *vp, u_quad_t size, struct ucred *cred,
/* tmpfs_truncate will raise the NOTE_EXTEND and NOTE_ATTRIB kevents
* for us, as will update tn_status; no need to do that here. */
- MPASS(VOP_ISLOCKED(vp));
+ ASSERT_VOP_ELOCKED(vp, "chsize2");
- return error;
+ return (error);
}
/*
@@ -1697,7 +1724,7 @@ tmpfs_chtimes(struct vnode *vp, struct vattr *vap,
int error;
struct tmpfs_node *node;
- MPASS(VOP_ISLOCKED(vp));
+ ASSERT_VOP_ELOCKED(vp, "chtimes");
node = VP_TO_TMPFS_NODE(vp);
@@ -1728,9 +1755,20 @@ tmpfs_chtimes(struct vnode *vp, struct vattr *vap,
if (vap->va_birthtime.tv_nsec != VNOVAL &&
vap->va_birthtime.tv_nsec != VNOVAL)
node->tn_birthtime = vap->va_birthtime;
- MPASS(VOP_ISLOCKED(vp));
+ ASSERT_VOP_ELOCKED(vp, "chtimes2");
- return 0;
+ return (0);
+}
+
+void
+tmpfs_set_status(struct tmpfs_node *node, int status)
+{
+
+ if ((node->tn_status & status) == status)
+ return;
+ TMPFS_NODE_LOCK(node);
+ node->tn_status |= status;
+ TMPFS_NODE_UNLOCK(node);
}
/* Sync timestamps */
@@ -1741,6 +1779,7 @@ tmpfs_itimes(struct vnode *vp, const struct timespec *acc,
struct tmpfs_node *node;
struct timespec now;
+ ASSERT_VOP_LOCKED(vp, "tmpfs_itimes");
node = VP_TO_TMPFS_NODE(vp);
if ((node->tn_status & (TMPFS_NODE_ACCESSED | TMPFS_NODE_MODIFIED |
@@ -1748,6 +1787,7 @@ tmpfs_itimes(struct vnode *vp, const struct timespec *acc,
return;
vfs_timestamp(&now);
+ TMPFS_NODE_LOCK(node);
if (node->tn_status & TMPFS_NODE_ACCESSED) {
if (acc == NULL)
acc = &now;
@@ -1758,11 +1798,12 @@ tmpfs_itimes(struct vnode *vp, const struct timespec *acc,
mod = &now;
node->tn_mtime = *mod;
}
- if (node->tn_status & TMPFS_NODE_CHANGED) {
+ if (node->tn_status & TMPFS_NODE_CHANGED)
node->tn_ctime = now;
- }
- node->tn_status &=
- ~(TMPFS_NODE_ACCESSED | TMPFS_NODE_MODIFIED | TMPFS_NODE_CHANGED);
+ node->tn_status &= ~(TMPFS_NODE_ACCESSED | TMPFS_NODE_MODIFIED |
+ TMPFS_NODE_CHANGED);
+ TMPFS_NODE_UNLOCK(node);
+
}
void
@@ -1794,14 +1835,13 @@ tmpfs_truncate(struct vnode *vp, off_t length)
return (EFBIG);
error = tmpfs_reg_resize(vp, length, FALSE);
- if (error == 0) {
+ if (error == 0)
node->tn_status |= TMPFS_NODE_CHANGED | TMPFS_NODE_MODIFIED;
- }
out:
tmpfs_update(vp);
- return error;
+ return (error);
}
static __inline int
diff --git a/sys/fs/tmpfs/tmpfs_vfsops.c b/sys/fs/tmpfs/tmpfs_vfsops.c
index 7af143b..65a2a12 100644
--- a/sys/fs/tmpfs/tmpfs_vfsops.c
+++ b/sys/fs/tmpfs/tmpfs_vfsops.c
@@ -78,7 +78,7 @@ static int tmpfs_statfs(struct mount *, struct statfs *);
static const char *tmpfs_opts[] = {
"from", "size", "maxfilesize", "inodes", "uid", "gid", "mode", "export",
- "union", NULL
+ "union", "nonc", NULL
};
static const char *tmpfs_updateopts[] = {
@@ -137,6 +137,7 @@ tmpfs_mount(struct mount *mp)
struct tmpfs_node *root;
struct thread *td = curthread;
int error;
+ bool nonc;
/* Size counters. */
u_quad_t pages;
off_t nodes_max, size_max, maxfilesize;
@@ -185,11 +186,12 @@ tmpfs_mount(struct mount *mp)
size_max = 0;
if (vfs_getopt_size(mp->mnt_optnew, "maxfilesize", &maxfilesize) != 0)
maxfilesize = 0;
+ nonc = vfs_getopt(mp->mnt_optnew, "nonc", NULL, NULL) == 0;
/* Do not allow mounts if we do not have enough memory to preserve
* the minimum reserved pages. */
if (tmpfs_mem_avail() < TMPFS_PAGES_MINRESERVED)
- return ENOSPC;
+ return (ENOSPC);
/* Get the maximum number of memory pages this file system is
* allowed to use, based on the maximum size the user passed in
@@ -218,37 +220,35 @@ tmpfs_mount(struct mount *mp)
tmp = (struct tmpfs_mount *)malloc(sizeof(struct tmpfs_mount),
M_TMPFSMNT, M_WAITOK | M_ZERO);
- mtx_init(&tmp->allnode_lock, "tmpfs allnode lock", NULL, MTX_DEF);
+ mtx_init(&tmp->tm_allnode_lock, "tmpfs allnode lock", NULL, MTX_DEF);
tmp->tm_nodes_max = nodes_max;
tmp->tm_nodes_inuse = 0;
+ tmp->tm_refcount = 1;
tmp->tm_maxfilesize = maxfilesize > 0 ? maxfilesize : OFF_MAX;
LIST_INIT(&tmp->tm_nodes_used);
tmp->tm_pages_max = pages;
tmp->tm_pages_used = 0;
- tmp->tm_ino_unr = new_unrhdr(2, INT_MAX, &tmp->allnode_lock);
+ tmp->tm_ino_unr = new_unrhdr(2, INT_MAX, &tmp->tm_allnode_lock);
tmp->tm_dirent_pool = uma_zcreate("TMPFS dirent",
- sizeof(struct tmpfs_dirent),
- NULL, NULL, NULL, NULL,
+ sizeof(struct tmpfs_dirent), NULL, NULL, NULL, NULL,
UMA_ALIGN_PTR, 0);
tmp->tm_node_pool = uma_zcreate("TMPFS node",
- sizeof(struct tmpfs_node),
- tmpfs_node_ctor, tmpfs_node_dtor,
- tmpfs_node_init, tmpfs_node_fini,
- UMA_ALIGN_PTR, 0);
+ sizeof(struct tmpfs_node), tmpfs_node_ctor, tmpfs_node_dtor,
+ tmpfs_node_init, tmpfs_node_fini, UMA_ALIGN_PTR, 0);
tmp->tm_ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
+ tmp->tm_nonc = nonc;
/* Allocate the root node. */
- error = tmpfs_alloc_node(mp, tmp, VDIR, root_uid,
- root_gid, root_mode & ALLPERMS, NULL, NULL,
- VNOVAL, &root);
+ error = tmpfs_alloc_node(mp, tmp, VDIR, root_uid, root_gid,
+ root_mode & ALLPERMS, NULL, NULL, VNOVAL, &root);
if (error != 0 || root == NULL) {
- uma_zdestroy(tmp->tm_node_pool);
- uma_zdestroy(tmp->tm_dirent_pool);
- delete_unrhdr(tmp->tm_ino_unr);
- free(tmp, M_TMPFSMNT);
- return error;
+ uma_zdestroy(tmp->tm_node_pool);
+ uma_zdestroy(tmp->tm_dirent_pool);
+ delete_unrhdr(tmp->tm_ino_unr);
+ free(tmp, M_TMPFSMNT);
+ return (error);
}
KASSERT(root->tn_id == 2,
("tmpfs root with invalid ino: %ju", (uintmax_t)root->tn_id));
@@ -306,25 +306,17 @@ tmpfs_unmount(struct mount *mp, int mntflags)
TMPFS_LOCK(tmp);
while ((node = LIST_FIRST(&tmp->tm_nodes_used)) != NULL) {
- TMPFS_UNLOCK(tmp);
+ TMPFS_NODE_LOCK(node);
if (node->tn_type == VDIR)
tmpfs_dir_destroy(tmp, node);
- tmpfs_free_node(tmp, node);
- TMPFS_LOCK(tmp);
+ if (tmpfs_free_node_locked(tmp, node, true))
+ TMPFS_LOCK(tmp);
+ else
+ TMPFS_NODE_UNLOCK(node);
}
- TMPFS_UNLOCK(tmp);
-
- uma_zdestroy(tmp->tm_dirent_pool);
- uma_zdestroy(tmp->tm_node_pool);
- delete_unrhdr(tmp->tm_ino_unr);
- mtx_destroy(&tmp->allnode_lock);
- MPASS(tmp->tm_pages_used == 0);
- MPASS(tmp->tm_nodes_inuse == 0);
-
- /* Throw away the tmpfs_mount structure. */
- free(mp->mnt_data, M_TMPFSMNT);
mp->mnt_data = NULL;
+ tmpfs_free_tmp(tmp);
vfs_write_resume(mp, VR_START_WRITE);
MNT_ILOCK(mp);
@@ -334,52 +326,74 @@ tmpfs_unmount(struct mount *mp, int mntflags)
return (0);
}
+void
+tmpfs_free_tmp(struct tmpfs_mount *tmp)
+{
+
+ MPASS(tmp->tm_refcount > 0);
+ tmp->tm_refcount--;
+ if (tmp->tm_refcount > 0) {
+ TMPFS_UNLOCK(tmp);
+ return;
+ }
+ TMPFS_UNLOCK(tmp);
+
+ uma_zdestroy(tmp->tm_dirent_pool);
+ uma_zdestroy(tmp->tm_node_pool);
+ delete_unrhdr(tmp->tm_ino_unr);
+
+ mtx_destroy(&tmp->tm_allnode_lock);
+ MPASS(tmp->tm_pages_used == 0);
+ MPASS(tmp->tm_nodes_inuse == 0);
+
+ free(tmp, M_TMPFSMNT);
+}
+
static int
tmpfs_root(struct mount *mp, int flags, struct vnode **vpp)
{
int error;
- error = tmpfs_alloc_vp(mp, VFS_TO_TMPFS(mp)->tm_root, flags, vpp);
- if (!error)
+ error = tmpfs_alloc_vp(mp, VFS_TO_TMPFS(mp)->tm_root, flags, vpp);
+ if (error == 0)
(*vpp)->v_vflag |= VV_ROOT;
-
- return error;
+ return (error);
}
static int
tmpfs_fhtovp(struct mount *mp, struct fid *fhp, int flags,
struct vnode **vpp)
{
- boolean_t found;
struct tmpfs_fid *tfhp;
struct tmpfs_mount *tmp;
struct tmpfs_node *node;
+ int error;
tmp = VFS_TO_TMPFS(mp);
tfhp = (struct tmpfs_fid *)fhp;
if (tfhp->tf_len != sizeof(struct tmpfs_fid))
- return EINVAL;
+ return (EINVAL);
if (tfhp->tf_id >= tmp->tm_nodes_max)
- return EINVAL;
-
- found = FALSE;
+ return (EINVAL);
TMPFS_LOCK(tmp);
LIST_FOREACH(node, &tmp->tm_nodes_used, tn_entries) {
if (node->tn_id == tfhp->tf_id &&
node->tn_gen == tfhp->tf_gen) {
- found = TRUE;
+ tmpfs_ref_node(node);
break;
}
}
TMPFS_UNLOCK(tmp);
- if (found)
- return (tmpfs_alloc_vp(mp, node, LK_EXCLUSIVE, vpp));
-
- return (EINVAL);
+ if (node != NULL) {
+ error = tmpfs_alloc_vp(mp, node, LK_EXCLUSIVE, vpp);
+ tmpfs_free_node(tmp, node);
+ } else
+ error = EINVAL;
+ return (error);
}
/* ARGSUSED2 */
@@ -395,7 +409,7 @@ tmpfs_statfs(struct mount *mp, struct statfs *sbp)
sbp->f_bsize = PAGE_SIZE;
used = tmpfs_pages_used(tmp);
- if (tmp->tm_pages_max != SIZE_MAX)
+ if (tmp->tm_pages_max != ULONG_MAX)
sbp->f_blocks = tmp->tm_pages_max;
else
sbp->f_blocks = used + tmpfs_mem_avail();
diff --git a/sys/fs/tmpfs/tmpfs_vnops.c b/sys/fs/tmpfs/tmpfs_vnops.c
index f01c8be..6d6982f 100644
--- a/sys/fs/tmpfs/tmpfs_vnops.c
+++ b/sys/fs/tmpfs/tmpfs_vnops.c
@@ -76,13 +76,11 @@ tmpfs_vn_get_ino_alloc(struct mount *mp, void *arg, int lkflags,
}
static int
-tmpfs_lookup(struct vop_cachedlookup_args *v)
+tmpfs_lookup1(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp)
{
- struct vnode *dvp = v->a_dvp;
- struct vnode **vpp = v->a_vpp;
- struct componentname *cnp = v->a_cnp;
struct tmpfs_dirent *de;
- struct tmpfs_node *dnode;
+ struct tmpfs_node *dnode, *pnode;
+ struct tmpfs_mount *tm;
int error;
dnode = VP_TO_TMPFS_DIR(dvp);
@@ -104,8 +102,12 @@ tmpfs_lookup(struct vop_cachedlookup_args *v)
goto out;
}
if (cnp->cn_flags & ISDOTDOT) {
+ tm = VFS_TO_TMPFS(dvp->v_mount);
+ pnode = dnode->tn_dir.tn_parent;
+ tmpfs_ref_node(pnode);
error = vn_vget_ino_gen(dvp, tmpfs_vn_get_ino_alloc,
- dnode->tn_dir.tn_parent, cnp->cn_lkflags, vpp);
+ pnode, cnp->cn_lkflags, vpp);
+ tmpfs_free_node(tm, pnode);
if (error != 0)
goto out;
} else if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') {
@@ -117,10 +119,12 @@ tmpfs_lookup(struct vop_cachedlookup_args *v)
if (de != NULL && de->td_node == NULL)
cnp->cn_flags |= ISWHITEOUT;
if (de == NULL || de->td_node == NULL) {
- /* The entry was not found in the directory.
+ /*
+ * The entry was not found in the directory.
* This is OK if we are creating or renaming an
* entry and are working on the last component of
- * the path name. */
+ * the path name.
+ */
if ((cnp->cn_flags & ISLASTCN) &&
(cnp->cn_nameiop == CREATE || \
cnp->cn_nameiop == RENAME ||
@@ -132,8 +136,10 @@ tmpfs_lookup(struct vop_cachedlookup_args *v)
if (error != 0)
goto out;
- /* Keep the component name in the buffer for
- * future uses. */
+ /*
+ * Keep the component name in the buffer for
+ * future uses.
+ */
cnp->cn_flags |= SAVENAME;
error = EJUSTRETURN;
@@ -142,14 +148,18 @@ tmpfs_lookup(struct vop_cachedlookup_args *v)
} else {
struct tmpfs_node *tnode;
- /* The entry was found, so get its associated
- * tmpfs_node. */
+ /*
+ * The entry was found, so get its associated
+ * tmpfs_node.
+ */
tnode = de->td_node;
- /* If we are not at the last path component and
+ /*
+ * If we are not at the last path component and
* found a non-directory or non-link entry (which
* may itself be pointing to a directory), raise
- * an error. */
+ * an error.
+ */
if ((tnode->tn_type != VDIR &&
tnode->tn_type != VLNK) &&
!(cnp->cn_flags & ISLASTCN)) {
@@ -157,9 +167,11 @@ tmpfs_lookup(struct vop_cachedlookup_args *v)
goto out;
}
- /* If we are deleting or renaming the entry, keep
+ /*
+ * If we are deleting or renaming the entry, keep
* track of its tmpfs_dirent so that it can be
- * easily deleted later. */
+ * easily deleted later.
+ */
if ((cnp->cn_flags & ISLASTCN) &&
(cnp->cn_nameiop == DELETE ||
cnp->cn_nameiop == RENAME)) {
@@ -175,8 +187,9 @@ tmpfs_lookup(struct vop_cachedlookup_args *v)
goto out;
if ((dnode->tn_mode & S_ISTXT) &&
- VOP_ACCESS(dvp, VADMIN, cnp->cn_cred, cnp->cn_thread) &&
- VOP_ACCESS(*vpp, VADMIN, cnp->cn_cred, cnp->cn_thread)) {
+ VOP_ACCESS(dvp, VADMIN, cnp->cn_cred,
+ cnp->cn_thread) && VOP_ACCESS(*vpp, VADMIN,
+ cnp->cn_cred, cnp->cn_thread)) {
error = EPERM;
vput(*vpp);
*vpp = NULL;
@@ -192,18 +205,36 @@ tmpfs_lookup(struct vop_cachedlookup_args *v)
}
}
- /* Store the result of this lookup in the cache. Avoid this if the
+ /*
+ * Store the result of this lookup in the cache. Avoid this if the
* request was for creation, as it does not improve timings on
- * emprical tests. */
- if ((cnp->cn_flags & MAKEENTRY) != 0)
+ * empirical tests.
+ */
+ if ((cnp->cn_flags & MAKEENTRY) != 0 && tmpfs_use_nc(dvp))
cache_enter(dvp, *vpp, cnp);
out:
- /* If there were no errors, *vpp cannot be null and it must be
- * locked. */
+ /*
+ * If there were no errors, *vpp cannot be null and it must be
+ * locked.
+ */
MPASS(IFF(error == 0, *vpp != NULLVP && VOP_ISLOCKED(*vpp)));
- return error;
+ return (error);
+}
+
+static int
+tmpfs_cached_lookup(struct vop_cachedlookup_args *v)
+{
+
+ return (tmpfs_lookup1(v->a_dvp, v->a_vpp, v->a_cnp));
+}
+
+static int
+tmpfs_lookup(struct vop_lookup_args *v)
+{
+
+ return (tmpfs_lookup1(v->a_dvp, v->a_vpp, v->a_cnp));
}
static int
@@ -218,7 +249,7 @@ tmpfs_create(struct vop_create_args *v)
MPASS(vap->va_type == VREG || vap->va_type == VSOCK);
error = tmpfs_alloc_file(dvp, vpp, vap, cnp, NULL);
- if (error == 0 && (cnp->cn_flags & MAKEENTRY) != 0)
+ if (error == 0 && (cnp->cn_flags & MAKEENTRY) != 0 && tmpfs_use_nc(dvp))
cache_enter(dvp, *vpp, cnp);
return (error);
}
@@ -441,7 +472,7 @@ tmpfs_read(struct vop_read_args *v)
if (uio->uio_offset < 0)
return (EINVAL);
node = VP_TO_TMPFS_NODE(vp);
- node->tn_status |= TMPFS_NODE_ACCESSED;
+ tmpfs_set_status(node, TMPFS_NODE_ACCESSED);
return (uiomove_object(node->tn_reg.tn_aobj, node->tn_size, uio));
}
@@ -989,10 +1020,12 @@ tmpfs_rename(struct vop_rename_args *v)
tmpfs_dir_attach(tdvp, de);
- cache_purge(fvp);
- if (tvp != NULL)
- cache_purge(tvp);
- cache_purge_negative(tdvp);
+ if (tmpfs_use_nc(fvp)) {
+ cache_purge(fvp);
+ if (tvp != NULL)
+ cache_purge(tvp);
+ cache_purge_negative(tdvp);
+ }
error = 0;
@@ -1078,8 +1111,8 @@ tmpfs_rmdir(struct vop_rmdir_args *v)
v->a_cnp->cn_namelen));
/* Check flags to see if we are allowed to remove the directory. */
- if (dnode->tn_flags & APPEND
- || node->tn_flags & (NOUNLINK | IMMUTABLE | APPEND)) {
+ if ((dnode->tn_flags & APPEND) != 0 ||
+ (node->tn_flags & (NOUNLINK | IMMUTABLE | APPEND)) != 0) {
error = EPERM;
goto out;
}
@@ -1092,23 +1125,23 @@ tmpfs_rmdir(struct vop_rmdir_args *v)
/* No vnode should be allocated for this entry from this point */
TMPFS_NODE_LOCK(node);
- TMPFS_ASSERT_ELOCKED(node);
node->tn_links--;
node->tn_dir.tn_parent = NULL;
- node->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_CHANGED | \
+ node->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_CHANGED |
TMPFS_NODE_MODIFIED;
TMPFS_NODE_UNLOCK(node);
TMPFS_NODE_LOCK(dnode);
- TMPFS_ASSERT_ELOCKED(dnode);
dnode->tn_links--;
- dnode->tn_status |= TMPFS_NODE_ACCESSED | \
- TMPFS_NODE_CHANGED | TMPFS_NODE_MODIFIED;
+ dnode->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_CHANGED |
+ TMPFS_NODE_MODIFIED;
TMPFS_NODE_UNLOCK(dnode);
- cache_purge(dvp);
- cache_purge(vp);
+ if (tmpfs_use_nc(dvp)) {
+ cache_purge(dvp);
+ cache_purge(vp);
+ }
/* Free the directory entry we just deleted. Note that the node
* referred by it will not be removed until the vnode is really
@@ -1216,9 +1249,9 @@ tmpfs_readlink(struct vop_readlink_args *v)
error = uiomove(node->tn_link, MIN(node->tn_size, uio->uio_resid),
uio);
- node->tn_status |= TMPFS_NODE_ACCESSED;
+ tmpfs_set_status(node, TMPFS_NODE_ACCESSED);
- return error;
+ return (error);
}
static int
@@ -1252,10 +1285,10 @@ tmpfs_reclaim(struct vop_reclaim_args *v)
else
vnode_destroy_vobject(vp);
vp->v_object = NULL;
- cache_purge(vp);
+ if (tmpfs_use_nc(vp))
+ cache_purge(vp);
TMPFS_NODE_LOCK(node);
- TMPFS_ASSERT_ELOCKED(node);
tmpfs_free_vp(vp);
/* If the node referenced by this vnode was deleted by the user,
@@ -1385,13 +1418,139 @@ tmpfs_whiteout(struct vop_whiteout_args *ap)
}
}
+static int
+tmpfs_vptocnp_dir(struct tmpfs_node *tn, struct tmpfs_node *tnp,
+ struct tmpfs_dirent **pde)
+{
+ struct tmpfs_dir_cursor dc;
+ struct tmpfs_dirent *de;
+
+ for (de = tmpfs_dir_first(tnp, &dc); de != NULL;
+ de = tmpfs_dir_next(tnp, &dc)) {
+ if (de->td_node == tn) {
+ *pde = de;
+ return (0);
+ }
+ }
+ return (ENOENT);
+}
+
+static int
+tmpfs_vptocnp_fill(struct vnode *vp, struct tmpfs_node *tn,
+ struct tmpfs_node *tnp, char *buf, int *buflen, struct vnode **dvp)
+{
+ struct tmpfs_dirent *de;
+ int error, i;
+
+ error = vn_vget_ino_gen(vp, tmpfs_vn_get_ino_alloc, tnp, LK_SHARED,
+ dvp);
+ if (error != 0)
+ return (error);
+ error = tmpfs_vptocnp_dir(tn, tnp, &de);
+ if (error == 0) {
+ i = *buflen;
+ i -= de->td_namelen;
+ if (i < 0) {
+ error = ENOMEM;
+ } else {
+ bcopy(de->ud.td_name, buf + i, de->td_namelen);
+ *buflen = i;
+ }
+ }
+ if (error == 0) {
+ if (vp != *dvp)
+ VOP_UNLOCK(*dvp, 0);
+ } else {
+ if (vp != *dvp)
+ vput(*dvp);
+ else
+ vrele(vp);
+ }
+ return (error);
+}
+
+static int
+tmpfs_vptocnp(struct vop_vptocnp_args *ap)
+{
+ struct vnode *vp, **dvp;
+ struct tmpfs_node *tn, *tnp, *tnp1;
+ struct tmpfs_dirent *de;
+ struct tmpfs_mount *tm;
+ char *buf;
+ int *buflen;
+ int error;
+
+ vp = ap->a_vp;
+ dvp = ap->a_vpp;
+ buf = ap->a_buf;
+ buflen = ap->a_buflen;
+
+ tm = VFS_TO_TMPFS(vp->v_mount);
+ tn = VP_TO_TMPFS_NODE(vp);
+ if (tn->tn_type == VDIR) {
+ tnp = tn->tn_dir.tn_parent;
+ if (tnp == NULL)
+ return (ENOENT);
+ tmpfs_ref_node(tnp);
+ error = tmpfs_vptocnp_fill(vp, tn, tn->tn_dir.tn_parent, buf,
+ buflen, dvp);
+ tmpfs_free_node(tm, tnp);
+ return (error);
+ }
+restart:
+ TMPFS_LOCK(tm);
+ LIST_FOREACH_SAFE(tnp, &tm->tm_nodes_used, tn_entries, tnp1) {
+ if (tnp->tn_type != VDIR)
+ continue;
+ TMPFS_NODE_LOCK(tnp);
+ tmpfs_ref_node_locked(tnp);
+
+ /*
+ * tn_vnode cannot be instantiated while we hold the
+ * node lock, so the directory cannot be changed while
+ * we iterate over it. Do this to avoid instantiating
+ * vnode for directories which cannot point to our
+ * node.
+ */
+ error = tnp->tn_vnode == NULL ? tmpfs_vptocnp_dir(tn, tnp,
+ &de) : 0;
+
+ if (error == 0) {
+ TMPFS_NODE_UNLOCK(tnp);
+ TMPFS_UNLOCK(tm);
+ error = tmpfs_vptocnp_fill(vp, tn, tnp, buf, buflen,
+ dvp);
+ if (error == 0) {
+ tmpfs_free_node(tm, tnp);
+ return (0);
+ }
+ if ((vp->v_iflag & VI_DOOMED) != 0) {
+ tmpfs_free_node(tm, tnp);
+ return (ENOENT);
+ }
+ TMPFS_LOCK(tm);
+ TMPFS_NODE_LOCK(tnp);
+ }
+ if (tmpfs_free_node_locked(tm, tnp, false)) {
+ goto restart;
+ } else {
+ KASSERT(tnp->tn_refcount > 0,
+ ("node %p refcount zero", tnp));
+ tnp1 = LIST_NEXT(tnp, tn_entries);
+ TMPFS_NODE_UNLOCK(tnp);
+ }
+ }
+ TMPFS_UNLOCK(tm);
+ return (ENOENT);
+}
+
/*
- * vnode operations vector used for files stored in a tmpfs file system.
+ * Vnode operations vector used for files stored in a tmpfs file system.
*/
struct vop_vector tmpfs_vnodeop_entries = {
.vop_default = &default_vnodeops,
.vop_lookup = vfs_cache_lookup,
- .vop_cachedlookup = tmpfs_lookup,
+ .vop_cachedlookup = tmpfs_cached_lookup,
.vop_create = tmpfs_create,
.vop_mknod = tmpfs_mknod,
.vop_open = tmpfs_open,
@@ -1417,5 +1576,13 @@ struct vop_vector tmpfs_vnodeop_entries = {
.vop_vptofh = tmpfs_vptofh,
.vop_whiteout = tmpfs_whiteout,
.vop_bmap = VOP_EOPNOTSUPP,
+ .vop_vptocnp = tmpfs_vptocnp,
};
+/*
+ * Same vector for mounts which do not use namecache.
+ */
+struct vop_vector tmpfs_vnodeop_nonc_entries = {
+ .vop_default = &tmpfs_vnodeop_entries,
+ .vop_lookup = tmpfs_lookup,
+};
diff --git a/sys/fs/tmpfs/tmpfs_vnops.h b/sys/fs/tmpfs/tmpfs_vnops.h
index 1e06c13..db614a05 100644
--- a/sys/fs/tmpfs/tmpfs_vnops.h
+++ b/sys/fs/tmpfs/tmpfs_vnops.h
@@ -44,6 +44,7 @@
*/
extern struct vop_vector tmpfs_vnodeop_entries;
+extern struct vop_vector tmpfs_vnodeop_nonc_entries;
vop_access_t tmpfs_access;
vop_getattr_t tmpfs_getattr;
diff --git a/sys/geom/geom_disk.c b/sys/geom/geom_disk.c
index a0a357f..18402c4 100644
--- a/sys/geom/geom_disk.c
+++ b/sys/geom/geom_disk.c
@@ -589,12 +589,12 @@ g_disk_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp, struct g
* special cases, and there's also a valid range.
*/
sbuf_printf(sb, "%s<rotationrate>", indent);
- if (dp->d_rotation_rate == 0) /* Old drives don't */
- sbuf_printf(sb, "unknown"); /* report RPM. */
- else if (dp->d_rotation_rate == 1) /* Since 0 is used */
- sbuf_printf(sb, "0"); /* above, SSDs use 1. */
- else if ((dp->d_rotation_rate >= 0x041) &&
- (dp->d_rotation_rate <= 0xfffe))
+ if (dp->d_rotation_rate == DISK_RR_UNKNOWN) /* Old drives */
+ sbuf_printf(sb, "unknown"); /* don't report RPM. */
+ else if (dp->d_rotation_rate == DISK_RR_NON_ROTATING)
+ sbuf_printf(sb, "0");
+ else if ((dp->d_rotation_rate >= DISK_RR_MIN) &&
+ (dp->d_rotation_rate <= DISK_RR_MAX))
sbuf_printf(sb, "%u", dp->d_rotation_rate);
else
sbuf_printf(sb, "invalid");
diff --git a/sys/geom/geom_disk.h b/sys/geom/geom_disk.h
index 09194c0..948c664 100644
--- a/sys/geom/geom_disk.h
+++ b/sys/geom/geom_disk.h
@@ -113,6 +113,11 @@ struct disk {
#define DISKFLAG_DIRECT_COMPLETION 0x20
#define DISKFLAG_LACKS_ROTRATE 0x40
+#define DISK_RR_UNKNOWN 0
+#define DISK_RR_NON_ROTATING 1
+#define DISK_RR_MIN 0x0401
+#define DISK_RR_MAX 0xfffe
+
struct disk *disk_alloc(void);
void disk_create(struct disk *disk, int version);
void disk_destroy(struct disk *disk);
diff --git a/sys/geom/multipath/g_multipath.c b/sys/geom/multipath/g_multipath.c
index b461747..0c24cd3 100644
--- a/sys/geom/multipath/g_multipath.c
+++ b/sys/geom/multipath/g_multipath.c
@@ -923,6 +923,7 @@ g_multipath_ctl_add_name(struct gctl_req *req, struct g_class *mp,
struct g_provider *pp;
const char *mpname;
static const char devpf[6] = "/dev/";
+ int error;
g_topology_assert();
@@ -972,10 +973,9 @@ g_multipath_ctl_add_name(struct gctl_req *req, struct g_class *mp,
return;
}
- /*
- * Now add....
- */
- (void) g_multipath_add_disk(gp, pp);
+ error = g_multipath_add_disk(gp, pp);
+ if (error != 0)
+ gctl_error(req, "Provider addition error: %d", error);
}
static void
diff --git a/sys/geom/vinum/geom_vinum_state.c b/sys/geom/vinum/geom_vinum_state.c
index 568c784..67486da 100644
--- a/sys/geom/vinum/geom_vinum_state.c
+++ b/sys/geom/vinum/geom_vinum_state.c
@@ -183,7 +183,7 @@ gv_set_sd_state(struct gv_sd *s, int newstate, int flags)
* Only do this if we're forced, since it usually is done
* internally, and then we do use the force flag.
*/
- if (!flags & GV_SETSTATE_FORCE)
+ if (!(flags & GV_SETSTATE_FORCE))
return (GV_ERR_SETSTATE);
break;
diff --git a/sys/i386/i386/initcpu.c b/sys/i386/i386/initcpu.c
index 7025021..bc6a55b 100644
--- a/sys/i386/i386/initcpu.c
+++ b/sys/i386/i386/initcpu.c
@@ -826,14 +826,18 @@ initializecpucache(void)
* CPUID_SS feature even though the native CPU supports it.
*/
TUNABLE_INT_FETCH("hw.clflush_disable", &hw_clflush_disable);
- if (vm_guest != VM_GUEST_NO && hw_clflush_disable == -1)
+ if (vm_guest != VM_GUEST_NO && hw_clflush_disable == -1) {
cpu_feature &= ~CPUID_CLFSH;
+ cpu_stdext_feature &= ~CPUID_STDEXT_CLFLUSHOPT;
+ }
/*
- * Allow to disable CLFLUSH feature manually by
- * hw.clflush_disable tunable.
+ * The kernel's use of CLFLUSH{,OPT} can be disabled manually
+ * by setting the hw.clflush_disable tunable.
*/
- if (hw_clflush_disable == 1)
+ if (hw_clflush_disable == 1) {
cpu_feature &= ~CPUID_CLFSH;
+ cpu_stdext_feature &= ~CPUID_STDEXT_CLFLUSHOPT;
+ }
#if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
/*
diff --git a/sys/i386/i386/machdep.c b/sys/i386/i386/machdep.c
index f55900c..607b08b 100644
--- a/sys/i386/i386/machdep.c
+++ b/sys/i386/i386/machdep.c
@@ -3157,8 +3157,7 @@ init386(first)
#else
register_t
-init386(first)
- int first;
+init386(int first)
{
struct gate_descriptor *gdp;
int gsel_tss, metadata_missing, x, pa;
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index 299ee77..c383f34 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -517,7 +517,14 @@ pmap_bootstrap(vm_paddr_t firstaddr)
for (i = 1; i < NKPT; i++)
PTD[i] = 0;
- /* Initialize the PAT MSR if present. */
+ /*
+ * Initialize the PAT MSR if present.
+ * pmap_init_pat() clears and sets CR4_PGE, which, as a
+ * side-effect, invalidates stale PG_G TLB entries that might
+ * have been created in our pre-boot environment. We assume
+ * that PAT support implies PGE and in reverse, PGE presence
+ * comes with PAT. Both features were added for Pentium Pro.
+ */
pmap_init_pat();
/* Turn on PG_G on kernel page(s) */
@@ -545,7 +552,10 @@ pmap_init_pat(void)
pat_table[PAT_WRITE_PROTECTED] = 3;
pat_table[PAT_UNCACHED] = 3;
- /* Bail if this CPU doesn't implement PAT. */
+ /*
+ * Bail if this CPU doesn't implement PAT.
+ * We assume that PAT support implies PGE.
+ */
if ((cpu_feature & CPUID_PAT) == 0) {
for (i = 0; i < PAT_INDEX_SIZE; i++)
pat_index[i] = pat_table[i];
@@ -1222,9 +1232,8 @@ pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva, boolean_t force)
if ((cpu_feature & CPUID_SS) != 0 && !force)
; /* If "Self Snoop" is supported and allowed, do nothing. */
- else if ((cpu_feature & CPUID_CLFSH) != 0 &&
+ else if ((cpu_stdext_feature & CPUID_STDEXT_CLFLUSHOPT) != 0 &&
eva - sva < PMAP_CLFLUSH_THRESHOLD) {
-
#ifdef DEV_APIC
/*
* XXX: Some CPUs fault, hang, or trash the local APIC
@@ -1236,16 +1245,29 @@ pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva, boolean_t force)
return;
#endif
/*
- * Otherwise, do per-cache line flush. Use the mfence
+ * Otherwise, do per-cache line flush. Use the sfence
* instruction to insure that previous stores are
* included in the write-back. The processor
* propagates flush to other processors in the cache
* coherence domain.
*/
- mfence();
+ sfence();
+ for (; sva < eva; sva += cpu_clflush_line_size)
+ clflushopt(sva);
+ sfence();
+ } else if ((cpu_feature & CPUID_CLFSH) != 0 &&
+ eva - sva < PMAP_CLFLUSH_THRESHOLD) {
+ if (pmap_kextract(sva) == lapic_paddr)
+ return;
+ /*
+ * Writes are ordered by CLFLUSH on Intel CPUs.
+ */
+ if (cpu_vendor_id != CPU_VENDOR_INTEL)
+ mfence();
for (; sva < eva; sva += cpu_clflush_line_size)
clflush(sva);
- mfence();
+ if (cpu_vendor_id != CPU_VENDOR_INTEL)
+ mfence();
} else {
/*
@@ -2675,6 +2697,7 @@ pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
vm_paddr_t mptepa;
vm_page_t mpte;
struct spglist free;
+ vm_offset_t sva;
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
oldpde = *pde;
@@ -2697,8 +2720,9 @@ pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
va >> PDRSHIFT, VM_ALLOC_NOOBJ | VM_ALLOC_NORMAL |
VM_ALLOC_WIRED)) == NULL) {
SLIST_INIT(&free);
- pmap_remove_pde(pmap, pde, trunc_4mpage(va), &free);
- pmap_invalidate_page(pmap, trunc_4mpage(va));
+ sva = trunc_4mpage(va);
+ pmap_remove_pde(pmap, pde, sva, &free);
+ pmap_invalidate_range(pmap, sva, sva + NBPDR - 1);
pmap_free_zero_pages(&free);
CTR2(KTR_PMAP, "pmap_demote_pde: failure for va %#x"
" in pmap %p", va, pmap);
@@ -2869,9 +2893,24 @@ pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
/*
* Machines that don't support invlpg, also don't support
* PG_G.
+ *
+ * When workaround_erratum383 is false, a promotion to a 2M/4M
+ * page mapping does not invalidate the 512/1024 4K page mappings
+ * from the TLB. Consequently, at this point, the TLB may
+ * hold both 4K and 2M/4M page mappings. Therefore, the entire
+ * range of addresses must be invalidated here. In contrast,
+ * when workaround_erratum383 is true, a promotion does
+ * invalidate the 512/1024 4K page mappings, and so a single INVLPG
+ * suffices to invalidate the 2M/4M page mapping.
*/
- if (oldpde & PG_G)
- pmap_invalidate_page(kernel_pmap, sva);
+ if ((oldpde & PG_G) != 0) {
+ if (workaround_erratum383)
+ pmap_invalidate_page(kernel_pmap, sva);
+ else
+ pmap_invalidate_range(kernel_pmap, sva,
+ sva + NBPDR - 1);
+ }
+
pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
if (oldpde & PG_MANAGED) {
pvh = pa_to_pvh(oldpde & PG_PS_FRAME);
@@ -3181,9 +3220,14 @@ retry:
if (newpde != oldpde) {
if (!pde_cmpset(pde, oldpde, newpde))
goto retry;
- if (oldpde & PG_G)
- pmap_invalidate_page(pmap, sva);
- else
+ if (oldpde & PG_G) {
+ /* See pmap_remove_pde() for explanation. */
+ if (workaround_erratum383)
+ pmap_invalidate_page(kernel_pmap, sva);
+ else
+ pmap_invalidate_range(kernel_pmap, sva,
+ sva + NBPDR - 1);
+ } else
anychanged = TRUE;
}
return (anychanged);
@@ -5316,8 +5360,10 @@ pmap_flush_page(vm_page_t m)
{
struct sysmaps *sysmaps;
vm_offset_t sva, eva;
+ bool useclflushopt;
- if ((cpu_feature & CPUID_CLFSH) != 0) {
+ useclflushopt = (cpu_stdext_feature & CPUID_STDEXT_CLFLUSHOPT) != 0;
+ if (useclflushopt || (cpu_feature & CPUID_CLFSH) != 0) {
sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
mtx_lock(&sysmaps->lock);
if (*sysmaps->CMAP2)
@@ -5330,14 +5376,25 @@ pmap_flush_page(vm_page_t m)
eva = sva + PAGE_SIZE;
/*
- * Use mfence despite the ordering implied by
- * mtx_{un,}lock() because clflush is not guaranteed
- * to be ordered by any other instruction.
+ * Use mfence or sfence despite the ordering implied by
+ * mtx_{un,}lock() because clflush on non-Intel CPUs
+ * and clflushopt are not guaranteed to be ordered by
+ * any other instruction.
*/
- mfence();
- for (; sva < eva; sva += cpu_clflush_line_size)
- clflush(sva);
- mfence();
+ if (useclflushopt)
+ sfence();
+ else if (cpu_vendor_id != CPU_VENDOR_INTEL)
+ mfence();
+ for (; sva < eva; sva += cpu_clflush_line_size) {
+ if (useclflushopt)
+ clflushopt(sva);
+ else
+ clflush(sva);
+ }
+ if (useclflushopt)
+ sfence();
+ else if (cpu_vendor_id != CPU_VENDOR_INTEL)
+ mfence();
*sysmaps->CMAP2 = 0;
sched_unpin();
mtx_unlock(&sysmaps->lock);
diff --git a/sys/i386/i386/vm_machdep.c b/sys/i386/i386/vm_machdep.c
index c0e65d5..a76a5ec 100644
--- a/sys/i386/i386/vm_machdep.c
+++ b/sys/i386/i386/vm_machdep.c
@@ -209,11 +209,7 @@ alloc_fpusave(int flags)
* ready to run and return to user mode.
*/
void
-cpu_fork(td1, p2, td2, flags)
- register struct thread *td1;
- register struct proc *p2;
- struct thread *td2;
- int flags;
+cpu_fork(struct thread *td1, struct proc *p2, struct thread *td2, int flags)
{
register struct proc *p1;
struct pcb *pcb2;
diff --git a/sys/i386/include/cpufunc.h b/sys/i386/include/cpufunc.h
index d3a7e08..02ed27a 100644
--- a/sys/i386/include/cpufunc.h
+++ b/sys/i386/include/cpufunc.h
@@ -175,6 +175,13 @@ mfence(void)
__asm __volatile("mfence" : : : "memory");
}
+static __inline void
+sfence(void)
+{
+
+ __asm __volatile("sfence" : : : "memory");
+}
+
#ifdef _KERNEL
#define HAVE_INLINE_FFS
diff --git a/sys/i386/isa/npx.c b/sys/i386/isa/npx.c
index dff0f5c..bc42bbb 100644
--- a/sys/i386/isa/npx.c
+++ b/sys/i386/isa/npx.c
@@ -559,8 +559,7 @@ SYSINIT(npxinitstate, SI_SUB_DRIVERS, SI_ORDER_ANY, npxinitstate, NULL);
* Free coprocessor (if we have it).
*/
void
-npxexit(td)
- struct thread *td;
+npxexit(struct thread *td)
{
critical_enter();
@@ -590,7 +589,7 @@ npxexit(td)
}
int
-npxformat()
+npxformat(void)
{
if (!hw_float)
@@ -970,7 +969,7 @@ npxresume(union savefpu *addr)
}
void
-npxdrop()
+npxdrop(void)
{
struct thread *td;
@@ -1203,8 +1202,7 @@ fpu_clean_state(void)
#endif /* CPU_ENABLE_SSE */
static void
-fpurstor(addr)
- union savefpu *addr;
+fpurstor(union savefpu *addr)
{
#ifdef CPU_ENABLE_SSE
diff --git a/sys/kern/imgact_elf.c b/sys/kern/imgact_elf.c
index 293a65d..bdf709f 100644
--- a/sys/kern/imgact_elf.c
+++ b/sys/kern/imgact_elf.c
@@ -1176,9 +1176,31 @@ core_output(struct vnode *vp, void *base, size_t len, off_t offset,
panic("shouldn't be here");
#endif
} else {
+ /*
+ * EFAULT is a non-fatal error that we can get, for example,
+ * if the segment is backed by a file but extends beyond its
+ * end.
+ */
error = vn_rdwr_inchunks(UIO_WRITE, vp, base, len, offset,
UIO_USERSPACE, IO_UNIT | IO_DIRECT, active_cred, file_cred,
NULL, td);
+ if (error == EFAULT) {
+ log(LOG_WARNING, "Failed to fully fault in a core file "
+ "segment at VA %p with size 0x%zx to be written at "
+ "offset 0x%jx for process %s\n", base, len, offset,
+ curproc->p_comm);
+
+ /*
+ * Write a "real" zero byte at the end of the target
+ * region in the case this is the last segment.
+ * The intermediate space will be implicitly
+ * zero-filled.
+ */
+ error = vn_rdwr_inchunks(UIO_WRITE, vp,
+ __DECONST(void *, zero_region), 1, offset + len - 1,
+ UIO_SYSSPACE, IO_UNIT | IO_DIRECT, active_cred,
+ file_cred, NULL, td);
+ }
}
return (error);
}
@@ -2309,7 +2331,16 @@ compress_core (gzFile file, char *inbuf, char *dest_buf, unsigned int len,
while (len) {
if (inbuf != NULL) {
chunk_len = (len > CORE_BUF_SIZE) ? CORE_BUF_SIZE : len;
- copyin(inbuf, dest_buf, chunk_len);
+
+ /*
+ * We can get EFAULT error here. In that case zero out
+ * the current chunk of the segment.
+ */
+ error = copyin(inbuf, dest_buf, chunk_len);
+ if (error != 0) {
+ bzero(dest_buf, chunk_len);
+ error = 0;
+ }
inbuf += chunk_len;
} else {
chunk_len = len;
diff --git a/sys/kern/kern_procctl.c b/sys/kern/kern_procctl.c
index e6e432e..4ccf646 100644
--- a/sys/kern/kern_procctl.c
+++ b/sys/kern/kern_procctl.c
@@ -243,7 +243,7 @@ reap_kill(struct thread *td, struct proc *p, struct procctl_reaper_kill *rk)
return (ECAPMODE);
if (rk->rk_sig <= 0 || rk->rk_sig > _SIG_MAXSIG)
return (EINVAL);
- if ((rk->rk_flags & ~REAPER_KILL_CHILDREN) != 0)
+ if ((rk->rk_flags & ~(REAPER_KILL_CHILDREN | REAPER_KILL_SUBTREE)) != 0)
return (EINVAL);
PROC_UNLOCK(p);
reap = (p->p_treeflag & P_TREE_REAPER) == 0 ? p->p_reaper : p;
diff --git a/sys/kern/kern_switch.c b/sys/kern/kern_switch.c
index d0009b1..f58acd5 100644
--- a/sys/kern/kern_switch.c
+++ b/sys/kern/kern_switch.c
@@ -206,7 +206,22 @@ critical_exit(void)
if (td->td_critnest == 1) {
td->td_critnest = 0;
+
+ /*
+ * Interrupt handlers execute critical_exit() on
+ * leave, and td_owepreempt may be left set by an
+ * interrupt handler only when td_critnest > 0. If we
+ * are decrementing td_critnest from 1 to 0, read
+ * td_owepreempt after decrementing, to not miss the
+ * preempt. Disallow compiler to reorder operations.
+ */
+ __compiler_membar();
if (td->td_owepreempt && !kdb_active) {
+ /*
+ * Microoptimization: we committed to switch,
+ * disable preemption in interrupt handlers
+ * while spinning for the thread lock.
+ */
td->td_critnest = 1;
thread_lock(td);
td->td_critnest--;
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index db23e1d..7309ae3 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -963,8 +963,8 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
sched_load_rem();
td->td_lastcpu = td->td_oncpu;
- preempted = !((td->td_flags & TDF_SLICEEND) ||
- (flags & SWT_RELINQUISH));
+ preempted = (td->td_flags & TDF_SLICEEND) == 0 &&
+ (flags & SW_PREEMPT) != 0;
td->td_flags &= ~(TDF_NEEDRESCHED | TDF_SLICEEND);
td->td_owepreempt = 0;
td->td_oncpu = NOCPU;
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index 30aad12..53a46c9 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -1870,8 +1870,8 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
ts->ts_rltick = ticks;
td->td_lastcpu = td->td_oncpu;
td->td_oncpu = NOCPU;
- preempted = !((td->td_flags & TDF_SLICEEND) ||
- (flags & SWT_RELINQUISH));
+ preempted = (td->td_flags & TDF_SLICEEND) == 0 &&
+ (flags & SW_PREEMPT) != 0;
td->td_flags &= ~(TDF_NEEDRESCHED | TDF_SLICEEND);
td->td_owepreempt = 0;
if (!TD_IS_IDLETHREAD(td))
diff --git a/sys/kern/subr_unit.c b/sys/kern/subr_unit.c
index 3bf7aaf..d53ec1e 100644
--- a/sys/kern/subr_unit.c
+++ b/sys/kern/subr_unit.c
@@ -192,7 +192,7 @@ CTASSERT(sizeof(struct unr) == sizeof(struct unrb));
* Consistency check function.
*
* Checks the internal consistency as well as we can.
- *
+ *
* Called at all boundaries of this API.
*/
static void
@@ -220,7 +220,7 @@ check_unrhdr(struct unrhdr *uh, int line)
("UNR inconsistency: busy %u found %u (line %d)\n",
ub->busy, w, line));
y += w;
- } else if (up->ptr != NULL)
+ } else if (up->ptr != NULL)
y += up->len;
}
KASSERT (y == uh->busy,
@@ -355,7 +355,7 @@ is_bitmap(struct unrhdr *uh, struct unr *up)
/*
* Look for sequence of items which can be combined into a bitmap, if
* multiple are present, take the one which saves most memory.
- *
+ *
* Return (1) if a sequence was found to indicate that another call
* might be able to do more. Return (0) if we found no suitable sequence.
*
@@ -582,7 +582,7 @@ alloc_unrl(struct unrhdr *uh)
}
/*
- * We can always allocate from the first list element, so if we have
+ * We can always allocate from the first list element, so if we have
* nothing on the list, we must have run out of unit numbers.
*/
if (up == NULL)
@@ -797,7 +797,7 @@ free_unrl(struct unrhdr *uh, u_int item, void **p1, void **p2)
/* Handle bitmap items */
if (is_bitmap(uh, up)) {
ub = up->ptr;
-
+
KASSERT(bit_test(ub->map, item) != 0,
("UNR: Freeing free item %d (bitmap)\n", item));
bit_clear(ub->map, item);
@@ -900,7 +900,7 @@ print_unr(struct unrhdr *uh, struct unr *up)
for (x = 0; x < up->len; x++) {
if (bit_test(ub->map, x))
printf("#");
- else
+ else
printf(" ");
}
printf("]\n");
diff --git a/sys/kern/uipc_mbuf2.c b/sys/kern/uipc_mbuf2.c
index 00472d3..f69b47e 100644
--- a/sys/kern/uipc_mbuf2.c
+++ b/sys/kern/uipc_mbuf2.c
@@ -161,7 +161,7 @@ m_pulldown(struct mbuf *m, int off, int len, int *offp)
* the target data is on <n, off>.
* if we got enough data on the mbuf "n", we're done.
*/
- if ((off == 0 || offp) && len <= n->m_len - off && writable)
+ if ((off == 0 || offp) && len <= n->m_len - off)
goto ok;
/*
diff --git a/sys/kern/vfs_syscalls.c b/sys/kern/vfs_syscalls.c
index 9f922db..5701e5a 100644
--- a/sys/kern/vfs_syscalls.c
+++ b/sys/kern/vfs_syscalls.c
@@ -464,7 +464,7 @@ kern_getfsstat(struct thread *td, struct statfs **buf, size_t bufsize,
nmp = TAILQ_NEXT(mp, mnt_list);
continue;
}
- if (sfsp && count < maxcount) {
+ if (sfsp != NULL && count < maxcount) {
sp = &mp->mnt_stat;
/*
* Set these in case the underlying filesystem
@@ -509,7 +509,7 @@ kern_getfsstat(struct thread *td, struct statfs **buf, size_t bufsize,
vfs_unbusy(mp);
}
mtx_unlock(&mountlist_mtx);
- if (sfsp && count > maxcount)
+ if (sfsp != NULL && count > maxcount)
td->td_retval[0] = maxcount;
else
td->td_retval[0] = count;
diff --git a/sys/modules/cam/Makefile b/sys/modules/cam/Makefile
index c3ecf56..e3404f9 100644
--- a/sys/modules/cam/Makefile
+++ b/sys/modules/cam/Makefile
@@ -14,6 +14,7 @@ SRCS+= opt_cd.h
SRCS+= opt_pt.h
SRCS+= opt_sa.h
SRCS+= opt_ses.h
+SRCS+= opt_ddb.h
SRCS+= device_if.h bus_if.h vnode_if.h
SRCS+= cam.c
SRCS+= cam_compat.c
diff --git a/sys/net/ieee8023ad_lacp.c b/sys/net/ieee8023ad_lacp.c
index 36665ca..7ba5423 100644
--- a/sys/net/ieee8023ad_lacp.c
+++ b/sys/net/ieee8023ad_lacp.c
@@ -523,9 +523,6 @@ lacp_port_create(struct lagg_port *lgp)
struct ifmultiaddr *rifma = NULL;
int error;
- boolean_t active = TRUE; /* XXX should be configurable */
- boolean_t fast = FALSE; /* Configurable via ioctl */
-
bzero((char *)&sdl, sizeof(sdl));
sdl.sdl_len = sizeof(sdl);
sdl.sdl_family = AF_LINK;
@@ -557,9 +554,7 @@ lacp_port_create(struct lagg_port *lgp)
lacp_fill_actorinfo(lp, &lp->lp_actor);
lacp_fill_markerinfo(lp, &lp->lp_marker);
- lp->lp_state =
- (active ? LACP_STATE_ACTIVITY : 0) |
- (fast ? LACP_STATE_TIMEOUT : 0);
+ lp->lp_state = LACP_STATE_ACTIVITY;
lp->lp_aggregator = NULL;
lacp_sm_rx_set_expired(lp);
LACP_UNLOCK(lsc);
diff --git a/sys/net/if_bridge.c b/sys/net/if_bridge.c
index b22e324..7a465b4 100644
--- a/sys/net/if_bridge.c
+++ b/sys/net/if_bridge.c
@@ -866,14 +866,18 @@ bridge_mutecaps(struct bridge_softc *sc)
mask &= bif->bif_savedcaps;
}
+ BRIDGE_XLOCK(sc);
LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
enabled = bif->bif_ifp->if_capenable;
enabled &= ~BRIDGE_IFCAPS_STRIP;
/* strip off mask bits and enable them again if allowed */
enabled &= ~BRIDGE_IFCAPS_MASK;
enabled |= mask;
+ BRIDGE_UNLOCK(sc);
bridge_set_ifcap(sc, bif, enabled);
+ BRIDGE_LOCK(sc);
}
+ BRIDGE_XDROP(sc);
}
@@ -884,6 +888,8 @@ bridge_set_ifcap(struct bridge_softc *sc, struct bridge_iflist *bif, int set)
struct ifreq ifr;
int error;
+ BRIDGE_UNLOCK_ASSERT(sc);
+
bzero(&ifr, sizeof(ifr));
ifr.ifr_reqcap = set;
diff --git a/sys/net/if_bridgevar.h b/sys/net/if_bridgevar.h
index 3210c03..480c90a 100644
--- a/sys/net/if_bridgevar.h
+++ b/sys/net/if_bridgevar.h
@@ -280,6 +280,7 @@ struct ifbpstpconf {
#define BRIDGE_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx)
#define BRIDGE_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx)
#define BRIDGE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->sc_mtx, MA_OWNED)
+#define BRIDGE_UNLOCK_ASSERT(_sc) mtx_assert(&(_sc)->sc_mtx, MA_NOTOWNED)
#define BRIDGE_LOCK2REF(_sc, _err) do { \
mtx_assert(&(_sc)->sc_mtx, MA_OWNED); \
if ((_sc)->sc_iflist_xcnt > 0) \
diff --git a/sys/net/if_lagg.c b/sys/net/if_lagg.c
index 93ec82e..5e8a598 100644
--- a/sys/net/if_lagg.c
+++ b/sys/net/if_lagg.c
@@ -862,7 +862,7 @@ lagg_port_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
return (error);
fallback:
- if (lp->lp_ioctl != NULL)
+ if (lp != NULL && lp->lp_ioctl != NULL)
return ((*lp->lp_ioctl)(ifp, cmd, data));
return (EINVAL);
diff --git a/sys/net/if_media.c b/sys/net/if_media.c
index 32e0712..7216220 100644
--- a/sys/net/if_media.c
+++ b/sys/net/if_media.c
@@ -105,6 +105,7 @@ ifmedia_removeall(ifm)
LIST_REMOVE(entry, ifm_list);
free(entry, M_IFADDR);
}
+ ifm->ifm_cur = NULL;
}
/*
diff --git a/sys/netgraph/ng_mppc.c b/sys/netgraph/ng_mppc.c
index 14d44de..02d9dea 100644
--- a/sys/netgraph/ng_mppc.c
+++ b/sys/netgraph/ng_mppc.c
@@ -66,7 +66,7 @@
#if !defined(NETGRAPH_MPPC_COMPRESSION) && !defined(NETGRAPH_MPPC_ENCRYPTION)
#ifdef KLD_MODULE
-/* XXX NETGRAPH_MPPC_COMPRESSION isn't functional yet */
+#define NETGRAPH_MPPC_COMPRESSION
#define NETGRAPH_MPPC_ENCRYPTION
#else
/* This case is indicative of an error in sys/conf files */
@@ -81,7 +81,6 @@ static MALLOC_DEFINE(M_NETGRAPH_MPPC, "netgraph_mppc", "netgraph mppc node");
#endif
#ifdef NETGRAPH_MPPC_COMPRESSION
-/* XXX this file doesn't exist yet, but hopefully someday it will... */
#include <net/mppc.h>
#endif
#ifdef NETGRAPH_MPPC_ENCRYPTION
@@ -546,7 +545,7 @@ err1:
&destCnt, d->history, flags, 0);
/* Check return value */
- KASSERT(rtn != MPPC_INVALID, ("%s: invalid", __func__));
+ /* KASSERT(rtn != MPPC_INVALID, ("%s: invalid", __func__)); */
if ((rtn & MPPC_EXPANDED) == 0
&& (rtn & MPPC_COMP_OK) == MPPC_COMP_OK) {
outlen -= destCnt;
@@ -808,7 +807,7 @@ failed:
&sourceCnt, &destCnt, d->history, flags);
/* Check return value */
- KASSERT(rtn != MPPC_INVALID, ("%s: invalid", __func__));
+ /* KASSERT(rtn != MPPC_INVALID, ("%s: invalid", __func__)); */
if ((rtn & MPPC_DEST_EXHAUSTED) != 0
|| (rtn & MPPC_DECOMP_OK) != MPPC_DECOMP_OK) {
log(LOG_ERR, "%s: decomp returned 0x%x",
diff --git a/sys/netinet/tcp_output.c b/sys/netinet/tcp_output.c
index b5781d3..6dbfeaf 100644
--- a/sys/netinet/tcp_output.c
+++ b/sys/netinet/tcp_output.c
@@ -1252,8 +1252,8 @@ send:
#ifdef INET6
if (isipv6) {
/*
- * ip6_plen is not need to be filled now, and will be filled
- * in ip6_output.
+ * There is no need to fill in ip6_plen right now.
+ * It will be filled later by ip6_output.
*/
m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
th->th_sum = in6_cksum_pseudo(ip6, sizeof(struct tcphdr) +
diff --git a/sys/ofed/drivers/net/mlx4/main.c b/sys/ofed/drivers/net/mlx4/main.c
index 217220a..46917ac 100644
--- a/sys/ofed/drivers/net/mlx4/main.c
+++ b/sys/ofed/drivers/net/mlx4/main.c
@@ -3637,47 +3637,61 @@ int mlx4_restart_one(struct pci_dev *pdev)
static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = {
/* MT25408 "Hermon" SDR */
- { PCI_VDEVICE(MELLANOX, 0x6340), MLX4_PCI_DEV_FORCE_SENSE_PORT },
+ { PCI_VDEVICE(MELLANOX, 0x6340),
+ .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
/* MT25408 "Hermon" DDR */
- { PCI_VDEVICE(MELLANOX, 0x634a), MLX4_PCI_DEV_FORCE_SENSE_PORT },
+ { PCI_VDEVICE(MELLANOX, 0x634a),
+ .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
/* MT25408 "Hermon" QDR */
- { PCI_VDEVICE(MELLANOX, 0x6354), MLX4_PCI_DEV_FORCE_SENSE_PORT },
+ { PCI_VDEVICE(MELLANOX, 0x6354),
+ .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
/* MT25408 "Hermon" DDR PCIe gen2 */
- { PCI_VDEVICE(MELLANOX, 0x6732), MLX4_PCI_DEV_FORCE_SENSE_PORT },
+ { PCI_VDEVICE(MELLANOX, 0x6732),
+ .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
/* MT25408 "Hermon" QDR PCIe gen2 */
- { PCI_VDEVICE(MELLANOX, 0x673c), MLX4_PCI_DEV_FORCE_SENSE_PORT },
+ { PCI_VDEVICE(MELLANOX, 0x673c),
+ .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
/* MT25408 "Hermon" EN 10GigE */
- { PCI_VDEVICE(MELLANOX, 0x6368), MLX4_PCI_DEV_FORCE_SENSE_PORT },
+ { PCI_VDEVICE(MELLANOX, 0x6368),
+ .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
/* MT25408 "Hermon" EN 10GigE PCIe gen2 */
- { PCI_VDEVICE(MELLANOX, 0x6750), MLX4_PCI_DEV_FORCE_SENSE_PORT },
+ { PCI_VDEVICE(MELLANOX, 0x6750),
+ .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
/* MT25458 ConnectX EN 10GBASE-T 10GigE */
- { PCI_VDEVICE(MELLANOX, 0x6372), MLX4_PCI_DEV_FORCE_SENSE_PORT },
+ { PCI_VDEVICE(MELLANOX, 0x6372),
+ .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
/* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
- { PCI_VDEVICE(MELLANOX, 0x675a), MLX4_PCI_DEV_FORCE_SENSE_PORT },
+ { PCI_VDEVICE(MELLANOX, 0x675a),
+ .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
/* MT26468 ConnectX EN 10GigE PCIe gen2*/
- { PCI_VDEVICE(MELLANOX, 0x6764), MLX4_PCI_DEV_FORCE_SENSE_PORT },
+ { PCI_VDEVICE(MELLANOX, 0x6764),
+ .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
/* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
- { PCI_VDEVICE(MELLANOX, 0x6746), MLX4_PCI_DEV_FORCE_SENSE_PORT },
+ { PCI_VDEVICE(MELLANOX, 0x6746),
+ .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
/* MT26478 ConnectX2 40GigE PCIe gen2 */
- { PCI_VDEVICE(MELLANOX, 0x676e), MLX4_PCI_DEV_FORCE_SENSE_PORT },
+ { PCI_VDEVICE(MELLANOX, 0x676e),
+ .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
/* MT25400 Family [ConnectX-2 Virtual Function] */
- { PCI_VDEVICE(MELLANOX, 0x1002), MLX4_PCI_DEV_IS_VF },
+ { PCI_VDEVICE(MELLANOX, 0x1002),
+ .driver_data = MLX4_PCI_DEV_IS_VF },
/* MT27500 Family [ConnectX-3] */
- { PCI_VDEVICE(MELLANOX, 0x1003), 0 },
+ { PCI_VDEVICE(MELLANOX, 0x1003) },
/* MT27500 Family [ConnectX-3 Virtual Function] */
- { PCI_VDEVICE(MELLANOX, 0x1004), MLX4_PCI_DEV_IS_VF },
- { PCI_VDEVICE(MELLANOX, 0x1005), 0 }, /* MT27510 Family */
- { PCI_VDEVICE(MELLANOX, 0x1006), 0 }, /* MT27511 Family */
- { PCI_VDEVICE(MELLANOX, 0x1007), 0 }, /* MT27520 Family */
- { PCI_VDEVICE(MELLANOX, 0x1008), 0 }, /* MT27521 Family */
- { PCI_VDEVICE(MELLANOX, 0x1009), 0 }, /* MT27530 Family */
- { PCI_VDEVICE(MELLANOX, 0x100a), 0 }, /* MT27531 Family */
- { PCI_VDEVICE(MELLANOX, 0x100b), 0 }, /* MT27540 Family */
- { PCI_VDEVICE(MELLANOX, 0x100c), 0 }, /* MT27541 Family */
- { PCI_VDEVICE(MELLANOX, 0x100d), 0 }, /* MT27550 Family */
- { PCI_VDEVICE(MELLANOX, 0x100e), 0 }, /* MT27551 Family */
- { PCI_VDEVICE(MELLANOX, 0x100f), 0 }, /* MT27560 Family */
- { PCI_VDEVICE(MELLANOX, 0x1010), 0 }, /* MT27561 Family */
+ { PCI_VDEVICE(MELLANOX, 0x1004),
+ .driver_data = MLX4_PCI_DEV_IS_VF },
+ { PCI_VDEVICE(MELLANOX, 0x1005) }, /* MT27510 Family */
+ { PCI_VDEVICE(MELLANOX, 0x1006) }, /* MT27511 Family */
+ { PCI_VDEVICE(MELLANOX, 0x1007) }, /* MT27520 Family */
+ { PCI_VDEVICE(MELLANOX, 0x1008) }, /* MT27521 Family */
+ { PCI_VDEVICE(MELLANOX, 0x1009) }, /* MT27530 Family */
+ { PCI_VDEVICE(MELLANOX, 0x100a) }, /* MT27531 Family */
+ { PCI_VDEVICE(MELLANOX, 0x100b) }, /* MT27540 Family */
+ { PCI_VDEVICE(MELLANOX, 0x100c) }, /* MT27541 Family */
+ { PCI_VDEVICE(MELLANOX, 0x100d) }, /* MT27550 Family */
+ { PCI_VDEVICE(MELLANOX, 0x100e) }, /* MT27551 Family */
+ { PCI_VDEVICE(MELLANOX, 0x100f) }, /* MT27560 Family */
+ { PCI_VDEVICE(MELLANOX, 0x1010) }, /* MT27561 Family */
{ 0, }
};
diff --git a/sys/sys/unistd.h b/sys/sys/unistd.h
index d6d92ab..0a44fc4 100644
--- a/sys/sys/unistd.h
+++ b/sys/sys/unistd.h
@@ -65,7 +65,7 @@
#define _POSIX_MONOTONIC_CLOCK 200112L
#define _POSIX_NO_TRUNC 1
#define _POSIX_PRIORITIZED_IO (-1)
-#define _POSIX_PRIORITY_SCHEDULING 200112L
+#define _POSIX_PRIORITY_SCHEDULING 0
#define _POSIX_RAW_SOCKETS 200112L
#define _POSIX_REALTIME_SIGNALS 200112L
#define _POSIX_SEMAPHORES 200112L
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 750083f..99bda8c 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -4274,7 +4274,7 @@ DB_SHOW_COMMAND(procvm, procvm)
struct proc *p;
if (have_addr) {
- p = (struct proc *) addr;
+ p = db_lookup_proc(addr);
} else {
p = curproc;
}
diff --git a/sys/vm/vm_object.h b/sys/vm/vm_object.h
index f45aa9e..5dcb804 100644
--- a/sys/vm/vm_object.h
+++ b/sys/vm/vm_object.h
@@ -192,8 +192,8 @@ struct vm_object {
#define OBJ_DISCONNECTWNT 0x4000 /* disconnect from vnode wanted */
#define OBJ_TMPFS 0x8000 /* has tmpfs vnode allocated */
-#define IDX_TO_OFF(idx) (((vm_ooffset_t)(idx)) << PAGE_SHIFT)
-#define OFF_TO_IDX(off) ((vm_pindex_t)(((vm_ooffset_t)(off)) >> PAGE_SHIFT))
+#define IDX_TO_OFF(idx) (((vm_ooffset_t)(idx)) << PAGE_SHIFT)
+#define OFF_TO_IDX(off) ((vm_pindex_t)(((vm_ooffset_t)(off)) >> PAGE_SHIFT))
#ifdef _KERNEL
diff --git a/sys/x86/x86/mca.c b/sys/x86/x86/mca.c
index 1bb8854..5278784 100644
--- a/sys/x86/x86/mca.c
+++ b/sys/x86/x86/mca.c
@@ -249,7 +249,7 @@ mca_error_mmtype(uint16_t mca_error)
return ("???");
}
-static int __nonnull(1)
+static int
mca_mute(const struct mca_record *rec)
{
@@ -278,7 +278,7 @@ mca_mute(const struct mca_record *rec)
}
/* Dump details about a single machine check. */
-static void __nonnull(1)
+static void
mca_log(const struct mca_record *rec)
{
uint16_t mca_error;
@@ -417,7 +417,7 @@ mca_log(const struct mca_record *rec)
printf("MCA: Misc 0x%llx\n", (long long)rec->mr_misc);
}
-static int __nonnull(2)
+static int
mca_check_status(int bank, struct mca_record *rec)
{
uint64_t status;
@@ -484,7 +484,7 @@ mca_refill(void *context, int pending)
mca_fill_freelist();
}
-static void __nonnull(2)
+static void
mca_record_entry(enum scan_mode mode, const struct mca_record *record)
{
struct mca_internal *rec;
OpenPOWER on IntegriCloud