summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authormarkm <markm@FreeBSD.org>2013-08-24 13:42:20 +0000
committermarkm <markm@FreeBSD.org>2013-08-24 13:42:20 +0000
commit6228164acab21e650d8ce41758f11a9188cdcf9d (patch)
treef402d81e3fb54380b80e4c1a1d1d5bfe58f4f519
parent7afe86e84d4eb4f4ecfc090764da8acb5f214ae6 (diff)
parent413bf347cd8d75b5cc702edaa5b26ae8b14c9f6b (diff)
downloadFreeBSD-src-6228164acab21e650d8ce41758f11a9188cdcf9d.zip
FreeBSD-src-6228164acab21e650d8ce41758f11a9188cdcf9d.tar.gz
MFC
-rw-r--r--bin/sh/jobs.c6
-rw-r--r--cddl/contrib/opensolaris/cmd/zpool/zpool_main.c5
-rw-r--r--cddl/contrib/opensolaris/common/ctf/ctf_create.c190
-rw-r--r--cddl/contrib/opensolaris/common/ctf/ctf_error.c7
-rw-r--r--cddl/contrib/opensolaris/common/ctf/ctf_impl.h10
-rw-r--r--cddl/contrib/opensolaris/common/ctf/ctf_open.c13
-rw-r--r--cddl/contrib/opensolaris/lib/libctf/common/ctf_lib.c57
-rw-r--r--cddl/contrib/opensolaris/lib/libzfs/common/libzfs_iter.c6
-rw-r--r--contrib/llvm/tools/lldb/include/lldb/Expression/IRExecutionUnit.h40
-rw-r--r--contrib/llvm/tools/lldb/source/Expression/ClangExpressionParser.cpp23
-rw-r--r--contrib/llvm/tools/lldb/source/Expression/IRExecutionUnit.cpp22
-rw-r--r--contrib/llvm/tools/lldb/source/Host/common/FileSpec.cpp5
-rw-r--r--contrib/llvm/tools/lldb/source/Plugins/Disassembler/llvm/DisassemblerLLVMC.cpp23
-rw-r--r--contrib/llvm/tools/lldb/source/Plugins/Instruction/ARM/EmulateInstructionARM.cpp4
-rw-r--r--etc/network.subr6
-rw-r--r--share/man/man4/Makefile4
-rw-r--r--share/man/man4/sa.466
-rw-r--r--share/man/man4/vmx.4112
-rw-r--r--share/man/man9/pfil.919
-rw-r--r--sys/boot/i386/libi386/biosacpi.c2
-rw-r--r--sys/cam/cam_xpt.c14
-rw-r--r--sys/cam/ctl/ctl.c184
-rw-r--r--sys/cam/ctl/ctl.h1
-rw-r--r--sys/cam/ctl/ctl_backend.h7
-rw-r--r--sys/cam/ctl/ctl_backend_block.c11
-rw-r--r--sys/cam/ctl/ctl_backend_ramdisk.c14
-rw-r--r--sys/cam/ctl/ctl_frontend.h5
-rw-r--r--sys/cam/ctl/ctl_io.h2
-rw-r--r--sys/cam/scsi/scsi_sa.c117
-rw-r--r--sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dbuf.c31
-rw-r--r--sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu.c133
-rw-r--r--sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_send.c2
-rw-r--r--sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_tx.c3
-rw-r--r--sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dnode.c7
-rw-r--r--sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_dataset.c31
-rw-r--r--sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_destroy.c2
-rw-r--r--sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_dir.c21
-rw-r--r--sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dmu.h4
-rw-r--r--sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dnode.h4
-rw-r--r--sys/cddl/contrib/opensolaris/uts/common/sys/ctf_api.h7
-rw-r--r--sys/conf/files.amd641
-rw-r--r--sys/conf/files.i3861
-rwxr-xr-xsys/contrib/dev/acpica/acpica_prep.sh2
-rw-r--r--sys/contrib/dev/acpica/changes.txt84
-rw-r--r--sys/contrib/dev/acpica/common/adisasm.c4
-rw-r--r--sys/contrib/dev/acpica/common/adwalk.c10
-rw-r--r--sys/contrib/dev/acpica/common/dmextern.c326
-rw-r--r--sys/contrib/dev/acpica/common/dmtable.c2
-rw-r--r--sys/contrib/dev/acpica/compiler/aslcompile.c25
-rw-r--r--sys/contrib/dev/acpica/compiler/aslglobal.h1
-rw-r--r--sys/contrib/dev/acpica/compiler/aslload.c4
-rw-r--r--sys/contrib/dev/acpica/compiler/aslmain.c1
-rw-r--r--sys/contrib/dev/acpica/compiler/asloptions.c30
-rw-r--r--sys/contrib/dev/acpica/compiler/dtcompile.c2
-rw-r--r--sys/contrib/dev/acpica/components/debugger/dbcmds.c8
-rw-r--r--sys/contrib/dev/acpica/components/debugger/dbfileio.c9
-rw-r--r--sys/contrib/dev/acpica/components/debugger/dbinput.c28
-rw-r--r--sys/contrib/dev/acpica/components/debugger/dbnames.c31
-rw-r--r--sys/contrib/dev/acpica/components/events/evgpeutil.c2
-rw-r--r--sys/contrib/dev/acpica/components/events/evmisc.c18
-rw-r--r--sys/contrib/dev/acpica/components/events/evregion.c26
-rw-r--r--sys/contrib/dev/acpica/components/events/evsci.c87
-rw-r--r--sys/contrib/dev/acpica/components/events/evxface.c163
-rw-r--r--sys/contrib/dev/acpica/components/hardware/hwxface.c25
-rw-r--r--sys/contrib/dev/acpica/components/namespace/nsaccess.c4
-rw-r--r--sys/contrib/dev/acpica/components/namespace/nsdump.c152
-rw-r--r--sys/contrib/dev/acpica/components/namespace/nsxfeval.c9
-rw-r--r--sys/contrib/dev/acpica/components/tables/tbinstal.c9
-rw-r--r--sys/contrib/dev/acpica/components/tables/tbprint.c13
-rw-r--r--sys/contrib/dev/acpica/components/tables/tbxfroot.c3
-rw-r--r--sys/contrib/dev/acpica/components/utilities/utdebug.c2
-rw-r--r--sys/contrib/dev/acpica/components/utilities/utglobal.c3
-rw-r--r--sys/contrib/dev/acpica/include/acdebug.h8
-rw-r--r--sys/contrib/dev/acpica/include/acdisasm.h3
-rw-r--r--sys/contrib/dev/acpica/include/acevents.h10
-rw-r--r--sys/contrib/dev/acpica/include/acexcep.h8
-rw-r--r--sys/contrib/dev/acpica/include/acglobal.h18
-rw-r--r--sys/contrib/dev/acpica/include/aclocal.h13
-rw-r--r--sys/contrib/dev/acpica/include/acnamesp.h8
-rw-r--r--sys/contrib/dev/acpica/include/acpiosxf.h1
-rw-r--r--sys/contrib/dev/acpica/include/acpixf.h13
-rw-r--r--sys/contrib/dev/acpica/include/actypes.h9
-rw-r--r--sys/contrib/dev/acpica/os_specific/service_layers/osunixxf.c1
-rw-r--r--sys/contrib/rdma/krping/krping.c19
-rw-r--r--sys/contrib/rdma/krping/krping.h7
-rw-r--r--sys/contrib/rdma/krping/krping_dev.c3
-rw-r--r--sys/dev/mfi/mfi_cam.c6
-rw-r--r--sys/dev/vmware/vmxnet3/if_vmx.c3305
-rw-r--r--sys/dev/vmware/vmxnet3/if_vmxreg.h316
-rw-r--r--sys/dev/vmware/vmxnet3/if_vmxvar.h319
-rw-r--r--sys/fs/tmpfs/tmpfs_vfsops.c8
-rw-r--r--sys/geom/geom_disk.c6
-rw-r--r--sys/kern/kern_jail.c8
-rw-r--r--sys/kern/kern_mbuf.c69
-rw-r--r--sys/kern/kern_physio.c41
-rw-r--r--sys/kern/uipc_mbuf.c3
-rw-r--r--sys/modules/Makefile2
-rw-r--r--sys/modules/rdma/Makefile8
-rw-r--r--sys/modules/vmware/Makefile28
-rw-r--r--sys/modules/vmware/vmxnet3/Makefile44
-rw-r--r--sys/net/pfil.c88
-rw-r--r--sys/net/pfil.h93
-rw-r--r--sys/netpfil/ipfw/ip_dummynet.c10
-rw-r--r--sys/netpfil/ipfw/ip_fw2.c2
-rw-r--r--sys/netpfil/ipfw/ip_fw_nat.c74
-rw-r--r--sys/netpfil/ipfw/ip_fw_private.h6
-rw-r--r--sys/ofed/include/linux/net.h10
-rw-r--r--sys/powerpc/powermac/kiic.c2
-rw-r--r--sys/sys/conf.h1
-rw-r--r--sys/sys/jail.h3
-rw-r--r--sys/sys/mbuf.h58
-rw-r--r--sys/sys/mount.h1
-rw-r--r--sys/sys/param.h2
-rw-r--r--usr.sbin/jail/jail.810
114 files changed, 6346 insertions, 603 deletions
diff --git a/bin/sh/jobs.c b/bin/sh/jobs.c
index 4ff5093..b6ea4da 100644
--- a/bin/sh/jobs.c
+++ b/bin/sh/jobs.c
@@ -495,14 +495,10 @@ waitcmdloop(struct job *job)
in_waitcmd++;
do {
if (job != NULL) {
- if (job->state) {
+ if (job->state == JOBDONE) {
status = job->ps[job->nprocs - 1].status;
if (WIFEXITED(status))
retval = WEXITSTATUS(status);
-#if JOBS
- else if (WIFSTOPPED(status))
- retval = WSTOPSIG(status) + 128;
-#endif
else
retval = WTERMSIG(status) + 128;
if (! iflag || ! job->changed)
diff --git a/cddl/contrib/opensolaris/cmd/zpool/zpool_main.c b/cddl/contrib/opensolaris/cmd/zpool/zpool_main.c
index 1686467..0dcf11b 100644
--- a/cddl/contrib/opensolaris/cmd/zpool/zpool_main.c
+++ b/cddl/contrib/opensolaris/cmd/zpool/zpool_main.c
@@ -25,6 +25,7 @@
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2012 by Frederik Wessels. All rights reserved.
* Copyright (c) 2012 Martin Matuska <mm@FreeBSD.org>. All rights reserved.
+ * Copyright (c) 2013 by Prasad Joshi (sTec). All rights reserved.
*/
#include <solaris.h>
@@ -2128,8 +2129,10 @@ zpool_do_import(int argc, char **argv)
errno = 0;
searchguid = strtoull(argv[0], &endptr, 10);
- if (errno != 0 || *endptr != '\0')
+ if (errno != 0 || *endptr != '\0') {
searchname = argv[0];
+ searchguid = 0;
+ }
found_config = NULL;
/*
diff --git a/cddl/contrib/opensolaris/common/ctf/ctf_create.c b/cddl/contrib/opensolaris/common/ctf/ctf_create.c
index a4f3df3..239d166 100644
--- a/cddl/contrib/opensolaris/common/ctf/ctf_create.c
+++ b/cddl/contrib/opensolaris/common/ctf/ctf_create.c
@@ -24,13 +24,15 @@
* Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
-
-#pragma ident "%Z%%M% %I% %E% SMI"
+/*
+ * Copyright (c) 2013, Joyent, Inc. All rights reserved.
+ */
#include <sys/sysmacros.h>
#include <sys/param.h>
#include <sys/mman.h>
#include <ctf_impl.h>
+#include <sys/debug.h>
/*
* This static string is used as the template for initially populating a
@@ -167,6 +169,51 @@ ctf_copy_membnames(ctf_dtdef_t *dtd, uchar_t *s)
}
/*
+ * Only types of dynamic CTF containers contain reference counts. These
+ * containers are marked RD/WR. Because of that we basically make this a no-op
+ * for compatibility with non-dynamic CTF sections. This is also a no-op for
+ * types which are not dynamic types. It is the responsibility of the caller to
+ * make sure it is a valid type. We help that caller out on debug builds.
+ *
+ * Note that the reference counts are not maintained for types that are not
+ * within this container. In other words if we have a type in a parent, that
+ * will not have its reference count increased. On the flip side, the parent
+ * will not be allowed to remove dynamic types if it has children.
+ */
+static void
+ctf_ref_inc(ctf_file_t *fp, ctf_id_t tid)
+{
+ ctf_dtdef_t *dtd = ctf_dtd_lookup(fp, tid);
+
+ if (dtd == NULL)
+ return;
+
+ if (!(fp->ctf_flags & LCTF_RDWR))
+ return;
+
+ dtd->dtd_ref++;
+}
+
+/*
+ * Just as with ctf_ref_inc, this is a no-op on non-writeable containers and the
+ * caller should ensure that this is already a valid type.
+ */
+static void
+ctf_ref_dec(ctf_file_t *fp, ctf_id_t tid)
+{
+ ctf_dtdef_t *dtd = ctf_dtd_lookup(fp, tid);
+
+ if (dtd == NULL)
+ return;
+
+ if (!(fp->ctf_flags & LCTF_RDWR))
+ return;
+
+ ASSERT(dtd->dtd_ref >= 1);
+ dtd->dtd_ref--;
+}
+
+/*
* If the specified CTF container is writable and has been modified, reload
* this container with the updated type definitions. In order to make this
* code and the rest of libctf as simple as possible, we perform updates by
@@ -180,6 +227,10 @@ ctf_copy_membnames(ctf_dtdef_t *dtd, uchar_t *s)
* ctf_bufopen() will return a new ctf_file_t, but we want to keep the fp
* constant for the caller, so after ctf_bufopen() returns, we use bcopy to
* swap the interior of the old and new ctf_file_t's, and then free the old.
+ *
+ * Note that the lists of dynamic types stays around and the resulting container
+ * is still writeable. Furthermore, the reference counts that are on the dtd's
+ * are still valid.
*/
int
ctf_update(ctf_file_t *fp)
@@ -432,6 +483,7 @@ ctf_dtd_delete(ctf_file_t *fp, ctf_dtdef_t *dtd)
ctf_dtdef_t *p, **q = &fp->ctf_dthash[h];
ctf_dmdef_t *dmd, *nmd;
size_t len;
+ int kind, i;
for (p = *q; p != NULL; p = p->dtd_hash) {
if (p != dtd)
@@ -443,7 +495,8 @@ ctf_dtd_delete(ctf_file_t *fp, ctf_dtdef_t *dtd)
if (p != NULL)
*q = p->dtd_hash;
- switch (CTF_INFO_KIND(dtd->dtd_data.ctt_info)) {
+ kind = CTF_INFO_KIND(dtd->dtd_data.ctt_info);
+ switch (kind) {
case CTF_K_STRUCT:
case CTF_K_UNION:
case CTF_K_ENUM:
@@ -454,14 +507,33 @@ ctf_dtd_delete(ctf_file_t *fp, ctf_dtdef_t *dtd)
ctf_free(dmd->dmd_name, len);
fp->ctf_dtstrlen -= len;
}
+ if (kind != CTF_K_ENUM)
+ ctf_ref_dec(fp, dmd->dmd_type);
nmd = ctf_list_next(dmd);
ctf_free(dmd, sizeof (ctf_dmdef_t));
}
break;
case CTF_K_FUNCTION:
+ ctf_ref_dec(fp, dtd->dtd_data.ctt_type);
+ for (i = 0; i < CTF_INFO_VLEN(dtd->dtd_data.ctt_info); i++)
+ if (dtd->dtd_u.dtu_argv[i] != 0)
+ ctf_ref_dec(fp, dtd->dtd_u.dtu_argv[i]);
ctf_free(dtd->dtd_u.dtu_argv, sizeof (ctf_id_t) *
CTF_INFO_VLEN(dtd->dtd_data.ctt_info));
break;
+ case CTF_K_ARRAY:
+ ctf_ref_dec(fp, dtd->dtd_u.dtu_arr.ctr_contents);
+ ctf_ref_dec(fp, dtd->dtd_u.dtu_arr.ctr_index);
+ break;
+ case CTF_K_TYPEDEF:
+ ctf_ref_dec(fp, dtd->dtd_data.ctt_type);
+ break;
+ case CTF_K_POINTER:
+ case CTF_K_VOLATILE:
+ case CTF_K_CONST:
+ case CTF_K_RESTRICT:
+ ctf_ref_dec(fp, dtd->dtd_data.ctt_type);
+ break;
}
if (dtd->dtd_name) {
@@ -495,7 +567,9 @@ ctf_dtd_lookup(ctf_file_t *fp, ctf_id_t type)
* Discard all of the dynamic type definitions that have been added to the
* container since the last call to ctf_update(). We locate such types by
* scanning the list and deleting elements that have type IDs greater than
- * ctf_dtoldid, which is set by ctf_update(), above.
+ * ctf_dtoldid, which is set by ctf_update(), above. Note that to work properly
+ * with our reference counting schemes, we must delete the dynamic list in
+ * reverse.
*/
int
ctf_discard(ctf_file_t *fp)
@@ -508,11 +582,11 @@ ctf_discard(ctf_file_t *fp)
if (!(fp->ctf_flags & LCTF_DIRTY))
return (0); /* no update required */
- for (dtd = ctf_list_next(&fp->ctf_dtdefs); dtd != NULL; dtd = ntd) {
+ for (dtd = ctf_list_prev(&fp->ctf_dtdefs); dtd != NULL; dtd = ntd) {
if (dtd->dtd_type <= fp->ctf_dtoldid)
continue; /* skip types that have been committed */
- ntd = ctf_list_next(dtd);
+ ntd = ctf_list_prev(dtd);
ctf_dtd_delete(fp, dtd);
}
@@ -614,6 +688,8 @@ ctf_add_reftype(ctf_file_t *fp, uint_t flag, ctf_id_t ref, uint_t kind)
if ((type = ctf_add_generic(fp, flag, NULL, &dtd)) == CTF_ERR)
return (CTF_ERR); /* errno is set for us */
+ ctf_ref_inc(fp, ref);
+
dtd->dtd_data.ctt_info = CTF_TYPE_INFO(kind, flag, 0);
dtd->dtd_data.ctt_type = (ushort_t)ref;
@@ -645,16 +721,29 @@ ctf_add_array(ctf_file_t *fp, uint_t flag, const ctf_arinfo_t *arp)
{
ctf_dtdef_t *dtd;
ctf_id_t type;
+ ctf_file_t *fpd;
if (arp == NULL)
return (ctf_set_errno(fp, EINVAL));
+ fpd = fp;
+ if (ctf_lookup_by_id(&fpd, arp->ctr_contents) == NULL &&
+ ctf_dtd_lookup(fp, arp->ctr_contents) == NULL)
+ return (ctf_set_errno(fp, ECTF_BADID));
+
+ fpd = fp;
+ if (ctf_lookup_by_id(&fpd, arp->ctr_index) == NULL &&
+ ctf_dtd_lookup(fp, arp->ctr_index) == NULL)
+ return (ctf_set_errno(fp, ECTF_BADID));
+
if ((type = ctf_add_generic(fp, flag, NULL, &dtd)) == CTF_ERR)
return (CTF_ERR); /* errno is set for us */
dtd->dtd_data.ctt_info = CTF_TYPE_INFO(CTF_K_ARRAY, flag, 0);
dtd->dtd_data.ctt_size = 0;
dtd->dtd_u.dtu_arr = *arp;
+ ctf_ref_inc(fp, arp->ctr_contents);
+ ctf_ref_inc(fp, arp->ctr_index);
return (type);
}
@@ -662,6 +751,7 @@ ctf_add_array(ctf_file_t *fp, uint_t flag, const ctf_arinfo_t *arp)
int
ctf_set_array(ctf_file_t *fp, ctf_id_t type, const ctf_arinfo_t *arp)
{
+ ctf_file_t *fpd;
ctf_dtdef_t *dtd = ctf_dtd_lookup(fp, type);
if (!(fp->ctf_flags & LCTF_RDWR))
@@ -670,8 +760,22 @@ ctf_set_array(ctf_file_t *fp, ctf_id_t type, const ctf_arinfo_t *arp)
if (dtd == NULL || CTF_INFO_KIND(dtd->dtd_data.ctt_info) != CTF_K_ARRAY)
return (ctf_set_errno(fp, ECTF_BADID));
+ fpd = fp;
+ if (ctf_lookup_by_id(&fpd, arp->ctr_contents) == NULL &&
+ ctf_dtd_lookup(fp, arp->ctr_contents) == NULL)
+ return (ctf_set_errno(fp, ECTF_BADID));
+
+ fpd = fp;
+ if (ctf_lookup_by_id(&fpd, arp->ctr_index) == NULL &&
+ ctf_dtd_lookup(fp, arp->ctr_index) == NULL)
+ return (ctf_set_errno(fp, ECTF_BADID));
+
+ ctf_ref_dec(fp, dtd->dtd_u.dtu_arr.ctr_contents);
+ ctf_ref_dec(fp, dtd->dtd_u.dtu_arr.ctr_index);
fp->ctf_flags |= LCTF_DIRTY;
dtd->dtd_u.dtu_arr = *arp;
+ ctf_ref_inc(fp, arp->ctr_contents);
+ ctf_ref_inc(fp, arp->ctr_index);
return (0);
}
@@ -683,7 +787,9 @@ ctf_add_function(ctf_file_t *fp, uint_t flag,
ctf_dtdef_t *dtd;
ctf_id_t type;
uint_t vlen;
+ int i;
ctf_id_t *vdat = NULL;
+ ctf_file_t *fpd;
if (ctc == NULL || (ctc->ctc_flags & ~CTF_FUNC_VARARG) != 0 ||
(ctc->ctc_argc != 0 && argv == NULL))
@@ -696,6 +802,18 @@ ctf_add_function(ctf_file_t *fp, uint_t flag,
if (vlen > CTF_MAX_VLEN)
return (ctf_set_errno(fp, EOVERFLOW));
+ fpd = fp;
+ if (ctf_lookup_by_id(&fpd, ctc->ctc_return) == NULL &&
+ ctf_dtd_lookup(fp, ctc->ctc_return) == NULL)
+ return (ctf_set_errno(fp, ECTF_BADID));
+
+ for (i = 0; i < ctc->ctc_argc; i++) {
+ fpd = fp;
+ if (ctf_lookup_by_id(&fpd, argv[i]) == NULL &&
+ ctf_dtd_lookup(fp, argv[i]) == NULL)
+ return (ctf_set_errno(fp, ECTF_BADID));
+ }
+
if (vlen != 0 && (vdat = ctf_alloc(sizeof (ctf_id_t) * vlen)) == NULL)
return (ctf_set_errno(fp, EAGAIN));
@@ -707,6 +825,10 @@ ctf_add_function(ctf_file_t *fp, uint_t flag,
dtd->dtd_data.ctt_info = CTF_TYPE_INFO(CTF_K_FUNCTION, flag, vlen);
dtd->dtd_data.ctt_type = (ushort_t)ctc->ctc_return;
+ ctf_ref_inc(fp, ctc->ctc_return);
+ for (i = 0; i < ctc->ctc_argc; i++)
+ ctf_ref_inc(fp, argv[i]);
+
bcopy(argv, vdat, sizeof (ctf_id_t) * ctc->ctc_argc);
if (ctc->ctc_flags & CTF_FUNC_VARARG)
vdat[vlen - 1] = 0; /* add trailing zero to indicate varargs */
@@ -825,8 +947,11 @@ ctf_add_typedef(ctf_file_t *fp, uint_t flag, const char *name, ctf_id_t ref)
{
ctf_dtdef_t *dtd;
ctf_id_t type;
+ ctf_file_t *fpd;
- if (ref == CTF_ERR || ref < 0 || ref > CTF_MAX_TYPE)
+ fpd = fp;
+ if (ref == CTF_ERR || (ctf_lookup_by_id(&fpd, ref) == NULL &&
+ ctf_dtd_lookup(fp, ref) == NULL))
return (ctf_set_errno(fp, EINVAL));
if ((type = ctf_add_generic(fp, flag, name, &dtd)) == CTF_ERR)
@@ -834,6 +959,7 @@ ctf_add_typedef(ctf_file_t *fp, uint_t flag, const char *name, ctf_id_t ref)
dtd->dtd_data.ctt_info = CTF_TYPE_INFO(CTF_K_TYPEDEF, flag, 0);
dtd->dtd_data.ctt_type = (ushort_t)ref;
+ ctf_ref_inc(fp, ref);
return (type);
}
@@ -1008,6 +1134,45 @@ ctf_add_member(ctf_file_t *fp, ctf_id_t souid, const char *name, ctf_id_t type)
if (s != NULL)
fp->ctf_dtstrlen += strlen(s) + 1;
+ ctf_ref_inc(fp, type);
+ fp->ctf_flags |= LCTF_DIRTY;
+ return (0);
+}
+
+/*
+ * This removes a type from the dynamic section. This will fail if the type is
+ * referenced by another type. Note that the CTF ID is never reused currently by
+ * CTF. Note that if this container is a parent container then we just outright
+ * refuse to remove the type. There currently is no notion of searching for the
+ * ctf_dtdef_t in parent containers. If there is, then this constraint could
+ * become finer grained.
+ */
+int
+ctf_delete_type(ctf_file_t *fp, ctf_id_t type)
+{
+ ctf_file_t *fpd;
+ ctf_dtdef_t *dtd = ctf_dtd_lookup(fp, type);
+
+ if (!(fp->ctf_flags & LCTF_RDWR))
+ return (ctf_set_errno(fp, ECTF_RDONLY));
+
+ /*
+ * We want to give as useful an errno as possible. That means that we
+ * want to distinguish between a type which does not exist and one for
+ * which the type is not dynamic.
+ */
+ fpd = fp;
+ if (ctf_lookup_by_id(&fpd, type) == NULL &&
+ ctf_dtd_lookup(fp, type) == NULL)
+ return (CTF_ERR); /* errno is set for us */
+
+ if (dtd == NULL)
+ return (ctf_set_errno(fp, ECTF_NOTDYN));
+
+ if (dtd->dtd_ref != 0 || fp->ctf_refcnt > 1)
+ return (ctf_set_errno(fp, ECTF_REFERENCED));
+
+ ctf_dtd_delete(fp, dtd);
fp->ctf_flags |= LCTF_DIRTY;
return (0);
}
@@ -1103,6 +1268,9 @@ ctf_add_type(ctf_file_t *dst_fp, ctf_file_t *src_fp, ctf_id_t src_type)
ctf_hash_t *hp;
ctf_helem_t *hep;
+ if (dst_fp == src_fp)
+ return (src_type);
+
if (!(dst_fp->ctf_flags & LCTF_RDWR))
return (ctf_set_errno(dst_fp, ECTF_RDONLY));
@@ -1313,6 +1481,14 @@ ctf_add_type(ctf_file_t *dst_fp, ctf_file_t *src_fp, ctf_id_t src_type)
if (errs)
return (CTF_ERR); /* errno is set for us */
+
+ /*
+ * Now that we know that we can't fail, we go through and bump
+ * all the reference counts on the member types.
+ */
+ for (dmd = ctf_list_next(&dtd->dtd_u.dtu_members);
+ dmd != NULL; dmd = ctf_list_next(dmd))
+ ctf_ref_inc(dst_fp, dmd->dmd_type);
break;
}
diff --git a/cddl/contrib/opensolaris/common/ctf/ctf_error.c b/cddl/contrib/opensolaris/common/ctf/ctf_error.c
index 888c6c8..fe3d0de 100644
--- a/cddl/contrib/opensolaris/common/ctf/ctf_error.c
+++ b/cddl/contrib/opensolaris/common/ctf/ctf_error.c
@@ -23,8 +23,9 @@
* Copyright 2003 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
-
-#pragma ident "%Z%%M% %I% %E% SMI"
+/*
+ * Copyright (c) 2012, Joyent, Inc.
+ */
#include <ctf_impl.h>
@@ -73,6 +74,8 @@ static const char *const _ctf_errlist[] = {
"Limit on number of dynamic types reached", /* ECTF_FULL */
"Duplicate member name definition", /* ECTF_DUPMEMBER */
"Conflicting type is already defined", /* ECTF_CONFLICT */
+ "Type has outstanding references", /* ECTF_REFERENCED */
+ "Type is not a dynamic type" /* ECTF_NOTDYN */
};
static const int _ctf_nerr = sizeof (_ctf_errlist) / sizeof (_ctf_errlist[0]);
diff --git a/cddl/contrib/opensolaris/common/ctf/ctf_impl.h b/cddl/contrib/opensolaris/common/ctf/ctf_impl.h
index 9999080..f56fa6a 100644
--- a/cddl/contrib/opensolaris/common/ctf/ctf_impl.h
+++ b/cddl/contrib/opensolaris/common/ctf/ctf_impl.h
@@ -24,12 +24,13 @@
* Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
+/*
+ * Copyright (c) 2012, Joyent, Inc. All rights reserved.
+ */
#ifndef _CTF_IMPL_H
#define _CTF_IMPL_H
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/sysmacros.h>
@@ -149,6 +150,7 @@ typedef struct ctf_dtdef {
char *dtd_name; /* name associated with definition (if any) */
ctf_id_t dtd_type; /* type identifier for this definition */
ctf_type_t dtd_data; /* type node (see <sys/ctf.h>) */
+ int dtd_ref; /* refcount for dynamic types */
union {
ctf_list_t dtu_members; /* struct, union, or enum */
ctf_arinfo_t dtu_arr; /* array */
@@ -269,7 +271,9 @@ enum {
ECTF_DTFULL, /* CTF type is full (no more members allowed) */
ECTF_FULL, /* CTF container is full */
ECTF_DUPMEMBER, /* duplicate member name definition */
- ECTF_CONFLICT /* conflicting type definition present */
+ ECTF_CONFLICT, /* conflicting type definition present */
+ ECTF_REFERENCED, /* type has outstanding references */
+ ECTF_NOTDYN /* type is not a dynamic type */
};
extern ssize_t ctf_get_ctt_size(const ctf_file_t *, const ctf_type_t *,
diff --git a/cddl/contrib/opensolaris/common/ctf/ctf_open.c b/cddl/contrib/opensolaris/common/ctf/ctf_open.c
index e49a4cb..2148389 100644
--- a/cddl/contrib/opensolaris/common/ctf/ctf_open.c
+++ b/cddl/contrib/opensolaris/common/ctf/ctf_open.c
@@ -24,8 +24,9 @@
* Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
-
-#pragma ident "%Z%%M% %I% %E% SMI"
+/*
+ * Copyright (c) 2012, Joyent, Inc. All rights reserved.
+ */
#include <ctf_impl.h>
#include <sys/mman.h>
@@ -810,8 +811,12 @@ ctf_close(ctf_file_t *fp)
if (fp->ctf_parent != NULL)
ctf_close(fp->ctf_parent);
- for (dtd = ctf_list_next(&fp->ctf_dtdefs); dtd != NULL; dtd = ntd) {
- ntd = ctf_list_next(dtd);
+ /*
+ * Note, to work properly with reference counting on the dynamic
+ * section, we must delete the list in reverse.
+ */
+ for (dtd = ctf_list_prev(&fp->ctf_dtdefs); dtd != NULL; dtd = ntd) {
+ ntd = ctf_list_prev(dtd);
ctf_dtd_delete(fp, dtd);
}
diff --git a/cddl/contrib/opensolaris/lib/libctf/common/ctf_lib.c b/cddl/contrib/opensolaris/lib/libctf/common/ctf_lib.c
index 6cd0036..0df44cf 100644
--- a/cddl/contrib/opensolaris/lib/libctf/common/ctf_lib.c
+++ b/cddl/contrib/opensolaris/lib/libctf/common/ctf_lib.c
@@ -216,6 +216,7 @@ ctf_fdopen(int fd, int *errp)
{
ctf_sect_t ctfsect, symsect, strsect;
ctf_file_t *fp = NULL;
+ size_t shstrndx, shnum;
struct stat64 st;
ssize_t nbytes;
@@ -278,11 +279,10 @@ ctf_fdopen(int fd, int *errp)
#else
uchar_t order = ELFDATA2LSB;
#endif
- GElf_Half i, n;
GElf_Shdr *sp;
void *strs_map;
- size_t strs_mapsz;
+ size_t strs_mapsz, i;
char *strs;
if (hdr.e32.e_ident[EI_DATA] != order)
@@ -298,11 +298,38 @@ ctf_fdopen(int fd, int *errp)
ehdr_to_gelf(&e32, &hdr.e64);
}
- if (hdr.e64.e_shstrndx >= hdr.e64.e_shnum)
+ shnum = hdr.e64.e_shnum;
+ shstrndx = hdr.e64.e_shstrndx;
+
+ /* Extended ELF sections */
+ if ((shstrndx == SHN_XINDEX) || (shnum == 0)) {
+ if (hdr.e32.e_ident[EI_CLASS] == ELFCLASS32) {
+ Elf32_Shdr x32;
+
+ if (pread64(fd, &x32, sizeof (x32),
+ hdr.e64.e_shoff) != sizeof (x32))
+ return (ctf_set_open_errno(errp,
+ errno));
+
+ shnum = x32.sh_size;
+ shstrndx = x32.sh_link;
+ } else {
+ Elf64_Shdr x64;
+
+ if (pread64(fd, &x64, sizeof (x64),
+ hdr.e64.e_shoff) != sizeof (x64))
+ return (ctf_set_open_errno(errp,
+ errno));
+
+ shnum = x64.sh_size;
+ shstrndx = x64.sh_link;
+ }
+ }
+
+ if (shstrndx >= shnum)
return (ctf_set_open_errno(errp, ECTF_CORRUPT));
- n = hdr.e64.e_shnum;
- nbytes = sizeof (GElf_Shdr) * n;
+ nbytes = sizeof (GElf_Shdr) * shnum;
if ((sp = malloc(nbytes)) == NULL)
return (ctf_set_open_errno(errp, errno));
@@ -314,7 +341,7 @@ ctf_fdopen(int fd, int *errp)
if (hdr.e32.e_ident[EI_CLASS] == ELFCLASS32) {
Elf32_Shdr *sp32;
- nbytes = sizeof (Elf32_Shdr) * n;
+ nbytes = sizeof (Elf32_Shdr) * shnum;
if ((sp32 = malloc(nbytes)) == NULL || pread64(fd,
sp32, nbytes, hdr.e64.e_shoff) != nbytes) {
@@ -322,7 +349,7 @@ ctf_fdopen(int fd, int *errp)
return (ctf_set_open_errno(errp, errno));
}
- for (i = 0; i < n; i++)
+ for (i = 0; i < shnum; i++)
shdr_to_gelf(&sp32[i], &sp[i]);
free(sp32);
@@ -336,14 +363,14 @@ ctf_fdopen(int fd, int *errp)
* Now mmap the section header strings section so that we can
* perform string comparison on the section names.
*/
- strs_mapsz = sp[hdr.e64.e_shstrndx].sh_size +
- (sp[hdr.e64.e_shstrndx].sh_offset & ~_PAGEMASK);
+ strs_mapsz = sp[shstrndx].sh_size +
+ (sp[shstrndx].sh_offset & ~_PAGEMASK);
strs_map = mmap64(NULL, strs_mapsz, PROT_READ, MAP_PRIVATE,
- fd, sp[hdr.e64.e_shstrndx].sh_offset & _PAGEMASK);
+ fd, sp[shstrndx].sh_offset & _PAGEMASK);
strs = (char *)strs_map +
- (sp[hdr.e64.e_shstrndx].sh_offset & ~_PAGEMASK);
+ (sp[shstrndx].sh_offset & ~_PAGEMASK);
if (strs_map == MAP_FAILED) {
free(sp);
@@ -354,15 +381,15 @@ ctf_fdopen(int fd, int *errp)
* Iterate over the section header array looking for the CTF
* section and symbol table. The strtab is linked to symtab.
*/
- for (i = 0; i < n; i++) {
+ for (i = 0; i < shnum; i++) {
const GElf_Shdr *shp = &sp[i];
const GElf_Shdr *lhp = &sp[shp->sh_link];
- if (shp->sh_link >= hdr.e64.e_shnum)
+ if (shp->sh_link >= shnum)
continue; /* corrupt sh_link field */
- if (shp->sh_name >= sp[hdr.e64.e_shstrndx].sh_size ||
- lhp->sh_name >= sp[hdr.e64.e_shstrndx].sh_size)
+ if (shp->sh_name >= sp[shstrndx].sh_size ||
+ lhp->sh_name >= sp[shstrndx].sh_size)
continue; /* corrupt sh_name field */
if (shp->sh_type == SHT_PROGBITS &&
diff --git a/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_iter.c b/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_iter.c
index 278bfd4..259d045 100644
--- a/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_iter.c
+++ b/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_iter.c
@@ -21,10 +21,10 @@
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2010 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2012 Pawel Jakub Dawidek <pawel@dawidek.net>.
* All rights reserved.
+ * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
*/
#include <stdio.h>
@@ -452,8 +452,12 @@ iter_dependents_cb(zfs_handle_t *zhp, void *arg)
}
ida->stack = isf.next;
}
+
if (!first && err == 0)
err = ida->func(zhp, ida->data);
+ else
+ zfs_close(zhp);
+
return (err);
}
diff --git a/contrib/llvm/tools/lldb/include/lldb/Expression/IRExecutionUnit.h b/contrib/llvm/tools/lldb/include/lldb/Expression/IRExecutionUnit.h
index 885b651..9bc5592 100644
--- a/contrib/llvm/tools/lldb/include/lldb/Expression/IRExecutionUnit.h
+++ b/contrib/llvm/tools/lldb/include/lldb/Expression/IRExecutionUnit.h
@@ -336,13 +336,7 @@ private:
/// @return
/// True in case of failure, false in case of success.
//------------------------------------------------------------------
- virtual bool finalizeMemory(std::string *ErrMsg) {
- // TODO: Ensure that the instruction cache is flushed because
- // relocations are updated by dy-load. See:
- // sys::Memory::InvalidateInstructionCache
- // llvm::SectionMemoryManager
- return false;
- }
+ bool applyPermissions(std::string *ErrMsg) { return false; }
//------------------------------------------------------------------
/// Passthrough interface stub
@@ -352,6 +346,38 @@ private:
//------------------------------------------------------------------
/// Passthrough interface stub
//------------------------------------------------------------------
+ virtual uint8_t* startExceptionTable(const llvm::Function* F,
+ uintptr_t &ActualSize);
+
+ //------------------------------------------------------------------
+ /// Complete the exception table for a function, and add it to the
+ /// m_exception_tables map
+ ///
+ /// @param[in] F
+ /// The function whose exception table is being written.
+ ///
+ /// @param[in] TableStart
+ /// The first byte of the exception table.
+ ///
+ /// @param[in] TableEnd
+ /// The last byte of the exception table.
+ ///
+ /// @param[in] FrameRegister
+ /// I don't know what this does, but it's passed through.
+ //------------------------------------------------------------------
+ virtual void endExceptionTable(const llvm::Function *F,
+ uint8_t *TableStart,
+ uint8_t *TableEnd,
+ uint8_t* FrameRegister);
+
+ //------------------------------------------------------------------
+ /// Passthrough interface stub
+ //------------------------------------------------------------------
+ virtual void deallocateExceptionTable(void *ET);
+
+ //------------------------------------------------------------------
+ /// Passthrough interface stub
+ //------------------------------------------------------------------
virtual size_t GetDefaultCodeSlabSize() {
return m_default_mm_ap->GetDefaultCodeSlabSize();
}
diff --git a/contrib/llvm/tools/lldb/source/Expression/ClangExpressionParser.cpp b/contrib/llvm/tools/lldb/source/Expression/ClangExpressionParser.cpp
index 98c0bfd..d026f2f 100644
--- a/contrib/llvm/tools/lldb/source/Expression/ClangExpressionParser.cpp
+++ b/contrib/llvm/tools/lldb/source/Expression/ClangExpressionParser.cpp
@@ -52,7 +52,7 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/ExecutionEngine/ExecutionEngine.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/PathV1.h"
#include "llvm/Support/TargetSelect.h"
#if defined(__FreeBSD__)
@@ -81,16 +81,19 @@ using namespace lldb_private;
//===----------------------------------------------------------------------===//
std::string GetBuiltinIncludePath(const char *Argv0) {
- SmallString<128> P(llvm::sys::fs::getMainExecutable(
- Argv0, (void *)(intptr_t) GetBuiltinIncludePath));
-
- if (!P.empty()) {
- llvm::sys::path::remove_filename(P); // Remove /clang from foo/bin/clang
- llvm::sys::path::remove_filename(P); // Remove /bin from foo/bin
-
+ llvm::sys::Path P =
+ llvm::sys::Path::GetMainExecutable(Argv0,
+ (void*)(intptr_t) GetBuiltinIncludePath);
+
+ if (!P.isEmpty()) {
+ P.eraseComponent(); // Remove /clang from foo/bin/clang
+ P.eraseComponent(); // Remove /bin from foo/bin
+
// Get foo/lib/clang/<version>/include
- llvm::sys::path::append(P, "lib", "clang", CLANG_VERSION_STRING,
- "include");
+ P.appendComponent("lib");
+ P.appendComponent("clang");
+ P.appendComponent(CLANG_VERSION_STRING);
+ P.appendComponent("include");
}
return P.str();
diff --git a/contrib/llvm/tools/lldb/source/Expression/IRExecutionUnit.cpp b/contrib/llvm/tools/lldb/source/Expression/IRExecutionUnit.cpp
index 16ef6e5..a2b2594 100644
--- a/contrib/llvm/tools/lldb/source/Expression/IRExecutionUnit.cpp
+++ b/contrib/llvm/tools/lldb/source/Expression/IRExecutionUnit.cpp
@@ -563,6 +563,28 @@ IRExecutionUnit::MemoryManager::deallocateFunctionBody(void *Body)
m_default_mm_ap->deallocateFunctionBody(Body);
}
+uint8_t*
+IRExecutionUnit::MemoryManager::startExceptionTable(const llvm::Function* F,
+ uintptr_t &ActualSize)
+{
+ return m_default_mm_ap->startExceptionTable(F, ActualSize);
+}
+
+void
+IRExecutionUnit::MemoryManager::endExceptionTable(const llvm::Function *F,
+ uint8_t *TableStart,
+ uint8_t *TableEnd,
+ uint8_t* FrameRegister)
+{
+ m_default_mm_ap->endExceptionTable(F, TableStart, TableEnd, FrameRegister);
+}
+
+void
+IRExecutionUnit::MemoryManager::deallocateExceptionTable(void *ET)
+{
+ m_default_mm_ap->deallocateExceptionTable (ET);
+}
+
lldb::addr_t
IRExecutionUnit::GetRemoteAddressForLocal (lldb::addr_t local_address)
{
diff --git a/contrib/llvm/tools/lldb/source/Host/common/FileSpec.cpp b/contrib/llvm/tools/lldb/source/Host/common/FileSpec.cpp
index 08d626e..025cf6f 100644
--- a/contrib/llvm/tools/lldb/source/Host/common/FileSpec.cpp
+++ b/contrib/llvm/tools/lldb/source/Host/common/FileSpec.cpp
@@ -553,8 +553,9 @@ FileSpec::ResolveExecutableLocation ()
if (file_cstr)
{
const std::string file_str (file_cstr);
- std::string path = llvm::sys::FindProgramByName (file_str);
- llvm::StringRef dir_ref = llvm::sys::path::parent_path(path);
+ llvm::sys::Path path = llvm::sys::Program::FindProgramByName (file_str);
+ const std::string &path_str = path.str();
+ llvm::StringRef dir_ref = llvm::sys::path::parent_path(path_str);
//llvm::StringRef dir_ref = path.getDirname();
if (! dir_ref.empty())
{
diff --git a/contrib/llvm/tools/lldb/source/Plugins/Disassembler/llvm/DisassemblerLLVMC.cpp b/contrib/llvm/tools/lldb/source/Plugins/Disassembler/llvm/DisassemblerLLVMC.cpp
index e920d70..b281f2b 100644
--- a/contrib/llvm/tools/lldb/source/Plugins/Disassembler/llvm/DisassemblerLLVMC.cpp
+++ b/contrib/llvm/tools/lldb/source/Plugins/Disassembler/llvm/DisassemblerLLVMC.cpp
@@ -10,7 +10,6 @@
#include "DisassemblerLLVMC.h"
#include "llvm-c/Disassembler.h"
-#include "llvm/ADT/OwningPtr.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDisassembler.h"
@@ -18,7 +17,6 @@
#include "llvm/MC/MCInstPrinter.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
-#include "llvm/MC/MCRelocationInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MemoryObject.h"
@@ -442,30 +440,23 @@ DisassemblerLLVMC::LLVMCDisassembler::LLVMCDisassembler (const char *triple, uns
m_subtarget_info_ap.reset(curr_target->createMCSubtargetInfo(triple, "",
features_str));
- m_asm_info_ap.reset(curr_target->createMCAsmInfo(*curr_target->createMCRegInfo(triple), triple));
-
+ m_asm_info_ap.reset(curr_target->createMCAsmInfo(triple));
+
if (m_instr_info_ap.get() == NULL || m_reg_info_ap.get() == NULL || m_subtarget_info_ap.get() == NULL || m_asm_info_ap.get() == NULL)
{
m_is_valid = false;
return;
}
- m_context_ap.reset(new llvm::MCContext(m_asm_info_ap.get(), m_reg_info_ap.get(), 0));
+ m_context_ap.reset(new llvm::MCContext(*m_asm_info_ap.get(), *(m_reg_info_ap.get()), 0));
m_disasm_ap.reset(curr_target->createMCDisassembler(*m_subtarget_info_ap.get()));
- if (m_disasm_ap.get() && m_context_ap.get())
+ if (m_disasm_ap.get())
{
- llvm::OwningPtr<llvm::MCRelocationInfo> RelInfo(curr_target->createMCRelocationInfo(triple, *m_context_ap.get()));
- if (!RelInfo)
- {
- m_is_valid = false;
- return;
- }
m_disasm_ap->setupForSymbolicDisassembly(NULL,
- DisassemblerLLVMC::SymbolLookupCallback,
- (void *) &owner,
- m_context_ap.get(),
- RelInfo);
+ DisassemblerLLVMC::SymbolLookupCallback,
+ (void *) &owner,
+ m_context_ap.get());
unsigned asm_printer_variant;
if (flavor == ~0U)
diff --git a/contrib/llvm/tools/lldb/source/Plugins/Instruction/ARM/EmulateInstructionARM.cpp b/contrib/llvm/tools/lldb/source/Plugins/Instruction/ARM/EmulateInstructionARM.cpp
index 2dd04dd..52a7fb0 100644
--- a/contrib/llvm/tools/lldb/source/Plugins/Instruction/ARM/EmulateInstructionARM.cpp
+++ b/contrib/llvm/tools/lldb/source/Plugins/Instruction/ARM/EmulateInstructionARM.cpp
@@ -25,7 +25,7 @@
#include "Utility/ARM_DWARF_Registers.h"
#include "llvm/Support/MathExtras.h" // for SignExtend32 template function
- // and countTrailingZeros function
+ // and CountTrailingZeros_32 function
using namespace lldb;
using namespace lldb_private;
@@ -47,7 +47,7 @@ using namespace lldb_private;
static uint32_t
CountITSize (uint32_t ITMask) {
// First count the trailing zeros of the IT mask.
- uint32_t TZ = llvm::countTrailingZeros(ITMask);
+ uint32_t TZ = llvm::CountTrailingZeros_32(ITMask);
if (TZ > 3)
{
#ifdef LLDB_CONFIGURATION_DEBUG
diff --git a/etc/network.subr b/etc/network.subr
index f690725..7dfb328 100644
--- a/etc/network.subr
+++ b/etc/network.subr
@@ -661,9 +661,9 @@ ipv4_down()
for _inet in $inetList ; do
# get rid of extraneous line
case $_inet in
- "") break ;;
- inet\ *) ;;
- *) continue ;;
+ "") break ;;
+ \ inet\ *|inet\ *) ;;
+ *) continue ;;
esac
[ -z "$_inet" ] && break
diff --git a/share/man/man4/Makefile b/share/man/man4/Makefile
index 54a14e8..3a7da85 100644
--- a/share/man/man4/Makefile
+++ b/share/man/man4/Makefile
@@ -539,6 +539,7 @@ MAN= aac.4 \
${_virtio_scsi.4} \
vkbd.4 \
vlan.4 \
+ ${_vmx.4} \
vpo.4 \
vr.4 \
vte.4 \
@@ -706,6 +707,7 @@ MLINKS+=ural.4 if_ural.4
MLINKS+=${_urtw.4} ${_if_urtw.4}
MLINKS+=vge.4 if_vge.4
MLINKS+=vlan.4 if_vlan.4
+MLINKS+=${_vmx.4} ${_if_vmx.4}
MLINKS+=vpo.4 imm.4
MLINKS+=vr.4 if_vr.4
MLINKS+=vte.4 if_vte.4
@@ -758,6 +760,7 @@ _if_nfe.4= if_nfe.4
_if_nve.4= if_nve.4
_if_nxge.4= if_nxge.4
_if_urtw.4= if_urtw.4
+_if_vmx.4= if_vmx.4
_if_vtnet.4= if_vtnet.4
_if_vxge.4= if_vxge.4
_if_wpi.4= if_wpi.4
@@ -777,6 +780,7 @@ _virtio.4= virtio.4
_virtio_balloon.4=virtio_balloon.4
_virtio_blk.4= virtio_blk.4
_virtio_scsi.4= virtio_scsi.4
+_vmx.4= vmx.4
_vtnet.4= vtnet.4
_vxge.4= vxge.4
_padlock.4= padlock.4
diff --git a/share/man/man4/sa.4 b/share/man/man4/sa.4
index eae3bf5..aa72f68 100644
--- a/share/man/man4/sa.4
+++ b/share/man/man4/sa.4
@@ -25,7 +25,7 @@
.\"
.\" $FreeBSD$
.\"
-.Dd June 6, 1999
+.Dd August 23, 2013
.Dt SA 4
.Os
.Sh NAME
@@ -159,6 +159,70 @@ of 0.
(As above, if the file mark is never read, it remains for the next
process to read if in no-rewind mode.)
.El
+.Sh BLOCK SIZES
+By default, the driver will NOT accept reads or writes to a tape device that
+are larger than may be written to or read from the mounted tape using a single
+write or read request.
+Because of this, the application author may have confidence that his wishes
+are respected in terms of the block size written to tape.
+For example, if the user tries to write a 256KB block to the tape, but the
+controller can handle no more than 128KB, the write will fail.
+The previous
+.Fx
+behavior, prior to
+.Fx
+10.0,
+was to break up large reads or writes into smaller blocks when going to the
+tape.
+The problem with that behavior, though, is that it hides the actual on-tape
+block size from the application writer, at least in variable block mode.
+.Pp
+If the user would like his large reads and writes broken up into separate
+pieces, he may set the following loader tunables.
+Note that these tunables WILL GO AWAY in
+.Fx 11.0 .
+They are provided for transition purposes only.
+.Bl -tag -width 12
+.It kern.cam.sa.allow_io_split
+.Pp
+This variable, when set to 1, will configure all
+.Nm
+devices to split large buffers into smaller pieces when needed.
+.It kern.cam.sa.%d.allow_io_split
+.Pp
+This variable, when set to 1, will configure the given
+.Nm
+unit to split large buffers into multiple pieces.
+This will override the global setting, if it exists.
+.El
+.Pp
+There are several
+.Xr sysctl 8
+variables available to view block handling parameters:
+.Bl -tag -width 12
+.It kern.cam.sa.%d.allow_io_split
+.Pp
+This variable allows the user to see, but not modify, the current I/O split
+setting.
+The user is not permitted to modify this setting so that there is no chance
+of behavior changing for the application while a tape is mounted.
+.It kern.cam.sa.%d.maxio
+.Pp
+This variable shows the maximum I/O size in bytes that is allowed by the
+combination of kernel tuning parameters (MAXPHYS, DFLTPHYS) and the
+capabilities of the controller that is attached to the tape drive.
+Applications may look at this value for a guide on how large an I/O may be
+permitted, but should keep in mind that the actual maximum may be
+restricted further by the tape drive via the
+.Tn SCSI
+READ BLOCK LIMITS command.
+.It kern.cam.sa.%d.cpi_maxio
+.Pp
+This variable shows the maximum I/O size supported by the controller, in
+bytes, that is reported via the CAM Path Inquiry CCB (XPT_PATH_INQ).
+If this is 0, that means that the controller has not reported a maximum I/O
+size.
+.El
.Sh FILE MARK HANDLING
The handling of file marks on write is automatic.
If the user has
diff --git a/share/man/man4/vmx.4 b/share/man/man4/vmx.4
new file mode 100644
index 0000000..0e7b7f8
--- /dev/null
+++ b/share/man/man4/vmx.4
@@ -0,0 +1,112 @@
+.\"
+.\" Copyright (c) 2006,2013 Reyk Floeter <reyk@openbsd.org>
+.\"
+.\" Permission to use, copy, modify, and distribute this software for any
+.\" purpose with or without fee is hereby granted, provided that the above
+.\" copyright notice and this permission notice appear in all copies.
+.\"
+.\" THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+.\" WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+.\" MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+.\" ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+.\" WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+.\" ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+.\" OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+.\"
+.\" $OpenBSD: src/share/man/man4/vmx.4,v 1.1 2013/05/31 20:18:44 reyk Exp $
+.\"
+.\" $FreeBSD$
+.\"
+.Dd August 4, 2013
+.Dt VMX 4
+.Os
+.Sh NAME
+.Nm vmx
+.Nd VMware VMXNET3 Virtual Interface Controller device
+.Sh SYNOPSIS
+To compile this driver into the kernel,
+place the following line in your
+kernel configuration file:
+.Bd -ragged -offset indent
+.Cd "device vmx"
+.Ed
+.Pp
+Alternatively, to load the driver as a
+module at boot time, place the following line in
+.Xr loader.conf 5 :
+.Bd -literal -offset indent
+if_vmx_load="YES"
+.Ed
+.Sh DESCRIPTION
+The
+.Nm
+driver provides support for the VMXNET3 virtual NIC available in virtual
+machines by VMware.
+It appears as a simple Ethernet device but is actually a virtual network
+interface to the underlying host operating system.
+.Pp
+This driver supports the
+.Ic VMXNET3
+driver protocol, as an alternative to the emulated
+.Xr pcn 4 ,
+.Xr em 4
+interfaces also available in the VMware environment.
+The
+.Nm
+driver is optimized for the virtual machine and can provide advanced
+capabilities depending on the underlying host operating system and
+the physical network interface controller of the host.
+The
+.Nm
+driver supports features like multiqueue support, IPv6
+checksum offloading, MSI/MSI-X support and hardware VLAN tagging in
+VMware's VLAN Guest Tagging (VGT) mode.
+.Pp
+The
+.Nm
+driver supports VMXNET3 VMware virtual NICs provided by the virtual
+machine hardware version 7 or newer, as provided by the following
+products:
+.Pp
+.Bl -bullet -compact -offset indent
+.It
+VMware ESX/ESXi 4.0 and newer
+.It
+VMware Server 2.0 and newer
+.It
+VMware Workstation 6.5 and newer
+.It
+VMware Fusion 2.0 and newer
+.El
+.Pp
+For more information on configuring this device, see
+.Xr ifconfig 8 .
+.Sh EXAMPLES
+The following entry must be added to the VMware configuration file
+to provide the
+.Nm
+device:
+.Bd -literal -offset indent
+ethernet0.virtualDev = "vmxnet3"
+.Ed
+.Sh SEE ALSO
+.Xr altq 4 ,
+.Xr arp 4 ,
+.Xr em 4 ,
+.Xr netintro 4 ,
+.Xr ng_ether 4 ,
+.Xr pcn 4 ,
+.Xr vlan 4 ,
+.Xr ifconfig 8
+.Sh AUTHORS
+.An -nosplit
+The
+.Nm
+driver was ported from
+.Ox
+by
+.An Bryan Venteicher Aq bryanv@freebsd.org .
+The
+.Ox
+driver was written by
+.An Tsubai Masanari .
diff --git a/share/man/man9/pfil.9 b/share/man/man9/pfil.9
index d34c579..89de302 100644
--- a/share/man/man9/pfil.9
+++ b/share/man/man9/pfil.9
@@ -28,7 +28,7 @@
.\"
.\" $FreeBSD$
.\"
-.Dd October 22, 2012
+.Dd August 23, 2013
.Dt PFIL 9
.Os
.Sh NAME
@@ -36,7 +36,6 @@
.Nm pfil_head_register ,
.Nm pfil_head_unregister ,
.Nm pfil_head_get ,
-.Nm pfil_hook_get ,
.Nm pfil_add_hook ,
.Nm pfil_remove_hook ,
.Nm pfil_run_hooks ,
@@ -50,20 +49,18 @@
.In sys/mbuf.h
.In net/if.h
.In net/pfil.h
+.Bd -literal
+typedef int (*pfil_func_t)(void *arg, struct mbuf **mp, struct ifnet *, int dir, struct inpcb *);
.Ft int
.Fn pfil_head_register "struct pfil_head *head"
.Ft int
.Fn pfil_head_unregister "struct pfil_head *head"
.Ft "struct pfil_head *"
.Fn pfil_head_get "int af" "u_long dlt"
-.Ft "struct packet_filter_hook *"
-.Fn pfil_hook_get "int dir" "struct pfil_head *head"
.Ft void
-.Fn pfil_add_hook "int (*func)()" "void *arg" "int flags" "struct pfil_head *"
+.Fn pfil_add_hook "pfil_func_t" "void *arg" "int flags" "struct pfil_head *"
.Ft void
-.Fn pfil_remove_hook "int (*func)()" "void *arg" "int flags" "struct pfil_head *"
-.Ft int
-.Fn (*func) "void *arg" "struct mbuf **mp" "struct ifnet *" "int dir" "struct inpcb *"
+.Fn pfil_remove_hook "pfil_func_t" "void *arg" "int flags" "struct pfil_head *"
.Ft int
.Fn pfil_run_hooks "struct pfil_head *head" "struct mbuf **mp" "struct ifnet *" "int dir" "struct inpcb *"
.Ft void
@@ -245,11 +242,6 @@ lock export was added in
.Fx 10.0 .
.Sh BUGS
-The
-.Fn pfil_hook_get
-function
-is only safe for internal use.
-.Pp
-When a
+When a
.Vt pfil_head
is being modified, no traffic is diverted
(to avoid deadlock).
diff --git a/sys/boot/i386/libi386/biosacpi.c b/sys/boot/i386/libi386/biosacpi.c
index ff8b1ca..bedc722 100644
--- a/sys/boot/i386/libi386/biosacpi.c
+++ b/sys/boot/i386/libi386/biosacpi.c
@@ -61,7 +61,7 @@ biosacpi_detect(void)
return;
/* export values from the RSDP */
- sprintf(buf, "%u", VTOP(rsdp));
+ sprintf(buf, "0x%08x", VTOP(rsdp));
setenv("hint.acpi.0.rsdp", buf, 1);
revision = rsdp->Revision;
if (revision == 0)
diff --git a/sys/cam/cam_xpt.c b/sys/cam/cam_xpt.c
index 2b94d10..da0b4da 100644
--- a/sys/cam/cam_xpt.c
+++ b/sys/cam/cam_xpt.c
@@ -1091,7 +1091,8 @@ xpt_getattr(char *buf, size_t len, const char *attr, struct cam_path *path)
cdai.buftype = CDAI_TYPE_SERIAL_NUM;
else if (!strcmp(attr, "GEOM::physpath"))
cdai.buftype = CDAI_TYPE_PHYS_PATH;
- else if (!strcmp(attr, "GEOM::lunid")) {
+ else if (strcmp(attr, "GEOM::lunid") == 0 ||
+ strcmp(attr, "GEOM::lunname") == 0) {
cdai.buftype = CDAI_TYPE_SCSI_DEVID;
cdai.bufsiz = CAM_SCSI_DEVID_MAXLEN;
} else
@@ -1108,11 +1109,14 @@ xpt_getattr(char *buf, size_t len, const char *attr, struct cam_path *path)
if (cdai.provsiz == 0)
goto out;
if (cdai.buftype == CDAI_TYPE_SCSI_DEVID) {
- idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
- cdai.provsiz, scsi_devid_is_lun_naa);
- if (idd == NULL)
+ if (strcmp(attr, "GEOM::lunid") == 0) {
idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
- cdai.provsiz, scsi_devid_is_lun_eui64);
+ cdai.provsiz, scsi_devid_is_lun_naa);
+ if (idd == NULL)
+ idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
+ cdai.provsiz, scsi_devid_is_lun_eui64);
+ } else
+ idd = NULL;
if (idd == NULL)
idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
cdai.provsiz, scsi_devid_is_lun_t10);
diff --git a/sys/cam/ctl/ctl.c b/sys/cam/ctl/ctl.c
index 6d6edf7..215f1cd 100644
--- a/sys/cam/ctl/ctl.c
+++ b/sys/cam/ctl/ctl.c
@@ -894,8 +894,13 @@ ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
struct ctl_lun *lun;
struct ctl_page_index *page_index;
struct copan_aps_subpage *current_sp;
+ uint32_t targ_lun;
- lun = ctl_softc->ctl_luns[msg_info.hdr.nexus.targ_lun];
+ targ_lun = msg_info.hdr.nexus.targ_lun;
+ if (msg_info.hdr.nexus.lun_map_fn != NULL)
+ targ_lun = msg_info.hdr.nexus.lun_map_fn(msg_info.hdr.nexus.lun_map_arg, targ_lun);
+
+ lun = ctl_softc->ctl_luns[targ_lun];
page_index = &lun->mode_pages.index[index_to_aps_page];
current_sp = (struct copan_aps_subpage *)
(page_index->page_data +
@@ -1098,7 +1103,8 @@ ctl_init(void)
mtx_unlock(&softc->ctl_lock);
return (error);
}
- printf("ctl: CAM Target Layer loaded\n");
+ if (bootverbose)
+ printf("ctl: CAM Target Layer loaded\n");
/*
* Initialize the initiator and portname mappings
@@ -1194,7 +1200,8 @@ ctl_shutdown(void)
free(control_softc, M_DEVBUF);
control_softc = NULL;
- printf("ctl: CAM Target Layer unloaded\n");
+ if (bootverbose)
+ printf("ctl: CAM Target Layer unloaded\n");
}
static int
@@ -1678,12 +1685,16 @@ ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio, int have_lock)
union ctl_ha_msg msg_info;
struct ctl_lun *lun;
int retval = 0;
+ uint32_t targ_lun;
ctl_softc = control_softc;
if (have_lock == 0)
mtx_lock(&ctl_softc->ctl_lock);
- lun = ctl_softc->ctl_luns[ctsio->io_hdr.nexus.targ_lun];
+ targ_lun = ctsio->io_hdr.nexus.targ_lun;
+ if (ctsio->io_hdr.nexus.lun_map_fn != NULL)
+ targ_lun = ctsio->io_hdr.nexus.lun_map_fn(ctsio->io_hdr.nexus.lun_map_arg, targ_lun);
+ lun = ctl_softc->ctl_luns[targ_lun];
if (lun==NULL)
{
/*
@@ -2980,6 +2991,7 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
struct sbuf *sb;
struct ctl_lun *lun;
struct ctl_lun_list *list;
+ struct ctl_be_lun_option *opt;
list = (struct ctl_lun_list *)addr;
@@ -3097,17 +3109,16 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
if (retval != 0)
break;
- if (lun->backend->lun_info == NULL) {
- retval = sbuf_printf(sb, "</lun>\n");
+ if (lun->backend->lun_info != NULL) {
+ retval = lun->backend->lun_info(lun->be_lun->be_lun, sb);
+ if (retval != 0)
+ break;
+ }
+ STAILQ_FOREACH(opt, &lun->be_lun->options, links) {
+ retval = sbuf_printf(sb, "<%s>%s</%s>", opt->name, opt->value, opt->name);
if (retval != 0)
break;
- continue;
}
-
- retval =lun->backend->lun_info(lun->be_lun->be_lun, sb);
-
- if (retval != 0)
- break;
retval = sbuf_printf(sb, "</lun>\n");
@@ -4432,9 +4443,14 @@ ctl_free_lun(struct ctl_lun *lun)
*/
for (io = (union ctl_io *)STAILQ_FIRST(&softc->rtr_queue); io != NULL;
io = next_io) {
+ uint32_t targ_lun;
+
next_io = (union ctl_io *)STAILQ_NEXT(&io->io_hdr, links);
+ targ_lun = io->io_hdr.nexus.targ_lun;
+ if (io->io_hdr.nexus.lun_map_fn != NULL)
+ targ_lun = io->io_hdr.nexus.lun_map_fn(io->io_hdr.nexus.lun_map_arg, targ_lun);
if ((io->io_hdr.nexus.targ_target.id == lun->target.id)
- && (io->io_hdr.nexus.targ_lun == lun->lun))
+ && (targ_lun == lun->lun))
STAILQ_REMOVE(&softc->rtr_queue, &io->io_hdr,
ctl_io_hdr, links);
}
@@ -8247,12 +8263,16 @@ ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg)
struct ctl_lun *lun;
struct ctl_softc *softc;
int i;
+ uint32_t targ_lun;
softc = control_softc;
mtx_lock(&softc->ctl_lock);
- lun = softc->ctl_luns[msg->hdr.nexus.targ_lun];
+ targ_lun = msg->hdr.nexus.targ_lun;
+ if (msg->hdr.nexus.lun_map_fn != NULL)
+ targ_lun = msg->hdr.nexus.lun_map_fn(msg->hdr.nexus.lun_map_arg, targ_lun);
+ lun = softc->ctl_luns[targ_lun];
switch(msg->pr.pr_info.action) {
case CTL_PR_REG_KEY:
if (!lun->per_res[msg->pr.pr_info.residx].registered) {
@@ -8601,7 +8621,7 @@ ctl_report_luns(struct ctl_scsiio *ctsio)
int num_luns, retval;
uint32_t alloc_len, lun_datalen;
int num_filled, well_known;
- uint32_t initidx;
+ uint32_t initidx, targ_lun_id, lun_id;
retval = CTL_RETVAL_COMPLETE;
well_known = 0;
@@ -8662,63 +8682,47 @@ ctl_report_luns(struct ctl_scsiio *ctsio)
lun_data = (struct scsi_report_luns_data *)ctsio->kern_data_ptr;
ctsio->kern_sg_entries = 0;
- if (lun_datalen < alloc_len) {
- ctsio->residual = alloc_len - lun_datalen;
- ctsio->kern_data_len = lun_datalen;
- ctsio->kern_total_len = lun_datalen;
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
- ctsio->kern_data_resid = 0;
- ctsio->kern_rel_offset = 0;
- ctsio->kern_sg_entries = 0;
-
initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
- /*
- * We set this to the actual data length, regardless of how much
- * space we actually have to return results. If the user looks at
- * this value, he'll know whether or not he allocated enough space
- * and reissue the command if necessary. We don't support well
- * known logical units, so if the user asks for that, return none.
- */
- scsi_ulto4b(lun_datalen - 8, lun_data->length);
-
mtx_lock(&control_softc->ctl_lock);
- for (num_filled = 0, lun = STAILQ_FIRST(&control_softc->lun_list);
- (lun != NULL) && (num_filled < num_luns);
- lun = STAILQ_NEXT(lun, links)) {
+ for (targ_lun_id = 0, num_filled = 0; targ_lun_id < CTL_MAX_LUNS && num_filled < num_luns; targ_lun_id++) {
+ lun_id = targ_lun_id;
+ if (ctsio->io_hdr.nexus.lun_map_fn != NULL)
+ lun_id = ctsio->io_hdr.nexus.lun_map_fn(ctsio->io_hdr.nexus.lun_map_arg, lun_id);
+ if (lun_id >= CTL_MAX_LUNS)
+ continue;
+ lun = control_softc->ctl_luns[lun_id];
+ if (lun == NULL)
+ continue;
- if (lun->lun <= 0xff) {
+ if (targ_lun_id <= 0xff) {
/*
* Peripheral addressing method, bus number 0.
*/
lun_data->luns[num_filled].lundata[0] =
RPL_LUNDATA_ATYP_PERIPH;
- lun_data->luns[num_filled].lundata[1] = lun->lun;
+ lun_data->luns[num_filled].lundata[1] = targ_lun_id;
num_filled++;
- } else if (lun->lun <= 0x3fff) {
+ } else if (targ_lun_id <= 0x3fff) {
/*
* Flat addressing method.
*/
lun_data->luns[num_filled].lundata[0] =
RPL_LUNDATA_ATYP_FLAT |
- (lun->lun & RPL_LUNDATA_FLAT_LUN_MASK);
+ (targ_lun_id & RPL_LUNDATA_FLAT_LUN_MASK);
#ifdef OLDCTLHEADERS
(SRLD_ADDR_FLAT << SRLD_ADDR_SHIFT) |
- (lun->lun & SRLD_BUS_LUN_MASK);
+ (targ_lun_id & SRLD_BUS_LUN_MASK);
#endif
lun_data->luns[num_filled].lundata[1] =
#ifdef OLDCTLHEADERS
- lun->lun >> SRLD_BUS_LUN_BITS;
+ targ_lun_id >> SRLD_BUS_LUN_BITS;
#endif
- lun->lun >> RPL_LUNDATA_FLAT_LUN_BITS;
+ targ_lun_id >> RPL_LUNDATA_FLAT_LUN_BITS;
num_filled++;
} else {
printf("ctl_report_luns: bogus LUN number %jd, "
- "skipping\n", (intmax_t)lun->lun);
+ "skipping\n", (intmax_t)targ_lun_id);
}
/*
* According to SPC-3, rev 14 section 6.21:
@@ -8743,6 +8747,35 @@ ctl_report_luns(struct ctl_scsiio *ctsio)
mtx_unlock(&control_softc->ctl_lock);
/*
+ * It's quite possible that we've returned fewer LUNs than we allocated
+ * space for. Trim it.
+ */
+ lun_datalen = sizeof(*lun_data) +
+ (num_filled * sizeof(struct scsi_report_luns_lundata));
+
+ if (lun_datalen < alloc_len) {
+ ctsio->residual = alloc_len - lun_datalen;
+ ctsio->kern_data_len = lun_datalen;
+ ctsio->kern_total_len = lun_datalen;
+ } else {
+ ctsio->residual = 0;
+ ctsio->kern_data_len = alloc_len;
+ ctsio->kern_total_len = alloc_len;
+ }
+ ctsio->kern_data_resid = 0;
+ ctsio->kern_rel_offset = 0;
+ ctsio->kern_sg_entries = 0;
+
+ /*
+ * We set this to the actual data length, regardless of how much
+ * space we actually have to return results. If the user looks at
+ * this value, he'll know whether or not he allocated enough space
+ * and reissue the command if necessary. We don't support well
+ * known logical units, so if the user asks for that, return none.
+ */
+ scsi_ulto4b(lun_datalen - 8, lun_data->length);
+
+ /*
* We can only return SCSI_STATUS_CHECK_COND when we can't satisfy
* this request.
*/
@@ -9077,6 +9110,14 @@ ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len)
int devid_len;
ctl_softc = control_softc;
+
+ mtx_lock(&ctl_softc->ctl_lock);
+ fe = ctl_softc->ctl_ports[ctl_port_idx(ctsio->io_hdr.nexus.targ_port)];
+ mtx_unlock(&ctl_softc->ctl_lock);
+
+ if (fe->devid != NULL)
+ return ((fe->devid)(ctsio, alloc_len));
+
lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
devid_len = sizeof(struct scsi_vpd_device_id) +
@@ -9130,8 +9171,6 @@ ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len)
mtx_lock(&ctl_softc->ctl_lock);
- fe = ctl_softc->ctl_ports[ctl_port_idx(ctsio->io_hdr.nexus.targ_port)];
-
/*
* For Fibre channel,
*/
@@ -10350,7 +10389,7 @@ ctl_scsiio_precheck(struct ctl_softc *ctl_softc, struct ctl_scsiio *ctsio)
struct ctl_lun *lun;
struct ctl_cmd_entry *entry;
uint8_t opcode;
- uint32_t initidx;
+ uint32_t initidx, targ_lun;
int retval;
retval = 0;
@@ -10361,9 +10400,12 @@ ctl_scsiio_precheck(struct ctl_softc *ctl_softc, struct ctl_scsiio *ctsio)
mtx_lock(&ctl_softc->ctl_lock);
- if ((ctsio->io_hdr.nexus.targ_lun < CTL_MAX_LUNS)
- && (ctl_softc->ctl_luns[ctsio->io_hdr.nexus.targ_lun] != NULL)) {
- lun = ctl_softc->ctl_luns[ctsio->io_hdr.nexus.targ_lun];
+ targ_lun = ctsio->io_hdr.nexus.targ_lun;
+ if (ctsio->io_hdr.nexus.lun_map_fn != NULL)
+ targ_lun = ctsio->io_hdr.nexus.lun_map_fn(ctsio->io_hdr.nexus.lun_map_arg, targ_lun);
+ if ((targ_lun < CTL_MAX_LUNS)
+ && (ctl_softc->ctl_luns[targ_lun] != NULL)) {
+ lun = ctl_softc->ctl_luns[targ_lun];
/*
* If the LUN is invalid, pretend that it doesn't exist.
* It will go away as soon as all pending I/O has been
@@ -10403,6 +10445,7 @@ ctl_scsiio_precheck(struct ctl_softc *ctl_softc, struct ctl_scsiio *ctsio)
ctl_set_unsupported_lun(ctsio);
mtx_unlock(&ctl_softc->ctl_lock);
ctl_done((union ctl_io *)ctsio);
+ CTL_DEBUG_PRINT(("ctl_scsiio_precheck: bailing out due to invalid LUN\n"));
goto bailout;
} else {
/*
@@ -10769,6 +10812,7 @@ ctl_abort_task(union ctl_io *io)
char printbuf[128];
#endif
int found;
+ uint32_t targ_lun;
ctl_softc = control_softc;
found = 0;
@@ -10776,9 +10820,12 @@ ctl_abort_task(union ctl_io *io)
/*
* Look up the LUN.
*/
- if ((io->io_hdr.nexus.targ_lun < CTL_MAX_LUNS)
- && (ctl_softc->ctl_luns[io->io_hdr.nexus.targ_lun] != NULL))
- lun = ctl_softc->ctl_luns[io->io_hdr.nexus.targ_lun];
+ targ_lun = io->io_hdr.nexus.targ_lun;
+ if (io->io_hdr.nexus.lun_map_fn != NULL)
+ targ_lun = io->io_hdr.nexus.lun_map_fn(io->io_hdr.nexus.lun_map_arg, targ_lun);
+ if ((targ_lun < CTL_MAX_LUNS)
+ && (ctl_softc->ctl_luns[targ_lun] != NULL))
+ lun = ctl_softc->ctl_luns[targ_lun];
else
goto bailout;
@@ -10968,6 +11015,8 @@ ctl_run_task_queue(struct ctl_softc *ctl_softc)
int retval;
targ_lun = io->io_hdr.nexus.targ_lun;
+ if (io->io_hdr.nexus.lun_map_fn != NULL)
+ targ_lun = io->io_hdr.nexus.lun_map_fn(io->io_hdr.nexus.lun_map_arg, targ_lun);
if ((targ_lun < CTL_MAX_LUNS)
&& (ctl_softc->ctl_luns[targ_lun] != NULL))
@@ -11042,7 +11091,7 @@ ctl_run_task_queue(struct ctl_softc *ctl_softc)
(uintmax_t)io->io_hdr.nexus.initid.id,
io->io_hdr.nexus.targ_port,
(uintmax_t)io->io_hdr.nexus.targ_target.id,
- io->io_hdr.nexus.targ_lun,
+ io->io_hdr.nexus.targ_lun /* XXX */,
(io->io_hdr.io_type == CTL_IO_TASK) ?
io->taskio.tag_num : io->scsiio.tag_num);
STAILQ_REMOVE(&ctl_softc->task_queue, &io->io_hdr,
@@ -11066,10 +11115,14 @@ ctl_handle_isc(union ctl_io *io)
int free_io;
struct ctl_lun *lun;
struct ctl_softc *ctl_softc;
+ uint32_t targ_lun;
ctl_softc = control_softc;
- lun = ctl_softc->ctl_luns[io->io_hdr.nexus.targ_lun];
+ targ_lun = io->io_hdr.nexus.targ_lun;
+ if (io->io_hdr.nexus.lun_map_fn != NULL)
+ targ_lun = io->io_hdr.nexus.lun_map_fn(io->io_hdr.nexus.lun_map_arg, targ_lun);
+ lun = ctl_softc->ctl_luns[targ_lun];
switch (io->io_hdr.msg_type) {
case CTL_MSG_SERIALIZE:
@@ -12625,7 +12678,7 @@ ctl_queue_sense(union ctl_io *io)
{
struct ctl_lun *lun;
struct ctl_softc *ctl_softc;
- uint32_t initidx;
+ uint32_t initidx, targ_lun;
ctl_softc = control_softc;
@@ -12644,9 +12697,12 @@ ctl_queue_sense(union ctl_io *io)
* If we don't have a LUN for this, just toss the sense
* information.
*/
- if ((io->io_hdr.nexus.targ_lun < CTL_MAX_LUNS)
- && (ctl_softc->ctl_luns[io->io_hdr.nexus.targ_lun] != NULL))
- lun = ctl_softc->ctl_luns[io->io_hdr.nexus.targ_lun];
+ targ_lun = io->io_hdr.nexus.targ_lun;
+ if (io->io_hdr.nexus.lun_map_fn != NULL)
+ targ_lun = io->io_hdr.nexus.lun_map_fn(io->io_hdr.nexus.lun_map_arg, targ_lun);
+ if ((targ_lun < CTL_MAX_LUNS)
+ && (ctl_softc->ctl_luns[targ_lun] != NULL))
+ lun = ctl_softc->ctl_luns[targ_lun];
else
goto bailout;
@@ -13047,6 +13103,8 @@ ctl_isc_start(struct ctl_ha_component *c, ctl_ha_state state)
{
ctl_ha_comp_status ret = CTL_HA_COMP_STATUS_OK;
+ printf("%s: go\n", __func__);
+
// UNKNOWN->HA or UNKNOWN->SINGLE (bootstrap)
if (c->state == CTL_HA_STATE_UNKNOWN ) {
ctl_is_single = 0;
diff --git a/sys/cam/ctl/ctl.h b/sys/cam/ctl/ctl.h
index da382e6..07c82ba 100644
--- a/sys/cam/ctl/ctl.h
+++ b/sys/cam/ctl/ctl.h
@@ -52,6 +52,7 @@ typedef enum {
CTL_PORT_SCSI = 0x02,
CTL_PORT_IOCTL = 0x04,
CTL_PORT_INTERNAL = 0x08,
+ CTL_PORT_ISCSI = 0x10,
CTL_PORT_ALL = 0xff,
CTL_PORT_ISC = 0x100 // FC port for inter-shelf communication
} ctl_port_type;
diff --git a/sys/cam/ctl/ctl_backend.h b/sys/cam/ctl/ctl_backend.h
index c3798e4..56fb8a5 100644
--- a/sys/cam/ctl/ctl_backend.h
+++ b/sys/cam/ctl/ctl_backend.h
@@ -173,6 +173,12 @@ typedef void (*be_lun_config_t)(void *be_lun,
* The links field is for CTL internal use only, and should not be used by
* the backend.
*/
+struct ctl_be_lun_option {
+ STAILQ_ENTRY(ctl_be_lun_option) links;
+ char *name;
+ char *value;
+};
+
struct ctl_be_lun {
uint8_t lun_type; /* passed to CTL */
ctl_backend_lun_flags flags; /* passed to CTL */
@@ -187,6 +193,7 @@ struct ctl_be_lun {
be_lun_config_t lun_config_status; /* passed to CTL */
struct ctl_backend_driver *be; /* passed to CTL */
void *ctl_lun; /* used by CTL */
+ STAILQ_HEAD(, ctl_be_lun_option) options; /* passed to CTL */
STAILQ_ENTRY(ctl_be_lun) links; /* used by CTL */
};
diff --git a/sys/cam/ctl/ctl_backend_block.c b/sys/cam/ctl/ctl_backend_block.c
index 9a13fb5..d2d4287 100644
--- a/sys/cam/ctl/ctl_backend_block.c
+++ b/sys/cam/ctl/ctl_backend_block.c
@@ -1639,6 +1639,7 @@ ctl_be_block_create(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
STAILQ_INIT(&be_lun->input_queue);
STAILQ_INIT(&be_lun->config_write_queue);
STAILQ_INIT(&be_lun->datamove_queue);
+ STAILQ_INIT(&be_lun->ctl_be_lun.options);
sprintf(be_lun->lunname, "cblk%d", softc->num_luns);
mtx_init(&be_lun->lock, be_lun->lunname, NULL, MTX_DEF);
@@ -1740,6 +1741,16 @@ ctl_be_block_create(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
}
num_threads = tmp_num_threads;
+ } else if (strcmp(req->kern_be_args[i].kname, "file") != 0 &&
+ strcmp(req->kern_be_args[i].kname, "dev") != 0) {
+ struct ctl_be_lun_option *opt;
+
+ opt = malloc(sizeof(*opt), M_CTLBLK, M_WAITOK);
+ opt->name = malloc(strlen(req->kern_be_args[i].kname) + 1, M_CTLBLK, M_WAITOK);
+ strcpy(opt->name, req->kern_be_args[i].kname);
+ opt->value = malloc(strlen(req->kern_be_args[i].kvalue) + 1, M_CTLBLK, M_WAITOK);
+ strcpy(opt->value, req->kern_be_args[i].kvalue);
+ STAILQ_INSERT_TAIL(&be_lun->ctl_be_lun.options, opt, links);
}
}
diff --git a/sys/cam/ctl/ctl_backend_ramdisk.c b/sys/cam/ctl/ctl_backend_ramdisk.c
index 63cc4e0..191e8e4 100644
--- a/sys/cam/ctl/ctl_backend_ramdisk.c
+++ b/sys/cam/ctl/ctl_backend_ramdisk.c
@@ -491,7 +491,7 @@ ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
struct ctl_lun_create_params *params;
uint32_t blocksize;
char tmpstr[32];
- int retval;
+ int i, retval;
retval = 0;
params = &req->reqdata.create;
@@ -509,6 +509,7 @@ ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
sizeof(*be_lun));
goto bailout_error;
}
+ STAILQ_INIT(&be_lun->ctl_be_lun.options);
if (params->flags & CTL_LUN_FLAG_DEV_TYPE)
be_lun->ctl_be_lun.lun_type = params->device_type;
@@ -545,6 +546,17 @@ ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
be_lun->softc = softc;
+ for (i = 0; i < req->num_be_args; i++) {
+ struct ctl_be_lun_option *opt;
+
+ opt = malloc(sizeof(*opt), M_RAMDISK, M_WAITOK);
+ opt->name = malloc(strlen(req->kern_be_args[i].kname) + 1, M_RAMDISK, M_WAITOK);
+ strcpy(opt->name, req->kern_be_args[i].kname);
+ opt->value = malloc(strlen(req->kern_be_args[i].kvalue) + 1, M_RAMDISK, M_WAITOK);
+ strcpy(opt->value, req->kern_be_args[i].kvalue);
+ STAILQ_INSERT_TAIL(&be_lun->ctl_be_lun.options, opt, links);
+ }
+
be_lun->flags = CTL_BE_RAMDISK_LUN_UNCONFIGURED;
be_lun->ctl_be_lun.flags = CTL_LUN_FLAG_PRIMARY;
be_lun->ctl_be_lun.be_lun = be_lun;
diff --git a/sys/cam/ctl/ctl_frontend.h b/sys/cam/ctl/ctl_frontend.h
index b286476..23f91b4 100644
--- a/sys/cam/ctl/ctl_frontend.h
+++ b/sys/cam/ctl/ctl_frontend.h
@@ -49,6 +49,9 @@ typedef enum {
typedef void (*port_func_t)(void *onoff_arg);
typedef int (*targ_func_t)(void *arg, struct ctl_id targ_id);
typedef int (*lun_func_t)(void *arg, struct ctl_id targ_id, int lun_id);
+typedef int (*fe_ioctl_t)(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
+ struct thread *td);
+typedef int (*fe_devid_t)(struct ctl_scsiio *ctsio, int alloc_len);
/*
* The ctl_frontend structure is the registration mechanism between a FETD
@@ -213,6 +216,8 @@ struct ctl_frontend {
targ_func_t targ_disable; /* passed to CTL */
lun_func_t lun_enable; /* passed to CTL */
lun_func_t lun_disable; /* passed to CTL */
+ fe_ioctl_t ioctl; /* passed to CTL */
+ fe_devid_t devid; /* passed to CTL */
void *targ_lun_arg; /* passed to CTL */
void (*fe_datamove)(union ctl_io *io); /* passed to CTL */
void (*fe_done)(union ctl_io *io); /* passed to CTL */
diff --git a/sys/cam/ctl/ctl_io.h b/sys/cam/ctl/ctl_io.h
index aa00a06..8981b35 100644
--- a/sys/cam/ctl/ctl_io.h
+++ b/sys/cam/ctl/ctl_io.h
@@ -204,6 +204,8 @@ struct ctl_nexus {
uint32_t targ_port; /* Target port, filled in by PORT */
struct ctl_id targ_target; /* Destination target */
uint32_t targ_lun; /* Destination lun */
+ uint32_t (*lun_map_fn)(void *arg, uint32_t lun);
+ void *lun_map_arg;
};
typedef enum {
diff --git a/sys/cam/scsi/scsi_sa.c b/sys/cam/scsi/scsi_sa.c
index f9b0872..6b941e0 100644
--- a/sys/cam/scsi/scsi_sa.c
+++ b/sys/cam/scsi/scsi_sa.c
@@ -43,6 +43,8 @@ __FBSDID("$FreeBSD$");
#include <sys/mtio.h>
#ifdef _KERNEL
#include <sys/conf.h>
+#include <sys/sysctl.h>
+#include <sys/taskqueue.h>
#endif
#include <sys/fcntl.h>
#include <sys/devicestat.h>
@@ -223,6 +225,8 @@ struct sa_softc {
u_int32_t max_blk;
u_int32_t min_blk;
u_int32_t maxio;
+ u_int32_t cpi_maxio;
+ int allow_io_split;
u_int32_t comp_algorithm;
u_int32_t saved_comp_algorithm;
u_int32_t media_blksize;
@@ -268,6 +272,10 @@ struct sa_softc {
open_rdonly : 1, /* open read-only */
open_pending_mount : 1, /* open pending mount */
ctrl_mode : 1; /* control device open */
+
+ struct task sysctl_task;
+ struct sysctl_ctx_list sysctl_ctx;
+ struct sysctl_oid *sysctl_tree;
};
struct sa_quirk_entry {
@@ -426,6 +434,22 @@ static int sardpos(struct cam_periph *periph, int, u_int32_t *);
static int sasetpos(struct cam_periph *periph, int, u_int32_t *);
+#ifndef SA_DEFAULT_IO_SPLIT
+#define SA_DEFAULT_IO_SPLIT 0
+#endif
+
+static int sa_allow_io_split = SA_DEFAULT_IO_SPLIT;
+
+/*
+ * Tunable to allow the user to set a global allow_io_split value. Note
+ * that this WILL GO AWAY in FreeBSD 11.0. Silently splitting the I/O up
+ * is bad behavior, because it hides the true tape block size from the
+ * application.
+ */
+TUNABLE_INT("kern.cam.sa.allow_io_split", &sa_allow_io_split);
+static SYSCTL_NODE(_kern_cam, OID_AUTO, sa, CTLFLAG_RD, 0,
+ "CAM Sequential Access Tape Driver");
+
static struct periph_driver sadriver =
{
sainit, "sa",
@@ -1448,6 +1472,49 @@ saasync(void *callback_arg, u_int32_t code,
}
}
+static void
+sasysctlinit(void *context, int pending)
+{
+ struct cam_periph *periph;
+ struct sa_softc *softc;
+ char tmpstr[80], tmpstr2[80];
+
+ periph = (struct cam_periph *)context;
+ /*
+ * If the periph is invalid, no need to setup the sysctls.
+ */
+ if (periph->flags & CAM_PERIPH_INVALID)
+ goto bailout;
+
+ softc = (struct sa_softc *)periph->softc;
+
+ snprintf(tmpstr, sizeof(tmpstr), "CAM SA unit %d", periph->unit_number);
+ snprintf(tmpstr2, sizeof(tmpstr2), "%u", periph->unit_number);
+
+ sysctl_ctx_init(&softc->sysctl_ctx);
+ softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx,
+ SYSCTL_STATIC_CHILDREN(_kern_cam_sa), OID_AUTO, tmpstr2,
+ CTLFLAG_RD, 0, tmpstr);
+ if (softc->sysctl_tree == NULL)
+ goto bailout;
+
+ SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
+ OID_AUTO, "allow_io_split", CTLTYPE_INT | CTLFLAG_RDTUN,
+ &softc->allow_io_split, 0, "Allow Splitting I/O");
+ SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
+ OID_AUTO, "maxio", CTLTYPE_INT | CTLFLAG_RD,
+ &softc->maxio, 0, "Maximum I/O size");
+ SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
+ OID_AUTO, "cpi_maxio", CTLTYPE_INT | CTLFLAG_RD,
+ &softc->cpi_maxio, 0, "Maximum Controller I/O size");
+
+bailout:
+ /*
+ * Release the reference that was held when this task was enqueued.
+ */
+ cam_periph_release(periph);
+}
+
static cam_status
saregister(struct cam_periph *periph, void *arg)
{
@@ -1455,6 +1522,7 @@ saregister(struct cam_periph *periph, void *arg)
struct ccb_getdev *cgd;
struct ccb_pathinq cpi;
caddr_t match;
+ char tmpstr[80];
int i;
cgd = (struct ccb_getdev *)arg;
@@ -1509,21 +1577,55 @@ saregister(struct cam_periph *periph, void *arg)
XPORT_DEVSTAT_TYPE(cpi.transport), DEVSTAT_PRIORITY_TAPE);
/*
- * If maxio isn't set, we fall back to DFLTPHYS. If it is set, we
- * take it whether or not it's larger than MAXPHYS. physio will
- * break it down into pieces small enough to fit in a buffer.
+ * Load the default value that is either compiled in, or loaded
+ * in the global kern.cam.sa.allow_io_split tunable.
+ */
+ softc->allow_io_split = sa_allow_io_split;
+
+ /*
+ * Load a per-instance tunable, if it exists. NOTE that this
+ * tunable WILL GO AWAY in FreeBSD 11.0.
+ */
+ snprintf(tmpstr, sizeof(tmpstr), "kern.cam.sa.%u.allow_io_split",
+ periph->unit_number);
+ TUNABLE_INT_FETCH(tmpstr, &softc->allow_io_split);
+
+ /*
+ * If maxio isn't set, we fall back to DFLTPHYS. Otherwise we take
+ * the smaller of cpi.maxio or MAXPHYS.
*/
if (cpi.maxio == 0)
softc->maxio = DFLTPHYS;
+ else if (cpi.maxio > MAXPHYS)
+ softc->maxio = MAXPHYS;
else
softc->maxio = cpi.maxio;
/*
+ * Record the controller's maximum I/O size so we can report it to
+ * the user later.
+ */
+ softc->cpi_maxio = cpi.maxio;
+
+ /*
+ * By default we tell physio that we do not want our I/O split.
+ * The user needs to have a 1:1 mapping between the size of his
+ * write to a tape character device and the size of the write
+ * that actually goes down to the drive.
+ */
+ if (softc->allow_io_split == 0)
+ softc->si_flags = SI_NOSPLIT;
+ else
+ softc->si_flags = 0;
+
+ TASK_INIT(&softc->sysctl_task, 0, sasysctlinit, periph);
+
+ /*
* If the SIM supports unmapped I/O, let physio know that we can
* handle unmapped buffers.
*/
if (cpi.hba_misc & PIM_UNMAPPED)
- softc->si_flags = SI_UNMAPPED;
+ softc->si_flags |= SI_UNMAPPED;
softc->devs.ctl_dev = make_dev(&sa_cdevsw, SAMINOR(SA_CTLDEV,
0, SA_ATYPE_R), UID_ROOT, GID_OPERATOR,
@@ -1586,6 +1688,13 @@ saregister(struct cam_periph *periph, void *arg)
cam_periph_lock(periph);
/*
+ * Bump the peripheral refcount for the sysctl thread, in case we
+ * get invalidated before the thread has a chance to run.
+ */
+ cam_periph_acquire(periph);
+ taskqueue_enqueue(taskqueue_thread, &softc->sysctl_task);
+
+ /*
* Add an async callback so that we get
* notified if this device goes away.
*/
diff --git a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dbuf.c b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dbuf.c
index 759fd7a..92ee7c3 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dbuf.c
+++ b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dbuf.c
@@ -40,6 +40,12 @@
#include <sys/sa.h>
#include <sys/sa_impl.h>
+/*
+ * Number of times that zfs_free_range() took the slow path while doing
+ * a zfs receive. A nonzero value indicates a potential performance problem.
+ */
+uint64_t zfs_free_range_recv_miss;
+
static void dbuf_destroy(dmu_buf_impl_t *db);
static boolean_t dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx);
@@ -819,20 +825,22 @@ dbuf_free_range(dnode_t *dn, uint64_t start, uint64_t end, dmu_tx_t *tx)
}
dprintf_dnode(dn, "start=%llu end=%llu\n", start, end);
- if (dmu_objset_is_receiving(dn->dn_objset)) {
+ mutex_enter(&dn->dn_dbufs_mtx);
+ if (start >= dn->dn_unlisted_l0_blkid * dn->dn_datablksz) {
+ /* There can't be any dbufs in this range; no need to search. */
+ mutex_exit(&dn->dn_dbufs_mtx);
+ return;
+ } else if (dmu_objset_is_receiving(dn->dn_objset)) {
/*
- * When processing a free record from a zfs receive,
- * there should have been no previous modifications to the
- * data in this range. Therefore there should be no dbufs
- * in the range. Searching dn_dbufs for these non-existent
- * dbufs can be very expensive, so simply ignore this.
+ * If we are receiving, we expect there to be no dbufs in
+ * the range to be freed, because receive modifies each
+ * block at most once, and in offset order. If this is
+ * not the case, it can lead to performance problems,
+ * so note that we unexpectedly took the slow path.
*/
- VERIFY3P(dbuf_find(dn, 0, start), ==, NULL);
- VERIFY3P(dbuf_find(dn, 0, end), ==, NULL);
- return;
+ atomic_inc_64(&zfs_free_range_recv_miss);
}
- mutex_enter(&dn->dn_dbufs_mtx);
for (db = list_head(&dn->dn_dbufs); db; db = db_next) {
db_next = list_next(&dn->dn_dbufs, db);
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
@@ -1720,6 +1728,9 @@ dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
return (odb);
}
list_insert_head(&dn->dn_dbufs, db);
+ if (db->db_level == 0 && db->db_blkid >=
+ dn->dn_unlisted_l0_blkid)
+ dn->dn_unlisted_l0_blkid = db->db_blkid + 1;
db->db_state = DB_UNCACHED;
mutex_exit(&dn->dn_dbufs_mtx);
arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);
diff --git a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu.c b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu.c
index f344f6a..54decd6 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu.c
+++ b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu.c
@@ -575,98 +575,93 @@ dmu_prefetch(objset_t *os, uint64_t object, uint64_t offset, uint64_t len)
* the end so that the file gets shorter over time (if we crashes in the
* middle, this will leave us in a better state). We find allocated file
* data by simply searching the allocated level 1 indirects.
+ *
+ * On input, *start should be the first offset that does not need to be
+ * freed (e.g. "offset + length"). On return, *start will be the first
+ * offset that should be freed.
*/
static int
-get_next_chunk(dnode_t *dn, uint64_t *start, uint64_t limit)
+get_next_chunk(dnode_t *dn, uint64_t *start, uint64_t minimum)
{
- uint64_t len = *start - limit;
- uint64_t blkcnt = 0;
- uint64_t maxblks = DMU_MAX_ACCESS / (1ULL << (dn->dn_indblkshift + 1));
+ uint64_t maxblks = DMU_MAX_ACCESS >> (dn->dn_indblkshift + 1);
+ /* bytes of data covered by a level-1 indirect block */
uint64_t iblkrange =
dn->dn_datablksz * EPB(dn->dn_indblkshift, SPA_BLKPTRSHIFT);
- ASSERT(limit <= *start);
+ ASSERT3U(minimum, <=, *start);
- if (len <= iblkrange * maxblks) {
- *start = limit;
+ if (*start - minimum <= iblkrange * maxblks) {
+ *start = minimum;
return (0);
}
ASSERT(ISP2(iblkrange));
- while (*start > limit && blkcnt < maxblks) {
+ for (uint64_t blks = 0; *start > minimum && blks < maxblks; blks++) {
int err;
- /* find next allocated L1 indirect */
+ /*
+ * dnode_next_offset(BACKWARDS) will find an allocated L1
+ * indirect block at or before the input offset. We must
+ * decrement *start so that it is at the end of the region
+ * to search.
+ */
+ (*start)--;
err = dnode_next_offset(dn,
DNODE_FIND_BACKWARDS, start, 2, 1, 0);
- /* if there are no more, then we are done */
+ /* if there are no indirect blocks before start, we are done */
if (err == ESRCH) {
- *start = limit;
- return (0);
- } else if (err) {
+ *start = minimum;
+ break;
+ } else if (err != 0) {
return (err);
}
- blkcnt += 1;
- /* reset offset to end of "next" block back */
+ /* set start to the beginning of this L1 indirect */
*start = P2ALIGN(*start, iblkrange);
- if (*start <= limit)
- *start = limit;
- else
- *start -= 1;
}
+ if (*start < minimum)
+ *start = minimum;
return (0);
}
static int
dmu_free_long_range_impl(objset_t *os, dnode_t *dn, uint64_t offset,
- uint64_t length, boolean_t free_dnode)
+ uint64_t length)
{
- dmu_tx_t *tx;
- uint64_t object_size, start, end, len;
- boolean_t trunc = (length == DMU_OBJECT_END);
- int align, err;
-
- align = 1 << dn->dn_datablkshift;
- ASSERT(align > 0);
- object_size = align == 1 ? dn->dn_datablksz :
- (dn->dn_maxblkid + 1) << dn->dn_datablkshift;
-
- end = offset + length;
- if (trunc || end > object_size)
- end = object_size;
- if (end <= offset)
+ uint64_t object_size = (dn->dn_maxblkid + 1) * dn->dn_datablksz;
+ int err;
+
+ if (offset >= object_size)
return (0);
- length = end - offset;
- while (length) {
- start = end;
- /* assert(offset <= start) */
- err = get_next_chunk(dn, &start, offset);
+ if (length == DMU_OBJECT_END || offset + length > object_size)
+ length = object_size - offset;
+
+ while (length != 0) {
+ uint64_t chunk_end, chunk_begin;
+
+ chunk_end = chunk_begin = offset + length;
+
+ /* move chunk_begin backwards to the beginning of this chunk */
+ err = get_next_chunk(dn, &chunk_begin, offset);
if (err)
return (err);
- len = trunc ? DMU_OBJECT_END : end - start;
+ ASSERT3U(chunk_begin, >=, offset);
+ ASSERT3U(chunk_begin, <=, chunk_end);
- tx = dmu_tx_create(os);
- dmu_tx_hold_free(tx, dn->dn_object, start, len);
+ dmu_tx_t *tx = dmu_tx_create(os);
+ dmu_tx_hold_free(tx, dn->dn_object,
+ chunk_begin, chunk_end - chunk_begin);
err = dmu_tx_assign(tx, TXG_WAIT);
if (err) {
dmu_tx_abort(tx);
return (err);
}
-
- dnode_free_range(dn, start, trunc ? -1 : len, tx);
-
- if (start == 0 && free_dnode) {
- ASSERT(trunc);
- dnode_free(dn, tx);
- }
-
- length -= end - start;
-
+ dnode_free_range(dn, chunk_begin, chunk_end - chunk_begin, tx);
dmu_tx_commit(tx);
- end = start;
+
+ length -= chunk_end - chunk_begin;
}
return (0);
}
@@ -681,38 +676,32 @@ dmu_free_long_range(objset_t *os, uint64_t object,
err = dnode_hold(os, object, FTAG, &dn);
if (err != 0)
return (err);
- err = dmu_free_long_range_impl(os, dn, offset, length, FALSE);
+ err = dmu_free_long_range_impl(os, dn, offset, length);
dnode_rele(dn, FTAG);
return (err);
}
int
-dmu_free_object(objset_t *os, uint64_t object)
+dmu_free_long_object(objset_t *os, uint64_t object)
{
- dnode_t *dn;
dmu_tx_t *tx;
int err;
- err = dnode_hold_impl(os, object, DNODE_MUST_BE_ALLOCATED,
- FTAG, &dn);
+ err = dmu_free_long_range(os, object, 0, DMU_OBJECT_END);
if (err != 0)
return (err);
- if (dn->dn_nlevels == 1) {
- tx = dmu_tx_create(os);
- dmu_tx_hold_bonus(tx, object);
- dmu_tx_hold_free(tx, dn->dn_object, 0, DMU_OBJECT_END);
- err = dmu_tx_assign(tx, TXG_WAIT);
- if (err == 0) {
- dnode_free_range(dn, 0, DMU_OBJECT_END, tx);
- dnode_free(dn, tx);
- dmu_tx_commit(tx);
- } else {
- dmu_tx_abort(tx);
- }
+
+ tx = dmu_tx_create(os);
+ dmu_tx_hold_bonus(tx, object);
+ dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
+ err = dmu_tx_assign(tx, TXG_WAIT);
+ if (err == 0) {
+ err = dmu_object_free(os, object, tx);
+ dmu_tx_commit(tx);
} else {
- err = dmu_free_long_range_impl(os, dn, 0, DMU_OBJECT_END, TRUE);
+ dmu_tx_abort(tx);
}
- dnode_rele(dn, FTAG);
+
return (err);
}
diff --git a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_send.c b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_send.c
index f636253..c8b5f94 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_send.c
+++ b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_send.c
@@ -1286,7 +1286,7 @@ restore_freeobjects(struct restorearg *ra, objset_t *os,
if (dmu_object_info(os, obj, NULL) != 0)
continue;
- err = dmu_free_object(os, obj);
+ err = dmu_free_long_object(os, obj);
if (err != 0)
return (err);
}
diff --git a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_tx.c b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_tx.c
index 8d23aa3..dddaa0a 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_tx.c
+++ b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_tx.c
@@ -615,7 +615,8 @@ dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, uint64_t len)
* if they are blocksize-aligned.
*/
if (dn->dn_datablkshift == 0) {
- dmu_tx_count_write(txh, off, len);
+ if (off != 0 || len < dn->dn_datablksz)
+ dmu_tx_count_write(txh, off, len);
} else {
/* first block will be modified if it is not aligned */
if (!IS_P2ALIGNED(off, 1 << dn->dn_datablkshift))
diff --git a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dnode.c b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dnode.c
index bf47278..c943a0f 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dnode.c
+++ b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dnode.c
@@ -117,6 +117,7 @@ dnode_cons(void *arg, void *unused, int kmflag)
dn->dn_id_flags = 0;
dn->dn_dbufs_count = 0;
+ dn->dn_unlisted_l0_blkid = 0;
list_create(&dn->dn_dbufs, sizeof (dmu_buf_impl_t),
offsetof(dmu_buf_impl_t, db_link));
@@ -170,6 +171,7 @@ dnode_dest(void *arg, void *unused)
ASSERT0(dn->dn_id_flags);
ASSERT0(dn->dn_dbufs_count);
+ ASSERT0(dn->dn_unlisted_l0_blkid);
list_destroy(&dn->dn_dbufs);
}
@@ -475,6 +477,7 @@ dnode_destroy(dnode_t *dn)
dn->dn_newuid = 0;
dn->dn_newgid = 0;
dn->dn_id_flags = 0;
+ dn->dn_unlisted_l0_blkid = 0;
dmu_zfetch_rele(&dn->dn_zfetch);
kmem_cache_free(dnode_cache, dn);
@@ -705,6 +708,7 @@ dnode_move_impl(dnode_t *odn, dnode_t *ndn)
ASSERT(list_is_empty(&ndn->dn_dbufs));
list_move_tail(&ndn->dn_dbufs, &odn->dn_dbufs);
ndn->dn_dbufs_count = odn->dn_dbufs_count;
+ ndn->dn_unlisted_l0_blkid = odn->dn_unlisted_l0_blkid;
ndn->dn_bonus = odn->dn_bonus;
ndn->dn_have_spill = odn->dn_have_spill;
ndn->dn_zio = odn->dn_zio;
@@ -739,6 +743,7 @@ dnode_move_impl(dnode_t *odn, dnode_t *ndn)
list_create(&odn->dn_dbufs, sizeof (dmu_buf_impl_t),
offsetof(dmu_buf_impl_t, db_link));
odn->dn_dbufs_count = 0;
+ odn->dn_unlisted_l0_blkid = 0;
odn->dn_bonus = NULL;
odn->dn_zfetch.zf_dnode = NULL;
@@ -1528,7 +1533,7 @@ dnode_free_range(dnode_t *dn, uint64_t off, uint64_t len, dmu_tx_t *tx)
blkshift = dn->dn_datablkshift;
epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
- if (len == -1ULL) {
+ if (len == DMU_OBJECT_END) {
len = UINT64_MAX - off;
trunc = TRUE;
}
diff --git a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_dataset.c b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_dataset.c
index ff6e682..1b69ed52 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_dataset.c
+++ b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_dataset.c
@@ -101,9 +101,8 @@ dsl_dataset_block_born(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx)
used, compressed, uncompressed);
return;
}
- dmu_buf_will_dirty(ds->ds_dbuf, tx);
- mutex_enter(&ds->ds_dir->dd_lock);
+ dmu_buf_will_dirty(ds->ds_dbuf, tx);
mutex_enter(&ds->ds_lock);
delta = parent_delta(ds, used);
ds->ds_phys->ds_referenced_bytes += used;
@@ -115,7 +114,6 @@ dsl_dataset_block_born(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx)
compressed, uncompressed, tx);
dsl_dir_transfer_space(ds->ds_dir, used - delta,
DD_USED_REFRSRV, DD_USED_HEAD, tx);
- mutex_exit(&ds->ds_dir->dd_lock);
}
int
@@ -150,7 +148,6 @@ dsl_dataset_block_kill(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx,
dprintf_bp(bp, "freeing ds=%llu", ds->ds_object);
dsl_free(tx->tx_pool, tx->tx_txg, bp);
- mutex_enter(&ds->ds_dir->dd_lock);
mutex_enter(&ds->ds_lock);
ASSERT(ds->ds_phys->ds_unique_bytes >= used ||
!DS_UNIQUE_IS_ACCURATE(ds));
@@ -161,7 +158,6 @@ dsl_dataset_block_kill(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx,
delta, -compressed, -uncompressed, tx);
dsl_dir_transfer_space(ds->ds_dir, -used - delta,
DD_USED_REFRSRV, DD_USED_HEAD, tx);
- mutex_exit(&ds->ds_dir->dd_lock);
} else {
dprintf_bp(bp, "putting on dead list: %s", "");
if (async) {
@@ -596,31 +592,6 @@ dsl_dataset_name(dsl_dataset_t *ds, char *name)
}
}
-static int
-dsl_dataset_namelen(dsl_dataset_t *ds)
-{
- int result;
-
- if (ds == NULL) {
- result = 3; /* "mos" */
- } else {
- result = dsl_dir_namelen(ds->ds_dir);
- VERIFY0(dsl_dataset_get_snapname(ds));
- if (ds->ds_snapname[0]) {
- ++result; /* adding one for the @-sign */
- if (!MUTEX_HELD(&ds->ds_lock)) {
- mutex_enter(&ds->ds_lock);
- result += strlen(ds->ds_snapname);
- mutex_exit(&ds->ds_lock);
- } else {
- result += strlen(ds->ds_snapname);
- }
- }
- }
-
- return (result);
-}
-
void
dsl_dataset_rele(dsl_dataset_t *ds, void *tag)
{
diff --git a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_destroy.c b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_destroy.c
index ef46df8..b002df7 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_destroy.c
+++ b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_destroy.c
@@ -896,7 +896,7 @@ dsl_destroy_head(const char *name)
for (uint64_t obj = 0; error == 0;
error = dmu_object_next(os, &obj, FALSE,
prev_snap_txg))
- (void) dmu_free_object(os, obj);
+ (void) dmu_free_long_object(os, obj);
/* sync out all frees */
txg_wait_synced(dmu_objset_pool(os), 0);
dmu_objset_disown(os, FTAG);
diff --git a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_dir.c b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_dir.c
index 7d0b104..4c86f9c 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_dir.c
+++ b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_dir.c
@@ -845,11 +845,21 @@ dsl_dir_diduse_space(dsl_dir_t *dd, dd_used_t type,
int64_t used, int64_t compressed, int64_t uncompressed, dmu_tx_t *tx)
{
int64_t accounted_delta;
+
+ /*
+ * dsl_dataset_set_refreservation_sync_impl() calls this with
+ * dd_lock held, so that it can atomically update
+ * ds->ds_reserved and the dsl_dir accounting, so that
+ * dsl_dataset_check_quota() can see dataset and dir accounting
+ * consistently.
+ */
boolean_t needlock = !MUTEX_HELD(&dd->dd_lock);
ASSERT(dmu_tx_is_syncing(tx));
ASSERT(type < DD_USED_NUM);
+ dmu_buf_will_dirty(dd->dd_dbuf, tx);
+
if (needlock)
mutex_enter(&dd->dd_lock);
accounted_delta = parent_delta(dd, dd->dd_phys->dd_used_bytes, used);
@@ -858,7 +868,6 @@ dsl_dir_diduse_space(dsl_dir_t *dd, dd_used_t type,
dd->dd_phys->dd_compressed_bytes >= -compressed);
ASSERT(uncompressed >= 0 ||
dd->dd_phys->dd_uncompressed_bytes >= -uncompressed);
- dmu_buf_will_dirty(dd->dd_dbuf, tx);
dd->dd_phys->dd_used_bytes += used;
dd->dd_phys->dd_uncompressed_bytes += uncompressed;
dd->dd_phys->dd_compressed_bytes += compressed;
@@ -891,8 +900,6 @@ void
dsl_dir_transfer_space(dsl_dir_t *dd, int64_t delta,
dd_used_t oldtype, dd_used_t newtype, dmu_tx_t *tx)
{
- boolean_t needlock = !MUTEX_HELD(&dd->dd_lock);
-
ASSERT(dmu_tx_is_syncing(tx));
ASSERT(oldtype < DD_USED_NUM);
ASSERT(newtype < DD_USED_NUM);
@@ -900,17 +907,15 @@ dsl_dir_transfer_space(dsl_dir_t *dd, int64_t delta,
if (delta == 0 || !(dd->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN))
return;
- if (needlock)
- mutex_enter(&dd->dd_lock);
+ dmu_buf_will_dirty(dd->dd_dbuf, tx);
+ mutex_enter(&dd->dd_lock);
ASSERT(delta > 0 ?
dd->dd_phys->dd_used_breakdown[oldtype] >= delta :
dd->dd_phys->dd_used_breakdown[newtype] >= -delta);
ASSERT(dd->dd_phys->dd_used_bytes >= ABS(delta));
- dmu_buf_will_dirty(dd->dd_dbuf, tx);
dd->dd_phys->dd_used_breakdown[oldtype] -= delta;
dd->dd_phys->dd_used_breakdown[newtype] += delta;
- if (needlock)
- mutex_exit(&dd->dd_lock);
+ mutex_exit(&dd->dd_lock);
}
typedef struct dsl_dir_set_qr_arg {
diff --git a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dmu.h b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dmu.h
index eb7ed24..d2b44cda 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dmu.h
+++ b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dmu.h
@@ -21,7 +21,7 @@
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012 by Delphix. All rights reserved.
+ * Copyright (c) 2013 by Delphix. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2012, Joyent, Inc. All rights reserved.
*/
@@ -583,7 +583,7 @@ int dmu_free_range(objset_t *os, uint64_t object, uint64_t offset,
uint64_t size, dmu_tx_t *tx);
int dmu_free_long_range(objset_t *os, uint64_t object, uint64_t offset,
uint64_t size);
-int dmu_free_object(objset_t *os, uint64_t object);
+int dmu_free_long_object(objset_t *os, uint64_t object);
/*
* Convenience functions.
diff --git a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dnode.h b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dnode.h
index c3de03d..55b87bc 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dnode.h
+++ b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dnode.h
@@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012 by Delphix. All rights reserved.
+ * Copyright (c) 2013 by Delphix. All rights reserved.
*/
#ifndef _SYS_DNODE_H
@@ -188,6 +188,8 @@ typedef struct dnode {
/* protected by dn_dbufs_mtx; declared here to fill 32-bit hole */
uint32_t dn_dbufs_count; /* count of dn_dbufs */
+ /* There are no level-0 blocks of this blkid or higher in dn_dbufs */
+ uint64_t dn_unlisted_l0_blkid;
/* protected by os_lock: */
list_node_t dn_dirty_link[TXG_SIZE]; /* next on dataset's dirty */
diff --git a/sys/cddl/contrib/opensolaris/uts/common/sys/ctf_api.h b/sys/cddl/contrib/opensolaris/uts/common/sys/ctf_api.h
index b810f07..def14b7 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/sys/ctf_api.h
+++ b/sys/cddl/contrib/opensolaris/uts/common/sys/ctf_api.h
@@ -23,6 +23,9 @@
* Copyright 2005 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
+/*
+ * Copyright (c) 2012, Joyent, Inc. All rights reserved.
+ */
/*
* This header file defines the interfaces available from the CTF debugger
@@ -40,8 +43,6 @@
#ifndef _CTF_API_H
#define _CTF_API_H
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <sys/types.h>
#include <sys/param.h>
#include <sys/elf.h>
@@ -227,6 +228,8 @@ extern int ctf_add_member(ctf_file_t *, ctf_id_t, const char *, ctf_id_t);
extern int ctf_set_array(ctf_file_t *, ctf_id_t, const ctf_arinfo_t *);
+extern int ctf_delete_type(ctf_file_t *, ctf_id_t);
+
extern int ctf_update(ctf_file_t *);
extern int ctf_discard(ctf_file_t *);
extern int ctf_write(ctf_file_t *, int);
diff --git a/sys/conf/files.amd64 b/sys/conf/files.amd64
index c898ef8..1d5fa76 100644
--- a/sys/conf/files.amd64
+++ b/sys/conf/files.amd64
@@ -304,6 +304,7 @@ dev/tpm/tpm_acpi.c optional tpm acpi
dev/tpm/tpm_isa.c optional tpm isa
dev/uart/uart_cpu_x86.c optional uart
dev/viawd/viawd.c optional viawd
+dev/vmware/vmxnet3/if_vmx.c optional vmx
dev/wbwd/wbwd.c optional wbwd
dev/wpi/if_wpi.c optional wpi
dev/isci/isci.c optional isci
diff --git a/sys/conf/files.i386 b/sys/conf/files.i386
index 9a27bde..4f8f8ed 100644
--- a/sys/conf/files.i386
+++ b/sys/conf/files.i386
@@ -267,6 +267,7 @@ dev/tpm/tpm_acpi.c optional tpm acpi
dev/tpm/tpm_isa.c optional tpm isa
dev/uart/uart_cpu_x86.c optional uart
dev/viawd/viawd.c optional viawd
+dev/vmware/vmxnet3/if_vmx.c optional vmx
dev/acpica/acpi_if.m standard
dev/acpi_support/acpi_wmi_if.m standard
dev/wbwd/wbwd.c optional wbwd
diff --git a/sys/contrib/dev/acpica/acpica_prep.sh b/sys/contrib/dev/acpica/acpica_prep.sh
index b82586d..4a467a1 100755
--- a/sys/contrib/dev/acpica/acpica_prep.sh
+++ b/sys/contrib/dev/acpica/acpica_prep.sh
@@ -21,7 +21,7 @@ stripdirs="generate libraries tests tools"
stripfiles="Makefile README accygwin.h acefi.h achaiku.h acintel.h \
aclinux.h acmacosx.h acmsvc.h acnetbsd.h acos2.h acwin.h \
acwin64.h new_table.txt osfreebsdtbl.c oslinuxtbl.c osunixdir.c \
- oswindir.c oswintbl.c oswinxf.c readme.txt utclib.c"
+ osunixmap.c oswindir.c oswintbl.c oswinxf.c readme.txt utclib.c"
# include files to canonify
src_headers="acapps.h acbuffer.h accommon.h acconfig.h acdebug.h \
diff --git a/sys/contrib/dev/acpica/changes.txt b/sys/contrib/dev/acpica/changes.txt
index 83c156b..ab43358 100644
--- a/sys/contrib/dev/acpica/changes.txt
+++ b/sys/contrib/dev/acpica/changes.txt
@@ -1,4 +1,88 @@
----------------------------------------
+23 August 2013. Summary of changes for version 20130823:
+
+1) ACPICA kernel-resident subsystem:
+
+Implemented support for host-installed System Control Interrupt (SCI)
+handlers. Certain ACPI functionality requires the host to handle raw
+SCIs. For example, the "SCI Doorbell" that is defined for memory power
+state support requires the host device driver to handle SCIs to examine
+if the doorbell has been activated. Multiple SCI handlers can be
+installed to allow for future expansion. New external interfaces are
+AcpiInstallSciHandler, AcpiRemoveSciHandler; see the ACPICA reference for
+details. Lv Zheng, Bob Moore. ACPICA BZ 1032.
+
+Operation region support: Never locally free the handler "context"
+pointer. This change removes some dangerous code that attempts to free
+the handler context pointer in some (rare) circumstances. The owner of
+the handler owns this pointer and the ACPICA code should never touch it.
+Although not seen to be an issue in any kernel, it did show up as a
+problem (fault) under AcpiExec. Also, set the internal storage field for
+the context pointer to zero when the region is deactivated, simply for
+sanity. David Box. ACPICA BZ 1039.
+
+AcpiRead: On error, do not modify the return value target location. If an
+error happens in the middle of a split 32/32 64-bit I/O operation, do not
+modify the target of the return value pointer. Makes the code consistent
+with the rest of ACPICA. Bjorn Helgaas.
+
+Example Code and Data Size: These are the sizes for the OS-independent
+acpica.lib produced by the Microsoft Visual C++ 9.0 32-bit compiler. The
+debug version of the code includes the debug output trace mechanism and
+has a much larger code and data size.
+
+ Current Release:
+ Non-Debug Version: 96.7K Code, 27.1K Data, 123.9K Total
+ Debug Version: 184.4K Code, 76.8K Data, 261.2K Total
+ Previous Release:
+ Non-Debug Version: 96.2K Code, 27.1K Data, 123.3K Total
+ Debug Version: 185.4K Code, 77.1K Data, 262.5K Total
+
+
+2) iASL Compiler/Disassembler and Tools:
+
+AcpiDump: Implemented several new features and fixed some problems:
+1) Added support to dump the RSDP, RSDT, and XSDT tables.
+2) Added support for multiple table instances (SSDT, UEFI).
+3) Added option to dump "customized" (overridden) tables (-c).
+4) Fixed a problem where some table filenames were improperly
+constructed.
+5) Improved some error messages, removed some unnecessary messages.
+
+iASL: Implemented additional support for disassembly of ACPI tables that
+contain invocations of external control methods. The -fe<file> option
+allows the import of a file that specifies the external methods along
+with the required number of arguments for each -- allowing for the
+correct disassembly of the table. This is a workaround for a limitation
+of AML code where the disassembler often cannot determine the number of
+arguments required for an external control method and generates incorrect
+ASL code. See the iASL reference for details. ACPICA BZ 1030.
+
+Debugger: Implemented a new command (paths) that displays the full
+pathnames (namepaths) and object types of all objects in the namespace.
+This is an alternative to the namespace command.
+
+Debugger: Implemented a new command (sci) that invokes the SCI dispatch
+mechanism and any installed handlers.
+
+iASL: Fixed a possible segfault for "too many parent prefixes" condition.
+This can occur if there are too many parent prefixes in a namepath (for
+example, ^^^^^^PCI0.ECRD). ACPICA BZ 1035.
+
+Application OSLs: Set the return value for the PCI read functions. These
+functions simply return AE_OK, but should set the return value to zero
+also. This change implements this. ACPICA BZ 1038.
+
+Debugger: Prevent possible command line buffer overflow. Increase the
+size of a couple of the debugger line buffers, and ensure that overflow
+cannot happen. ACPICA BZ 1037.
+
+iASL: Changed to abort immediately on serious errors during the parsing
+phase. Due to the nature of ASL, there is no point in attempting to
+compile these types of errors, and they typically end up causing a
+cascade of hundreds of errors which obscure the original problem.
+
+----------------------------------------
25 July 2013. Summary of changes for version 20130725:
1) ACPICA kernel-resident subsystem:
diff --git a/sys/contrib/dev/acpica/common/adisasm.c b/sys/contrib/dev/acpica/common/adisasm.c
index 6065bd1..c361a6a 100644
--- a/sys/contrib/dev/acpica/common/adisasm.c
+++ b/sys/contrib/dev/acpica/common/adisasm.c
@@ -341,6 +341,10 @@ AdAmlDisassemble (
{
AcpiDmClearExternalList ();
}
+
+ /* Load any externals defined in the optional external ref file */
+
+ AcpiDmGetExternalsFromFile ();
}
else
{
diff --git a/sys/contrib/dev/acpica/common/adwalk.c b/sys/contrib/dev/acpica/common/adwalk.c
index fb136b9..80f6d83 100644
--- a/sys/contrib/dev/acpica/common/adwalk.c
+++ b/sys/contrib/dev/acpica/common/adwalk.c
@@ -373,10 +373,18 @@ AcpiDmDumpDescending (
switch (Op->Common.AmlOpcode)
{
case AML_BYTE_OP:
+
+ AcpiOsPrintf ("%2.2X", (UINT32) Op->Common.Value.Integer);
+ break;
+
case AML_WORD_OP:
+
+ AcpiOsPrintf ("%4.4X", (UINT32) Op->Common.Value.Integer);
+ break;
+
case AML_DWORD_OP:
- AcpiOsPrintf ("%X", (UINT32) Op->Common.Value.Integer);
+ AcpiOsPrintf ("%8.8X", (UINT32) Op->Common.Value.Integer);
break;
case AML_QWORD_OP:
diff --git a/sys/contrib/dev/acpica/common/dmextern.c b/sys/contrib/dev/acpica/common/dmextern.c
index 3ded428..56e2781 100644
--- a/sys/contrib/dev/acpica/common/dmextern.c
+++ b/sys/contrib/dev/acpica/common/dmextern.c
@@ -46,7 +46,9 @@
#include <contrib/dev/acpica/include/amlcode.h>
#include <contrib/dev/acpica/include/acnamesp.h>
#include <contrib/dev/acpica/include/acdisasm.h>
+#include <contrib/dev/acpica/compiler/aslcompiler.h>
#include <stdio.h>
+#include <errno.h>
/*
@@ -87,6 +89,8 @@ static const char *AcpiGbl_DmTypeNames[] =
/* 19 */ ", FieldUnitObj"
};
+#define METHOD_SEPARATORS " \t,()\n"
+
/* Local prototypes */
@@ -99,6 +103,12 @@ AcpiDmNormalizeParentPrefix (
ACPI_PARSE_OBJECT *Op,
char *Path);
+static void
+AcpiDmAddToExternalListFromFile (
+ char *Path,
+ UINT8 Type,
+ UINT32 Value);
+
/*******************************************************************************
*
@@ -444,7 +454,7 @@ AcpiDmAddToExternalList (
(NextExternal->Value != Value))
{
ACPI_ERROR ((AE_INFO,
- "Argument count mismatch for method %s %u %u",
+ "External method arg count mismatch %s: Current %u, attempted %u",
NextExternal->Path, NextExternal->Value, Value));
}
@@ -536,6 +546,275 @@ AcpiDmAddToExternalList (
/*******************************************************************************
*
+ * FUNCTION: AcpiDmGetExternalsFromFile
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Process the optional external reference file.
+ *
+ * Each line in the file should be of the form:
+ * External (<Method namepath>, MethodObj, <ArgCount>)
+ *
+ * Example:
+ * External (_SB_.PCI0.XHC_.PS0X, MethodObj, 4)
+ *
+ ******************************************************************************/
+
+void
+AcpiDmGetExternalsFromFile (
+ void)
+{
+ FILE *ExternalRefFile;
+ char *Token;
+ char *MethodName;
+ UINT32 ArgCount;
+ UINT32 ImportCount = 0;
+
+
+ if (!Gbl_ExternalRefFilename)
+ {
+ return;
+ }
+
+ /* Open the file */
+
+ ExternalRefFile = fopen (Gbl_ExternalRefFilename, "r");
+ if (!ExternalRefFile)
+ {
+ fprintf (stderr, "Could not open external reference file \"%s\"\n",
+ Gbl_ExternalRefFilename);
+ return;
+ }
+
+ /* Each line defines a method */
+
+ while (fgets (StringBuffer, ASL_MSG_BUFFER_SIZE, ExternalRefFile))
+ {
+ Token = strtok (StringBuffer, METHOD_SEPARATORS); /* "External" */
+ if (!Token) continue;
+ if (strcmp (Token, "External")) continue;
+
+ MethodName = strtok (NULL, METHOD_SEPARATORS); /* Method namepath */
+ if (!MethodName) continue;
+
+ Token = strtok (NULL, METHOD_SEPARATORS); /* "MethodObj" */
+ if (!Token) continue;
+ if (strcmp (Token, "MethodObj")) continue;
+
+ Token = strtok (NULL, METHOD_SEPARATORS); /* Arg count */
+ if (!Token) continue;
+
+ /* Convert arg count string to an integer */
+
+ errno = 0;
+ ArgCount = strtoul (Token, NULL, 0);
+ if (errno)
+ {
+ fprintf (stderr, "Invalid argument count (%s)\n", Token);
+ continue;
+ }
+ if (ArgCount > 7)
+ {
+ fprintf (stderr, "Invalid argument count (%u)\n", ArgCount);
+ continue;
+ }
+
+ /* Add this external to the global list */
+
+ AcpiOsPrintf ("%s: Importing method external (%u arguments) %s\n",
+ Gbl_ExternalRefFilename, ArgCount, MethodName);
+
+ AcpiDmAddToExternalListFromFile (MethodName, ACPI_TYPE_METHOD, ArgCount | 0x80);
+ ImportCount++;
+ }
+
+ if (!ImportCount)
+ {
+ fprintf (stderr, "Did not find any external methods in reference file \"%s\"\n",
+ Gbl_ExternalRefFilename);
+ }
+ else
+ {
+ /* Add the external(s) to the namespace */
+
+ AcpiDmAddExternalsToNamespace ();
+
+ AcpiOsPrintf ("%s: Imported %u external method definitions\n",
+ Gbl_ExternalRefFilename, ImportCount);
+ }
+
+ fclose (ExternalRefFile);
+}
+
+
+/*******************************************************************************
+ *
+ * FUNCTION: AcpiDmAddToExternalListFromFile
+ *
+ * PARAMETERS: Path - Internal (AML) path to the object
+ * Type - ACPI object type to be added
+ * Value - Arg count if adding a Method object
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Insert a new name into the global list of Externals which
+ * will in turn be later emitted as an External() declaration
+ * in the disassembled output.
+ *
+ ******************************************************************************/
+
+static void
+AcpiDmAddToExternalListFromFile (
+ char *Path,
+ UINT8 Type,
+ UINT32 Value)
+{
+ char *InternalPath;
+ char *ExternalPath;
+ ACPI_EXTERNAL_LIST *NewExternal;
+ ACPI_EXTERNAL_LIST *NextExternal;
+ ACPI_EXTERNAL_LIST *PrevExternal = NULL;
+ ACPI_STATUS Status;
+ BOOLEAN Resolved = FALSE;
+
+
+ if (!Path)
+ {
+ return;
+ }
+
+ /* TBD: Add a flags parameter */
+
+ if (Type == ACPI_TYPE_METHOD)
+ {
+ if (Value & 0x80)
+ {
+ Resolved = TRUE;
+ }
+ Value &= 0x07;
+ }
+
+ /*
+ * We don't want External() statements to contain a leading '\'.
+ * This prevents duplicate external statements of the form:
+ *
+ * External (\ABCD)
+ * External (ABCD)
+ *
+ * This would cause a compile time error when the disassembled
+ * output file is recompiled.
+ */
+ if ((*Path == AML_ROOT_PREFIX) && (Path[1]))
+ {
+ Path++;
+ }
+
+ /* Check all existing externals to ensure no duplicates */
+
+ NextExternal = AcpiGbl_ExternalList;
+ while (NextExternal)
+ {
+ if (!ACPI_STRCMP (Path, NextExternal->Path))
+ {
+ /* Duplicate method, check that the Value (ArgCount) is the same */
+
+ if ((NextExternal->Type == ACPI_TYPE_METHOD) &&
+ (NextExternal->Value != Value))
+ {
+ ACPI_ERROR ((AE_INFO,
+ "(File) External method arg count mismatch %s: Current %u, override to %u",
+ NextExternal->Path, NextExternal->Value, Value));
+
+ /* Override, since new value came from external reference file */
+
+ NextExternal->Value = Value;
+ }
+
+ /* Allow upgrade of type from ANY */
+
+ else if (NextExternal->Type == ACPI_TYPE_ANY)
+ {
+ NextExternal->Type = Type;
+ NextExternal->Value = Value;
+ }
+
+ return;
+ }
+
+ NextExternal = NextExternal->Next;
+ }
+
+ /* Get the internal pathname (AML format) */
+
+ Status = AcpiNsInternalizeName (Path, &InternalPath);
+ if (ACPI_FAILURE (Status))
+ {
+ return;
+ }
+
+ /* Allocate and init a new External() descriptor */
+
+ NewExternal = ACPI_ALLOCATE_ZEROED (sizeof (ACPI_EXTERNAL_LIST));
+ if (!NewExternal)
+ {
+ ACPI_FREE (InternalPath);
+ return;
+ }
+
+ /* Must copy and normalize the input path */
+
+ AcpiNsExternalizeName (ACPI_UINT32_MAX, InternalPath, NULL, &ExternalPath);
+
+ NewExternal->Path = ExternalPath;
+ NewExternal->Type = Type;
+ NewExternal->Value = Value;
+ NewExternal->Resolved = Resolved;
+ NewExternal->Length = (UINT16) ACPI_STRLEN (Path);
+ NewExternal->InternalPath = InternalPath;
+
+ /* Set flag to indicate External->InternalPath needs to be freed */
+
+ NewExternal->Flags |= ACPI_IPATH_ALLOCATED | ACPI_FROM_REFERENCE_FILE;
+
+ /* Link the new descriptor into the global list, alphabetically ordered */
+
+ NextExternal = AcpiGbl_ExternalList;
+ while (NextExternal)
+ {
+ if (AcpiUtStricmp (NewExternal->Path, NextExternal->Path) < 0)
+ {
+ if (PrevExternal)
+ {
+ PrevExternal->Next = NewExternal;
+ }
+ else
+ {
+ AcpiGbl_ExternalList = NewExternal;
+ }
+
+ NewExternal->Next = NextExternal;
+ return;
+ }
+
+ PrevExternal = NextExternal;
+ NextExternal = NextExternal->Next;
+ }
+
+ if (PrevExternal)
+ {
+ PrevExternal->Next = NewExternal;
+ }
+ else
+ {
+ AcpiGbl_ExternalList = NewExternal;
+ }
+}
+
+
+/*******************************************************************************
+ *
* FUNCTION: AcpiDmAddExternalsToNamespace
*
* PARAMETERS: None
@@ -563,7 +842,7 @@ AcpiDmAddExternalsToNamespace (
Status = AcpiNsLookup (NULL, External->InternalPath, External->Type,
ACPI_IMODE_LOAD_PASS1,
- ACPI_NS_EXTERNAL | ACPI_NS_DONT_OPEN_SCOPE,
+ ACPI_NS_ERROR_IF_FOUND | ACPI_NS_EXTERNAL | ACPI_NS_DONT_OPEN_SCOPE,
NULL, &Node);
if (ACPI_FAILURE (Status))
@@ -731,7 +1010,8 @@ AcpiDmEmitExternals (
NextExternal->Path,
AcpiDmGetObjectTypeName (NextExternal->Type));
- AcpiOsPrintf (") // Warning: Unresolved Method, "
+ AcpiOsPrintf (
+ ") // Warning: Unresolved Method, "
"guessing %u arguments (may be incorrect, see warning above)\n",
NextExternal->Value);
@@ -743,9 +1023,45 @@ AcpiDmEmitExternals (
AcpiOsPrintf ("\n");
+
+ /* Emit externals that were imported from a file */
+
+ if (Gbl_ExternalRefFilename)
+ {
+ AcpiOsPrintf (
+ " /*\n * External declarations that were imported from\n"
+ " * the reference file [%s]\n */\n",
+ Gbl_ExternalRefFilename);
+
+ NextExternal = AcpiGbl_ExternalList;
+ while (NextExternal)
+ {
+ if (!NextExternal->Emitted && (NextExternal->Flags & ACPI_FROM_REFERENCE_FILE))
+ {
+ AcpiOsPrintf (" External (%s%s",
+ NextExternal->Path,
+ AcpiDmGetObjectTypeName (NextExternal->Type));
+
+ if (NextExternal->Type == ACPI_TYPE_METHOD)
+ {
+ AcpiOsPrintf (") // %u Arguments\n",
+ NextExternal->Value);
+ }
+ else
+ {
+ AcpiOsPrintf (")\n");
+ }
+ NextExternal->Emitted = TRUE;
+ }
+
+ NextExternal = NextExternal->Next;
+ }
+
+ AcpiOsPrintf ("\n");
+ }
+
/*
- * Walk the list of externals (unresolved references)
- * found during the AML parsing
+ * Walk the list of externals found during the AML parsing
*/
while (AcpiGbl_ExternalList)
{
diff --git a/sys/contrib/dev/acpica/common/dmtable.c b/sys/contrib/dev/acpica/common/dmtable.c
index 6aab9e5..e7ce520 100644
--- a/sys/contrib/dev/acpica/common/dmtable.c
+++ b/sys/contrib/dev/acpica/common/dmtable.c
@@ -446,7 +446,7 @@ AcpiDmDumpDataTable (
Length = Table->Length;
AcpiDmDumpTable (Length, 0, Table, 0, AcpiDmTableInfoFacs);
}
- else if (ACPI_COMPARE_NAME (Table->Signature, ACPI_SIG_RSDP))
+ else if (ACPI_VALIDATE_RSDP_SIG (Table->Signature))
{
Length = AcpiDmDumpRsdp (Table);
}
diff --git a/sys/contrib/dev/acpica/compiler/aslcompile.c b/sys/contrib/dev/acpica/compiler/aslcompile.c
index 7b2393c..48ccfa3 100644
--- a/sys/contrib/dev/acpica/compiler/aslcompile.c
+++ b/sys/contrib/dev/acpica/compiler/aslcompile.c
@@ -593,10 +593,15 @@ CmDoCompile (
AslCompilerparse();
UtEndEvent (Event);
- /* Flush out any remaining source after parse tree is complete */
+ /* Check for parse errors */
- Event = UtBeginEvent ("Flush source input");
- CmFlushSourceCode ();
+ Status = AslCheckForErrorExit ();
+ if (ACPI_FAILURE (Status))
+ {
+ fprintf (stderr, "Compiler aborting due to parser-detected syntax error(s)\n");
+ LsDumpParseTree ();
+ goto ErrorExit;
+ }
/* Did the parse tree get successfully constructed? */
@@ -606,16 +611,18 @@ CmDoCompile (
* If there are no errors, then we have some sort of
* internal problem.
*/
- Status = AslCheckForErrorExit ();
- if (Status == AE_OK)
- {
- AslError (ASL_ERROR, ASL_MSG_COMPILER_INTERNAL,
- NULL, "- Could not resolve parse tree root node");
- }
+ AslError (ASL_ERROR, ASL_MSG_COMPILER_INTERNAL,
+ NULL, "- Could not resolve parse tree root node");
goto ErrorExit;
}
+
+ /* Flush out any remaining source after parse tree is complete */
+
+ Event = UtBeginEvent ("Flush source input");
+ CmFlushSourceCode ();
+
/* Optional parse tree dump, compiler debug output only */
LsDumpParseTree ();
diff --git a/sys/contrib/dev/acpica/compiler/aslglobal.h b/sys/contrib/dev/acpica/compiler/aslglobal.h
index e1be4ae..3f2c401 100644
--- a/sys/contrib/dev/acpica/compiler/aslglobal.h
+++ b/sys/contrib/dev/acpica/compiler/aslglobal.h
@@ -186,6 +186,7 @@ ASL_EXTERN char ASL_INIT_GLOBAL (*Gbl_IncludeFilename, NULL)
ASL_EXTERN char ASL_INIT_GLOBAL (*Gbl_OutputFilenamePrefix, NULL);
ASL_EXTERN ASL_INCLUDE_DIR ASL_INIT_GLOBAL (*Gbl_IncludeDirList, NULL);
ASL_EXTERN char *Gbl_CurrentInputFilename;
+ASL_EXTERN char ASL_INIT_GLOBAL (*Gbl_ExternalRefFilename, NULL);
ASL_EXTERN BOOLEAN ASL_INIT_GLOBAL (Gbl_HasIncludeFiles, FALSE);
diff --git a/sys/contrib/dev/acpica/compiler/aslload.c b/sys/contrib/dev/acpica/compiler/aslload.c
index 57519d1..bc4d593 100644
--- a/sys/contrib/dev/acpica/compiler/aslload.c
+++ b/sys/contrib/dev/acpica/compiler/aslload.c
@@ -487,6 +487,10 @@ LdNamespace1Begin (
ACPI_TYPE_LOCAL_SCOPE,
ACPI_IMODE_LOAD_PASS1, Flags,
WalkState, &(Node));
+ if (ACPI_FAILURE (Status))
+ {
+ return_ACPI_STATUS (Status);
+ }
/*
* However, this is an error -- primarily because the MS
diff --git a/sys/contrib/dev/acpica/compiler/aslmain.c b/sys/contrib/dev/acpica/compiler/aslmain.c
index 2a70588..67e561a 100644
--- a/sys/contrib/dev/acpica/compiler/aslmain.c
+++ b/sys/contrib/dev/acpica/compiler/aslmain.c
@@ -138,6 +138,7 @@ Usage (
ACPI_OPTION ("-dc <f1,f2>", "Disassemble AML and immediately compile it");
ACPI_OPTION ("", " (Obtain DSDT from current system if no input file)");
ACPI_OPTION ("-e <f1,f2>", "Include ACPI table(s) for external symbol resolution");
+ ACPI_OPTION ("-fe <file>", "Specify external symbol declaration file");
ACPI_OPTION ("-g", "Get ACPI tables and write to files (*.dat)");
ACPI_OPTION ("-in", "Ignore NoOp opcodes");
ACPI_OPTION ("-vt", "Dump binary table data in hex format within output file");
diff --git a/sys/contrib/dev/acpica/compiler/asloptions.c b/sys/contrib/dev/acpica/compiler/asloptions.c
index 877fb68..66a9249 100644
--- a/sys/contrib/dev/acpica/compiler/asloptions.c
+++ b/sys/contrib/dev/acpica/compiler/asloptions.c
@@ -68,7 +68,7 @@ AslDoResponseFile (
#define ASL_TOKEN_SEPARATORS " \t\n"
-#define ASL_SUPPORTED_OPTIONS "@:b|c|d^D:e:fgh^i|I:l^m:no|p:P^r:s|t|T+G^v^w|x:z"
+#define ASL_SUPPORTED_OPTIONS "@:b|c|d^D:e:f^gh^i|I:l^m:no|p:P^r:s|t|T+G^v^w|x:z"
/*******************************************************************************
@@ -136,8 +136,7 @@ AslCommandLine (
if (BadCommandLine)
{
- printf ("\n");
- Usage ();
+ printf ("Use -h option for help information\n");
exit (1);
}
@@ -276,9 +275,30 @@ AslDoOptions (
}
break;
- case 'f': /* Ignore errors and force creation of aml file */
+ case 'f':
+
+ switch (AcpiGbl_Optarg[0])
+ {
+ case '^': /* Ignore errors and force creation of aml file */
+
+ Gbl_IgnoreErrors = TRUE;
+ break;
+
+ case 'e': /* Disassembler: Get external declaration file */
+
+ if (AcpiGetoptArgument (argc, argv))
+ {
+ return (-1);
+ }
+
+ Gbl_ExternalRefFilename = AcpiGbl_Optarg;
+ break;
- Gbl_IgnoreErrors = TRUE;
+ default:
+
+ printf ("Unknown option: -f%s\n", AcpiGbl_Optarg);
+ return (-1);
+ }
break;
case 'G':
diff --git a/sys/contrib/dev/acpica/compiler/dtcompile.c b/sys/contrib/dev/acpica/compiler/dtcompile.c
index 1b24af26..3407e1d 100644
--- a/sys/contrib/dev/acpica/compiler/dtcompile.c
+++ b/sys/contrib/dev/acpica/compiler/dtcompile.c
@@ -317,7 +317,7 @@ DtCompileDataTable (
DtSetTableLength ();
return (Status);
}
- else if (ACPI_COMPARE_NAME (Signature, ACPI_SIG_RSDP))
+ else if (ACPI_VALIDATE_RSDP_SIG (Signature))
{
Status = DtCompileRsdp (FieldList);
return (Status);
diff --git a/sys/contrib/dev/acpica/components/debugger/dbcmds.c b/sys/contrib/dev/acpica/components/debugger/dbcmds.c
index 89c6333..921f428 100644
--- a/sys/contrib/dev/acpica/components/debugger/dbcmds.c
+++ b/sys/contrib/dev/acpica/components/debugger/dbcmds.c
@@ -1211,6 +1211,14 @@ AcpiDbGenerateGpe (
(void) AcpiEvGpeDispatch (NULL, GpeEventInfo, GpeNumber);
}
+
+void
+AcpiDbGenerateSci (
+ void)
+{
+ AcpiEvSciDispatch ();
+}
+
#endif /* !ACPI_REDUCED_HARDWARE */
#endif /* ACPI_DEBUGGER */
diff --git a/sys/contrib/dev/acpica/components/debugger/dbfileio.c b/sys/contrib/dev/acpica/components/debugger/dbfileio.c
index e03bb40..05fa4e7 100644
--- a/sys/contrib/dev/acpica/components/debugger/dbfileio.c
+++ b/sys/contrib/dev/acpica/components/debugger/dbfileio.c
@@ -142,7 +142,8 @@ AcpiDbOpenDebugFile (
}
AcpiOsPrintf ("Debug output file %s opened\n", Name);
- ACPI_STRCPY (AcpiGbl_DbDebugFilename, Name);
+ ACPI_STRNCPY (AcpiGbl_DbDebugFilename, Name,
+ sizeof (AcpiGbl_DbDebugFilename));
AcpiGbl_DbOutputToFile = TRUE;
#endif
@@ -274,11 +275,9 @@ AcpiDbReadTable (
fseek (fp, 0, SEEK_SET);
- /* The RSDT, FACS and S3PT tables do not have standard ACPI headers */
+ /* The RSDP table does not have a standard ACPI header */
- if (ACPI_COMPARE_NAME (TableHeader.Signature, "RSD ") ||
- ACPI_COMPARE_NAME (TableHeader.Signature, "FACS") ||
- ACPI_COMPARE_NAME (TableHeader.Signature, "S3PT"))
+ if (ACPI_COMPARE_NAME (TableHeader.Signature, "RSD "))
{
*TableLength = FileSize;
StandardHeader = FALSE;
diff --git a/sys/contrib/dev/acpica/components/debugger/dbinput.c b/sys/contrib/dev/acpica/components/debugger/dbinput.c
index a54c239..c1a51f8 100644
--- a/sys/contrib/dev/acpica/components/debugger/dbinput.c
+++ b/sys/contrib/dev/acpica/components/debugger/dbinput.c
@@ -132,12 +132,14 @@ enum AcpiExDebuggerCommands
CMD_OPEN,
CMD_OSI,
CMD_OWNER,
+ CMD_PATHS,
CMD_PREDEFINED,
CMD_PREFIX,
CMD_QUIT,
CMD_REFERENCES,
CMD_RESOURCES,
CMD_RESULTS,
+ CMD_SCI,
CMD_SET,
CMD_SLEEP,
CMD_STATS,
@@ -203,12 +205,14 @@ static const ACPI_DB_COMMAND_INFO AcpiGbl_DbCommands[] =
{"OPEN", 1},
{"OSI", 0},
{"OWNER", 1},
+ {"PATHS", 0},
{"PREDEFINED", 0},
{"PREFIX", 0},
{"QUIT", 0},
{"REFERENCES", 1},
{"RESOURCES", 0},
{"RESULTS", 0},
+ {"SCI", 0},
{"SET", 3},
{"SLEEP", 0},
{"STATS", 1},
@@ -259,22 +263,19 @@ static const ACPI_DB_COMMAND_HELP AcpiGbl_DbCommandHelp[] =
{0, "\nNamespace Access Commands:", "\n"},
{1, " Businfo", "Display system bus info\n"},
{1, " Disassemble <Method>", "Disassemble a control method\n"},
- {1, " Event <F|G> <Value>", "Generate AcpiEvent (Fixed/GPE)\n"},
{1, " Find <AcpiName> (? is wildcard)", "Find ACPI name(s) with wildcards\n"},
- {1, " Gpe <GpeNum> <GpeBlock>", "Simulate a GPE\n"},
- {1, " Gpes", "Display info on all GPEs\n"},
{1, " Integrity", "Validate namespace integrity\n"},
{1, " Methods", "Display list of loaded control methods\n"},
{1, " Namespace [Object] [Depth]", "Display loaded namespace tree/subtree\n"},
{1, " Notify <Object> <Value>", "Send a notification on Object\n"},
{1, " Objects <ObjectType>", "Display all objects of the given type\n"},
{1, " Owner <OwnerId> [Depth]", "Display loaded namespace by object owner\n"},
+ {1, " Paths", "Display full pathnames of namespace objects\n"},
{1, " Predefined", "Check all predefined names\n"},
{1, " Prefix [<NamePath>]", "Set or Get current execution prefix\n"},
{1, " References <Addr>", "Find all references to object at addr\n"},
{1, " Resources [DeviceName]", "Display Device resources (no arg = all devices)\n"},
{1, " Set N <NamedObject> <Value>", "Set value for named integer\n"},
- {1, " Sleep [SleepState]", "Simulate sleep/wake sequence(s) (0-5)\n"},
{1, " Template <Object>", "Format/dump a Buffer/ResourceTemplate\n"},
{1, " Terminate", "Delete namespace and all internal objects\n"},
{1, " Type <Object>", "Display object type\n"},
@@ -288,7 +289,7 @@ static const ACPI_DB_COMMAND_HELP AcpiGbl_DbCommandHelp[] =
{5, " Execute <Namepath> [Arguments]", "Execute control method\n"},
{1, " Hex Integer", "Integer method argument\n"},
{1, " \"Ascii String\"", "String method argument\n"},
- {1, " (Byte List)", "Buffer method argument\n"},
+ {1, " (Hex Byte List)", "Buffer method argument\n"},
{1, " [Package Element List]", "Package method argument\n"},
{1, " Go", "Allow method to run to completion\n"},
{1, " Information", "Display info about the current method\n"},
@@ -303,6 +304,13 @@ static const ACPI_DB_COMMAND_HELP AcpiGbl_DbCommandHelp[] =
{1, " Tree", "Display control method calling tree\n"},
{1, " <Enter>", "Single step next AML opcode (over calls)\n"},
+ {0, "\nHardware Related Commands:", "\n"},
+ {1, " Event <F|G> <Value>", "Generate AcpiEvent (Fixed/GPE)\n"},
+ {1, " Gpe <GpeNum> <GpeBlock>", "Simulate a GPE\n"},
+ {1, " Gpes", "Display info on all GPEs\n"},
+ {1, " Sci", "Generate an SCI\n"},
+ {1, " Sleep [SleepState]", "Simulate sleep/wake sequence(s) (0-5)\n"},
+
{0, "\nFile I/O Commands:", "\n"},
{1, " Close", "Close debug output file\n"},
{1, " Load <Input Filename>", "Load ACPI table from a file\n"},
@@ -1002,6 +1010,11 @@ AcpiDbCommandDispatch (
AcpiDbDumpNamespaceByOwner (AcpiGbl_DbArgs[1], AcpiGbl_DbArgs[2]);
break;
+ case CMD_PATHS:
+
+ AcpiDbDumpNamespacePaths ();
+ break;
+
case CMD_PREDEFINED:
AcpiDbCheckPredefinedNames ();
@@ -1027,6 +1040,11 @@ AcpiDbCommandDispatch (
AcpiDbDisplayResults ();
break;
+ case CMD_SCI:
+
+ AcpiDbGenerateSci ();
+ break;
+
case CMD_SET:
AcpiDbSetMethodData (AcpiGbl_DbArgs[1], AcpiGbl_DbArgs[2],
diff --git a/sys/contrib/dev/acpica/components/debugger/dbnames.c b/sys/contrib/dev/acpica/components/debugger/dbnames.c
index c9f3e0a..5ac39fd 100644
--- a/sys/contrib/dev/acpica/components/debugger/dbnames.c
+++ b/sys/contrib/dev/acpica/components/debugger/dbnames.c
@@ -256,6 +256,37 @@ AcpiDbDumpNamespace (
/*******************************************************************************
*
+ * FUNCTION: AcpiDbDumpNamespacePaths
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Dump entire namespace with full object pathnames and object
+ * type information. Alternative to "namespace" command.
+ *
+ ******************************************************************************/
+
+void
+AcpiDbDumpNamespacePaths (
+ void)
+{
+
+ AcpiDbSetOutputDestination (ACPI_DB_DUPLICATE_OUTPUT);
+ AcpiOsPrintf ("ACPI Namespace (from root):\n");
+
+ /* Display the entire namespace */
+
+ AcpiDbSetOutputDestination (ACPI_DB_REDIRECTABLE_OUTPUT);
+ AcpiNsDumpObjectPaths (ACPI_TYPE_ANY, ACPI_DISPLAY_SUMMARY,
+ ACPI_UINT32_MAX, ACPI_OWNER_ID_MAX, AcpiGbl_RootNode);
+
+ AcpiDbSetOutputDestination (ACPI_DB_CONSOLE_OUTPUT);
+}
+
+
+/*******************************************************************************
+ *
* FUNCTION: AcpiDbDumpNamespaceByOwner
*
* PARAMETERS: OwnerArg - Owner ID whose nodes will be displayed
diff --git a/sys/contrib/dev/acpica/components/events/evgpeutil.c b/sys/contrib/dev/acpica/components/events/evgpeutil.c
index 1841f9d..35caa5a 100644
--- a/sys/contrib/dev/acpica/components/events/evgpeutil.c
+++ b/sys/contrib/dev/acpica/components/events/evgpeutil.c
@@ -216,7 +216,7 @@ AcpiEvGetGpeDevice (
*
* FUNCTION: AcpiEvGetGpeXruptBlock
*
- * PARAMETERS: InterruptNumber - Interrupt for a GPE block
+ * PARAMETERS: InterruptNumber - Interrupt for a GPE block
*
* RETURN: A GPE interrupt block
*
diff --git a/sys/contrib/dev/acpica/components/events/evmisc.c b/sys/contrib/dev/acpica/components/events/evmisc.c
index 740eb28..547534b 100644
--- a/sys/contrib/dev/acpica/components/events/evmisc.c
+++ b/sys/contrib/dev/acpica/components/events/evmisc.c
@@ -292,15 +292,6 @@ AcpiEvTerminate (
Status = AcpiEvWalkGpeList (AcpiHwDisableGpeBlock, NULL);
- /* Remove SCI handler */
-
- Status = AcpiEvRemoveSciHandler ();
- if (ACPI_FAILURE(Status))
- {
- ACPI_ERROR ((AE_INFO,
- "Could not remove SCI handler"));
- }
-
Status = AcpiEvRemoveGlobalLockHandler ();
if (ACPI_FAILURE(Status))
{
@@ -311,6 +302,15 @@ AcpiEvTerminate (
AcpiGbl_EventsInitialized = FALSE;
}
+ /* Remove SCI handlers */
+
+ Status = AcpiEvRemoveAllSciHandlers ();
+ if (ACPI_FAILURE(Status))
+ {
+ ACPI_ERROR ((AE_INFO,
+ "Could not remove SCI handler"));
+ }
+
/* Deallocate all handler objects installed within GPE info structs */
Status = AcpiEvWalkGpeList (AcpiEvDeleteGpeHandlers, NULL);
diff --git a/sys/contrib/dev/acpica/components/events/evregion.c b/sys/contrib/dev/acpica/components/events/evregion.c
index 43fe9b2..d469d6e 100644
--- a/sys/contrib/dev/acpica/components/events/evregion.c
+++ b/sys/contrib/dev/acpica/components/events/evregion.c
@@ -234,18 +234,12 @@ AcpiEvAddressSpaceDispatch (
{
RegionObj->Region.Flags |= AOPOBJ_SETUP_COMPLETE;
- if (RegionObj2->Extra.RegionContext)
- {
- /* The handler for this region was already installed */
-
- ACPI_FREE (RegionContext);
- }
- else
+ /*
+ * Save the returned context for use in all accesses to
+ * the handler for this particular region
+ */
+ if (!(RegionObj2->Extra.RegionContext))
{
- /*
- * Save the returned context for use in all accesses to
- * this particular region
- */
RegionObj2->Extra.RegionContext = RegionContext;
}
}
@@ -261,7 +255,6 @@ AcpiEvAddressSpaceDispatch (
ACPI_FORMAT_NATIVE_UINT (RegionObj->Region.Address + RegionOffset),
AcpiUtGetRegionName (RegionObj->Region.SpaceId)));
-
/*
* Special handling for GenericSerialBus and GeneralPurposeIo:
* There are three extra parameters that must be passed to the
@@ -424,6 +417,15 @@ AcpiEvDetachRegion(
Status = RegionSetup (RegionObj, ACPI_REGION_DEACTIVATE,
HandlerObj->AddressSpace.Context, RegionContext);
+ /*
+ * RegionContext should have been released by the deactivate
+ * operation. We don't need access to it anymore here.
+ */
+ if (RegionContext)
+ {
+ *RegionContext = NULL;
+ }
+
/* Init routine may fail, Just ignore errors */
if (ACPI_FAILURE (Status))
diff --git a/sys/contrib/dev/acpica/components/events/evsci.c b/sys/contrib/dev/acpica/components/events/evsci.c
index ed87c63..4fa84f3 100644
--- a/sys/contrib/dev/acpica/components/events/evsci.c
+++ b/sys/contrib/dev/acpica/components/events/evsci.c
@@ -61,6 +61,57 @@ AcpiEvSciXruptHandler (
/*******************************************************************************
*
+ * FUNCTION: AcpiEvSciDispatch
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status code indicates whether interrupt was handled.
+ *
+ * DESCRIPTION: Dispatch the SCI to all host-installed SCI handlers.
+ *
+ ******************************************************************************/
+
+UINT32
+AcpiEvSciDispatch (
+ void)
+{
+ ACPI_SCI_HANDLER_INFO *SciHandler;
+ ACPI_CPU_FLAGS Flags;
+ UINT32 IntStatus = ACPI_INTERRUPT_NOT_HANDLED;
+
+
+ ACPI_FUNCTION_NAME (EvSciDispatch);
+
+
+ /* Are there any host-installed SCI handlers? */
+
+ if (!AcpiGbl_SciHandlerList)
+ {
+ return (IntStatus);
+ }
+
+ Flags = AcpiOsAcquireLock (AcpiGbl_GpeLock);
+
+ /* Invoke all host-installed SCI handlers */
+
+ SciHandler = AcpiGbl_SciHandlerList;
+ while (SciHandler)
+ {
+ /* Invoke the installed handler (at interrupt level) */
+
+ IntStatus |= SciHandler->Address (
+ SciHandler->Context);
+
+ SciHandler = SciHandler->Next;
+ }
+
+ AcpiOsReleaseLock (AcpiGbl_GpeLock, Flags);
+ return (IntStatus);
+}
+
+
+/*******************************************************************************
+ *
* FUNCTION: AcpiEvSciXruptHandler
*
* PARAMETERS: Context - Calling Context
@@ -100,6 +151,10 @@ AcpiEvSciXruptHandler (
*/
InterruptHandled |= AcpiEvGpeDetect (GpeXruptList);
+ /* Invoke all host-installed SCI handlers */
+
+ InterruptHandled |= AcpiEvSciDispatch ();
+
AcpiSciCount++;
return_UINT32 (InterruptHandled);
}
@@ -129,14 +184,13 @@ AcpiEvGpeXruptHandler (
/*
- * We are guaranteed by the ACPI CA initialization/shutdown code that
+ * We are guaranteed by the ACPICA initialization/shutdown code that
* if this interrupt handler is installed, ACPI is enabled.
*/
/* GPEs: Check for and dispatch any GPEs that have occurred */
InterruptHandled |= AcpiEvGpeDetect (GpeXruptList);
-
return_UINT32 (InterruptHandled);
}
@@ -171,15 +225,15 @@ AcpiEvInstallSciHandler (
/******************************************************************************
*
- * FUNCTION: AcpiEvRemoveSciHandler
+ * FUNCTION: AcpiEvRemoveAllSciHandlers
*
* PARAMETERS: none
*
- * RETURN: E_OK if handler uninstalled OK, E_ERROR if handler was not
+ * RETURN: AE_OK if handler uninstalled, AE_ERROR if handler was not
* installed to begin with
*
* DESCRIPTION: Remove the SCI interrupt handler. No further SCIs will be
- * taken.
+ * taken. Remove all host-installed SCI handlers.
*
* Note: It doesn't seem important to disable all events or set the event
* enable registers to their original values. The OS should disable
@@ -189,13 +243,15 @@ AcpiEvInstallSciHandler (
******************************************************************************/
ACPI_STATUS
-AcpiEvRemoveSciHandler (
+AcpiEvRemoveAllSciHandlers (
void)
{
+ ACPI_SCI_HANDLER_INFO *SciHandler;
+ ACPI_CPU_FLAGS Flags;
ACPI_STATUS Status;
- ACPI_FUNCTION_TRACE (EvRemoveSciHandler);
+ ACPI_FUNCTION_TRACE (EvRemoveAllSciHandlers);
/* Just let the OS remove the handler and disable the level */
@@ -203,6 +259,23 @@ AcpiEvRemoveSciHandler (
Status = AcpiOsRemoveInterruptHandler ((UINT32) AcpiGbl_FADT.SciInterrupt,
AcpiEvSciXruptHandler);
+ if (!AcpiGbl_SciHandlerList)
+ {
+ return (Status);
+ }
+
+ Flags = AcpiOsAcquireLock (AcpiGbl_GpeLock);
+
+ /* Free all host-installed SCI handlers */
+
+ while (AcpiGbl_SciHandlerList)
+ {
+ SciHandler = AcpiGbl_SciHandlerList;
+ AcpiGbl_SciHandlerList = SciHandler->Next;
+ ACPI_FREE (SciHandler);
+ }
+
+ AcpiOsReleaseLock (AcpiGbl_GpeLock, Flags);
return_ACPI_STATUS (Status);
}
diff --git a/sys/contrib/dev/acpica/components/events/evxface.c b/sys/contrib/dev/acpica/components/events/evxface.c
index cf5dbed..4ed3247 100644
--- a/sys/contrib/dev/acpica/components/events/evxface.c
+++ b/sys/contrib/dev/acpica/components/events/evxface.c
@@ -435,6 +435,169 @@ ACPI_EXPORT_SYMBOL (AcpiInstallExceptionHandler)
#if (!ACPI_REDUCED_HARDWARE)
/*******************************************************************************
*
+ * FUNCTION: AcpiInstallSciHandler
+ *
+ * PARAMETERS: Address - Address of the handler
+ * Context - Value passed to the handler on each SCI
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Install a handler for a System Control Interrupt.
+ *
+ ******************************************************************************/
+
+ACPI_STATUS
+AcpiInstallSciHandler (
+ ACPI_SCI_HANDLER Address,
+ void *Context)
+{
+ ACPI_SCI_HANDLER_INFO *NewSciHandler;
+ ACPI_SCI_HANDLER_INFO *SciHandler;
+ ACPI_CPU_FLAGS Flags;
+ ACPI_STATUS Status;
+
+
+ ACPI_FUNCTION_TRACE (AcpiInstallSciHandler);
+
+
+ if (!Address)
+ {
+ return_ACPI_STATUS (AE_BAD_PARAMETER);
+ }
+
+ /* Allocate and init a handler object */
+
+ NewSciHandler = ACPI_ALLOCATE (sizeof (ACPI_SCI_HANDLER_INFO));
+ if (!NewSciHandler)
+ {
+ return_ACPI_STATUS (AE_NO_MEMORY);
+ }
+
+ NewSciHandler->Address = Address;
+ NewSciHandler->Context = Context;
+
+ Status = AcpiUtAcquireMutex (ACPI_MTX_EVENTS);
+ if (ACPI_FAILURE (Status))
+ {
+ goto Exit;
+ }
+
+ /* Lock list during installation */
+
+ Flags = AcpiOsAcquireLock (AcpiGbl_GpeLock);
+ SciHandler = AcpiGbl_SciHandlerList;
+
+ /* Ensure handler does not already exist */
+
+ while (SciHandler)
+ {
+ if (Address == SciHandler->Address)
+ {
+ Status = AE_ALREADY_EXISTS;
+ goto UnlockAndExit;
+ }
+
+ SciHandler = SciHandler->Next;
+ }
+
+ /* Install the new handler into the global list (at head) */
+
+ NewSciHandler->Next = AcpiGbl_SciHandlerList;
+ AcpiGbl_SciHandlerList = NewSciHandler;
+
+
+UnlockAndExit:
+
+ AcpiOsReleaseLock (AcpiGbl_GpeLock, Flags);
+ (void) AcpiUtReleaseMutex (ACPI_MTX_EVENTS);
+
+Exit:
+ if (ACPI_FAILURE (Status))
+ {
+ ACPI_FREE (NewSciHandler);
+ }
+ return_ACPI_STATUS (Status);
+}
+
+
+/*******************************************************************************
+ *
+ * FUNCTION: AcpiRemoveSciHandler
+ *
+ * PARAMETERS: Address - Address of the handler
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Remove a handler for a System Control Interrupt.
+ *
+ ******************************************************************************/
+
+ACPI_STATUS
+AcpiRemoveSciHandler (
+ ACPI_SCI_HANDLER Address)
+{
+ ACPI_SCI_HANDLER_INFO *PrevSciHandler;
+ ACPI_SCI_HANDLER_INFO *NextSciHandler;
+ ACPI_CPU_FLAGS Flags;
+ ACPI_STATUS Status;
+
+
+ ACPI_FUNCTION_TRACE (AcpiRemoveSciHandler);
+
+
+ if (!Address)
+ {
+ return_ACPI_STATUS (AE_BAD_PARAMETER);
+ }
+
+ Status = AcpiUtAcquireMutex (ACPI_MTX_EVENTS);
+ if (ACPI_FAILURE (Status))
+ {
+ return_ACPI_STATUS (Status);
+ }
+
+ /* Remove the SCI handler with lock */
+
+ Flags = AcpiOsAcquireLock (AcpiGbl_GpeLock);
+
+ PrevSciHandler = NULL;
+ NextSciHandler = AcpiGbl_SciHandlerList;
+ while (NextSciHandler)
+ {
+ if (NextSciHandler->Address == Address)
+ {
+ /* Unlink and free the SCI handler info block */
+
+ if (PrevSciHandler)
+ {
+ PrevSciHandler->Next = NextSciHandler->Next;
+ }
+ else
+ {
+ AcpiGbl_SciHandlerList = NextSciHandler->Next;
+ }
+
+ AcpiOsReleaseLock (AcpiGbl_GpeLock, Flags);
+ ACPI_FREE (NextSciHandler);
+ goto UnlockAndExit;
+ }
+
+ PrevSciHandler = NextSciHandler;
+ NextSciHandler = NextSciHandler->Next;
+ }
+
+ AcpiOsReleaseLock (AcpiGbl_GpeLock, Flags);
+ Status = AE_NOT_EXIST;
+
+
+UnlockAndExit:
+ (void) AcpiUtReleaseMutex (ACPI_MTX_EVENTS);
+ return_ACPI_STATUS (Status);
+}
+
+
+/*******************************************************************************
+ *
* FUNCTION: AcpiInstallGlobalEventHandler
*
* PARAMETERS: Handler - Pointer to the global event handler function
diff --git a/sys/contrib/dev/acpica/components/hardware/hwxface.c b/sys/contrib/dev/acpica/components/hardware/hwxface.c
index ded0a6b..8e0e5be 100644
--- a/sys/contrib/dev/acpica/components/hardware/hwxface.c
+++ b/sys/contrib/dev/acpica/components/hardware/hwxface.c
@@ -131,7 +131,8 @@ AcpiRead (
UINT64 *ReturnValue,
ACPI_GENERIC_ADDRESS *Reg)
{
- UINT32 Value;
+ UINT32 ValueLo;
+ UINT32 ValueHi;
UINT32 Width;
UINT64 Address;
ACPI_STATUS Status;
@@ -153,13 +154,8 @@ AcpiRead (
return (Status);
}
- /* Initialize entire 64-bit return value to zero */
-
- *ReturnValue = 0;
- Value = 0;
-
/*
- * Two address spaces supported: Memory or IO. PCI_Config is
+ * Two address spaces supported: Memory or I/O. PCI_Config is
* not supported here because the GAS structure is insufficient
*/
if (Reg->SpaceId == ACPI_ADR_SPACE_SYSTEM_MEMORY)
@@ -173,6 +169,9 @@ AcpiRead (
}
else /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
{
+ ValueLo = 0;
+ ValueHi = 0;
+
Width = Reg->BitWidth;
if (Width == 64)
{
@@ -180,25 +179,27 @@ AcpiRead (
}
Status = AcpiHwReadPort ((ACPI_IO_ADDRESS)
- Address, &Value, Width);
+ Address, &ValueLo, Width);
if (ACPI_FAILURE (Status))
{
return (Status);
}
- *ReturnValue = Value;
if (Reg->BitWidth == 64)
{
/* Read the top 32 bits */
Status = AcpiHwReadPort ((ACPI_IO_ADDRESS)
- (Address + 4), &Value, 32);
+ (Address + 4), &ValueHi, 32);
if (ACPI_FAILURE (Status))
{
return (Status);
}
- *ReturnValue |= ((UINT64) Value << 32);
}
+
+ /* Set the return value only if status is AE_OK */
+
+ *ReturnValue = (ValueLo | ((UINT64) ValueHi << 32));
}
ACPI_DEBUG_PRINT ((ACPI_DB_IO,
@@ -207,7 +208,7 @@ AcpiRead (
ACPI_FORMAT_UINT64 (Address),
AcpiUtGetRegionName (Reg->SpaceId)));
- return (Status);
+ return (AE_OK);
}
ACPI_EXPORT_SYMBOL (AcpiRead)
diff --git a/sys/contrib/dev/acpica/components/namespace/nsaccess.c b/sys/contrib/dev/acpica/components/namespace/nsaccess.c
index aedb550..523e092 100644
--- a/sys/contrib/dev/acpica/components/namespace/nsaccess.c
+++ b/sys/contrib/dev/acpica/components/namespace/nsaccess.c
@@ -448,8 +448,8 @@ AcpiNsLookup (
/* Current scope has no parent scope */
ACPI_ERROR ((AE_INFO,
- "ACPI path has too many parent prefixes (^) "
- "- reached beyond root node"));
+ "%s: Path has too many parent prefixes (^) "
+ "- reached beyond root node", Pathname));
return_ACPI_STATUS (AE_NOT_FOUND);
}
}
diff --git a/sys/contrib/dev/acpica/components/namespace/nsdump.c b/sys/contrib/dev/acpica/components/namespace/nsdump.c
index 41f1214..0ec5642 100644
--- a/sys/contrib/dev/acpica/components/namespace/nsdump.c
+++ b/sys/contrib/dev/acpica/components/namespace/nsdump.c
@@ -69,6 +69,22 @@ AcpiNsDumpOneDevice (
#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
+
+static ACPI_STATUS
+AcpiNsDumpOneObjectPath (
+ ACPI_HANDLE ObjHandle,
+ UINT32 Level,
+ void *Context,
+ void **ReturnValue);
+
+static ACPI_STATUS
+AcpiNsGetMaxDepth (
+ ACPI_HANDLE ObjHandle,
+ UINT32 Level,
+ void *Context,
+ void **ReturnValue);
+
+
/*******************************************************************************
*
* FUNCTION: AcpiNsPrintPathname
@@ -697,6 +713,142 @@ AcpiNsDumpObjects (
/*******************************************************************************
*
+ * FUNCTION: AcpiNsDumpOneObjectPath, AcpiNsGetMaxDepth
+ *
+ * PARAMETERS: ObjHandle - Node to be dumped
+ * Level - Nesting level of the handle
+ * Context - Passed into WalkNamespace
+ * ReturnValue - Not used
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Dump the full pathname to a namespace object. AcpiNsGetMaxDepth
+ * computes the maximum nesting depth in the namespace tree, in
+ * order to simplify formatting in AcpiNsDumpOneObjectPath.
+ * These procedures are UserFunctions called by AcpiNsWalkNamespace.
+ *
+ ******************************************************************************/
+
+static ACPI_STATUS
+AcpiNsDumpOneObjectPath (
+ ACPI_HANDLE ObjHandle,
+ UINT32 Level,
+ void *Context,
+ void **ReturnValue)
+{
+ UINT32 MaxLevel = *((UINT32 *) Context);
+ char *Pathname;
+ ACPI_NAMESPACE_NODE *Node;
+ int PathIndent;
+
+
+ if (!ObjHandle)
+ {
+ return (AE_OK);
+ }
+
+ Node = AcpiNsValidateHandle (ObjHandle);
+ Pathname = AcpiNsGetExternalPathname (Node);
+
+ PathIndent = 1;
+ if (Level <= MaxLevel)
+ {
+ PathIndent = MaxLevel - Level + 1;
+ }
+
+ AcpiOsPrintf ("%2d%*s%-12s%*s",
+ Level, Level, " ", AcpiUtGetTypeName (Node->Type),
+ PathIndent, " ");
+
+ AcpiOsPrintf ("%s\n", &Pathname[1]);
+ ACPI_FREE (Pathname);
+ return (AE_OK);
+}
+
+
+static ACPI_STATUS
+AcpiNsGetMaxDepth (
+ ACPI_HANDLE ObjHandle,
+ UINT32 Level,
+ void *Context,
+ void **ReturnValue)
+{
+ UINT32 *MaxLevel = (UINT32 *) Context;
+
+
+ if (Level > *MaxLevel)
+ {
+ *MaxLevel = Level;
+ }
+ return (AE_OK);
+}
+
+
+/*******************************************************************************
+ *
+ * FUNCTION: AcpiNsDumpObjectPaths
+ *
+ * PARAMETERS: Type - Object type to be dumped
+ * DisplayType - 0 or ACPI_DISPLAY_SUMMARY
+ * MaxDepth - Maximum depth of dump. Use ACPI_UINT32_MAX
+ * for an effectively unlimited depth.
+ * OwnerId - Dump only objects owned by this ID. Use
+ * ACPI_UINT32_MAX to match all owners.
+ * StartHandle - Where in namespace to start/end search
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Dump full object pathnames within the loaded namespace. Uses
+ * AcpiNsWalkNamespace in conjunction with AcpiNsDumpOneObjectPath.
+ *
+ ******************************************************************************/
+
+void
+AcpiNsDumpObjectPaths (
+ ACPI_OBJECT_TYPE Type,
+ UINT8 DisplayType,
+ UINT32 MaxDepth,
+ ACPI_OWNER_ID OwnerId,
+ ACPI_HANDLE StartHandle)
+{
+ ACPI_STATUS Status;
+ UINT32 MaxLevel = 0;
+
+
+ ACPI_FUNCTION_ENTRY ();
+
+
+ /*
+ * Just lock the entire namespace for the duration of the dump.
+ * We don't want any changes to the namespace during this time,
+ * especially the temporary nodes since we are going to display
+ * them also.
+ */
+ Status = AcpiUtAcquireMutex (ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE (Status))
+ {
+ AcpiOsPrintf ("Could not acquire namespace mutex\n");
+ return;
+ }
+
+ /* Get the max depth of the namespace tree, for formatting later */
+
+ (void) AcpiNsWalkNamespace (Type, StartHandle, MaxDepth,
+ ACPI_NS_WALK_NO_UNLOCK | ACPI_NS_WALK_TEMP_NODES,
+ AcpiNsGetMaxDepth, NULL, (void *) &MaxLevel, NULL);
+
+ /* Now dump the entire namespace */
+
+ (void) AcpiNsWalkNamespace (Type, StartHandle, MaxDepth,
+ ACPI_NS_WALK_NO_UNLOCK | ACPI_NS_WALK_TEMP_NODES,
+ AcpiNsDumpOneObjectPath, NULL, (void *) &MaxLevel, NULL);
+
+ (void) AcpiUtReleaseMutex (ACPI_MTX_NAMESPACE);
+}
+
+
+/*******************************************************************************
+ *
* FUNCTION: AcpiNsDumpEntry
*
* PARAMETERS: Handle - Node to be dumped
diff --git a/sys/contrib/dev/acpica/components/namespace/nsxfeval.c b/sys/contrib/dev/acpica/components/namespace/nsxfeval.c
index 2ec6418..984ee64 100644
--- a/sys/contrib/dev/acpica/components/namespace/nsxfeval.c
+++ b/sys/contrib/dev/acpica/components/namespace/nsxfeval.c
@@ -654,10 +654,19 @@ AcpiWalkNamespace (
goto UnlockAndExit;
}
+ /* Now we can validate the starting node */
+
+ if (!AcpiNsValidateHandle (StartObject))
+ {
+ Status = AE_BAD_PARAMETER;
+ goto UnlockAndExit2;
+ }
+
Status = AcpiNsWalkNamespace (Type, StartObject, MaxDepth,
ACPI_NS_WALK_UNLOCK, DescendingCallback,
AscendingCallback, Context, ReturnValue);
+UnlockAndExit2:
(void) AcpiUtReleaseMutex (ACPI_MTX_NAMESPACE);
UnlockAndExit:
diff --git a/sys/contrib/dev/acpica/components/tables/tbinstal.c b/sys/contrib/dev/acpica/components/tables/tbinstal.c
index 6ca141f..0652b16 100644
--- a/sys/contrib/dev/acpica/components/tables/tbinstal.c
+++ b/sys/contrib/dev/acpica/components/tables/tbinstal.c
@@ -93,14 +93,9 @@ AcpiTbVerifyTable (
}
}
- /* FACS is the odd table, has no standard ACPI header and no checksum */
+ /* Always calculate checksum, ignore bad checksum if requested */
- if (!ACPI_COMPARE_NAME (&TableDesc->Signature, ACPI_SIG_FACS))
- {
- /* Always calculate checksum, ignore bad checksum if requested */
-
- Status = AcpiTbVerifyChecksum (TableDesc->Pointer, TableDesc->Length);
- }
+ Status = AcpiTbVerifyChecksum (TableDesc->Pointer, TableDesc->Length);
return_ACPI_STATUS (Status);
}
diff --git a/sys/contrib/dev/acpica/components/tables/tbprint.c b/sys/contrib/dev/acpica/components/tables/tbprint.c
index 6c83bf3..e85acce 100644
--- a/sys/contrib/dev/acpica/components/tables/tbprint.c
+++ b/sys/contrib/dev/acpica/components/tables/tbprint.c
@@ -158,7 +158,7 @@ AcpiTbPrintTableHeader (
Header->Signature, ACPI_CAST_PTR (void, Address),
Header->Length));
}
- else if (ACPI_COMPARE_NAME (Header->Signature, ACPI_SIG_RSDP))
+ else if (ACPI_VALIDATE_RSDP_SIG (Header->Signature))
{
/* RSDP has no common fields */
@@ -211,6 +211,17 @@ AcpiTbVerifyChecksum (
UINT8 Checksum;
+ /*
+ * FACS/S3PT:
+ * They are the odd tables, have no standard ACPI header and no checksum
+ */
+
+ if (ACPI_COMPARE_NAME (Table->Signature, ACPI_SIG_S3PT) ||
+ ACPI_COMPARE_NAME (Table->Signature, ACPI_SIG_FACS))
+ {
+ return (AE_OK);
+ }
+
/* Compute the checksum on the table */
Checksum = AcpiTbChecksum (ACPI_CAST_PTR (UINT8, Table), Length);
diff --git a/sys/contrib/dev/acpica/components/tables/tbxfroot.c b/sys/contrib/dev/acpica/components/tables/tbxfroot.c
index c509350..0b9fe72 100644
--- a/sys/contrib/dev/acpica/components/tables/tbxfroot.c
+++ b/sys/contrib/dev/acpica/components/tables/tbxfroot.c
@@ -75,8 +75,7 @@ AcpiTbValidateRsdp (
* Note: Sometimes there exists more than one RSDP in memory; the valid
* RSDP has a valid checksum, all others have an invalid checksum.
*/
- if (ACPI_STRNCMP ((char *) Rsdp->Signature, ACPI_SIG_RSDP,
- sizeof (ACPI_SIG_RSDP)-1) != 0)
+ if (!ACPI_VALIDATE_RSDP_SIG (Rsdp->Signature))
{
/* Nope, BAD Signature */
diff --git a/sys/contrib/dev/acpica/components/utilities/utdebug.c b/sys/contrib/dev/acpica/components/utilities/utdebug.c
index 1780595..93c0361 100644
--- a/sys/contrib/dev/acpica/components/utilities/utdebug.c
+++ b/sys/contrib/dev/acpica/components/utilities/utdebug.c
@@ -216,7 +216,7 @@ AcpiDebugPrint (
* Display the module name, current line number, thread ID (if requested),
* current procedure nesting level, and the current procedure name
*/
- AcpiOsPrintf ("%8s-%04ld ", ModuleName, LineNumber);
+ AcpiOsPrintf ("%9s-%04ld ", ModuleName, LineNumber);
if (ACPI_LV_THREADS & AcpiDbgLevel)
{
diff --git a/sys/contrib/dev/acpica/components/utilities/utglobal.c b/sys/contrib/dev/acpica/components/utilities/utglobal.c
index be45950..6233a4a 100644
--- a/sys/contrib/dev/acpica/components/utilities/utglobal.c
+++ b/sys/contrib/dev/acpica/components/utilities/utglobal.c
@@ -274,7 +274,7 @@ AcpiUtInitGlobals (
#if (!ACPI_REDUCED_HARDWARE)
- /* GPE support */
+ /* GPE/SCI support */
AcpiGbl_AllGpesInitialized = FALSE;
AcpiGbl_GpeXruptListHead = NULL;
@@ -283,6 +283,7 @@ AcpiUtInitGlobals (
AcpiCurrentGpeCount = 0;
AcpiGbl_GlobalEventHandler = NULL;
+ AcpiGbl_SciHandlerList = NULL;
#endif /* !ACPI_REDUCED_HARDWARE */
diff --git a/sys/contrib/dev/acpica/include/acdebug.h b/sys/contrib/dev/acpica/include/acdebug.h
index 5fef182..37d41d3 100644
--- a/sys/contrib/dev/acpica/include/acdebug.h
+++ b/sys/contrib/dev/acpica/include/acdebug.h
@@ -159,6 +159,10 @@ AcpiDbGenerateGpe (
char *GpeArg,
char *BlockArg))
+ACPI_HW_DEPENDENT_RETURN_VOID (
+void
+AcpiDbGenerateSci (
+ void))
/*
* dbconvert - miscellaneous conversion routines
@@ -234,6 +238,10 @@ AcpiDbDumpNamespace (
char *DepthArg);
void
+AcpiDbDumpNamespacePaths (
+ void);
+
+void
AcpiDbDumpNamespaceByOwner (
char *OwnerArg,
char *DepthArg);
diff --git a/sys/contrib/dev/acpica/include/acdisasm.h b/sys/contrib/dev/acpica/include/acdisasm.h
index 45b8212..4fadd40 100644
--- a/sys/contrib/dev/acpica/include/acdisasm.h
+++ b/sys/contrib/dev/acpica/include/acdisasm.h
@@ -694,6 +694,9 @@ void
AcpiDmUnresolvedWarning (
UINT8 Type);
+void
+AcpiDmGetExternalsFromFile (
+ void);
/*
* dmresrc
diff --git a/sys/contrib/dev/acpica/include/acevents.h b/sys/contrib/dev/acpica/include/acevents.h
index fc652b4..67095aa 100644
--- a/sys/contrib/dev/acpica/include/acevents.h
+++ b/sys/contrib/dev/acpica/include/acevents.h
@@ -339,17 +339,17 @@ AcpiEvGpeXruptHandler (
void *Context);
UINT32
+AcpiEvSciDispatch (
+ void);
+
+UINT32
AcpiEvInstallSciHandler (
void);
ACPI_STATUS
-AcpiEvRemoveSciHandler (
+AcpiEvRemoveAllSciHandlers (
void);
-UINT32
-AcpiEvInitializeSCI (
- UINT32 ProgramSCI);
-
ACPI_HW_DEPENDENT_RETURN_VOID (
void
AcpiEvTerminate (
diff --git a/sys/contrib/dev/acpica/include/acexcep.h b/sys/contrib/dev/acpica/include/acexcep.h
index 9961acc..7bcfe1d 100644
--- a/sys/contrib/dev/acpica/include/acexcep.h
+++ b/sys/contrib/dev/acpica/include/acexcep.h
@@ -127,8 +127,9 @@ typedef struct acpi_exception_info
#define AE_NO_HANDLER EXCEP_ENV (0x001A)
#define AE_OWNER_ID_LIMIT EXCEP_ENV (0x001B)
#define AE_NOT_CONFIGURED EXCEP_ENV (0x001C)
+#define AE_ACCESS EXCEP_ENV (0x001D)
-#define AE_CODE_ENV_MAX 0x001C
+#define AE_CODE_ENV_MAX 0x001D
/*
@@ -235,7 +236,7 @@ static const ACPI_EXCEPTION_INFO AcpiGbl_ExceptionNames_Env[] =
EXCEP_TXT ("AE_NO_ACPI_TABLES", "ACPI tables could not be found"),
EXCEP_TXT ("AE_NO_NAMESPACE", "A namespace has not been loaded"),
EXCEP_TXT ("AE_NO_MEMORY", "Insufficient dynamic memory"),
- EXCEP_TXT ("AE_NOT_FOUND", "The name was not found in the namespace"),
+ EXCEP_TXT ("AE_NOT_FOUND", "A requested entity is not found"),
EXCEP_TXT ("AE_NOT_EXIST", "A required entity does not exist"),
EXCEP_TXT ("AE_ALREADY_EXISTS", "An entity already exists"),
EXCEP_TXT ("AE_TYPE", "The object type is incorrect"),
@@ -258,7 +259,8 @@ static const ACPI_EXCEPTION_INFO AcpiGbl_ExceptionNames_Env[] =
EXCEP_TXT ("AE_SAME_HANDLER", "Attempt was made to install the same handler that is already installed"),
EXCEP_TXT ("AE_NO_HANDLER", "A handler for the operation is not installed"),
EXCEP_TXT ("AE_OWNER_ID_LIMIT", "There are no more Owner IDs available for ACPI tables or control methods"),
- EXCEP_TXT ("AE_NOT_CONFIGURED", "The interface is not part of the current subsystem configuration")
+ EXCEP_TXT ("AE_NOT_CONFIGURED", "The interface is not part of the current subsystem configuration"),
+ EXCEP_TXT ("AE_ACCESS", "Permission denied for the requested operation")
};
static const ACPI_EXCEPTION_INFO AcpiGbl_ExceptionNames_Pgm[] =
diff --git a/sys/contrib/dev/acpica/include/acglobal.h b/sys/contrib/dev/acpica/include/acglobal.h
index a453805..cb76f65 100644
--- a/sys/contrib/dev/acpica/include/acglobal.h
+++ b/sys/contrib/dev/acpica/include/acglobal.h
@@ -274,6 +274,7 @@ ACPI_EXTERN ACPI_TABLE_HANDLER AcpiGbl_TableHandler;
ACPI_EXTERN void *AcpiGbl_TableHandlerContext;
ACPI_EXTERN ACPI_WALK_STATE *AcpiGbl_BreakpointWalk;
ACPI_EXTERN ACPI_INTERFACE_HANDLER AcpiGbl_InterfaceHandler;
+ACPI_EXTERN ACPI_SCI_HANDLER_INFO *AcpiGbl_SciHandlerList;
/* Owner ID support */
@@ -453,13 +454,6 @@ ACPI_EXTERN BOOLEAN AcpiGbl_DbOpt_tables;
ACPI_EXTERN BOOLEAN AcpiGbl_DbOpt_stats;
ACPI_EXTERN BOOLEAN AcpiGbl_DbOpt_ini_methods;
ACPI_EXTERN BOOLEAN AcpiGbl_DbOpt_NoRegionSupport;
-
-ACPI_EXTERN char *AcpiGbl_DbArgs[ACPI_DEBUGGER_MAX_ARGS];
-ACPI_EXTERN ACPI_OBJECT_TYPE AcpiGbl_DbArgTypes[ACPI_DEBUGGER_MAX_ARGS];
-ACPI_EXTERN char AcpiGbl_DbLineBuf[ACPI_DB_LINE_BUFFER_SIZE];
-ACPI_EXTERN char AcpiGbl_DbParsedBuf[ACPI_DB_LINE_BUFFER_SIZE];
-ACPI_EXTERN char AcpiGbl_DbScopeBuf[80];
-ACPI_EXTERN char AcpiGbl_DbDebugFilename[80];
ACPI_EXTERN BOOLEAN AcpiGbl_DbOutputToFile;
ACPI_EXTERN char *AcpiGbl_DbBuffer;
ACPI_EXTERN char *AcpiGbl_DbFilename;
@@ -467,6 +461,16 @@ ACPI_EXTERN UINT32 AcpiGbl_DbDebugLevel;
ACPI_EXTERN UINT32 AcpiGbl_DbConsoleDebugLevel;
ACPI_EXTERN ACPI_NAMESPACE_NODE *AcpiGbl_DbScopeNode;
+ACPI_EXTERN char *AcpiGbl_DbArgs[ACPI_DEBUGGER_MAX_ARGS];
+ACPI_EXTERN ACPI_OBJECT_TYPE AcpiGbl_DbArgTypes[ACPI_DEBUGGER_MAX_ARGS];
+
+/* These buffers should all be the same size */
+
+ACPI_EXTERN char AcpiGbl_DbLineBuf[ACPI_DB_LINE_BUFFER_SIZE];
+ACPI_EXTERN char AcpiGbl_DbParsedBuf[ACPI_DB_LINE_BUFFER_SIZE];
+ACPI_EXTERN char AcpiGbl_DbScopeBuf[ACPI_DB_LINE_BUFFER_SIZE];
+ACPI_EXTERN char AcpiGbl_DbDebugFilename[ACPI_DB_LINE_BUFFER_SIZE];
+
/*
* Statistic globals
*/
diff --git a/sys/contrib/dev/acpica/include/aclocal.h b/sys/contrib/dev/acpica/include/aclocal.h
index a3569fb..5ee1430 100644
--- a/sys/contrib/dev/acpica/include/aclocal.h
+++ b/sys/contrib/dev/acpica/include/aclocal.h
@@ -453,6 +453,16 @@ typedef struct acpi_simple_repair_info
*
****************************************************************************/
+/* Dispatch info for each host-installed SCI handler */
+
+typedef struct acpi_sci_handler_info
+{
+ struct acpi_sci_handler_info *Next;
+ ACPI_SCI_HANDLER Address; /* Address of handler */
+ void *Context; /* Context to be passed to handler */
+
+} ACPI_SCI_HANDLER_INFO;
+
/* Dispatch info for each GPE -- either a method or handler, cannot be both */
typedef struct acpi_gpe_handler_info
@@ -1217,7 +1227,8 @@ typedef struct acpi_external_list
/* Values for Flags field above */
-#define ACPI_IPATH_ALLOCATED 0x01
+#define ACPI_IPATH_ALLOCATED 0x01
+#define ACPI_FROM_REFERENCE_FILE 0x02
typedef struct acpi_external_file
diff --git a/sys/contrib/dev/acpica/include/acnamesp.h b/sys/contrib/dev/acpica/include/acnamesp.h
index 63caba3..2dba629 100644
--- a/sys/contrib/dev/acpica/include/acnamesp.h
+++ b/sys/contrib/dev/acpica/include/acnamesp.h
@@ -272,6 +272,14 @@ AcpiNsDumpObjects (
ACPI_OWNER_ID OwnerId,
ACPI_HANDLE StartHandle);
+void
+AcpiNsDumpObjectPaths (
+ ACPI_OBJECT_TYPE Type,
+ UINT8 DisplayType,
+ UINT32 MaxDepth,
+ ACPI_OWNER_ID OwnerId,
+ ACPI_HANDLE StartHandle);
+
/*
* nseval - Namespace evaluation functions
diff --git a/sys/contrib/dev/acpica/include/acpiosxf.h b/sys/contrib/dev/acpica/include/acpiosxf.h
index 747a4ed..c948d3f 100644
--- a/sys/contrib/dev/acpica/include/acpiosxf.h
+++ b/sys/contrib/dev/acpica/include/acpiosxf.h
@@ -404,6 +404,7 @@ ACPI_STATUS
AcpiOsGetTableByIndex (
UINT32 Index,
ACPI_TABLE_HEADER **Table,
+ UINT32 *Instance,
ACPI_PHYSICAL_ADDRESS *Address);
ACPI_STATUS
diff --git a/sys/contrib/dev/acpica/include/acpixf.h b/sys/contrib/dev/acpica/include/acpixf.h
index 4f4e81a..80f724a 100644
--- a/sys/contrib/dev/acpica/include/acpixf.h
+++ b/sys/contrib/dev/acpica/include/acpixf.h
@@ -47,7 +47,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20130725
+#define ACPI_CA_VERSION 0x20130823
#include <contrib/dev/acpica/include/acconfig.h>
#include <contrib/dev/acpica/include/actypes.h>
@@ -377,6 +377,17 @@ AcpiInstallInitializationHandler (
ACPI_HW_DEPENDENT_RETURN_STATUS (
ACPI_STATUS
+AcpiInstallSciHandler (
+ ACPI_SCI_HANDLER Address,
+ void *Context))
+
+ACPI_HW_DEPENDENT_RETURN_STATUS (
+ACPI_STATUS
+AcpiRemoveSciHandler (
+ ACPI_SCI_HANDLER Address))
+
+ACPI_HW_DEPENDENT_RETURN_STATUS (
+ACPI_STATUS
AcpiInstallGlobalEventHandler (
ACPI_GBL_EVENT_HANDLER Handler,
void *Context))
diff --git a/sys/contrib/dev/acpica/include/actypes.h b/sys/contrib/dev/acpica/include/actypes.h
index cf98e4b..dafbfd5 100644
--- a/sys/contrib/dev/acpica/include/actypes.h
+++ b/sys/contrib/dev/acpica/include/actypes.h
@@ -488,6 +488,11 @@ typedef UINT64 ACPI_INTEGER;
#define ACPI_MOVE_NAME(dest,src) (ACPI_STRNCPY (ACPI_CAST_PTR (char, (dest)), ACPI_CAST_PTR (char, (src)), ACPI_NAME_SIZE))
#endif
+/* Support for the special RSDP signature (8 characters) */
+
+#define ACPI_VALIDATE_RSDP_SIG(a) (!ACPI_STRNCMP (ACPI_CAST_PTR (char, (a)), ACPI_SIG_RSDP, 8))
+#define ACPI_MAKE_RSDP_SIG(dest) (ACPI_MEMCPY (ACPI_CAST_PTR (char, (dest)), ACPI_SIG_RSDP, 8))
+
/*******************************************************************************
*
@@ -1006,6 +1011,10 @@ typedef void
* Various handlers and callback procedures
*/
typedef
+UINT32 (*ACPI_SCI_HANDLER) (
+ void *Context);
+
+typedef
void (*ACPI_GBL_EVENT_HANDLER) (
UINT32 EventType,
ACPI_HANDLE Device,
diff --git a/sys/contrib/dev/acpica/os_specific/service_layers/osunixxf.c b/sys/contrib/dev/acpica/os_specific/service_layers/osunixxf.c
index 19637a0..31bb471 100644
--- a/sys/contrib/dev/acpica/os_specific/service_layers/osunixxf.c
+++ b/sys/contrib/dev/acpica/os_specific/service_layers/osunixxf.c
@@ -1023,6 +1023,7 @@ AcpiOsReadPciConfiguration (
UINT32 Width)
{
+ *Value = 0;
return (AE_OK);
}
diff --git a/sys/contrib/rdma/krping/krping.c b/sys/contrib/rdma/krping/krping.c
index 99d1924..c0acf0c 100644
--- a/sys/contrib/rdma/krping/krping.c
+++ b/sys/contrib/rdma/krping/krping.c
@@ -119,7 +119,7 @@ static void krping_wait(struct krping_cb *cb, int state)
int rc;
mtx_lock(&cb->lock);
while (cb->state < state) {
- rc = msleep(cb, &cb->lock, 0, "krping", 0);
+ rc = msleep(cb, &cb->lock, PCATCH, "krping", 0);
if (rc && rc != ERESTART) {
cb->state = ERROR;
break;
@@ -188,7 +188,12 @@ static int krping_cma_event_handler(struct rdma_cm_id *cma_id,
case RDMA_CM_EVENT_DEVICE_REMOVAL:
DEBUG_LOG(PFX "cma detected device removal!!!!\n");
- break;
+ cb->state = ERROR;
+ wakeup(cb);
+ mtx_unlock(&cb->lock);
+ krping_wait(cb, CLEANUP);
+ tsleep(cb, 0, "krping", 5000);
+ return 0;
default:
log(LOG_ERR, "oof bad type!\n");
@@ -603,6 +608,8 @@ static int krping_setup_qp(struct krping_cb *cb, struct rdma_cm_id *cm_id)
}
DEBUG_LOG(PFX "created pd %p\n", cb->pd);
+ strlcpy(cb->name, cb->pd->device->name, sizeof(cb->name));
+
cb->cq = ib_create_cq(cm_id->device, krping_cq_event_handler, NULL,
cb, cb->txdepth * 2, 0);
if (IS_ERR(cb->cq)) {
@@ -1164,7 +1171,7 @@ static void krping_wlat_test_server(struct krping_cb *cb)
}
wlat_test(cb);
-
+ krping_wait(cb, ERROR);
}
static void krping_bw_test_server(struct krping_cb *cb)
@@ -1776,6 +1783,12 @@ int krping_doit(char *cmd)
else
krping_run_client(cb);
DEBUG_LOG(PFX "destroy cm_id %p\n", cb->cm_id);
+
+ mtx_lock(&cb->lock);
+ cb->state = CLEANUP;
+ wakeup(cb);
+ mtx_unlock(&cb->lock);
+
rdma_destroy_id(cb->cm_id);
out:
mtx_lock(&krping_mutex);
diff --git a/sys/contrib/rdma/krping/krping.h b/sys/contrib/rdma/krping/krping.h
index d234825..5cced30 100644
--- a/sys/contrib/rdma/krping/krping.h
+++ b/sys/contrib/rdma/krping/krping.h
@@ -37,7 +37,8 @@ enum test_state {
RDMA_READ_COMPLETE,
RDMA_WRITE_ADV,
RDMA_WRITE_COMPLETE,
- ERROR
+ ERROR,
+ CLEANUP
};
struct krping_rdma_info {
@@ -100,13 +101,15 @@ struct krping_cb {
/* listener on service side. */
struct rdma_cm_id *child_cm_id; /* connection on server side */
TAILQ_ENTRY(krping_cb) list;
-
+
int rlat; /* run read latency test */
int wlat; /* run write latency test */
int bw; /* run write bw test */
int duplex; /* run write bw full duplex test */
int poll; /* poll vs block in rlat */
int txdepth;
+
+ char name[16];
};
static __inline uint64_t
diff --git a/sys/contrib/rdma/krping/krping_dev.c b/sys/contrib/rdma/krping/krping_dev.c
index 92d954d..d6ab00a 100644
--- a/sys/contrib/rdma/krping/krping_dev.c
+++ b/sys/contrib/rdma/krping/krping_dev.c
@@ -112,12 +112,11 @@ krping_read(struct cdev *dev, struct uio *uio, int ioflag)
mtx_unlock(&krping_mutex);
while (!TAILQ_EMPTY(&copy_cbs)) {
-
cb = TAILQ_FIRST(&copy_cbs);
TAILQ_REMOVE(&copy_cbs, cb, list);
if (cb->pd) {
uprintf("krping: %4d %10s %10u %10u %10u %10u %10u %10u %10u %10u\n",
- num++, cb->pd->device->name, cb->stats.send_bytes,
+ num++, cb->name, cb->stats.send_bytes,
cb->stats.send_msgs, cb->stats.recv_bytes,
cb->stats.recv_msgs, cb->stats.write_bytes,
cb->stats.write_msgs,
diff --git a/sys/dev/mfi/mfi_cam.c b/sys/dev/mfi/mfi_cam.c
index b6ceb57..e21b089 100644
--- a/sys/dev/mfi/mfi_cam.c
+++ b/sys/dev/mfi/mfi_cam.c
@@ -308,17 +308,16 @@ mfip_cam_rescan(struct mfi_softc *sc, uint32_t tid)
return;
}
camsc->state = MFIP_STATE_RESCAN;
- mtx_unlock(&sc->mfi_io_lock);
ccb = xpt_alloc_ccb_nowait();
if (ccb == NULL) {
+ mtx_unlock(&sc->mfi_io_lock);
device_printf(sc->mfi_dev,
"Cannot allocate ccb for bus rescan.\n");
return;
}
sim = camsc->sim;
- mtx_lock(&sc->mfi_io_lock);
if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(sim),
tid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
xpt_free_ccb(ccb);
@@ -327,11 +326,8 @@ mfip_cam_rescan(struct mfi_softc *sc, uint32_t tid)
"Cannot create path for bus rescan.\n");
return;
}
- mtx_unlock(&sc->mfi_io_lock);
-
xpt_rescan(ccb);
- mtx_lock(&sc->mfi_io_lock);
camsc->state = MFIP_STATE_NONE;
mtx_unlock(&sc->mfi_io_lock);
}
diff --git a/sys/dev/vmware/vmxnet3/if_vmx.c b/sys/dev/vmware/vmxnet3/if_vmx.c
new file mode 100644
index 0000000..c9168d4
--- /dev/null
+++ b/sys/dev/vmware/vmxnet3/if_vmx.c
@@ -0,0 +1,3305 @@
+/*-
+ * Copyright (c) 2013 Tsubai Masanari
+ * Copyright (c) 2013 Bryan Venteicher <bryanv@FreeBSD.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ * $OpenBSD: src/sys/dev/pci/if_vmx.c,v 1.11 2013/06/22 00:28:10 uebayasi Exp $
+ */
+
+/* Driver for VMware vmxnet3 virtual ethernet devices. */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/endian.h>
+#include <sys/sockio.h>
+#include <sys/mbuf.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/socket.h>
+#include <sys/sysctl.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include <net/ethernet.h>
+#include <net/if.h>
+#include <net/if_arp.h>
+#include <net/if_dl.h>
+#include <net/if_types.h>
+#include <net/if_media.h>
+#include <net/if_vlan_var.h>
+
+#include <net/bpf.h>
+
+#include <netinet/in_systm.h>
+#include <netinet/in.h>
+#include <netinet/ip.h>
+#include <netinet/ip6.h>
+#include <netinet6/ip6_var.h>
+#include <netinet/udp.h>
+#include <netinet/tcp.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+
+#include "if_vmxreg.h"
+#include "if_vmxvar.h"
+
+#include "opt_inet.h"
+#include "opt_inet6.h"
+
+/* Always enable for now - useful for queue hangs. */
+#define VMXNET3_DEBUG_SYSCTL
+
+#ifdef VMXNET3_FAILPOINTS
+#include <sys/fail.h>
+static SYSCTL_NODE(DEBUG_FP, OID_AUTO, vmxnet3, CTLFLAG_RW, 0,
+ "vmxnet3 fail points");
+#define VMXNET3_FP _debug_fail_point_vmxnet3
+#endif
+
+static int vmxnet3_probe(device_t);
+static int vmxnet3_attach(device_t);
+static int vmxnet3_detach(device_t);
+static int vmxnet3_shutdown(device_t);
+
+static int vmxnet3_alloc_resources(struct vmxnet3_softc *);
+static void vmxnet3_free_resources(struct vmxnet3_softc *);
+static int vmxnet3_check_version(struct vmxnet3_softc *);
+static void vmxnet3_initial_config(struct vmxnet3_softc *);
+
+static int vmxnet3_alloc_msix_interrupts(struct vmxnet3_softc *);
+static int vmxnet3_alloc_msi_interrupts(struct vmxnet3_softc *);
+static int vmxnet3_alloc_legacy_interrupts(struct vmxnet3_softc *);
+static int vmxnet3_alloc_interrupt(struct vmxnet3_softc *, int, int,
+ struct vmxnet3_interrupt *);
+static int vmxnet3_alloc_intr_resources(struct vmxnet3_softc *);
+static int vmxnet3_setup_msix_interrupts(struct vmxnet3_softc *);
+static int vmxnet3_setup_legacy_interrupt(struct vmxnet3_softc *);
+static int vmxnet3_setup_interrupts(struct vmxnet3_softc *);
+static int vmxnet3_alloc_interrupts(struct vmxnet3_softc *);
+
+static void vmxnet3_free_interrupt(struct vmxnet3_softc *,
+ struct vmxnet3_interrupt *);
+static void vmxnet3_free_interrupts(struct vmxnet3_softc *);
+
+static int vmxnet3_init_rxq(struct vmxnet3_softc *, int);
+static int vmxnet3_init_txq(struct vmxnet3_softc *, int);
+static int vmxnet3_alloc_rxtx_queues(struct vmxnet3_softc *);
+static void vmxnet3_destroy_rxq(struct vmxnet3_rxqueue *);
+static void vmxnet3_destroy_txq(struct vmxnet3_txqueue *);
+static void vmxnet3_free_rxtx_queues(struct vmxnet3_softc *);
+
+static int vmxnet3_alloc_shared_data(struct vmxnet3_softc *);
+static void vmxnet3_free_shared_data(struct vmxnet3_softc *);
+static int vmxnet3_alloc_txq_data(struct vmxnet3_softc *);
+static void vmxnet3_free_txq_data(struct vmxnet3_softc *);
+static int vmxnet3_alloc_rxq_data(struct vmxnet3_softc *);
+static void vmxnet3_free_rxq_data(struct vmxnet3_softc *);
+static int vmxnet3_alloc_queue_data(struct vmxnet3_softc *);
+static void vmxnet3_free_queue_data(struct vmxnet3_softc *);
+static int vmxnet3_alloc_mcast_table(struct vmxnet3_softc *);
+static void vmxnet3_init_shared_data(struct vmxnet3_softc *);
+static void vmxnet3_reinit_interface(struct vmxnet3_softc *);
+static void vmxnet3_reinit_shared_data(struct vmxnet3_softc *);
+static int vmxnet3_alloc_data(struct vmxnet3_softc *);
+static void vmxnet3_free_data(struct vmxnet3_softc *);
+static int vmxnet3_setup_interface(struct vmxnet3_softc *);
+
+static void vmxnet3_evintr(struct vmxnet3_softc *);
+static void vmxnet3_txq_eof(struct vmxnet3_txqueue *);
+static void vmxnet3_rx_csum(struct vmxnet3_rxcompdesc *, struct mbuf *);
+static int vmxnet3_newbuf(struct vmxnet3_softc *, struct vmxnet3_rxring *);
+static void vmxnet3_rxq_eof_discard(struct vmxnet3_rxqueue *,
+ struct vmxnet3_rxring *, int);
+static void vmxnet3_rxq_eof(struct vmxnet3_rxqueue *);
+static void vmxnet3_legacy_intr(void *);
+static void vmxnet3_txq_intr(void *);
+static void vmxnet3_rxq_intr(void *);
+static void vmxnet3_event_intr(void *);
+
+static void vmxnet3_txstop(struct vmxnet3_softc *, struct vmxnet3_txqueue *);
+static void vmxnet3_rxstop(struct vmxnet3_softc *, struct vmxnet3_rxqueue *);
+static void vmxnet3_stop(struct vmxnet3_softc *);
+
+static void vmxnet3_txinit(struct vmxnet3_softc *, struct vmxnet3_txqueue *);
+static int vmxnet3_rxinit(struct vmxnet3_softc *, struct vmxnet3_rxqueue *);
+static int vmxnet3_reinit_queues(struct vmxnet3_softc *);
+static int vmxnet3_enable_device(struct vmxnet3_softc *);
+static void vmxnet3_reinit_rxfilters(struct vmxnet3_softc *);
+static int vmxnet3_reinit(struct vmxnet3_softc *);
+static void vmxnet3_init_locked(struct vmxnet3_softc *);
+static void vmxnet3_init(void *);
+
+static int vmxnet3_txq_offload_ctx(struct mbuf *, int *, int *, int *);
+static int vmxnet3_txq_load_mbuf(struct vmxnet3_txqueue *, struct mbuf **,
+ bus_dmamap_t, bus_dma_segment_t [], int *);
+static void vmxnet3_txq_unload_mbuf(struct vmxnet3_txqueue *, bus_dmamap_t);
+static int vmxnet3_txq_encap(struct vmxnet3_txqueue *, struct mbuf **);
+static void vmxnet3_start_locked(struct ifnet *);
+static void vmxnet3_start(struct ifnet *);
+
+static void vmxnet3_update_vlan_filter(struct vmxnet3_softc *, int,
+ uint16_t);
+static void vmxnet3_register_vlan(void *, struct ifnet *, uint16_t);
+static void vmxnet3_unregister_vlan(void *, struct ifnet *, uint16_t);
+static void vmxnet3_set_rxfilter(struct vmxnet3_softc *);
+static int vmxnet3_change_mtu(struct vmxnet3_softc *, int);
+static int vmxnet3_ioctl(struct ifnet *, u_long, caddr_t);
+
+static int vmxnet3_watchdog(struct vmxnet3_txqueue *);
+static void vmxnet3_tick(void *);
+static void vmxnet3_link_status(struct vmxnet3_softc *);
+static void vmxnet3_media_status(struct ifnet *, struct ifmediareq *);
+static int vmxnet3_media_change(struct ifnet *);
+static void vmxnet3_set_lladdr(struct vmxnet3_softc *);
+static void vmxnet3_get_lladdr(struct vmxnet3_softc *);
+
+static void vmxnet3_setup_txq_sysctl(struct vmxnet3_txqueue *,
+ struct sysctl_ctx_list *, struct sysctl_oid_list *);
+static void vmxnet3_setup_rxq_sysctl(struct vmxnet3_rxqueue *,
+ struct sysctl_ctx_list *, struct sysctl_oid_list *);
+static void vmxnet3_setup_queue_sysctl(struct vmxnet3_softc *,
+ struct sysctl_ctx_list *, struct sysctl_oid_list *);
+static void vmxnet3_setup_sysctl(struct vmxnet3_softc *);
+
+static void vmxnet3_write_bar0(struct vmxnet3_softc *, bus_size_t,
+ uint32_t);
+static uint32_t vmxnet3_read_bar1(struct vmxnet3_softc *, bus_size_t);
+static void vmxnet3_write_bar1(struct vmxnet3_softc *, bus_size_t,
+ uint32_t);
+static void vmxnet3_write_cmd(struct vmxnet3_softc *, uint32_t);
+static uint32_t vmxnet3_read_cmd(struct vmxnet3_softc *, uint32_t);
+
+static void vmxnet3_enable_intr(struct vmxnet3_softc *, int);
+static void vmxnet3_disable_intr(struct vmxnet3_softc *, int);
+static void vmxnet3_enable_all_intrs(struct vmxnet3_softc *);
+static void vmxnet3_disable_all_intrs(struct vmxnet3_softc *);
+
+static int vmxnet3_dma_malloc(struct vmxnet3_softc *, bus_size_t,
+ bus_size_t, struct vmxnet3_dma_alloc *);
+static void vmxnet3_dma_free(struct vmxnet3_softc *,
+ struct vmxnet3_dma_alloc *);
+
+typedef enum {
+ VMXNET3_BARRIER_RD,
+ VMXNET3_BARRIER_WR,
+ VMXNET3_BARRIER_RDWR,
+} vmxnet3_barrier_t;
+
+static void vmxnet3_barrier(struct vmxnet3_softc *, vmxnet3_barrier_t);
+
+static device_method_t vmxnet3_methods[] = {
+ /* Device interface. */
+ DEVMETHOD(device_probe, vmxnet3_probe),
+ DEVMETHOD(device_attach, vmxnet3_attach),
+ DEVMETHOD(device_detach, vmxnet3_detach),
+ DEVMETHOD(device_shutdown, vmxnet3_shutdown),
+
+ DEVMETHOD_END
+};
+
+static driver_t vmxnet3_driver = {
+ "vmx", vmxnet3_methods, sizeof(struct vmxnet3_softc)
+};
+
+static devclass_t vmxnet3_devclass;
+DRIVER_MODULE(vmx, pci, vmxnet3_driver, vmxnet3_devclass, 0, 0);
+
+MODULE_DEPEND(vmx, pci, 1, 1, 1);
+MODULE_DEPEND(vmx, ether, 1, 1, 1);
+
+#define VMXNET3_VMWARE_VENDOR_ID 0x15AD
+#define VMXNET3_VMWARE_DEVICE_ID 0x07B0
+
+static int
+vmxnet3_probe(device_t dev)
+{
+
+ if (pci_get_vendor(dev) == VMXNET3_VMWARE_VENDOR_ID &&
+ pci_get_device(dev) == VMXNET3_VMWARE_DEVICE_ID) {
+ device_set_desc(dev, "VMware VMXNET3 Ethernet Adapter");
+ return (BUS_PROBE_DEFAULT);
+ }
+
+ return (ENXIO);
+}
+
+static int
+vmxnet3_attach(device_t dev)
+{
+ struct vmxnet3_softc *sc;
+ int error;
+
+ sc = device_get_softc(dev);
+ sc->vmx_dev = dev;
+
+ pci_enable_busmaster(dev);
+
+ VMXNET3_CORE_LOCK_INIT(sc, device_get_nameunit(dev));
+ callout_init_mtx(&sc->vmx_tick, &sc->vmx_mtx, 0);
+
+ vmxnet3_initial_config(sc);
+
+ error = vmxnet3_alloc_resources(sc);
+ if (error)
+ goto fail;
+
+ error = vmxnet3_check_version(sc);
+ if (error)
+ goto fail;
+
+ error = vmxnet3_alloc_rxtx_queues(sc);
+ if (error)
+ goto fail;
+
+ error = vmxnet3_alloc_interrupts(sc);
+ if (error)
+ goto fail;
+
+ error = vmxnet3_alloc_data(sc);
+ if (error)
+ goto fail;
+
+ error = vmxnet3_setup_interface(sc);
+ if (error)
+ goto fail;
+
+ error = vmxnet3_setup_interrupts(sc);
+ if (error) {
+ ether_ifdetach(sc->vmx_ifp);
+ device_printf(dev, "could not set up interrupt\n");
+ goto fail;
+ }
+
+ vmxnet3_setup_sysctl(sc);
+ vmxnet3_link_status(sc);
+
+fail:
+ if (error)
+ vmxnet3_detach(dev);
+
+ return (error);
+}
+
+static int
+vmxnet3_detach(device_t dev)
+{
+ struct vmxnet3_softc *sc;
+ struct ifnet *ifp;
+
+ sc = device_get_softc(dev);
+ ifp = sc->vmx_ifp;
+
+ if (device_is_attached(dev)) {
+ ether_ifdetach(ifp);
+ VMXNET3_CORE_LOCK(sc);
+ vmxnet3_stop(sc);
+ VMXNET3_CORE_UNLOCK(sc);
+ callout_drain(&sc->vmx_tick);
+ }
+
+ if (sc->vmx_vlan_attach != NULL) {
+ EVENTHANDLER_DEREGISTER(vlan_config, sc->vmx_vlan_attach);
+ sc->vmx_vlan_attach = NULL;
+ }
+ if (sc->vmx_vlan_detach != NULL) {
+ EVENTHANDLER_DEREGISTER(vlan_config, sc->vmx_vlan_detach);
+ sc->vmx_vlan_detach = NULL;
+ }
+
+ vmxnet3_free_interrupts(sc);
+
+ if (ifp != NULL) {
+ if_free(ifp);
+ sc->vmx_ifp = NULL;
+ }
+
+ ifmedia_removeall(&sc->vmx_media);
+
+ vmxnet3_free_data(sc);
+ vmxnet3_free_resources(sc);
+ vmxnet3_free_rxtx_queues(sc);
+
+ VMXNET3_CORE_LOCK_DESTROY(sc);
+
+ return (0);
+}
+
+static int
+vmxnet3_shutdown(device_t dev)
+{
+
+ return (0);
+}
+
+static int
+vmxnet3_alloc_resources(struct vmxnet3_softc *sc)
+{
+ device_t dev;
+ int rid;
+
+ dev = sc->vmx_dev;
+
+ rid = PCIR_BAR(0);
+ sc->vmx_res0 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
+ RF_ACTIVE);
+ if (sc->vmx_res0 == NULL) {
+ device_printf(dev,
+ "could not map BAR0 memory\n");
+ return (ENXIO);
+ }
+
+ sc->vmx_iot0 = rman_get_bustag(sc->vmx_res0);
+ sc->vmx_ioh0 = rman_get_bushandle(sc->vmx_res0);
+
+ rid = PCIR_BAR(1);
+ sc->vmx_res1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
+ RF_ACTIVE);
+ if (sc->vmx_res1 == NULL) {
+ device_printf(dev,
+ "could not map BAR1 memory\n");
+ return (ENXIO);
+ }
+
+ sc->vmx_iot1 = rman_get_bustag(sc->vmx_res1);
+ sc->vmx_ioh1 = rman_get_bushandle(sc->vmx_res1);
+
+ if (pci_find_cap(dev, PCIY_MSIX, NULL) == 0) {
+ rid = PCIR_BAR(2);
+ sc->vmx_msix_res = bus_alloc_resource_any(dev,
+ SYS_RES_MEMORY, &rid, RF_ACTIVE);
+ }
+
+ if (sc->vmx_msix_res == NULL)
+ sc->vmx_flags |= VMXNET3_FLAG_NO_MSIX;
+
+ return (0);
+}
+
+static void
+vmxnet3_free_resources(struct vmxnet3_softc *sc)
+{
+ device_t dev;
+ int rid;
+
+ dev = sc->vmx_dev;
+
+ if (sc->vmx_res0 != NULL) {
+ rid = PCIR_BAR(0);
+ bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->vmx_res0);
+ sc->vmx_res0 = NULL;
+ }
+
+ if (sc->vmx_res1 != NULL) {
+ rid = PCIR_BAR(1);
+ bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->vmx_res1);
+ sc->vmx_res1 = NULL;
+ }
+
+ if (sc->vmx_msix_res != NULL) {
+ rid = PCIR_BAR(2);
+ bus_release_resource(dev, SYS_RES_MEMORY, rid,
+ sc->vmx_msix_res);
+ sc->vmx_msix_res = NULL;
+ }
+}
+
+static int
+vmxnet3_check_version(struct vmxnet3_softc *sc)
+{
+ device_t dev;
+ uint32_t version;
+
+ dev = sc->vmx_dev;
+
+ version = vmxnet3_read_bar1(sc, VMXNET3_BAR1_VRRS);
+ if ((version & 0x01) == 0) {
+ device_printf(dev, "unsupported hardware version %#x\n",
+ version);
+ return (ENOTSUP);
+ } else
+ vmxnet3_write_bar1(sc, VMXNET3_BAR1_VRRS, 1);
+
+ version = vmxnet3_read_bar1(sc, VMXNET3_BAR1_UVRS);
+ if ((version & 0x01) == 0) {
+ device_printf(dev, "unsupported UPT version %#x\n", version);
+ return (ENOTSUP);
+ } else
+ vmxnet3_write_bar1(sc, VMXNET3_BAR1_UVRS, 1);
+
+ return (0);
+}
+
+static void
+vmxnet3_initial_config(struct vmxnet3_softc *sc)
+{
+
+ sc->vmx_ntxqueues = 1;
+ sc->vmx_nrxqueues = 1;
+ sc->vmx_ntxdescs = VMXNET3_MAX_TX_NDESC;
+ sc->vmx_nrxdescs = VMXNET3_MAX_RX_NDESC;
+ sc->vmx_max_rxsegs = VMXNET3_MAX_RX_SEGS;
+}
+
+static int
+vmxnet3_alloc_msix_interrupts(struct vmxnet3_softc *sc)
+{
+ device_t dev;
+ int nmsix, cnt, required;
+
+ dev = sc->vmx_dev;
+
+ if (sc->vmx_flags & VMXNET3_FLAG_NO_MSIX)
+ return (1);
+
+ /* Allocate an additional vector for the events interrupt. */
+ required = sc->vmx_nrxqueues + sc->vmx_ntxqueues + 1;
+
+ nmsix = pci_msix_count(dev);
+ if (nmsix < required)
+ return (1);
+
+ cnt = required;
+ if (pci_alloc_msix(dev, &cnt) == 0 && cnt >= required) {
+ sc->vmx_nintrs = required;
+ return (0);
+ } else
+ pci_release_msi(dev);
+
+ return (1);
+}
+
+static int
+vmxnet3_alloc_msi_interrupts(struct vmxnet3_softc *sc)
+{
+ device_t dev;
+ int nmsi, cnt, required;
+
+ dev = sc->vmx_dev;
+ required = 1;
+
+ nmsi = pci_msi_count(dev);
+ if (nmsi < required)
+ return (1);
+
+ cnt = required;
+ if (pci_alloc_msi(dev, &cnt) == 0 && cnt >= required) {
+ sc->vmx_nintrs = 1;
+ return (0);
+ } else
+ pci_release_msi(dev);
+
+ return (1);
+}
+
+static int
+vmxnet3_alloc_legacy_interrupts(struct vmxnet3_softc *sc)
+{
+
+ sc->vmx_nintrs = 1;
+ return (0);
+}
+
+static int
+vmxnet3_alloc_interrupt(struct vmxnet3_softc *sc, int rid, int flags,
+ struct vmxnet3_interrupt *intr)
+{
+ struct resource *irq;
+
+ irq = bus_alloc_resource_any(sc->vmx_dev, SYS_RES_IRQ, &rid, flags);
+ if (irq == NULL)
+ return (ENXIO);
+
+ intr->vmxi_irq = irq;
+ intr->vmxi_rid = rid;
+
+ return (0);
+}
+
+static int
+vmxnet3_alloc_intr_resources(struct vmxnet3_softc *sc)
+{
+ int i, rid, flags, error;
+
+ rid = 0;
+ flags = RF_ACTIVE;
+
+ if (sc->vmx_intr_type == VMXNET3_IT_LEGACY)
+ flags |= RF_SHAREABLE;
+ else
+ rid = 1;
+
+ for (i = 0; i < sc->vmx_nintrs; i++, rid++) {
+ error = vmxnet3_alloc_interrupt(sc, rid, flags,
+ &sc->vmx_intrs[i]);
+ if (error)
+ return (error);
+ }
+
+ return (0);
+}
+
+/*
+ * NOTE: We only support the simple case of each Rx and Tx queue on its
+ * own MSIX vector. This is good enough until we support multiqueue.
+ */
+static int
+vmxnet3_setup_msix_interrupts(struct vmxnet3_softc *sc)
+{
+ device_t dev;
+ struct vmxnet3_txqueue *txq;
+ struct vmxnet3_rxqueue *rxq;
+ struct vmxnet3_interrupt *intr;
+ enum intr_type type;
+ int i, error;
+
+ dev = sc->vmx_dev;
+ intr = &sc->vmx_intrs[0];
+ type = INTR_TYPE_NET | INTR_MPSAFE;
+
+ for (i = 0; i < sc->vmx_ntxqueues; i++, intr++) {
+ txq = &sc->vmx_txq[i];
+ error = bus_setup_intr(dev, intr->vmxi_irq, type, NULL,
+ vmxnet3_txq_intr, txq, &intr->vmxi_handler);
+ if (error)
+ return (error);
+ txq->vxtxq_intr_idx = intr->vmxi_rid - 1;
+ }
+
+ for (i = 0; i < sc->vmx_nrxqueues; i++, intr++) {
+ rxq = &sc->vmx_rxq[i];
+ error = bus_setup_intr(dev, intr->vmxi_irq, type, NULL,
+ vmxnet3_rxq_intr, rxq, &intr->vmxi_handler);
+ if (error)
+ return (error);
+ rxq->vxrxq_intr_idx = intr->vmxi_rid - 1;
+ }
+
+ error = bus_setup_intr(dev, intr->vmxi_irq, type, NULL,
+ vmxnet3_event_intr, sc, &intr->vmxi_handler);
+ if (error)
+ return (error);
+ sc->vmx_event_intr_idx = intr->vmxi_rid - 1;
+
+ return (0);
+}
+
+static int
+vmxnet3_setup_legacy_interrupt(struct vmxnet3_softc *sc)
+{
+ struct vmxnet3_interrupt *intr;
+ int i, error;
+
+ intr = &sc->vmx_intrs[0];
+ error = bus_setup_intr(sc->vmx_dev, intr->vmxi_irq,
+ INTR_TYPE_NET | INTR_MPSAFE, NULL, vmxnet3_legacy_intr, sc,
+ &intr->vmxi_handler);
+
+ for (i = 0; i < sc->vmx_ntxqueues; i++)
+ sc->vmx_txq[i].vxtxq_intr_idx = 0;
+ for (i = 0; i < sc->vmx_nrxqueues; i++)
+ sc->vmx_rxq[i].vxrxq_intr_idx = 0;
+ sc->vmx_event_intr_idx = 0;
+
+ return (error);
+}
+
+/*
+ * XXX BMV Should probably reorganize the attach and just do
+ * this in vmxnet3_init_shared_data().
+ */
+static void
+vmxnet3_set_interrupt_idx(struct vmxnet3_softc *sc)
+{
+ struct vmxnet3_txqueue *txq;
+ struct vmxnet3_txq_shared *txs;
+ struct vmxnet3_rxqueue *rxq;
+ struct vmxnet3_rxq_shared *rxs;
+ int i;
+
+ sc->vmx_ds->evintr = sc->vmx_event_intr_idx;
+
+ for (i = 0; i < sc->vmx_ntxqueues; i++) {
+ txq = &sc->vmx_txq[i];
+ txs = txq->vxtxq_ts;
+ txs->intr_idx = txq->vxtxq_intr_idx;
+ }
+
+ for (i = 0; i < sc->vmx_nrxqueues; i++) {
+ rxq = &sc->vmx_rxq[i];
+ rxs = rxq->vxrxq_rs;
+ rxs->intr_idx = rxq->vxrxq_intr_idx;
+ }
+}
+
+static int
+vmxnet3_setup_interrupts(struct vmxnet3_softc *sc)
+{
+ int error;
+
+ error = vmxnet3_alloc_intr_resources(sc);
+ if (error)
+ return (error);
+
+ switch (sc->vmx_intr_type) {
+ case VMXNET3_IT_MSIX:
+ error = vmxnet3_setup_msix_interrupts(sc);
+ break;
+ case VMXNET3_IT_MSI:
+ case VMXNET3_IT_LEGACY:
+ error = vmxnet3_setup_legacy_interrupt(sc);
+ break;
+ default:
+ panic("%s: invalid interrupt type %d", __func__,
+ sc->vmx_intr_type);
+ }
+
+ if (error == 0)
+ vmxnet3_set_interrupt_idx(sc);
+
+ return (error);
+}
+
+static int
+vmxnet3_alloc_interrupts(struct vmxnet3_softc *sc)
+{
+ device_t dev;
+ uint32_t config;
+ int error;
+
+ dev = sc->vmx_dev;
+ config = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_INTRCFG);
+
+ sc->vmx_intr_type = config & 0x03;
+ sc->vmx_intr_mask_mode = (config >> 2) & 0x03;
+
+ switch (sc->vmx_intr_type) {
+ case VMXNET3_IT_AUTO:
+ sc->vmx_intr_type = VMXNET3_IT_MSIX;
+ /* FALLTHROUGH */
+ case VMXNET3_IT_MSIX:
+ error = vmxnet3_alloc_msix_interrupts(sc);
+ if (error == 0)
+ break;
+ sc->vmx_intr_type = VMXNET3_IT_MSI;
+ /* FALLTHROUGH */
+ case VMXNET3_IT_MSI:
+ error = vmxnet3_alloc_msi_interrupts(sc);
+ if (error == 0)
+ break;
+ sc->vmx_intr_type = VMXNET3_IT_LEGACY;
+ /* FALLTHROUGH */
+ case VMXNET3_IT_LEGACY:
+ error = vmxnet3_alloc_legacy_interrupts(sc);
+ if (error == 0)
+ break;
+ /* FALLTHROUGH */
+ default:
+ sc->vmx_intr_type = -1;
+ device_printf(dev, "cannot allocate any interrupt resources\n");
+ return (ENXIO);
+ }
+
+ return (error);
+}
+
+static void
+vmxnet3_free_interrupt(struct vmxnet3_softc *sc,
+ struct vmxnet3_interrupt *intr)
+{
+ device_t dev;
+
+ dev = sc->vmx_dev;
+
+ if (intr->vmxi_handler != NULL) {
+ bus_teardown_intr(dev, intr->vmxi_irq, intr->vmxi_handler);
+ intr->vmxi_handler = NULL;
+ }
+
+ if (intr->vmxi_irq != NULL) {
+ bus_release_resource(dev, SYS_RES_IRQ, intr->vmxi_rid,
+ intr->vmxi_irq);
+ intr->vmxi_irq = NULL;
+ intr->vmxi_rid = -1;
+ }
+}
+
+static void
+vmxnet3_free_interrupts(struct vmxnet3_softc *sc)
+{
+ int i;
+
+ for (i = 0; i < sc->vmx_nintrs; i++)
+ vmxnet3_free_interrupt(sc, &sc->vmx_intrs[i]);
+
+ if (sc->vmx_intr_type == VMXNET3_IT_MSI ||
+ sc->vmx_intr_type == VMXNET3_IT_MSIX)
+ pci_release_msi(sc->vmx_dev);
+}
+
+static int
+vmxnet3_init_rxq(struct vmxnet3_softc *sc, int q)
+{
+ struct vmxnet3_rxqueue *rxq;
+ struct vmxnet3_rxring *rxr;
+ int i;
+
+ rxq = &sc->vmx_rxq[q];
+
+ snprintf(rxq->vxrxq_name, sizeof(rxq->vxrxq_name), "%s-rx%d",
+ device_get_nameunit(sc->vmx_dev), q);
+ mtx_init(&rxq->vxrxq_mtx, rxq->vxrxq_name, NULL, MTX_DEF);
+
+ rxq->vxrxq_sc = sc;
+ rxq->vxrxq_id = q;
+
+ for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
+ rxr = &rxq->vxrxq_cmd_ring[i];
+ rxr->vxrxr_rid = i;
+ rxr->vxrxr_ndesc = sc->vmx_nrxdescs;
+ rxr->vxrxr_rxbuf = malloc(rxr->vxrxr_ndesc *
+ sizeof(struct vmxnet3_rxbuf), M_DEVBUF, M_NOWAIT | M_ZERO);
+ if (rxr->vxrxr_rxbuf == NULL)
+ return (ENOMEM);
+ }
+
+ rxq->vxrxq_comp_ring.vxcr_ndesc =
+ sc->vmx_nrxdescs * VMXNET3_RXRINGS_PERQ;
+
+ return (0);
+}
+
+static int
+vmxnet3_init_txq(struct vmxnet3_softc *sc, int q)
+{
+ struct vmxnet3_txqueue *txq;
+ struct vmxnet3_txring *txr;
+
+ txq = &sc->vmx_txq[q];
+ txr = &txq->vxtxq_cmd_ring;
+
+ snprintf(txq->vxtxq_name, sizeof(txq->vxtxq_name), "%s-tx%d",
+ device_get_nameunit(sc->vmx_dev), q);
+ mtx_init(&txq->vxtxq_mtx, txq->vxtxq_name, NULL, MTX_DEF);
+
+ txq->vxtxq_sc = sc;
+ txq->vxtxq_id = q;
+
+ txr->vxtxr_ndesc = sc->vmx_ntxdescs;
+ txr->vxtxr_txbuf = malloc(txr->vxtxr_ndesc *
+ sizeof(struct vmxnet3_txbuf), M_DEVBUF, M_NOWAIT | M_ZERO);
+ if (txr->vxtxr_txbuf == NULL)
+ return (ENOMEM);
+
+ txq->vxtxq_comp_ring.vxcr_ndesc = sc->vmx_ntxdescs;
+
+ return (0);
+}
+
+static int
+vmxnet3_alloc_rxtx_queues(struct vmxnet3_softc *sc)
+{
+ int i, error;
+
+ sc->vmx_rxq = malloc(sizeof(struct vmxnet3_rxqueue) *
+ sc->vmx_nrxqueues, M_DEVBUF, M_NOWAIT | M_ZERO);
+ sc->vmx_txq = malloc(sizeof(struct vmxnet3_txqueue) *
+ sc->vmx_ntxqueues, M_DEVBUF, M_NOWAIT | M_ZERO);
+ if (sc->vmx_rxq == NULL || sc->vmx_txq == NULL)
+ return (ENOMEM);
+
+ for (i = 0; i < sc->vmx_nrxqueues; i++) {
+ error = vmxnet3_init_rxq(sc, i);
+ if (error)
+ return (error);
+ }
+
+ for (i = 0; i < sc->vmx_ntxqueues; i++) {
+ error = vmxnet3_init_txq(sc, i);
+ if (error)
+ return (error);
+ }
+
+ return (0);
+}
+
+static void
+vmxnet3_destroy_rxq(struct vmxnet3_rxqueue *rxq)
+{
+ struct vmxnet3_rxring *rxr;
+ int i;
+
+ rxq->vxrxq_sc = NULL;
+ rxq->vxrxq_id = -1;
+
+ for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
+ rxr = &rxq->vxrxq_cmd_ring[i];
+
+ if (rxr->vxrxr_rxbuf != NULL) {
+ free(rxr->vxrxr_rxbuf, M_DEVBUF);
+ rxr->vxrxr_rxbuf = NULL;
+ }
+ }
+
+ if (mtx_initialized(&rxq->vxrxq_mtx) != 0)
+ mtx_destroy(&rxq->vxrxq_mtx);
+}
+
+static void
+vmxnet3_destroy_txq(struct vmxnet3_txqueue *txq)
+{
+ struct vmxnet3_txring *txr;
+
+ txr = &txq->vxtxq_cmd_ring;
+
+ txq->vxtxq_sc = NULL;
+ txq->vxtxq_id = -1;
+
+ if (txr->vxtxr_txbuf != NULL) {
+ free(txr->vxtxr_txbuf, M_DEVBUF);
+ txr->vxtxr_txbuf = NULL;
+ }
+
+ if (mtx_initialized(&txq->vxtxq_mtx) != 0)
+ mtx_destroy(&txq->vxtxq_mtx);
+}
+
+static void
+vmxnet3_free_rxtx_queues(struct vmxnet3_softc *sc)
+{
+ int i;
+
+ if (sc->vmx_rxq != NULL) {
+ for (i = 0; i < sc->vmx_nrxqueues; i++)
+ vmxnet3_destroy_rxq(&sc->vmx_rxq[i]);
+ free(sc->vmx_rxq, M_DEVBUF);
+ sc->vmx_rxq = NULL;
+ }
+
+ if (sc->vmx_txq != NULL) {
+ for (i = 0; i < sc->vmx_ntxqueues; i++)
+ vmxnet3_destroy_txq(&sc->vmx_txq[i]);
+ free(sc->vmx_txq, M_DEVBUF);
+ sc->vmx_txq = NULL;
+ }
+}
+
+static int
+vmxnet3_alloc_shared_data(struct vmxnet3_softc *sc)
+{
+ device_t dev;
+ uint8_t *kva;
+ size_t size;
+ int i, error;
+
+ dev = sc->vmx_dev;
+
+ size = sizeof(struct vmxnet3_driver_shared);
+ error = vmxnet3_dma_malloc(sc, size, 1, &sc->vmx_ds_dma);
+ if (error) {
+ device_printf(dev, "cannot alloc shared memory\n");
+ return (error);
+ }
+ sc->vmx_ds = (struct vmxnet3_driver_shared *) sc->vmx_ds_dma.dma_vaddr;
+
+ size = sc->vmx_ntxqueues * sizeof(struct vmxnet3_txq_shared) +
+ sc->vmx_nrxqueues * sizeof(struct vmxnet3_rxq_shared);
+ error = vmxnet3_dma_malloc(sc, size, 128, &sc->vmx_qs_dma);
+ if (error) {
+ device_printf(dev, "cannot alloc queue shared memory\n");
+ return (error);
+ }
+ sc->vmx_qs = (void *) sc->vmx_qs_dma.dma_vaddr;
+ kva = sc->vmx_qs;
+
+ for (i = 0; i < sc->vmx_ntxqueues; i++) {
+ sc->vmx_txq[i].vxtxq_ts = (struct vmxnet3_txq_shared *) kva;
+ kva += sizeof(struct vmxnet3_txq_shared);
+ }
+ for (i = 0; i < sc->vmx_nrxqueues; i++) {
+ sc->vmx_rxq[i].vxrxq_rs = (struct vmxnet3_rxq_shared *) kva;
+ kva += sizeof(struct vmxnet3_rxq_shared);
+ }
+
+ return (0);
+}
+
+static void
+vmxnet3_free_shared_data(struct vmxnet3_softc *sc)
+{
+
+ if (sc->vmx_qs != NULL) {
+ vmxnet3_dma_free(sc, &sc->vmx_qs_dma);
+ sc->vmx_qs = NULL;
+ }
+
+ if (sc->vmx_ds != NULL) {
+ vmxnet3_dma_free(sc, &sc->vmx_ds_dma);
+ sc->vmx_ds = NULL;
+ }
+}
+
+static int
+vmxnet3_alloc_txq_data(struct vmxnet3_softc *sc)
+{
+ device_t dev;
+ struct vmxnet3_txqueue *txq;
+ struct vmxnet3_txring *txr;
+ struct vmxnet3_comp_ring *txc;
+ size_t descsz, compsz;
+ int i, q, error;
+
+ dev = sc->vmx_dev;
+
+ for (q = 0; q < sc->vmx_ntxqueues; q++) {
+ txq = &sc->vmx_txq[q];
+ txr = &txq->vxtxq_cmd_ring;
+ txc = &txq->vxtxq_comp_ring;
+
+ descsz = txr->vxtxr_ndesc * sizeof(struct vmxnet3_txdesc);
+ compsz = txr->vxtxr_ndesc * sizeof(struct vmxnet3_txcompdesc);
+
+ error = bus_dma_tag_create(bus_get_dma_tag(dev),
+ 1, 0, /* alignment, boundary */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ VMXNET3_TSO_MAXSIZE, /* maxsize */
+ VMXNET3_TX_MAXSEGS, /* nsegments */
+ VMXNET3_TX_MAXSEGSIZE, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &txr->vxtxr_txtag);
+ if (error) {
+ device_printf(dev,
+ "unable to create Tx buffer tag for queue %d\n", q);
+ return (error);
+ }
+
+ error = vmxnet3_dma_malloc(sc, descsz, 512, &txr->vxtxr_dma);
+ if (error) {
+ device_printf(dev, "cannot alloc Tx descriptors for "
+ "queue %d error %d\n", q, error);
+ return (error);
+ }
+ txr->vxtxr_txd =
+ (struct vmxnet3_txdesc *) txr->vxtxr_dma.dma_vaddr;
+
+ error = vmxnet3_dma_malloc(sc, compsz, 512, &txc->vxcr_dma);
+ if (error) {
+ device_printf(dev, "cannot alloc Tx comp descriptors "
+ "for queue %d error %d\n", q, error);
+ return (error);
+ }
+ txc->vxcr_u.txcd =
+ (struct vmxnet3_txcompdesc *) txc->vxcr_dma.dma_vaddr;
+
+ for (i = 0; i < txr->vxtxr_ndesc; i++) {
+ error = bus_dmamap_create(txr->vxtxr_txtag, 0,
+ &txr->vxtxr_txbuf[i].vtxb_dmamap);
+ if (error) {
+ device_printf(dev, "unable to create Tx buf "
+ "dmamap for queue %d idx %d\n", q, i);
+ return (error);
+ }
+ }
+ }
+
+ return (0);
+}
+
+static void
+vmxnet3_free_txq_data(struct vmxnet3_softc *sc)
+{
+ device_t dev;
+ struct vmxnet3_txqueue *txq;
+ struct vmxnet3_txring *txr;
+ struct vmxnet3_comp_ring *txc;
+ struct vmxnet3_txbuf *txb;
+ int i, q;
+
+ dev = sc->vmx_dev;
+
+ for (q = 0; q < sc->vmx_ntxqueues; q++) {
+ txq = &sc->vmx_txq[q];
+ txr = &txq->vxtxq_cmd_ring;
+ txc = &txq->vxtxq_comp_ring;
+
+ for (i = 0; i < txr->vxtxr_ndesc; i++) {
+ txb = &txr->vxtxr_txbuf[i];
+ if (txb->vtxb_dmamap != NULL) {
+ bus_dmamap_destroy(txr->vxtxr_txtag,
+ txb->vtxb_dmamap);
+ txb->vtxb_dmamap = NULL;
+ }
+ }
+
+ if (txc->vxcr_u.txcd != NULL) {
+ vmxnet3_dma_free(sc, &txc->vxcr_dma);
+ txc->vxcr_u.txcd = NULL;
+ }
+
+ if (txr->vxtxr_txd != NULL) {
+ vmxnet3_dma_free(sc, &txr->vxtxr_dma);
+ txr->vxtxr_txd = NULL;
+ }
+
+ if (txr->vxtxr_txtag != NULL) {
+ bus_dma_tag_destroy(txr->vxtxr_txtag);
+ txr->vxtxr_txtag = NULL;
+ }
+ }
+}
+
+static int
+vmxnet3_alloc_rxq_data(struct vmxnet3_softc *sc)
+{
+ device_t dev;
+ struct vmxnet3_rxqueue *rxq;
+ struct vmxnet3_rxring *rxr;
+ struct vmxnet3_comp_ring *rxc;
+ int descsz, compsz;
+ int i, j, q, error;
+
+ dev = sc->vmx_dev;
+
+ for (q = 0; q < sc->vmx_nrxqueues; q++) {
+ rxq = &sc->vmx_rxq[q];
+ rxc = &rxq->vxrxq_comp_ring;
+ compsz = 0;
+
+ for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
+ rxr = &rxq->vxrxq_cmd_ring[i];
+
+ descsz = rxr->vxrxr_ndesc *
+ sizeof(struct vmxnet3_rxdesc);
+ compsz += rxr->vxrxr_ndesc *
+ sizeof(struct vmxnet3_rxcompdesc);
+
+ error = bus_dma_tag_create(bus_get_dma_tag(dev),
+ 1, 0, /* alignment, boundary */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ MJUMPAGESIZE, /* maxsize */
+ 1, /* nsegments */
+ MJUMPAGESIZE, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &rxr->vxrxr_rxtag);
+ if (error) {
+ device_printf(dev,
+ "unable to create Rx buffer tag for "
+ "queue %d\n", q);
+ return (error);
+ }
+
+ error = vmxnet3_dma_malloc(sc, descsz, 512,
+ &rxr->vxrxr_dma);
+ if (error) {
+ device_printf(dev, "cannot allocate Rx "
+ "descriptors for queue %d/%d error %d\n",
+ i, q, error);
+ return (error);
+ }
+ rxr->vxrxr_rxd =
+ (struct vmxnet3_rxdesc *) rxr->vxrxr_dma.dma_vaddr;
+ }
+
+ error = vmxnet3_dma_malloc(sc, compsz, 512, &rxc->vxcr_dma);
+ if (error) {
+ device_printf(dev, "cannot alloc Rx comp descriptors "
+ "for queue %d error %d\n", q, error);
+ return (error);
+ }
+ rxc->vxcr_u.rxcd =
+ (struct vmxnet3_rxcompdesc *) rxc->vxcr_dma.dma_vaddr;
+
+ for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
+ rxr = &rxq->vxrxq_cmd_ring[i];
+
+ error = bus_dmamap_create(rxr->vxrxr_rxtag, 0,
+ &rxr->vxrxr_spare_dmap);
+ if (error) {
+ device_printf(dev, "unable to create spare "
+ "dmamap for queue %d/%d error %d\n",
+ q, i, error);
+ return (error);
+ }
+
+ for (j = 0; j < rxr->vxrxr_ndesc; j++) {
+ error = bus_dmamap_create(rxr->vxrxr_rxtag, 0,
+ &rxr->vxrxr_rxbuf[j].vrxb_dmamap);
+ if (error) {
+ device_printf(dev, "unable to create "
+ "dmamap for queue %d/%d slot %d "
+ "error %d\n",
+ q, i, j, error);
+ return (error);
+ }
+ }
+ }
+ }
+
+ return (0);
+}
+
+static void
+vmxnet3_free_rxq_data(struct vmxnet3_softc *sc)
+{
+ device_t dev;
+ struct vmxnet3_rxqueue *rxq;
+ struct vmxnet3_rxring *rxr;
+ struct vmxnet3_comp_ring *rxc;
+ struct vmxnet3_rxbuf *rxb;
+ int i, j, q;
+
+ dev = sc->vmx_dev;
+
+ for (q = 0; q < sc->vmx_nrxqueues; q++) {
+ rxq = &sc->vmx_rxq[q];
+ rxc = &rxq->vxrxq_comp_ring;
+
+ for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
+ rxr = &rxq->vxrxq_cmd_ring[i];
+
+ if (rxr->vxrxr_spare_dmap != NULL) {
+ bus_dmamap_destroy(rxr->vxrxr_rxtag,
+ rxr->vxrxr_spare_dmap);
+ rxr->vxrxr_spare_dmap = NULL;
+ }
+
+ for (j = 0; j < rxr->vxrxr_ndesc; j++) {
+ rxb = &rxr->vxrxr_rxbuf[j];
+ if (rxb->vrxb_dmamap != NULL) {
+ bus_dmamap_destroy(rxr->vxrxr_rxtag,
+ rxb->vrxb_dmamap);
+ rxb->vrxb_dmamap = NULL;
+ }
+ }
+ }
+
+ if (rxc->vxcr_u.rxcd != NULL) {
+ vmxnet3_dma_free(sc, &rxc->vxcr_dma);
+ rxc->vxcr_u.rxcd = NULL;
+ }
+
+ for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
+ rxr = &rxq->vxrxq_cmd_ring[i];
+
+ if (rxr->vxrxr_rxd != NULL) {
+ vmxnet3_dma_free(sc, &rxr->vxrxr_dma);
+ rxr->vxrxr_rxd = NULL;
+ }
+
+ if (rxr->vxrxr_rxtag != NULL) {
+ bus_dma_tag_destroy(rxr->vxrxr_rxtag);
+ rxr->vxrxr_rxtag = NULL;
+ }
+ }
+ }
+}
+
+static int
+vmxnet3_alloc_queue_data(struct vmxnet3_softc *sc)
+{
+ int error;
+
+ error = vmxnet3_alloc_txq_data(sc);
+ if (error)
+ return (error);
+
+ error = vmxnet3_alloc_rxq_data(sc);
+ if (error)
+ return (error);
+
+ return (0);
+}
+
+static void
+vmxnet3_free_queue_data(struct vmxnet3_softc *sc)
+{
+
+ vmxnet3_free_rxq_data(sc);
+ vmxnet3_free_txq_data(sc);
+}
+
+static int
+vmxnet3_alloc_mcast_table(struct vmxnet3_softc *sc)
+{
+ int error;
+
+ error = vmxnet3_dma_malloc(sc, VMXNET3_MULTICAST_MAX * ETHER_ADDR_LEN,
+ 32, &sc->vmx_mcast_dma);
+ if (error)
+ device_printf(sc->vmx_dev, "unable to alloc multicast table\n");
+ else
+ sc->vmx_mcast = sc->vmx_mcast_dma.dma_vaddr;
+
+ return (error);
+}
+
+/*
+ * Release the multicast table DMA memory; no-op if it was never
+ * allocated (sc->vmx_mcast is NULL).
+ */
+static void
+vmxnet3_free_mcast_table(struct vmxnet3_softc *sc)
+{
+
+ if (sc->vmx_mcast != NULL) {
+ vmxnet3_dma_free(sc, &sc->vmx_mcast_dma);
+ sc->vmx_mcast = NULL;
+ }
+}
+
+/*
+ * One-time setup of the driver/device shared memory area: driver
+ * identification, interrupt configuration, the multicast table address
+ * and the DMA addresses/sizes of every Tx and Rx ring.  Fields that
+ * change across reinits are handled by vmxnet3_reinit_shared_data().
+ */
+static void
+vmxnet3_init_shared_data(struct vmxnet3_softc *sc)
+{
+ struct vmxnet3_driver_shared *ds;
+ struct vmxnet3_txqueue *txq;
+ struct vmxnet3_txq_shared *txs;
+ struct vmxnet3_rxqueue *rxq;
+ struct vmxnet3_rxq_shared *rxs;
+ int i;
+
+ ds = sc->vmx_ds;
+
+ /*
+ * Initialize fields of the shared data that remains the same across
+ * reinits. Note the shared data is zero'd when allocated.
+ */
+
+ ds->magic = VMXNET3_REV1_MAGIC;
+
+ /* DriverInfo */
+ ds->version = VMXNET3_DRIVER_VERSION;
+ ds->guest = VMXNET3_GOS_FREEBSD | VMXNET3_GUEST_OS_VERSION |
+#ifdef __LP64__
+ VMXNET3_GOS_64BIT;
+#else
+ VMXNET3_GOS_32BIT;
+#endif
+ ds->vmxnet3_revision = 1;
+ ds->upt_version = 1;
+
+ /* Misc. conf */
+ ds->driver_data = vtophys(sc);
+ ds->driver_data_len = sizeof(struct vmxnet3_softc);
+ ds->queue_shared = sc->vmx_qs_dma.dma_paddr;
+ ds->queue_shared_len = sc->vmx_qs_dma.dma_size;
+ ds->nrxsg_max = sc->vmx_max_rxsegs;
+
+ /* Interrupt control. */
+ ds->automask = sc->vmx_intr_mask_mode == VMXNET3_IMM_AUTO;
+ ds->nintr = sc->vmx_nintrs;
+ ds->evintr = sc->vmx_event_intr_idx;
+ ds->ictrl = VMXNET3_ICTRL_DISABLE_ALL;
+
+ for (i = 0; i < sc->vmx_nintrs; i++)
+ ds->modlevel[i] = UPT1_IMOD_ADAPTIVE;
+
+ /* Receive filter. */
+ ds->mcast_table = sc->vmx_mcast_dma.dma_paddr;
+ ds->mcast_tablelen = sc->vmx_mcast_dma.dma_size;
+
+ /* Tx queues */
+ for (i = 0; i < sc->vmx_ntxqueues; i++) {
+ txq = &sc->vmx_txq[i];
+ txs = txq->vxtxq_ts;
+
+ txs->cmd_ring = txq->vxtxq_cmd_ring.vxtxr_dma.dma_paddr;
+ txs->cmd_ring_len = sc->vmx_ntxdescs;
+ txs->comp_ring = txq->vxtxq_comp_ring.vxcr_dma.dma_paddr;
+ txs->comp_ring_len = sc->vmx_ntxdescs;
+ txs->driver_data = vtophys(txq);
+ txs->driver_data_len = sizeof(struct vmxnet3_txqueue);
+ }
+
+ /* Rx queues */
+ for (i = 0; i < sc->vmx_nrxqueues; i++) {
+ rxq = &sc->vmx_rxq[i];
+ rxs = rxq->vxrxq_rs;
+
+ /* Each Rx queue has two command rings and one completion ring
+ sized to cover completions from both command rings. */
+ rxs->cmd_ring[0] = rxq->vxrxq_cmd_ring[0].vxrxr_dma.dma_paddr;
+ rxs->cmd_ring_len[0] = rxq->vxrxq_cmd_ring[0].vxrxr_ndesc;
+ rxs->cmd_ring[1] = rxq->vxrxq_cmd_ring[1].vxrxr_dma.dma_paddr;
+ rxs->cmd_ring_len[1] = rxq->vxrxq_cmd_ring[1].vxrxr_ndesc;
+ rxs->comp_ring = rxq->vxrxq_comp_ring.vxcr_dma.dma_paddr;
+ rxs->comp_ring_len = rxq->vxrxq_cmd_ring[0].vxrxr_ndesc +
+ rxq->vxrxq_cmd_ring[1].vxrxr_ndesc;
+ rxs->driver_data = vtophys(rxq);
+ rxs->driver_data_len = sizeof(struct vmxnet3_rxqueue);
+ }
+}
+
+/*
+ * Re-sync device state with the ifnet: reprogram the current MAC
+ * address into the device and recompute if_hwassist from the
+ * currently enabled capabilities.
+ */
+static void
+vmxnet3_reinit_interface(struct vmxnet3_softc *sc)
+{
+ struct ifnet *ifp;
+
+ ifp = sc->vmx_ifp;
+
+ /* Use the current MAC address. */
+ bcopy(IF_LLADDR(sc->vmx_ifp), sc->vmx_lladdr, ETHER_ADDR_LEN);
+ vmxnet3_set_lladdr(sc);
+
+ ifp->if_hwassist = 0;
+ if (ifp->if_capenable & IFCAP_TXCSUM)
+ ifp->if_hwassist |= VMXNET3_CSUM_OFFLOAD;
+ if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
+ ifp->if_hwassist |= VMXNET3_CSUM_OFFLOAD_IPV6;
+ if (ifp->if_capenable & IFCAP_TSO4)
+ ifp->if_hwassist |= CSUM_TSO;
+ if (ifp->if_capenable & IFCAP_TSO6)
+ ifp->if_hwassist |= CSUM_TSO; /* No CSUM_TSO_IPV6. */
+}
+
+/*
+ * Refresh the shared-area fields that may change between reinits
+ * (offload feature flags, MTU, queue counts), then hand the physical
+ * address of the shared area to the device via the BAR1 DSL/DSH
+ * registers (low/high 32 bits).
+ */
+static void
+vmxnet3_reinit_shared_data(struct vmxnet3_softc *sc)
+{
+ struct ifnet *ifp;
+ struct vmxnet3_driver_shared *ds;
+
+ ifp = sc->vmx_ifp;
+ ds = sc->vmx_ds;
+
+ ds->upt_features = 0;
+ if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
+ ds->upt_features |= UPT1_F_VLAN;
+ if (ifp->if_capenable & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6))
+ ds->upt_features |= UPT1_F_CSUM;
+ if (ifp->if_capenable & IFCAP_LRO)
+ ds->upt_features |= UPT1_F_LRO;
+
+ ds->mtu = ifp->if_mtu;
+ ds->ntxqueue = sc->vmx_ntxqueues;
+ ds->nrxqueue = sc->vmx_nrxqueues;
+
+ vmxnet3_write_bar1(sc, VMXNET3_BAR1_DSL, sc->vmx_ds_dma.dma_paddr);
+ vmxnet3_write_bar1(sc, VMXNET3_BAR1_DSH,
+ (uint64_t) sc->vmx_ds_dma.dma_paddr >> 32);
+}
+
+/*
+ * Allocate every DMA-backed data structure the driver needs and then
+ * populate the shared area.  Order matters: the queue and multicast
+ * allocations are recorded in the shared area set up last.  Returns 0
+ * or the first allocator error; the caller unwinds via
+ * vmxnet3_free_data() on failure.
+ */
+static int
+vmxnet3_alloc_data(struct vmxnet3_softc *sc)
+{
+	int error;
+
+	if ((error = vmxnet3_alloc_shared_data(sc)) != 0)
+		return (error);
+	if ((error = vmxnet3_alloc_queue_data(sc)) != 0)
+		return (error);
+	if ((error = vmxnet3_alloc_mcast_table(sc)) != 0)
+		return (error);
+
+	vmxnet3_init_shared_data(sc);
+
+	return (0);
+}
+
+/*
+ * Free everything vmxnet3_alloc_data() allocated, in reverse order.
+ * Each helper tolerates partially allocated state.
+ */
+static void
+vmxnet3_free_data(struct vmxnet3_softc *sc)
+{
+
+ vmxnet3_free_mcast_table(sc);
+ vmxnet3_free_queue_data(sc);
+ vmxnet3_free_shared_data(sc);
+}
+
+/*
+ * Create and configure the ifnet: entry points, checksum/TSO offload
+ * capabilities (enabled by default), LRO and VLAN hardware filtering
+ * (available but off by default), VLAN event handlers, and a fixed
+ * autoselect media.  Returns 0 on success or ENOSPC if the ifnet
+ * cannot be allocated.
+ */
+static int
+vmxnet3_setup_interface(struct vmxnet3_softc *sc)
+{
+	device_t dev;
+	struct ifnet *ifp;
+
+	dev = sc->vmx_dev;
+
+	ifp = sc->vmx_ifp = if_alloc(IFT_ETHER);
+	if (ifp == NULL) {
+		device_printf(dev, "cannot allocate ifnet structure\n");
+		return (ENOSPC);
+	}
+
+	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
+#if __FreeBSD_version < 1000025
+	ifp->if_baudrate = 1000000000;
+#else
+	if_initbaudrate(ifp, IF_Gbps(10));
+#endif
+	ifp->if_softc = sc;
+	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+	ifp->if_init = vmxnet3_init;
+	ifp->if_ioctl = vmxnet3_ioctl;
+	ifp->if_start = vmxnet3_start;
+	ifp->if_snd.ifq_drv_maxlen = sc->vmx_ntxdescs - 1;
+	IFQ_SET_MAXLEN(&ifp->if_snd, sc->vmx_ntxdescs - 1);
+	IFQ_SET_READY(&ifp->if_snd);
+
+	vmxnet3_get_lladdr(sc);
+	ether_ifattach(ifp, sc->vmx_lladdr);
+
+	ifp->if_capabilities |= IFCAP_RXCSUM | IFCAP_TXCSUM;
+	ifp->if_capabilities |= IFCAP_RXCSUM_IPV6 | IFCAP_TXCSUM_IPV6;
+	ifp->if_capabilities |= IFCAP_TSO4 | IFCAP_TSO6;
+	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
+	ifp->if_hwassist |= VMXNET3_CSUM_ALL_OFFLOAD;
+
+	ifp->if_capenable = ifp->if_capabilities;
+
+	/*
+	 * Capabilities after here are not enabled by default.
+	 */
+
+	ifp->if_capabilities |= IFCAP_LRO;
+
+	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
+	sc->vmx_vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
+	    vmxnet3_register_vlan, sc, EVENTHANDLER_PRI_FIRST);
+	/*
+	 * BUGFIX: the detach handler belongs on the vlan_unconfig event.
+	 * It was previously registered on vlan_config, which would invoke
+	 * vmxnet3_unregister_vlan when a VLAN is *created* and never when
+	 * one is destroyed, leaving stale tags in the hardware filter.
+	 */
+	sc->vmx_vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
+	    vmxnet3_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST);
+
+	/* The paravirtual device only reports autoselect media. */
+	ifmedia_init(&sc->vmx_media, 0, vmxnet3_media_change,
+	    vmxnet3_media_status);
+	ifmedia_add(&sc->vmx_media, IFM_ETHER | IFM_AUTO, 0, NULL);
+	ifmedia_set(&sc->vmx_media, IFM_ETHER | IFM_AUTO);
+
+	return (0);
+}
+
+/*
+ * Service a device event interrupt: acknowledge the pending events via
+ * BAR1, update link state, report Tx/Rx queue errors, and on a queue
+ * error reset the device by clearing IFF_DRV_RUNNING and re-running
+ * vmxnet3_init_locked().  Runs under the core lock taken here.
+ */
+static void
+vmxnet3_evintr(struct vmxnet3_softc *sc)
+{
+ device_t dev;
+ struct ifnet *ifp;
+ struct vmxnet3_txq_shared *ts;
+ struct vmxnet3_rxq_shared *rs;
+ uint32_t event;
+ int reset;
+
+ dev = sc->vmx_dev;
+ ifp = sc->vmx_ifp;
+ reset = 0;
+
+ VMXNET3_CORE_LOCK(sc);
+
+ /* Clear events. */
+ event = sc->vmx_ds->event;
+ vmxnet3_write_bar1(sc, VMXNET3_BAR1_EVENT, event);
+
+ if (event & VMXNET3_EVENT_LINK)
+ vmxnet3_link_status(sc);
+
+ if (event & (VMXNET3_EVENT_TQERROR | VMXNET3_EVENT_RQERROR)) {
+ reset = 1;
+ /* GET_STATUS refreshes the per-queue shared error fields. */
+ vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_STATUS);
+ ts = sc->vmx_txq[0].vxtxq_ts;
+ if (ts->stopped != 0)
+ device_printf(dev, "Tx queue error %#x\n", ts->error);
+ rs = sc->vmx_rxq[0].vxrxq_rs;
+ if (rs->stopped != 0)
+ device_printf(dev, "Rx queue error %#x\n", rs->error);
+ device_printf(dev, "Rx/Tx queue error event ... resetting\n");
+ }
+
+ if (event & VMXNET3_EVENT_DIC)
+ device_printf(dev, "device implementation change event\n");
+ if (event & VMXNET3_EVENT_DEBUG)
+ device_printf(dev, "debug event\n");
+
+ if (reset != 0) {
+ ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+ vmxnet3_init_locked(sc);
+ }
+
+ VMXNET3_CORE_UNLOCK(sc);
+}
+
+/*
+ * Reclaim Tx descriptors completed by the device: walk the completion
+ * ring while the generation bit matches ours, unload and free each
+ * completed mbuf chain, and advance the command ring's next index past
+ * the frame's eop descriptor.  Clears the watchdog once the command
+ * ring is fully drained.  Called with the Tx queue lock held.
+ */
+static void
+vmxnet3_txq_eof(struct vmxnet3_txqueue *txq)
+{
+ struct vmxnet3_softc *sc;
+ struct ifnet *ifp;
+ struct vmxnet3_txring *txr;
+ struct vmxnet3_comp_ring *txc;
+ struct vmxnet3_txcompdesc *txcd;
+ struct vmxnet3_txbuf *txb;
+ u_int sop;
+
+ sc = txq->vxtxq_sc;
+ ifp = sc->vmx_ifp;
+ txr = &txq->vxtxq_cmd_ring;
+ txc = &txq->vxtxq_comp_ring;
+
+ VMXNET3_TXQ_LOCK_ASSERT(txq);
+
+ for (;;) {
+ txcd = &txc->vxcr_u.txcd[txc->vxcr_next];
+ /* Generation mismatch means the device has not written it yet. */
+ if (txcd->gen != txc->vxcr_gen)
+ break;
+
+ /* Flip our generation bit on ring wrap. */
+ if (++txc->vxcr_next == txc->vxcr_ndesc) {
+ txc->vxcr_next = 0;
+ txc->vxcr_gen ^= 1;
+ }
+
+ sop = txr->vxtxr_next;
+ txb = &txr->vxtxr_txbuf[sop];
+
+ if (txb->vtxb_m != NULL) {
+ bus_dmamap_sync(txr->vxtxr_txtag, txb->vtxb_dmamap,
+ BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(txr->vxtxr_txtag, txb->vtxb_dmamap);
+
+ m_freem(txb->vtxb_m);
+ txb->vtxb_m = NULL;
+
+ ifp->if_opackets++;
+ }
+
+ /* One completion covers the whole frame up to eop_idx. */
+ txr->vxtxr_next = (txcd->eop_idx + 1) % txr->vxtxr_ndesc;
+ }
+
+ if (txr->vxtxr_head == txr->vxtxr_next)
+ txq->vxtxq_watchdog = 0;
+}
+
+/*
+ * Allocate and DMA-load a replacement mbuf for the Rx descriptor at the
+ * ring's current fill index.  Ring 0 frame-start slots (every
+ * vmx_rx_max_chain'th index) get an MCLBYTES "head" buffer with a
+ * packet header; all other slots get MJUMPAGESIZE "body" buffers.  On
+ * success the old dmamap becomes the new spare, the descriptor is
+ * rewritten, and the fill index advances.  Returns 0 or ENOBUFS/DMA
+ * load error, in which case the ring is left unchanged.
+ */
+static int
+vmxnet3_newbuf(struct vmxnet3_softc *sc, struct vmxnet3_rxring *rxr)
+{
+	struct ifnet *ifp;
+	struct mbuf *m;
+	struct vmxnet3_rxdesc *rxd;
+	struct vmxnet3_rxbuf *rxb;
+	bus_dma_tag_t tag;
+	bus_dmamap_t dmap;
+	bus_dma_segment_t segs[1];
+	int idx, clsize, btype, flags, nsegs, error;
+
+	ifp = sc->vmx_ifp;
+	tag = rxr->vxrxr_rxtag;
+	/* Load into the spare map first so failure cannot strand the slot. */
+	dmap = rxr->vxrxr_spare_dmap;
+	idx = rxr->vxrxr_fill;
+	rxd = &rxr->vxrxr_rxd[idx];
+	rxb = &rxr->vxrxr_rxbuf[idx];
+
+#ifdef VMXNET3_FAILPOINTS
+	KFAIL_POINT_CODE(VMXNET3_FP, newbuf, return ENOBUFS);
+	if (rxr->vxrxr_rid != 0)
+		KFAIL_POINT_CODE(VMXNET3_FP, newbuf_body_only, return ENOBUFS);
+#endif
+
+	if (rxr->vxrxr_rid == 0 && (idx % sc->vmx_rx_max_chain) == 0) {
+		flags = M_PKTHDR;
+		clsize = MCLBYTES;
+		btype = VMXNET3_BTYPE_HEAD;
+	} else {
+#if __FreeBSD_version < 902001
+		/*
+		 * These mbufs will never be used for the start of a frame.
+		 * Roughly prior to branching releng/9.2, the load_mbuf_sg()
+		 * required the mbuf to always be a packet header. Avoid
+		 * unnecessary mbuf initialization in newer versions where
+		 * that is not the case.
+		 */
+		flags = M_PKTHDR;
+#else
+		flags = 0;
+#endif
+		clsize = MJUMPAGESIZE;
+		btype = VMXNET3_BTYPE_BODY;
+	}
+
+	m = m_getjcl(M_NOWAIT, MT_DATA, flags, clsize);
+	if (m == NULL) {
+		sc->vmx_stats.vmst_mgetcl_failed++;
+		return (ENOBUFS);
+	}
+
+	if (btype == VMXNET3_BTYPE_HEAD) {
+		m->m_len = m->m_pkthdr.len = clsize;
+		/* Align the IP header that follows the Ethernet header. */
+		m_adj(m, ETHER_ALIGN);
+	} else
+		m->m_len = clsize;
+
+	error = bus_dmamap_load_mbuf_sg(tag, dmap, m, &segs[0], &nsegs,
+	    BUS_DMA_NOWAIT);
+	if (error) {
+		m_freem(m);
+		/* BUGFIX: dropped the stray second semicolon (empty stmt). */
+		sc->vmx_stats.vmst_mbuf_load_failed++;
+		return (error);
+	}
+	KASSERT(nsegs == 1,
+	    ("%s: mbuf %p with too many segments %d", __func__, m, nsegs));
+#if __FreeBSD_version < 902001
+	if (btype == VMXNET3_BTYPE_BODY)
+		m->m_flags &= ~M_PKTHDR;
+#endif
+
+	if (rxb->vrxb_m != NULL) {
+		bus_dmamap_sync(tag, rxb->vrxb_dmamap, BUS_DMASYNC_POSTREAD);
+		bus_dmamap_unload(tag, rxb->vrxb_dmamap);
+	}
+
+	/* Swap maps: the slot's old map becomes the next spare. */
+	rxr->vxrxr_spare_dmap = rxb->vrxb_dmamap;
+	rxb->vrxb_dmamap = dmap;
+	rxb->vrxb_m = m;
+
+	rxd->addr = segs[0].ds_addr;
+	rxd->len = segs[0].ds_len;
+	rxd->btype = btype;
+	/* Handing the descriptor back to the device. */
+	rxd->gen = rxr->vxrxr_gen;
+
+	vmxnet3_rxr_increment_fill(rxr);
+	return (0);
+}
+
+/*
+ * Give the Rx descriptor at 'idx' straight back to the device without
+ * consuming its buffer: set the device's generation bit and advance the
+ * ring's fill index.
+ */
+static void
+vmxnet3_rxq_eof_discard(struct vmxnet3_rxqueue *rxq,
+    struct vmxnet3_rxring *rxr, int idx)
+{
+
+	rxr->vxrxr_rxd[idx].gen = rxr->vxrxr_gen;
+	vmxnet3_rxr_increment_fill(rxr);
+}
+
+/*
+ * Drop the remainder of a partially received frame: consume completion
+ * descriptors and recycle the matching command-ring descriptors until
+ * the end-of-packet completion is seen.  Used after vmxnet3_newbuf()
+ * fails mid-frame.  Note 'eof' is only read at the loop condition,
+ * which the early break (unexpected generation mismatch) skips.
+ */
+static void
+vmxnet3_rxq_discard_chain(struct vmxnet3_rxqueue *rxq)
+{
+ struct vmxnet3_softc *sc;
+ struct vmxnet3_rxring *rxr;
+ struct vmxnet3_comp_ring *rxc;
+ struct vmxnet3_rxcompdesc *rxcd;
+ int idx, eof;
+
+ sc = rxq->vxrxq_sc;
+ rxc = &rxq->vxrxq_comp_ring;
+
+ do {
+ rxcd = &rxc->vxcr_u.rxcd[rxc->vxcr_next];
+ if (rxcd->gen != rxc->vxcr_gen)
+ break; /* Not expected. */
+ /* Read barrier before consuming descriptor contents. */
+ vmxnet3_barrier(sc, VMXNET3_BARRIER_RD);
+
+ if (++rxc->vxcr_next == rxc->vxcr_ndesc) {
+ rxc->vxcr_next = 0;
+ rxc->vxcr_gen ^= 1;
+ }
+
+ idx = rxcd->rxd_idx;
+ eof = rxcd->eop;
+ /* qid selects between the queue's two command rings. */
+ if (rxcd->qid < sc->vmx_nrxqueues)
+ rxr = &rxq->vxrxq_cmd_ring[0];
+ else
+ rxr = &rxq->vxrxq_cmd_ring[1];
+ vmxnet3_rxq_eof_discard(rxq, rxr, idx);
+ } while (!eof);
+}
+
+/*
+ * Translate the device's receive-checksum result bits into mbuf
+ * csum_flags.  L4 checksum status is only trusted for unfragmented
+ * TCP/UDP packets.
+ */
+static void
+vmxnet3_rx_csum(struct vmxnet3_rxcompdesc *rxcd, struct mbuf *m)
+{
+
+ if (rxcd->ipv4) {
+ m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
+ if (rxcd->ipcsum_ok)
+ m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
+ }
+
+ if (!rxcd->fragment) {
+ if (rxcd->csum_ok && (rxcd->tcp || rxcd->udp)) {
+ m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
+ CSUM_PSEUDO_HDR;
+ m->m_pkthdr.csum_data = 0xFFFF;
+ }
+ }
+}
+
+/*
+ * Deliver one completed frame to the network stack: drop errored
+ * frames, apply checksum/VLAN metadata from the completion descriptor,
+ * then hand the mbuf to if_input with the Rx queue lock dropped (the
+ * stack may sleep or reenter the driver).  Consumes 'm' in all cases.
+ */
+static void
+vmxnet3_rxq_input(struct vmxnet3_rxqueue *rxq,
+    struct vmxnet3_rxcompdesc *rxcd, struct mbuf *m)
+{
+ struct vmxnet3_softc *sc;
+ struct ifnet *ifp;
+
+ sc = rxq->vxrxq_sc;
+ ifp = sc->vmx_ifp;
+
+ if (rxcd->error) {
+ ifp->if_ierrors++;
+ m_freem(m);
+ return;
+ }
+
+ if (!rxcd->no_csum)
+ vmxnet3_rx_csum(rxcd, m);
+ if (rxcd->vlan) {
+ m->m_flags |= M_VLANTAG;
+ m->m_pkthdr.ether_vtag = rxcd->vtag;
+ }
+
+ ifp->if_ipackets++;
+ VMXNET3_RXQ_UNLOCK(rxq);
+ (*ifp->if_input)(ifp, m);
+ VMXNET3_RXQ_LOCK(rxq);
+}
+
+/*
+ * Main Rx processing loop.  Walks the completion ring while the
+ * generation bit matches, assembling multi-buffer frames (head buffer
+ * from ring 0, body buffers possibly from ring 1) into an mbuf chain
+ * and delivering each completed frame via vmxnet3_rxq_input().  Each
+ * consumed buffer is replaced in place by vmxnet3_newbuf(); on
+ * replacement failure the frame is dropped and its remaining chain
+ * discarded.  Called with the Rx queue lock held; the lock is dropped
+ * around stack input, so IFF_DRV_RUNNING is rechecked afterwards.
+ */
+static void
+vmxnet3_rxq_eof(struct vmxnet3_rxqueue *rxq)
+{
+ struct vmxnet3_softc *sc;
+ struct ifnet *ifp;
+ struct vmxnet3_rxring *rxr;
+ struct vmxnet3_comp_ring *rxc;
+ struct vmxnet3_rxdesc *rxd;
+ struct vmxnet3_rxcompdesc *rxcd;
+ struct mbuf *m, *m_head, *m_tail;
+ int idx, length;
+
+ sc = rxq->vxrxq_sc;
+ ifp = sc->vmx_ifp;
+ rxc = &rxq->vxrxq_comp_ring;
+ m_head = m_tail = NULL;
+
+ VMXNET3_RXQ_LOCK_ASSERT(rxq);
+
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+ return;
+
+ for (;;) {
+ rxcd = &rxc->vxcr_u.rxcd[rxc->vxcr_next];
+ if (rxcd->gen != rxc->vxcr_gen)
+ break;
+ /* Read barrier before consuming descriptor contents. */
+ vmxnet3_barrier(sc, VMXNET3_BARRIER_RD);
+
+ if (++rxc->vxcr_next == rxc->vxcr_ndesc) {
+ rxc->vxcr_next = 0;
+ rxc->vxcr_gen ^= 1;
+ }
+
+ idx = rxcd->rxd_idx;
+ length = rxcd->len;
+ /* qid selects between the queue's two command rings. */
+ if (rxcd->qid < sc->vmx_nrxqueues)
+ rxr = &rxq->vxrxq_cmd_ring[0];
+ else
+ rxr = &rxq->vxrxq_cmd_ring[1];
+ rxd = &rxr->vxrxr_rxd[idx];
+
+ m = rxr->vxrxr_rxbuf[idx].vrxb_m;
+ KASSERT(m != NULL, ("%s: queue %d idx %d without mbuf",
+ __func__, rxcd->qid, idx));
+
+ /*
+ * The host may skip descriptors. We detect this when this
+ * descriptor does not match the previous fill index. Catch
+ * up with the host now.
+ */
+ if (__predict_false(rxr->vxrxr_fill != idx)) {
+ while (rxr->vxrxr_fill != idx) {
+ rxr->vxrxr_rxd[rxr->vxrxr_fill].gen =
+ rxr->vxrxr_gen;
+ vmxnet3_rxr_increment_fill(rxr);
+ }
+ }
+
+ if (rxcd->sop) {
+ /* Start of frame: must be a head buffer in ring 0. */
+ KASSERT(rxd->btype == VMXNET3_BTYPE_HEAD,
+ ("%s: start of frame w/o head buffer", __func__));
+ KASSERT(rxr == &rxq->vxrxq_cmd_ring[0],
+ ("%s: start of frame not in ring 0", __func__));
+ KASSERT((idx % sc->vmx_rx_max_chain) == 0,
+ ("%s: start of frame at unexcepted index %d (%d)",
+ __func__, idx, sc->vmx_rx_max_chain));
+ KASSERT(m_head == NULL,
+ ("%s: duplicate start of frame?", __func__));
+
+ if (length == 0) {
+ /* Just ignore this descriptor. */
+ vmxnet3_rxq_eof_discard(rxq, rxr, idx);
+ goto nextp;
+ }
+
+ if (vmxnet3_newbuf(sc, rxr) != 0) {
+ ifp->if_iqdrops++;
+ vmxnet3_rxq_eof_discard(rxq, rxr, idx);
+ if (!rxcd->eop)
+ vmxnet3_rxq_discard_chain(rxq);
+ goto nextp;
+ }
+
+ m->m_pkthdr.rcvif = ifp;
+ m->m_pkthdr.len = m->m_len = length;
+ m->m_pkthdr.csum_flags = 0;
+ m_head = m_tail = m;
+
+ } else {
+ /* Continuation buffer: append to the current chain. */
+ KASSERT(rxd->btype == VMXNET3_BTYPE_BODY,
+ ("%s: non start of frame w/o body buffer", __func__));
+ KASSERT(m_head != NULL,
+ ("%s: frame not started?", __func__));
+
+ if (vmxnet3_newbuf(sc, rxr) != 0) {
+ ifp->if_iqdrops++;
+ vmxnet3_rxq_eof_discard(rxq, rxr, idx);
+ if (!rxcd->eop)
+ vmxnet3_rxq_discard_chain(rxq);
+ m_freem(m_head);
+ m_head = m_tail = NULL;
+ goto nextp;
+ }
+
+ m->m_len = length;
+ m_head->m_pkthdr.len += length;
+ m_tail->m_next = m;
+ m_tail = m;
+ }
+
+ if (rxcd->eop) {
+ vmxnet3_rxq_input(rxq, rxcd, m_head);
+ m_head = m_tail = NULL;
+
+ /* Must recheck after dropping the Rx lock. */
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+ break;
+ }
+
+nextp:
+ /* Some hosts want the ring head written back after each fill. */
+ if (__predict_false(rxq->vxrxq_rs->update_rxhead)) {
+ int qid = rxcd->qid;
+ bus_size_t r;
+
+ idx = (idx + 1) % rxr->vxrxr_ndesc;
+ if (qid >= sc->vmx_nrxqueues) {
+ qid -= sc->vmx_nrxqueues;
+ r = VMXNET3_BAR0_RXH2(qid);
+ } else
+ r = VMXNET3_BAR0_RXH1(qid);
+ vmxnet3_write_bar0(sc, r, idx);
+ }
+ }
+}
+
+/*
+ * Single-vector (INTx or MSI) interrupt handler: services pending
+ * events, Rx queue 0 and Tx queue 0 in turn, then re-enables
+ * interrupts.  For true legacy INTx the BAR1 interrupt register is
+ * read first to check whether this device asserted the shared line.
+ */
+static void
+vmxnet3_legacy_intr(void *xsc)
+{
+ struct vmxnet3_softc *sc;
+ struct vmxnet3_rxqueue *rxq;
+ struct vmxnet3_txqueue *txq;
+ struct ifnet *ifp;
+
+ sc = xsc;
+ rxq = &sc->vmx_rxq[0];
+ txq = &sc->vmx_txq[0];
+ ifp = sc->vmx_ifp;
+
+ if (sc->vmx_intr_type == VMXNET3_IT_LEGACY) {
+ if (vmxnet3_read_bar1(sc, VMXNET3_BAR1_INTR) == 0)
+ return;
+ }
+ /* In auto-mask mode the device masked for us already. */
+ if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE)
+ vmxnet3_disable_all_intrs(sc);
+
+ if (sc->vmx_ds->event != 0)
+ vmxnet3_evintr(sc);
+
+ VMXNET3_RXQ_LOCK(rxq);
+ vmxnet3_rxq_eof(rxq);
+ VMXNET3_RXQ_UNLOCK(rxq);
+
+ VMXNET3_TXQ_LOCK(txq);
+ vmxnet3_txq_eof(txq);
+ if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+ vmxnet3_start_locked(ifp);
+ VMXNET3_TXQ_UNLOCK(txq);
+
+ vmxnet3_enable_all_intrs(sc);
+}
+
+/*
+ * Per-Tx-queue MSI-X handler: reclaim completed descriptors, restart
+ * transmission if packets are queued, then re-enable this queue's
+ * interrupt vector.
+ */
+static void
+vmxnet3_txq_intr(void *xtxq)
+{
+ struct vmxnet3_softc *sc;
+ struct vmxnet3_txqueue *txq;
+ struct ifnet *ifp;
+
+ txq = xtxq;
+ sc = txq->vxtxq_sc;
+ ifp = sc->vmx_ifp;
+
+ /* In auto-mask mode the device masked for us already. */
+ if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE)
+ vmxnet3_disable_intr(sc, txq->vxtxq_intr_idx);
+
+ VMXNET3_TXQ_LOCK(txq);
+ vmxnet3_txq_eof(txq);
+ if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+ vmxnet3_start_locked(ifp);
+ VMXNET3_TXQ_UNLOCK(txq);
+
+ vmxnet3_enable_intr(sc, txq->vxtxq_intr_idx);
+}
+
+/*
+ * Per-Rx-queue MSI-X handler: drain the queue's completion ring, then
+ * re-enable this queue's interrupt vector.
+ */
+static void
+vmxnet3_rxq_intr(void *xrxq)
+{
+ struct vmxnet3_softc *sc;
+ struct vmxnet3_rxqueue *rxq;
+
+ rxq = xrxq;
+ sc = rxq->vxrxq_sc;
+
+ /* In auto-mask mode the device masked for us already. */
+ if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE)
+ vmxnet3_disable_intr(sc, rxq->vxrxq_intr_idx);
+
+ VMXNET3_RXQ_LOCK(rxq);
+ vmxnet3_rxq_eof(rxq);
+ VMXNET3_RXQ_UNLOCK(rxq);
+
+ vmxnet3_enable_intr(sc, rxq->vxrxq_intr_idx);
+}
+
+/*
+ * Dedicated MSI-X handler for the device event vector: process any
+ * pending events and re-enable the vector.
+ */
+static void
+vmxnet3_event_intr(void *xsc)
+{
+	struct vmxnet3_softc *sc = xsc;
+
+	/* In auto-mask mode the device masked for us already. */
+	if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE)
+		vmxnet3_disable_intr(sc, sc->vmx_event_intr_idx);
+
+	if (sc->vmx_ds->event != 0)
+		vmxnet3_evintr(sc);
+
+	vmxnet3_enable_intr(sc, sc->vmx_event_intr_idx);
+}
+
+/*
+ * Release every in-flight Tx mbuf on the queue's command ring: sync
+ * and unload its dmamap, then free the chain.  Used when the interface
+ * is stopped.
+ */
+static void
+vmxnet3_txstop(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *txq)
+{
+ struct vmxnet3_txring *txr;
+ struct vmxnet3_txbuf *txb;
+ int i;
+
+ txr = &txq->vxtxq_cmd_ring;
+
+ for (i = 0; i < txr->vxtxr_ndesc; i++) {
+ txb = &txr->vxtxr_txbuf[i];
+
+ if (txb->vtxb_m == NULL)
+ continue;
+
+ bus_dmamap_sync(txr->vxtxr_txtag, txb->vtxb_dmamap,
+ BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(txr->vxtxr_txtag, txb->vtxb_dmamap);
+ m_freem(txb->vtxb_m);
+ txb->vtxb_m = NULL;
+ }
+}
+
+/*
+ * Release every posted Rx mbuf on both of the queue's command rings:
+ * sync and unload its dmamap, then free the mbuf.  Used when the
+ * interface is stopped.
+ */
+static void
+vmxnet3_rxstop(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rxq)
+{
+ struct vmxnet3_rxring *rxr;
+ struct vmxnet3_rxbuf *rxb;
+ int i, j;
+
+ for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
+ rxr = &rxq->vxrxq_cmd_ring[i];
+
+ for (j = 0; j < rxr->vxrxr_ndesc; j++) {
+ rxb = &rxr->vxrxr_rxbuf[j];
+
+ if (rxb->vrxb_m == NULL)
+ continue;
+ bus_dmamap_sync(rxr->vxrxr_rxtag, rxb->vrxb_dmamap,
+ BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(rxr->vxrxr_rxtag, rxb->vrxb_dmamap);
+ m_freem(rxb->vrxb_m);
+ rxb->vrxb_m = NULL;
+ }
+ }
+}
+
+/*
+ * Rendezvous with the per-queue interrupt handlers: acquiring and
+ * releasing each queue lock guarantees any handler running at the time
+ * of the stop has finished before the rings are torn down.
+ */
+static void
+vmxnet3_stop_rendezvous(struct vmxnet3_softc *sc)
+{
+ struct vmxnet3_rxqueue *rxq;
+ struct vmxnet3_txqueue *txq;
+ int i;
+
+ for (i = 0; i < sc->vmx_nrxqueues; i++) {
+ rxq = &sc->vmx_rxq[i];
+ VMXNET3_RXQ_LOCK(rxq);
+ VMXNET3_RXQ_UNLOCK(rxq);
+ }
+
+ for (i = 0; i < sc->vmx_ntxqueues; i++) {
+ txq = &sc->vmx_txq[i];
+ VMXNET3_TXQ_LOCK(txq);
+ VMXNET3_TXQ_UNLOCK(txq);
+ }
+}
+
+/*
+ * Bring the interface down: mark it not running, stop the tick
+ * callout, disable the device and its interrupts, wait out any
+ * in-flight queue handlers, free all queued mbufs, and finally reset
+ * the device.  Core lock must be held by the caller.
+ */
+static void
+vmxnet3_stop(struct vmxnet3_softc *sc)
+{
+ struct ifnet *ifp;
+ int q;
+
+ ifp = sc->vmx_ifp;
+ VMXNET3_CORE_LOCK_ASSERT(sc);
+
+ ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+ sc->vmx_link_active = 0;
+ callout_stop(&sc->vmx_tick);
+
+ /* Disable interrupts. */
+ vmxnet3_disable_all_intrs(sc);
+ vmxnet3_write_cmd(sc, VMXNET3_CMD_DISABLE);
+
+ vmxnet3_stop_rendezvous(sc);
+
+ for (q = 0; q < sc->vmx_ntxqueues; q++)
+ vmxnet3_txstop(sc, &sc->vmx_txq[q]);
+ for (q = 0; q < sc->vmx_nrxqueues; q++)
+ vmxnet3_rxstop(sc, &sc->vmx_rxq[q]);
+
+ vmxnet3_write_cmd(sc, VMXNET3_CMD_RESET);
+}
+
+/*
+ * Reset a Tx queue's rings to their initial state: indices to zero,
+ * generation bits to VMXNET3_INIT_GEN, descriptor memory cleared.
+ */
+static void
+vmxnet3_txinit(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *txq)
+{
+ struct vmxnet3_txring *txr;
+ struct vmxnet3_comp_ring *txc;
+
+ txr = &txq->vxtxq_cmd_ring;
+ txr->vxtxr_head = 0;
+ txr->vxtxr_next = 0;
+ txr->vxtxr_gen = VMXNET3_INIT_GEN;
+ bzero(txr->vxtxr_txd,
+ txr->vxtxr_ndesc * sizeof(struct vmxnet3_txdesc));
+
+ txc = &txq->vxtxq_comp_ring;
+ txc->vxcr_next = 0;
+ txc->vxcr_gen = VMXNET3_INIT_GEN;
+ bzero(txc->vxcr_u.txcd,
+ txc->vxcr_ndesc * sizeof(struct vmxnet3_txcompdesc));
+}
+
+/*
+ * Reset and repopulate an Rx queue's rings for the current MTU/LRO
+ * configuration.  Decides the per-frame buffer chain length
+ * (vmx_rx_max_chain), fills the active command ring(s) with fresh
+ * mbufs via vmxnet3_newbuf(), zeroes any unused ring, and resets the
+ * completion ring.  Returns 0 or the first buffer allocation error.
+ */
+static int
+vmxnet3_rxinit(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rxq)
+{
+ struct ifnet *ifp;
+ struct vmxnet3_rxring *rxr;
+ struct vmxnet3_comp_ring *rxc;
+ int i, populate, idx, frame_size, error;
+
+ ifp = sc->vmx_ifp;
+ frame_size = ifp->if_mtu + sizeof(struct ether_vlan_header);
+
+ /*
+ * If the MTU causes us to exceed what a regular sized cluster
+ * can handle, we allocate a second MJUMPAGESIZE cluster after
+ * it in ring 0. If in use, ring 1 always contains MJUMPAGESIZE
+ * clusters.
+ *
+ * Keep rx_max_chain a divisor of the maximum Rx ring size to
+ * to make our life easier. We do not support changing the ring
+ * size after the attach.
+ */
+ if (frame_size <= MCLBYTES - ETHER_ALIGN)
+ sc->vmx_rx_max_chain = 1;
+ else
+ sc->vmx_rx_max_chain = 2;
+
+ /*
+ * Only populate ring 1 if the configuration will take advantage
+ * of it. That is either when LRO is enabled or the frame size
+ * exceeds what ring 0 can contain.
+ */
+ if ((ifp->if_capenable & IFCAP_LRO) == 0 &&
+ frame_size <= MCLBYTES + MJUMPAGESIZE)
+ populate = 1;
+ else
+ populate = VMXNET3_RXRINGS_PERQ;
+
+ for (i = 0; i < populate; i++) {
+ rxr = &rxq->vxrxq_cmd_ring[i];
+ rxr->vxrxr_fill = 0;
+ rxr->vxrxr_gen = VMXNET3_INIT_GEN;
+ bzero(rxr->vxrxr_rxd,
+ rxr->vxrxr_ndesc * sizeof(struct vmxnet3_rxdesc));
+
+ /* Post a fresh buffer in every descriptor slot. */
+ for (idx = 0; idx < rxr->vxrxr_ndesc; idx++) {
+ error = vmxnet3_newbuf(sc, rxr);
+ if (error)
+ return (error);
+ }
+ }
+
+ /* Leave unused rings empty with the generation the device ignores. */
+ for (/**/; i < VMXNET3_RXRINGS_PERQ; i++) {
+ rxr = &rxq->vxrxq_cmd_ring[i];
+ rxr->vxrxr_fill = 0;
+ rxr->vxrxr_gen = 0;
+ bzero(rxr->vxrxr_rxd,
+ rxr->vxrxr_ndesc * sizeof(struct vmxnet3_rxdesc));
+ }
+
+ rxc = &rxq->vxrxq_comp_ring;
+ rxc->vxcr_next = 0;
+ rxc->vxcr_gen = VMXNET3_INIT_GEN;
+ bzero(rxc->vxcr_u.rxcd,
+ rxc->vxcr_ndesc * sizeof(struct vmxnet3_rxcompdesc));
+
+ return (0);
+}
+
+/*
+ * Reinitialize all Tx and Rx rings for a (re)start.  Tx ring setup
+ * cannot fail; Rx setup allocates mbufs and may, in which case the
+ * first error is reported and returned.
+ */
+static int
+vmxnet3_reinit_queues(struct vmxnet3_softc *sc)
+{
+	device_t dev;
+	int error, q;
+
+	dev = sc->vmx_dev;
+	error = 0;
+
+	for (q = 0; q < sc->vmx_ntxqueues; q++)
+		vmxnet3_txinit(sc, &sc->vmx_txq[q]);
+
+	for (q = 0; q < sc->vmx_nrxqueues; q++) {
+		error = vmxnet3_rxinit(sc, &sc->vmx_rxq[q]);
+		if (error != 0) {
+			device_printf(dev, "cannot populate Rx queue %d\n", q);
+			break;
+		}
+	}
+
+	return (error);
+}
+
+/*
+ * Issue the ENABLE command to the device and, on success, reset both
+ * Rx ring head registers of every queue.  Returns 0 on success, 1 if
+ * the device rejected the enable command.
+ */
+static int
+vmxnet3_enable_device(struct vmxnet3_softc *sc)
+{
+ int q;
+
+ if (vmxnet3_read_cmd(sc, VMXNET3_CMD_ENABLE) != 0) {
+ device_printf(sc->vmx_dev, "device enable command failed!\n");
+ return (1);
+ }
+
+ /* Reset the Rx queue heads. */
+ for (q = 0; q < sc->vmx_nrxqueues; q++) {
+ vmxnet3_write_bar0(sc, VMXNET3_BAR0_RXH1(q), 0);
+ vmxnet3_write_bar0(sc, VMXNET3_BAR0_RXH2(q), 0);
+ }
+
+ return (0);
+}
+
+/*
+ * Reprogram the receive filters after a (re)start: Rx mode and
+ * multicast table, then the VLAN filter bitmap (the shared copy is
+ * either our cached bitvector or all zeroes when hardware VLAN
+ * filtering is disabled).
+ */
+static void
+vmxnet3_reinit_rxfilters(struct vmxnet3_softc *sc)
+{
+ struct ifnet *ifp;
+
+ ifp = sc->vmx_ifp;
+
+ vmxnet3_set_rxfilter(sc);
+
+ if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
+ bcopy(sc->vmx_vlan_filter, sc->vmx_ds->vlan_filter,
+ sizeof(sc->vmx_ds->vlan_filter));
+ else
+ bzero(sc->vmx_ds->vlan_filter,
+ sizeof(sc->vmx_ds->vlan_filter));
+ vmxnet3_write_cmd(sc, VMXNET3_CMD_VLAN_FILTER);
+}
+
+/*
+ * Full reinitialization sequence used by init: refresh the interface
+ * and shared data, rebuild the rings, enable the device, then restore
+ * the Rx filters.  Returns 0 on success or ENXIO on any failure.
+ */
+static int
+vmxnet3_reinit(struct vmxnet3_softc *sc)
+{
+	int error;
+
+	vmxnet3_reinit_interface(sc);
+	vmxnet3_reinit_shared_data(sc);
+
+	error = vmxnet3_reinit_queues(sc);
+	if (error == 0)
+		error = vmxnet3_enable_device(sc);
+	if (error != 0)
+		return (ENXIO);
+
+	vmxnet3_reinit_rxfilters(sc);
+
+	return (0);
+}
+
+/*
+ * Core init path (core lock held): no-op if already running, otherwise
+ * stop the device, run the full reinit sequence, and on success mark
+ * the interface running, refresh link state, enable interrupts and
+ * start the tick callout.  On reinit failure the device is stopped.
+ */
+static void
+vmxnet3_init_locked(struct vmxnet3_softc *sc)
+{
+ struct ifnet *ifp;
+
+ ifp = sc->vmx_ifp;
+
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+ return;
+
+ vmxnet3_stop(sc);
+
+ if (vmxnet3_reinit(sc) != 0) {
+ vmxnet3_stop(sc);
+ return;
+ }
+
+ ifp->if_drv_flags |= IFF_DRV_RUNNING;
+ vmxnet3_link_status(sc);
+
+ vmxnet3_enable_all_intrs(sc);
+ callout_reset(&sc->vmx_tick, hz, vmxnet3_tick, sc);
+}
+
+/*
+ * if_init entry point: serialize the locked init path with the core
+ * lock.
+ */
+static void
+vmxnet3_init(void *xsc)
+{
+	struct vmxnet3_softc *sc = xsc;
+
+	VMXNET3_CORE_LOCK(sc);
+	vmxnet3_init_locked(sc);
+	VMXNET3_CORE_UNLOCK(sc);
+}
+
+/*
+ * Parse a transmit mbuf's headers for checksum/TSO offload: report the
+ * Ethernet type, L4 protocol, and the byte offset of the L4 header
+ * (for TSO, of the L4 payload).  Returns 0 or EINVAL when the packet
+ * cannot be parsed.
+ *
+ * BMV: Much of this can go away once we finally have offsets in
+ * the mbuf packet header. Bug andre@.
+ */
+static int
+vmxnet3_txq_offload_ctx(struct mbuf *m, int *etype, int *proto, int *start)
+{
+	struct ether_vlan_header *evh;
+	int offset;
+
+	evh = mtod(m, struct ether_vlan_header *);
+	if (evh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
+		/* BMV: We should handle nested VLAN tags too. */
+		*etype = ntohs(evh->evl_proto);
+		offset = sizeof(struct ether_vlan_header);
+	} else {
+		*etype = ntohs(evh->evl_encap_proto);
+		offset = sizeof(struct ether_header);
+	}
+
+	switch (*etype) {
+#if defined(INET)
+	case ETHERTYPE_IP: {
+		struct ip *ip, iphdr;
+		/* Copy out the IP header if it is not contiguous. */
+		if (__predict_false(m->m_len < offset + sizeof(struct ip))) {
+			m_copydata(m, offset, sizeof(struct ip),
+			    (caddr_t) &iphdr);
+			ip = &iphdr;
+		} else
+			ip = (struct ip *)(m->m_data + offset);
+		*proto = ip->ip_p;
+		*start = offset + (ip->ip_hl << 2);
+		break;
+	}
+#endif
+#if defined(INET6)
+	case ETHERTYPE_IPV6:
+		*proto = -1;
+		*start = ip6_lasthdr(m, offset, IPPROTO_IPV6, proto);
+		/* Assert the network stack sent us a valid packet. */
+		KASSERT(*start > offset,
+		    ("%s: mbuf %p start %d offset %d proto %d", __func__, m,
+		    *start, offset, *proto));
+		break;
+#endif
+	default:
+		return (EINVAL);
+	}
+
+	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
+		struct tcphdr *tcp, tcphdr;
+
+		if (__predict_false(*proto != IPPROTO_TCP)) {
+			/* Likely failed to correctly parse the mbuf. */
+			return (EINVAL);
+		}
+
+		if (m->m_len < *start + sizeof(struct tcphdr)) {
+			/*
+			 * BUGFIX: copy from *start (the TCP header), not
+			 * from offset (the IP header).  Copying from offset
+			 * read IP header bytes into 'tcphdr', yielding a
+			 * bogus th_off and a corrupt TSO header length.
+			 */
+			m_copydata(m, *start, sizeof(struct tcphdr),
+			    (caddr_t) &tcphdr);
+			tcp = &tcphdr;
+		} else
+			tcp = (struct tcphdr *)(m->m_data + *start);
+
+		/*
+		 * For TSO, the size of the protocol header is also
+		 * included in the descriptor header size.
+		 */
+		*start += (tcp->th_off << 2);
+	}
+
+	return (0);
+}
+
+/*
+ * DMA-load a transmit mbuf chain into 'segs'.  If the chain has more
+ * segments than the tag allows (EFBIG), collapse it once and retry.
+ * On any final failure the mbuf is freed and *m0 is set to NULL; on
+ * success *m0 may point to the collapsed replacement chain.
+ */
+static int
+vmxnet3_txq_load_mbuf(struct vmxnet3_txqueue *txq, struct mbuf **m0,
+    bus_dmamap_t dmap, bus_dma_segment_t segs[], int *nsegs)
+{
+ struct vmxnet3_txring *txr;
+ struct mbuf *m;
+ bus_dma_tag_t tag;
+ int maxsegs, error;
+
+ txr = &txq->vxtxq_cmd_ring;
+ m = *m0;
+ tag = txr->vxtxr_txtag;
+ maxsegs = VMXNET3_TX_MAXSEGS;
+
+ /* Return directly on success or any error other than EFBIG. */
+ error = bus_dmamap_load_mbuf_sg(tag, dmap, m, segs, nsegs, 0);
+ if (error == 0 || error != EFBIG)
+ return (error);
+
+ m = m_collapse(m, M_NOWAIT, maxsegs);
+ if (m != NULL) {
+ *m0 = m;
+ error = bus_dmamap_load_mbuf_sg(tag, dmap, m, segs, nsegs, 0);
+ } else
+ error = ENOBUFS;
+
+ if (error) {
+ m_freem(*m0);
+ *m0 = NULL;
+ } else
+ txq->vxtxq_sc->vmx_stats.vmst_collapsed++;
+
+ return (error);
+}
+
+/*
+ * Undo a vmxnet3_txq_load_mbuf(): unload the dmamap from the Tx tag.
+ */
+static void
+vmxnet3_txq_unload_mbuf(struct vmxnet3_txqueue *txq, bus_dmamap_t dmap)
+{
+ struct vmxnet3_txring *txr;
+
+ txr = &txq->vxtxq_cmd_ring;
+ bus_dmamap_unload(txr->vxtxr_txtag, dmap);
+}
+
+/*
+ * Encapsulate one mbuf chain into Tx descriptors.  Loads the chain,
+ * checks ring space and offload context, writes one descriptor per DMA
+ * segment (all with the inverted generation bit so the device cannot
+ * see a partial frame), fills VLAN/TSO/checksum fields in the first
+ * descriptor, and only then flips the first descriptor's generation
+ * bit to hand the frame to the device.  Kicks the doorbell once enough
+ * descriptors are pending.  Tx queue lock held by the caller.
+ *
+ * On error the mbuf is consumed (freed, *m0 NULLed) except for ENOSPC,
+ * where the caller may requeue it.
+ */
+static int
+vmxnet3_txq_encap(struct vmxnet3_txqueue *txq, struct mbuf **m0)
+{
+ struct vmxnet3_softc *sc;
+ struct ifnet *ifp;
+ struct vmxnet3_txring *txr;
+ struct vmxnet3_txdesc *txd, *sop;
+ struct mbuf *m;
+ bus_dmamap_t dmap;
+ bus_dma_segment_t segs[VMXNET3_TX_MAXSEGS];
+ int i, gen, nsegs, etype, proto, start, error;
+
+ sc = txq->vxtxq_sc;
+ ifp = sc->vmx_ifp;
+ start = 0;
+ txd = NULL;
+ txr = &txq->vxtxq_cmd_ring;
+ dmap = txr->vxtxr_txbuf[txr->vxtxr_head].vtxb_dmamap;
+
+ error = vmxnet3_txq_load_mbuf(txq, m0, dmap, segs, &nsegs);
+ if (error)
+ return (error);
+
+ m = *m0;
+ M_ASSERTPKTHDR(m);
+ KASSERT(nsegs <= VMXNET3_TX_MAXSEGS,
+ ("%s: mbuf %p with too many segments %d", __func__, m, nsegs));
+
+ if (VMXNET3_TXRING_AVAIL(txr) < nsegs) {
+ txq->vxtxq_stats.vtxrs_full++;
+ vmxnet3_txq_unload_mbuf(txq, dmap);
+ return (ENOSPC);
+ } else if (m->m_pkthdr.csum_flags & VMXNET3_CSUM_ALL_OFFLOAD) {
+ error = vmxnet3_txq_offload_ctx(m, &etype, &proto, &start);
+ if (error) {
+ txq->vxtxq_stats.vtxrs_offload_failed++;
+ vmxnet3_txq_unload_mbuf(txq, dmap);
+ m_freem(m);
+ *m0 = NULL;
+ return (error);
+ }
+ }
+
+ txr->vxtxr_txbuf[txr->vxtxr_head].vtxb_m = m = *m0;
+ sop = &txr->vxtxr_txd[txr->vxtxr_head];
+ gen = txr->vxtxr_gen ^ 1; /* Owned by cpu (yet) */
+
+ for (i = 0; i < nsegs; i++) {
+ txd = &txr->vxtxr_txd[txr->vxtxr_head];
+
+ txd->addr = segs[i].ds_addr;
+ txd->len = segs[i].ds_len;
+ txd->gen = gen;
+ txd->dtype = 0;
+ txd->offload_mode = VMXNET3_OM_NONE;
+ txd->offload_pos = 0;
+ txd->hlen = 0;
+ txd->eop = 0;
+ txd->compreq = 0;
+ txd->vtag_mode = 0;
+ txd->vtag = 0;
+
+ if (++txr->vxtxr_head == txr->vxtxr_ndesc) {
+ txr->vxtxr_head = 0;
+ txr->vxtxr_gen ^= 1;
+ }
+ /* Descriptors after the first carry the real generation bit. */
+ gen = txr->vxtxr_gen;
+ }
+ txd->eop = 1;
+ txd->compreq = 1;
+
+ if (m->m_flags & M_VLANTAG) {
+ sop->vtag_mode = 1;
+ sop->vtag = m->m_pkthdr.ether_vtag;
+ }
+
+ if (m->m_pkthdr.csum_flags & CSUM_TSO) {
+ sop->offload_mode = VMXNET3_OM_TSO;
+ sop->hlen = start;
+ sop->offload_pos = m->m_pkthdr.tso_segsz;
+ } else if (m->m_pkthdr.csum_flags & (VMXNET3_CSUM_OFFLOAD |
+ VMXNET3_CSUM_OFFLOAD_IPV6)) {
+ sop->offload_mode = VMXNET3_OM_CSUM;
+ sop->hlen = start;
+ sop->offload_pos = start + m->m_pkthdr.csum_data;
+ }
+
+ /* Finally, change the ownership. */
+ vmxnet3_barrier(sc, VMXNET3_BARRIER_WR);
+ sop->gen ^= 1;
+
+ if (++txq->vxtxq_ts->npending >= txq->vxtxq_ts->intr_threshold) {
+ txq->vxtxq_ts->npending = 0;
+ vmxnet3_write_bar0(sc, VMXNET3_BAR0_TXH(txq->vxtxq_id),
+ txr->vxtxr_head);
+ }
+
+ return (0);
+}
+
+/*
+ * Drain the ifnet send queue into Tx queue 0 while ring space remains
+ * (Tx queue lock held).  A frame that fails to encap is prepended back
+ * onto the send queue unless encap consumed it.  After queuing, flush
+ * any pending doorbell write and arm the Tx watchdog.
+ */
+static void
+vmxnet3_start_locked(struct ifnet *ifp)
+{
+ struct vmxnet3_softc *sc;
+ struct vmxnet3_txqueue *txq;
+ struct vmxnet3_txring *txr;
+ struct mbuf *m_head;
+ int tx;
+
+ sc = ifp->if_softc;
+ txq = &sc->vmx_txq[0];
+ txr = &txq->vxtxq_cmd_ring;
+ tx = 0;
+
+ VMXNET3_TXQ_LOCK_ASSERT(txq);
+
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
+ sc->vmx_link_active == 0)
+ return;
+
+ while (VMXNET3_TXRING_AVAIL(txr) > 0) {
+ IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
+ if (m_head == NULL)
+ break;
+
+ if (vmxnet3_txq_encap(txq, &m_head) != 0) {
+ if (m_head != NULL)
+ IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
+ break;
+ }
+
+ tx++;
+ ETHER_BPF_MTAP(ifp, m_head);
+ }
+
+ if (tx > 0) {
+ /* Ring the doorbell if encap left writes pending. */
+ if (txq->vxtxq_ts->npending > 0) {
+ txq->vxtxq_ts->npending = 0;
+ vmxnet3_write_bar0(sc, VMXNET3_BAR0_TXH(txq->vxtxq_id),
+ txr->vxtxr_head);
+ }
+ txq->vxtxq_watchdog = VMXNET3_WATCHDOG_TIMEOUT;
+ }
+}
+
+/*
+ * if_start entry point; only Tx queue 0 is used by this legacy path.
+ */
+static void
+vmxnet3_start(struct ifnet *ifp)
+{
+	struct vmxnet3_softc *sc = ifp->if_softc;
+	struct vmxnet3_txqueue *txq = &sc->vmx_txq[0];
+
+	VMXNET3_TXQ_LOCK(txq);
+	vmxnet3_start_locked(ifp);
+	VMXNET3_TXQ_UNLOCK(txq);
+}
+
+/*
+ * Add or remove one VLAN tag in the 4096-bit filter.  The tag indexes
+ * a 32-bit word (idx) and bit within it; the private bitvector is
+ * always updated so it can be replayed on reinit, while the device's
+ * shared copy is only touched (and the command issued) when hardware
+ * VLAN filtering is enabled.  Tags 0 and >4095 are ignored.
+ */
+static void
+vmxnet3_update_vlan_filter(struct vmxnet3_softc *sc, int add, uint16_t tag)
+{
+ struct ifnet *ifp;
+ int idx, bit;
+
+ ifp = sc->vmx_ifp;
+ idx = (tag >> 5) & 0x7F;
+ bit = tag & 0x1F;
+
+ if (tag == 0 || tag > 4095)
+ return;
+
+ VMXNET3_CORE_LOCK(sc);
+
+ /* Update our private VLAN bitvector. */
+ if (add)
+ sc->vmx_vlan_filter[idx] |= (1 << bit);
+ else
+ sc->vmx_vlan_filter[idx] &= ~(1 << bit);
+
+ if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
+ if (add)
+ sc->vmx_ds->vlan_filter[idx] |= (1 << bit);
+ else
+ sc->vmx_ds->vlan_filter[idx] &= ~(1 << bit);
+ vmxnet3_write_cmd(sc, VMXNET3_CMD_VLAN_FILTER);
+ }
+
+ VMXNET3_CORE_UNLOCK(sc);
+}
+
+/*
+ * VLAN config eventhandler: add the tag to the filter, but only for
+ * events targeting this interface.
+ */
+static void
+vmxnet3_register_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
+{
+
+	if (ifp->if_softc != arg)
+		return;
+	vmxnet3_update_vlan_filter(arg, 1, tag);
+}
+
+/*
+ * VLAN unconfig eventhandler: remove the tag from the filter, but only
+ * for events targeting this interface.
+ */
+static void
+vmxnet3_unregister_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
+{
+
+	if (ifp->if_softc != arg)
+		return;
+	vmxnet3_update_vlan_filter(arg, 0, tag);
+}
+
+/*
+ * Program the device receive mode and multicast table from the current
+ * ifnet flags and multicast membership.  If the membership exceeds
+ * VMXNET3_MULTICAST_MAX entries the table is abandoned in favor of
+ * ALLMULTI.  Issues SET_FILTER then SET_RXMODE so the table contents
+ * are picked up before the mode change takes effect.
+ */
+static void
+vmxnet3_set_rxfilter(struct vmxnet3_softc *sc)
+{
+ struct ifnet *ifp;
+ struct vmxnet3_driver_shared *ds;
+ struct ifmultiaddr *ifma;
+ u_int mode;
+
+ ifp = sc->vmx_ifp;
+ ds = sc->vmx_ds;
+
+ mode = VMXNET3_RXMODE_UCAST;
+ if (ifp->if_flags & IFF_BROADCAST)
+ mode |= VMXNET3_RXMODE_BCAST;
+ if (ifp->if_flags & IFF_PROMISC)
+ mode |= VMXNET3_RXMODE_PROMISC;
+ if (ifp->if_flags & IFF_ALLMULTI)
+ mode |= VMXNET3_RXMODE_ALLMULTI;
+ else {
+ int cnt = 0, overflow = 0;
+
+ /* Copy each link-layer multicast address into the table. */
+ if_maddr_rlock(ifp);
+ TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+ if (ifma->ifma_addr->sa_family != AF_LINK)
+ continue;
+ else if (cnt == VMXNET3_MULTICAST_MAX) {
+ overflow = 1;
+ break;
+ }
+
+ bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
+ &sc->vmx_mcast[cnt*ETHER_ADDR_LEN], ETHER_ADDR_LEN);
+ cnt++;
+ }
+ if_maddr_runlock(ifp);
+
+ if (overflow != 0) {
+ cnt = 0;
+ mode |= VMXNET3_RXMODE_ALLMULTI;
+ } else if (cnt > 0)
+ mode |= VMXNET3_RXMODE_MCAST;
+ ds->mcast_tablelen = cnt * ETHER_ADDR_LEN;
+ }
+
+ ds->rxmode = mode;
+
+ vmxnet3_write_cmd(sc, VMXNET3_CMD_SET_FILTER);
+ vmxnet3_write_cmd(sc, VMXNET3_CMD_SET_RXMODE);
+}
+
+/*
+ * Set a new MTU on the interface.  Returns EINVAL if the requested
+ * value is outside [VMXNET3_MIN_MTU, VMXNET3_MAX_MTU].  If the
+ * interface is running, clear IFF_DRV_RUNNING so the init path
+ * performs a full reinitialization with the new MTU.
+ */
+static int
+vmxnet3_change_mtu(struct vmxnet3_softc *sc, int mtu)
+{
+	struct ifnet *ifp;
+
+	ifp = sc->vmx_ifp;
+
+	if (mtu < VMXNET3_MIN_MTU || mtu > VMXNET3_MAX_MTU)
+		return (EINVAL);
+
+	ifp->if_mtu = mtu;
+
+	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+		vmxnet3_init_locked(sc);
+	}
+
+	return (0);
+}
+
+/*
+ * Interface ioctl handler.  All state changes are serialized by the
+ * core lock, which must not be held on return (asserted at the
+ * bottom); media ioctls are handed to ifmedia, everything
+ * unrecognized to ether_ioctl().
+ */
+static int
+vmxnet3_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+{
+	struct vmxnet3_softc *sc;
+	struct ifreq *ifr;
+	int reinit, mask, error;
+
+	sc = ifp->if_softc;
+	ifr = (struct ifreq *) data;
+	error = 0;
+
+	switch (cmd) {
+	case SIOCSIFMTU:
+		if (ifp->if_mtu != ifr->ifr_mtu) {
+			VMXNET3_CORE_LOCK(sc);
+			error = vmxnet3_change_mtu(sc, ifr->ifr_mtu);
+			VMXNET3_CORE_UNLOCK(sc);
+		}
+		break;
+
+	case SIOCSIFFLAGS:
+		VMXNET3_CORE_LOCK(sc);
+		if (ifp->if_flags & IFF_UP) {
+			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+				/*
+				 * Already running: only a PROMISC or
+				 * ALLMULTI change requires action.
+				 */
+				if ((ifp->if_flags ^ sc->vmx_if_flags) &
+				    (IFF_PROMISC | IFF_ALLMULTI)) {
+					vmxnet3_set_rxfilter(sc);
+				}
+			} else
+				vmxnet3_init_locked(sc);
+		} else {
+			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+				vmxnet3_stop(sc);
+		}
+		/* Remember the flags so the next delta can be computed. */
+		sc->vmx_if_flags = ifp->if_flags;
+		VMXNET3_CORE_UNLOCK(sc);
+		break;
+
+	case SIOCADDMULTI:
+	case SIOCDELMULTI:
+		VMXNET3_CORE_LOCK(sc);
+		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+			vmxnet3_set_rxfilter(sc);
+		VMXNET3_CORE_UNLOCK(sc);
+		break;
+
+	case SIOCSIFMEDIA:
+	case SIOCGIFMEDIA:
+		error = ifmedia_ioctl(ifp, ifr, &sc->vmx_media, cmd);
+		break;
+
+	case SIOCSIFCAP:
+		VMXNET3_CORE_LOCK(sc);
+		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
+
+		/* Tx-side offloads can be toggled without a restart. */
+		if (mask & IFCAP_TXCSUM)
+			ifp->if_capenable ^= IFCAP_TXCSUM;
+		if (mask & IFCAP_TXCSUM_IPV6)
+			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
+		if (mask & IFCAP_TSO4)
+			ifp->if_capenable ^= IFCAP_TSO4;
+		if (mask & IFCAP_TSO6)
+			ifp->if_capenable ^= IFCAP_TSO6;
+
+		if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO |
+		    IFCAP_VLAN_HWFILTER)) {
+			/* These Rx features require us to renegotiate. */
+			reinit = 1;
+
+			if (mask & IFCAP_RXCSUM)
+				ifp->if_capenable ^= IFCAP_RXCSUM;
+			if (mask & IFCAP_RXCSUM_IPV6)
+				ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
+			if (mask & IFCAP_LRO)
+				ifp->if_capenable ^= IFCAP_LRO;
+			if (mask & IFCAP_VLAN_HWFILTER)
+				ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
+		} else
+			reinit = 0;
+
+		if (mask & IFCAP_VLAN_HWTSO)
+			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
+		if (mask & IFCAP_VLAN_HWTAGGING)
+			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
+
+		/* Force a full reinit so the device re-reads its config. */
+		if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+			vmxnet3_init_locked(sc);
+		}
+
+		VMXNET3_CORE_UNLOCK(sc);
+		VLAN_CAPABILITIES(ifp);
+		break;
+
+	default:
+		error = ether_ioctl(ifp, cmd, data);
+		break;
+	}
+
+	VMXNET3_CORE_LOCK_ASSERT_NOTOWNED(sc);
+
+	return (error);
+}
+
+/*
+ * Check one Tx queue's watchdog.  The counter is re-armed to
+ * VMXNET3_WATCHDOG_TIMEOUT by the transmit path whenever packets are
+ * queued; each tick decrements it here.  Returns 1 (timeout) only
+ * when an armed counter reaches zero, 0 otherwise.
+ */
+static int
+vmxnet3_watchdog(struct vmxnet3_txqueue *txq)
+{
+	struct vmxnet3_softc *sc;
+
+	sc = txq->vxtxq_sc;
+
+	VMXNET3_TXQ_LOCK(txq);
+	/* Disarmed (== 0) or still counting down: no timeout. */
+	if (txq->vxtxq_watchdog == 0 || --txq->vxtxq_watchdog) {
+		VMXNET3_TXQ_UNLOCK(txq);
+		return (0);
+	}
+	VMXNET3_TXQ_UNLOCK(txq);
+
+	if_printf(sc->vmx_ifp, "watchdog timeout on queue %d\n",
+	    txq->vxtxq_id);
+	return (1);
+}
+
+/*
+ * Issue the GET_STATS command, prompting the host to refresh the
+ * statistics kept in the queue shared areas.
+ */
+static void
+vmxnet3_refresh_stats(struct vmxnet3_softc *sc)
+{
+
+	vmxnet3_write_cmd(sc, VMXNET3_CMD_GET_STATS);
+}
+
+/*
+ * Once-a-second callout, run with the core lock held: refresh the
+ * host statistics and poll every Tx queue's watchdog.  On a timeout
+ * the interface is fully reinitialized (and the callout is re-armed
+ * by the init path); otherwise the callout reschedules itself.
+ */
+static void
+vmxnet3_tick(void *xsc)
+{
+	struct vmxnet3_softc *sc;
+	struct ifnet *ifp;
+	int i, timedout;
+
+	sc = xsc;
+	ifp = sc->vmx_ifp;
+	timedout = 0;
+
+	VMXNET3_CORE_LOCK_ASSERT(sc);
+	vmxnet3_refresh_stats(sc);
+
+	for (i = 0; i < sc->vmx_ntxqueues; i++)
+		timedout |= vmxnet3_watchdog(&sc->vmx_txq[i]);
+
+	if (timedout != 0) {
+		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+		vmxnet3_init_locked(sc);
+	} else
+		callout_reset(&sc->vmx_tick, hz, vmxnet3_tick, sc);
+}
+
+/*
+ * Query the device for link state.  GET_LINK returns the link-up
+ * flag in bit 0 and the link speed in the upper 16 bits; the speed
+ * is cached in the softc as a side effect.  Returns 1 if up, 0 if
+ * down.
+ */
+static int
+vmxnet3_link_is_up(struct vmxnet3_softc *sc)
+{
+	uint32_t status;
+
+	/* Also update the link speed while here. */
+	status = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_LINK);
+	sc->vmx_link_speed = status >> 16;
+	return !!(status & 0x1);
+}
+
+/*
+ * Reconcile the cached link state with the device and notify the
+ * network stack only on an actual up/down transition.
+ */
+static void
+vmxnet3_link_status(struct vmxnet3_softc *sc)
+{
+	struct ifnet *ifp;
+	int link;
+
+	ifp = sc->vmx_ifp;
+	link = vmxnet3_link_is_up(sc);
+
+	if (link != 0 && sc->vmx_link_active == 0) {
+		sc->vmx_link_active = 1;
+		if_link_state_change(ifp, LINK_STATE_UP);
+	} else if (link == 0 && sc->vmx_link_active != 0) {
+		sc->vmx_link_active = 0;
+		if_link_state_change(ifp, LINK_STATE_DOWN);
+	}
+}
+
+/*
+ * ifmedia status callback.  The media is always reported as
+ * autoselected Ethernet; only the active/none flag reflects the
+ * device's actual link state.
+ */
+static void
+vmxnet3_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
+{
+	struct vmxnet3_softc *sc;
+
+	sc = ifp->if_softc;
+
+	ifmr->ifm_active = IFM_ETHER | IFM_AUTO;
+	ifmr->ifm_status = IFM_AVALID;
+
+	VMXNET3_CORE_LOCK(sc);
+	if (vmxnet3_link_is_up(sc) != 0)
+		ifmr->ifm_status |= IFM_ACTIVE;
+	else
+		ifmr->ifm_status |= IFM_NONE;
+	VMXNET3_CORE_UNLOCK(sc);
+}
+
+/*
+ * ifmedia change callback.  Media selection has no meaning for this
+ * paravirtual device, so the request is accepted and ignored.
+ */
+static int
+vmxnet3_media_change(struct ifnet *ifp)
+{
+
+	/* Ignore. */
+	return (0);
+}
+
+/*
+ * Write the link-level address to the device: bytes 0-3 packed
+ * little-endian into MACL, bytes 4-5 into MACH.
+ */
+static void
+vmxnet3_set_lladdr(struct vmxnet3_softc *sc)
+{
+	uint32_t ml, mh;
+
+	ml = sc->vmx_lladdr[0];
+	ml |= sc->vmx_lladdr[1] << 8;
+	ml |= sc->vmx_lladdr[2] << 16;
+	ml |= sc->vmx_lladdr[3] << 24;
+	vmxnet3_write_bar1(sc, VMXNET3_BAR1_MACL, ml);
+
+	mh = sc->vmx_lladdr[4];
+	mh |= sc->vmx_lladdr[5] << 8;
+	vmxnet3_write_bar1(sc, VMXNET3_BAR1_MACH, mh);
+}
+
+/*
+ * Read the link-level address from the device into the softc;
+ * the inverse of vmxnet3_set_lladdr().
+ */
+static void
+vmxnet3_get_lladdr(struct vmxnet3_softc *sc)
+{
+	uint32_t ml, mh;
+
+	ml = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_MACL);
+	mh = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_MACH);
+
+	sc->vmx_lladdr[0] = ml;
+	sc->vmx_lladdr[1] = ml >> 8;
+	sc->vmx_lladdr[2] = ml >> 16;
+	sc->vmx_lladdr[3] = ml >> 24;
+	sc->vmx_lladdr[4] = mh;
+	sc->vmx_lladdr[5] = mh >> 8;
+}
+
+/*
+ * Attach the per-Tx-queue sysctl nodes: a "txqN" node holding the
+ * driver-maintained counters, plus an "hstats" child exposing the
+ * statistics the host writes into the queue shared area.
+ */
+static void
+vmxnet3_setup_txq_sysctl(struct vmxnet3_txqueue *txq,
+    struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child)
+{
+	struct sysctl_oid *node, *txsnode;
+	struct sysctl_oid_list *list, *txslist;
+	struct vmxnet3_txq_stats *stats;
+	struct UPT1_TxStats *txstats;
+	char namebuf[16];
+
+	stats = &txq->vxtxq_stats;
+	txstats = &txq->vxtxq_ts->stats;
+
+	snprintf(namebuf, sizeof(namebuf), "txq%d", txq->vxtxq_id);
+	node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, CTLFLAG_RD,
+	    NULL, "Transmit Queue");
+	txq->vxtxq_sysctl = list = SYSCTL_CHILDREN(node);
+
+	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ringfull", CTLFLAG_RD,
+	    &stats->vtxrs_full, "Tx ring full");
+	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "offload_failed", CTLFLAG_RD,
+	    &stats->vtxrs_offload_failed, "Tx checksum offload failed");
+
+	/*
+	 * Add statistics reported by the host. These are updated once
+	 * per second.
+	 */
+	txsnode = SYSCTL_ADD_NODE(ctx, list, OID_AUTO, "hstats", CTLFLAG_RD,
+	    NULL, "Host Statistics");
+	txslist = SYSCTL_CHILDREN(txsnode);
+	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tso_packets", CTLFLAG_RD,
+	    &txstats->TSO_packets, "TSO packets");
+	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tso_bytes", CTLFLAG_RD,
+	    &txstats->TSO_bytes, "TSO bytes");
+	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "ucast_packets", CTLFLAG_RD,
+	    &txstats->ucast_packets, "Unicast packets");
+	/* "ucast_bytes" (not "unicast_bytes") for sibling consistency. */
+	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "ucast_bytes", CTLFLAG_RD,
+	    &txstats->ucast_bytes, "Unicast bytes");
+	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "mcast_packets", CTLFLAG_RD,
+	    &txstats->mcast_packets, "Multicast packets");
+	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "mcast_bytes", CTLFLAG_RD,
+	    &txstats->mcast_bytes, "Multicast bytes");
+	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "error", CTLFLAG_RD,
+	    &txstats->error, "Errors");
+	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "discard", CTLFLAG_RD,
+	    &txstats->discard, "Discards");
+}
+
+/*
+ * Attach the per-Rx-queue sysctl nodes: an "rxqN" node with an
+ * "hstats" child exposing the statistics the host writes into the
+ * queue shared area.
+ */
+static void
+vmxnet3_setup_rxq_sysctl(struct vmxnet3_rxqueue *rxq,
+    struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child)
+{
+	struct sysctl_oid *node, *rxsnode;
+	struct sysctl_oid_list *list, *rxslist;
+	struct vmxnet3_rxq_stats *stats;
+	struct UPT1_RxStats *rxstats;
+	char namebuf[16];
+
+	stats = &rxq->vxrxq_stats;
+	rxstats = &rxq->vxrxq_rs->stats;
+
+	snprintf(namebuf, sizeof(namebuf), "rxq%d", rxq->vxrxq_id);
+	node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, CTLFLAG_RD,
+	    NULL, "Receive Queue");
+	rxq->vxrxq_sysctl = list = SYSCTL_CHILDREN(node);
+
+	/*
+	 * Add statistics reported by the host. These are updated once
+	 * per second.
+	 */
+	rxsnode = SYSCTL_ADD_NODE(ctx, list, OID_AUTO, "hstats", CTLFLAG_RD,
+	    NULL, "Host Statistics");
+	rxslist = SYSCTL_CHILDREN(rxsnode);
+	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "lro_packets", CTLFLAG_RD,
+	    &rxstats->LRO_packets, "LRO packets");
+	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "lro_bytes", CTLFLAG_RD,
+	    &rxstats->LRO_bytes, "LRO bytes");
+	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "ucast_packets", CTLFLAG_RD,
+	    &rxstats->ucast_packets, "Unicast packets");
+	/* "ucast_bytes" (not "unicast_bytes") for sibling consistency. */
+	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "ucast_bytes", CTLFLAG_RD,
+	    &rxstats->ucast_bytes, "Unicast bytes");
+	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "mcast_packets", CTLFLAG_RD,
+	    &rxstats->mcast_packets, "Multicast packets");
+	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "mcast_bytes", CTLFLAG_RD,
+	    &rxstats->mcast_bytes, "Multicast bytes");
+	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "bcast_packets", CTLFLAG_RD,
+	    &rxstats->bcast_packets, "Broadcast packets");
+	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "bcast_bytes", CTLFLAG_RD,
+	    &rxstats->bcast_bytes, "Broadcast bytes");
+	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "nobuffer", CTLFLAG_RD,
+	    &rxstats->nobuffer, "No buffer");
+	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "error", CTLFLAG_RD,
+	    &rxstats->error, "Errors");
+}
+
+#ifdef VMXNET3_DEBUG_SYSCTL
+/*
+ * Debug-only sysctls (VMXNET3_DEBUG_SYSCTL): expose the raw ring
+ * indices and generation bits of every Tx and Rx queue under a
+ * "debug" child of each queue's node.
+ */
+static void
+vmxnet3_setup_debug_sysctl(struct vmxnet3_softc *sc,
+    struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child)
+{
+	struct sysctl_oid *node;
+	struct sysctl_oid_list *list;
+	int i;
+
+	for (i = 0; i < sc->vmx_ntxqueues; i++) {
+		struct vmxnet3_txqueue *txq = &sc->vmx_txq[i];
+
+		node = SYSCTL_ADD_NODE(ctx, txq->vxtxq_sysctl, OID_AUTO,
+		    "debug", CTLFLAG_RD, NULL, "");
+		list = SYSCTL_CHILDREN(node);
+
+		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd_head", CTLFLAG_RD,
+		    &txq->vxtxq_cmd_ring.vxtxr_head, 0, "");
+		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd_next", CTLFLAG_RD,
+		    &txq->vxtxq_cmd_ring.vxtxr_next, 0, "");
+		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd_ndesc", CTLFLAG_RD,
+		    &txq->vxtxq_cmd_ring.vxtxr_ndesc, 0, "");
+		SYSCTL_ADD_INT(ctx, list, OID_AUTO, "cmd_gen", CTLFLAG_RD,
+		    &txq->vxtxq_cmd_ring.vxtxr_gen, 0, "");
+		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "comp_next", CTLFLAG_RD,
+		    &txq->vxtxq_comp_ring.vxcr_next, 0, "");
+		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "comp_ndesc", CTLFLAG_RD,
+		    &txq->vxtxq_comp_ring.vxcr_ndesc, 0,"");
+		SYSCTL_ADD_INT(ctx, list, OID_AUTO, "comp_gen", CTLFLAG_RD,
+		    &txq->vxtxq_comp_ring.vxcr_gen, 0, "");
+	}
+
+	for (i = 0; i < sc->vmx_nrxqueues; i++) {
+		struct vmxnet3_rxqueue *rxq = &sc->vmx_rxq[i];
+
+		node = SYSCTL_ADD_NODE(ctx, rxq->vxrxq_sysctl, OID_AUTO,
+		    "debug", CTLFLAG_RD, NULL, "");
+		list = SYSCTL_CHILDREN(node);
+
+		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd0_fill", CTLFLAG_RD,
+		    &rxq->vxrxq_cmd_ring[0].vxrxr_fill, 0, "");
+		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd0_ndesc", CTLFLAG_RD,
+		    &rxq->vxrxq_cmd_ring[0].vxrxr_ndesc, 0, "");
+		SYSCTL_ADD_INT(ctx, list, OID_AUTO, "cmd0_gen", CTLFLAG_RD,
+		    &rxq->vxrxq_cmd_ring[0].vxrxr_gen, 0, "");
+		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd1_fill", CTLFLAG_RD,
+		    &rxq->vxrxq_cmd_ring[1].vxrxr_fill, 0, "");
+		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd1_ndesc", CTLFLAG_RD,
+		    &rxq->vxrxq_cmd_ring[1].vxrxr_ndesc, 0, "");
+		SYSCTL_ADD_INT(ctx, list, OID_AUTO, "cmd1_gen", CTLFLAG_RD,
+		    &rxq->vxrxq_cmd_ring[1].vxrxr_gen, 0, "");
+		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "comp_next", CTLFLAG_RD,
+		    &rxq->vxrxq_comp_ring.vxcr_next, 0, "");
+		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "comp_ndesc", CTLFLAG_RD,
+		    &rxq->vxrxq_comp_ring.vxcr_ndesc, 0,"");
+		SYSCTL_ADD_INT(ctx, list, OID_AUTO, "comp_gen", CTLFLAG_RD,
+		    &rxq->vxrxq_comp_ring.vxcr_gen, 0, "");
+	}
+}
+#endif
+
+/*
+ * Attach the sysctl nodes for every Tx and Rx queue, plus the debug
+ * nodes when compiled with VMXNET3_DEBUG_SYSCTL.
+ */
+static void
+vmxnet3_setup_queue_sysctl(struct vmxnet3_softc *sc,
+    struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child)
+{
+	int i;
+
+	for (i = 0; i < sc->vmx_ntxqueues; i++)
+		vmxnet3_setup_txq_sysctl(&sc->vmx_txq[i], ctx, child);
+	for (i = 0; i < sc->vmx_nrxqueues; i++)
+		vmxnet3_setup_rxq_sysctl(&sc->vmx_rxq[i], ctx, child);
+
+#ifdef VMXNET3_DEBUG_SYSCTL
+	vmxnet3_setup_debug_sysctl(sc, ctx, child);
+#endif
+}
+
+/*
+ * Build the device's sysctl tree: queue counts, the driver-wide
+ * statistics counters, and the per-queue subtrees.  All nodes hang
+ * off the device's own sysctl context, so they are torn down
+ * automatically on detach.
+ */
+static void
+vmxnet3_setup_sysctl(struct vmxnet3_softc *sc)
+{
+	device_t dev;
+	struct vmxnet3_statistics *stats;
+	struct sysctl_ctx_list *ctx;
+	struct sysctl_oid *tree;
+	struct sysctl_oid_list *child;
+
+	dev = sc->vmx_dev;
+	ctx = device_get_sysctl_ctx(dev);
+	tree = device_get_sysctl_tree(dev);
+	child = SYSCTL_CHILDREN(tree);
+
+	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "ntxqueues", CTLFLAG_RD,
+	    &sc->vmx_ntxqueues, 0, "Number of Tx queues");
+	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "nrxqueues", CTLFLAG_RD,
+	    &sc->vmx_nrxqueues, 0, "Number of Rx queues");
+
+	stats = &sc->vmx_stats;
+	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "collapsed", CTLFLAG_RD,
+	    &stats->vmst_collapsed, 0, "Tx mbuf chains collapsed");
+	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "mgetcl_failed", CTLFLAG_RD,
+	    &stats->vmst_mgetcl_failed, 0, "mbuf cluster allocation failed");
+	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "mbuf_load_failed", CTLFLAG_RD,
+	    &stats->vmst_mbuf_load_failed, 0, "mbuf load segments failed");
+
+	vmxnet3_setup_queue_sysctl(sc, ctx, child);
+}
+
+/* Write a 32-bit value to a BAR0 (data path) register. */
+static void
+vmxnet3_write_bar0(struct vmxnet3_softc *sc, bus_size_t r, uint32_t v)
+{
+
+	bus_space_write_4(sc->vmx_iot0, sc->vmx_ioh0, r, v);
+}
+
+/* Read a 32-bit value from a BAR1 (control path) register. */
+static uint32_t
+vmxnet3_read_bar1(struct vmxnet3_softc *sc, bus_size_t r)
+{
+
+	return (bus_space_read_4(sc->vmx_iot1, sc->vmx_ioh1, r));
+}
+
+/* Write a 32-bit value to a BAR1 (control path) register. */
+static void
+vmxnet3_write_bar1(struct vmxnet3_softc *sc, bus_size_t r, uint32_t v)
+{
+
+	bus_space_write_4(sc->vmx_iot1, sc->vmx_ioh1, r, v);
+}
+
+/* Issue a VMXNET3_CMD_* command by writing it to the command register. */
+static void
+vmxnet3_write_cmd(struct vmxnet3_softc *sc, uint32_t cmd)
+{
+
+	vmxnet3_write_bar1(sc, VMXNET3_BAR1_CMD, cmd);
+}
+
+/*
+ * Issue a command and return its result, which the device places in
+ * the command register.  The bus-space barrier keeps the write and
+ * the subsequent read from being reordered.
+ */
+static uint32_t
+vmxnet3_read_cmd(struct vmxnet3_softc *sc, uint32_t cmd)
+{
+
+	vmxnet3_write_cmd(sc, cmd);
+	bus_space_barrier(sc->vmx_iot1, sc->vmx_ioh1, 0, 0,
+	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
+	return (vmxnet3_read_bar1(sc, VMXNET3_BAR1_CMD));
+}
+
+/* Unmask (enable) the given interrupt via its IMASK register. */
+static void
+vmxnet3_enable_intr(struct vmxnet3_softc *sc, int irq)
+{
+
+	vmxnet3_write_bar0(sc, VMXNET3_BAR0_IMASK(irq), 0);
+}
+
+/* Mask (disable) the given interrupt via its IMASK register. */
+static void
+vmxnet3_disable_intr(struct vmxnet3_softc *sc, int irq)
+{
+
+	vmxnet3_write_bar0(sc, VMXNET3_BAR0_IMASK(irq), 1);
+}
+
+/*
+ * Enable all interrupts: clear the global disable bit in the shared
+ * area, then unmask each vector individually.
+ */
+static void
+vmxnet3_enable_all_intrs(struct vmxnet3_softc *sc)
+{
+	int i;
+
+	sc->vmx_ds->ictrl &= ~VMXNET3_ICTRL_DISABLE_ALL;
+	for (i = 0; i < sc->vmx_nintrs; i++)
+		vmxnet3_enable_intr(sc, i);
+}
+
+/*
+ * Disable all interrupts: set the global disable bit in the shared
+ * area and mask each vector individually.
+ */
+static void
+vmxnet3_disable_all_intrs(struct vmxnet3_softc *sc)
+{
+	int i;
+
+	sc->vmx_ds->ictrl |= VMXNET3_ICTRL_DISABLE_ALL;
+	for (i = 0; i < sc->vmx_nintrs; i++)
+		vmxnet3_disable_intr(sc, i);
+}
+
+/*
+ * bus_dmamap_load() callback: store the (single) segment's bus
+ * address into the caller-provided bus_addr_t.  On error the
+ * destination is left untouched.
+ */
+static void
+vmxnet3_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
+{
+	bus_addr_t *baddr = arg;
+
+	if (error == 0)
+		*baddr = segs->ds_addr;
+}
+
+/*
+ * Allocate 'size' bytes of zeroed, bus-visible DMA memory with the
+ * given alignment as one contiguous segment, filling in 'dma' with
+ * the tag, map, and virtual/physical addresses.  Returns 0 or a
+ * bus_dma error; on failure any partially-created resources are
+ * released via vmxnet3_dma_free().
+ */
+static int
+vmxnet3_dma_malloc(struct vmxnet3_softc *sc, bus_size_t size, bus_size_t align,
+    struct vmxnet3_dma_alloc *dma)
+{
+	device_t dev;
+	int error;
+
+	dev = sc->vmx_dev;
+	bzero(dma, sizeof(struct vmxnet3_dma_alloc));
+
+	error = bus_dma_tag_create(bus_get_dma_tag(dev),
+	    align, 0,		/* alignment, bounds */
+	    BUS_SPACE_MAXADDR,	/* lowaddr */
+	    BUS_SPACE_MAXADDR,	/* highaddr */
+	    NULL, NULL,		/* filter, filterarg */
+	    size,		/* maxsize */
+	    1,			/* nsegments */
+	    size,		/* maxsegsize */
+	    BUS_DMA_ALLOCNOW,	/* flags */
+	    NULL,		/* lockfunc */
+	    NULL,		/* lockfuncarg */
+	    &dma->dma_tag);
+	if (error) {
+		device_printf(dev, "bus_dma_tag_create failed: %d\n", error);
+		goto fail;
+	}
+
+	error = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
+	    BUS_DMA_ZERO | BUS_DMA_NOWAIT, &dma->dma_map);
+	if (error) {
+		device_printf(dev, "bus_dmamem_alloc failed: %d\n", error);
+		goto fail;
+	}
+
+	/* The callback records the physical address in dma_paddr. */
+	error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
+	    size, vmxnet3_dmamap_cb, &dma->dma_paddr, BUS_DMA_NOWAIT);
+	if (error) {
+		device_printf(dev, "bus_dmamap_load failed: %d\n", error);
+		goto fail;
+	}
+
+	dma->dma_size = size;
+
+fail:
+	if (error)
+		vmxnet3_dma_free(sc, dma);
+
+	return (error);
+}
+
+/*
+ * Release a vmxnet3_dma_alloc, tolerating partially-initialized
+ * state so it can serve as the error path of vmxnet3_dma_malloc().
+ * The descriptor is re-zeroed so a double free is harmless.
+ */
+static void
+vmxnet3_dma_free(struct vmxnet3_softc *sc, struct vmxnet3_dma_alloc *dma)
+{
+
+	if (dma->dma_tag != NULL) {
+		if (dma->dma_map != NULL) {
+			/* Finish any outstanding DMA before unloading. */
+			bus_dmamap_sync(dma->dma_tag, dma->dma_map,
+			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+			bus_dmamap_unload(dma->dma_tag, dma->dma_map);
+		}
+
+		if (dma->dma_vaddr != NULL) {
+			bus_dmamem_free(dma->dma_tag, dma->dma_vaddr,
+			    dma->dma_map);
+		}
+
+		bus_dma_tag_destroy(dma->dma_tag);
+	}
+	bzero(dma, sizeof(struct vmxnet3_dma_alloc));
+}
+
+/*
+ * Since this is a purely paravirtualized device, we do not have
+ * to worry about DMA coherency. But at times, we must make sure
+ * both the compiler and CPU do not reorder memory operations.
+ */
+static inline void
+vmxnet3_barrier(struct vmxnet3_softc *sc, vmxnet3_barrier_t type)
+{
+
+	switch (type) {
+	case VMXNET3_BARRIER_RD:
+		rmb();		/* order reads */
+		break;
+	case VMXNET3_BARRIER_WR:
+		wmb();		/* order writes */
+		break;
+	case VMXNET3_BARRIER_RDWR:
+		mb();		/* full read/write barrier */
+		break;
+	default:
+		panic("%s: bad barrier type %d", __func__, type);
+	}
+}
diff --git a/sys/dev/vmware/vmxnet3/if_vmxreg.h b/sys/dev/vmware/vmxnet3/if_vmxreg.h
new file mode 100644
index 0000000..8b554b5
--- /dev/null
+++ b/sys/dev/vmware/vmxnet3/if_vmxreg.h
@@ -0,0 +1,316 @@
+/*-
+ * Copyright (c) 2013 Tsubai Masanari
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ * $OpenBSD: src/sys/dev/pci/if_vmxreg.h,v 1.2 2013/06/12 01:07:33 uebayasi Exp $
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _IF_VMXREG_H
+#define _IF_VMXREG_H
+
+struct UPT1_TxStats {
+ uint64_t TSO_packets;
+ uint64_t TSO_bytes;
+ uint64_t ucast_packets;
+ uint64_t ucast_bytes;
+ uint64_t mcast_packets;
+ uint64_t mcast_bytes;
+ uint64_t bcast_packets;
+ uint64_t bcast_bytes;
+ uint64_t error;
+ uint64_t discard;
+} __packed;
+
+struct UPT1_RxStats {
+ uint64_t LRO_packets;
+ uint64_t LRO_bytes;
+ uint64_t ucast_packets;
+ uint64_t ucast_bytes;
+ uint64_t mcast_packets;
+ uint64_t mcast_bytes;
+ uint64_t bcast_packets;
+ uint64_t bcast_bytes;
+ uint64_t nobuffer;
+ uint64_t error;
+} __packed;
+
+/* Interrupt moderation levels */
+#define UPT1_IMOD_NONE 0 /* No moderation */
+#define UPT1_IMOD_HIGHEST 7 /* Least interrupts */
+#define UPT1_IMOD_ADAPTIVE 8 /* Adaptive interrupt moderation */
+
+/* Hardware features */
+#define UPT1_F_CSUM 0x0001 /* Rx checksum verification */
+#define UPT1_F_RSS 0x0002 /* Receive side scaling */
+#define UPT1_F_VLAN 0x0004 /* VLAN tag stripping */
+#define UPT1_F_LRO 0x0008 /* Large receive offloading */
+
+#define VMXNET3_BAR0_IMASK(irq) (0x000 + (irq) * 8) /* Interrupt mask */
+#define VMXNET3_BAR0_TXH(q) (0x600 + (q) * 8) /* Tx head */
+#define VMXNET3_BAR0_RXH1(q) (0x800 + (q) * 8) /* Ring1 Rx head */
+#define VMXNET3_BAR0_RXH2(q) (0xA00 + (q) * 8) /* Ring2 Rx head */
+#define VMXNET3_BAR1_VRRS 0x000 /* VMXNET3 revision report selection */
+#define VMXNET3_BAR1_UVRS 0x008 /* UPT version report selection */
+#define VMXNET3_BAR1_DSL 0x010 /* Driver shared address low */
+#define VMXNET3_BAR1_DSH 0x018 /* Driver shared address high */
+#define VMXNET3_BAR1_CMD 0x020 /* Command */
+#define VMXNET3_BAR1_MACL 0x028 /* MAC address low */
+#define VMXNET3_BAR1_MACH 0x030 /* MAC address high */
+#define VMXNET3_BAR1_INTR 0x038 /* Interrupt status */
+#define VMXNET3_BAR1_EVENT 0x040 /* Event status */
+
+#define VMXNET3_CMD_ENABLE 0xCAFE0000 /* Enable VMXNET3 */
+#define VMXNET3_CMD_DISABLE 0xCAFE0001 /* Disable VMXNET3 */
+#define VMXNET3_CMD_RESET 0xCAFE0002 /* Reset device */
+#define VMXNET3_CMD_SET_RXMODE 0xCAFE0003 /* Set interface flags */
+#define VMXNET3_CMD_SET_FILTER 0xCAFE0004 /* Set address filter */
+#define VMXNET3_CMD_VLAN_FILTER 0xCAFE0005 /* Set VLAN filter */
+#define VMXNET3_CMD_GET_STATUS 0xF00D0000 /* Get queue errors */
+#define VMXNET3_CMD_GET_STATS 0xF00D0001 /* Get queue statistics */
+#define VMXNET3_CMD_GET_LINK 0xF00D0002 /* Get link status */
+#define VMXNET3_CMD_GET_MACL 0xF00D0003 /* Get MAC address low */
+#define VMXNET3_CMD_GET_MACH 0xF00D0004 /* Get MAC address high */
+#define VMXNET3_CMD_GET_INTRCFG 0xF00D0008 /* Get interrupt config */
+
+#define VMXNET3_DMADESC_ALIGN 128
+#define VMXNET3_INIT_GEN 1
+
+struct vmxnet3_txdesc {
+ uint64_t addr;
+
+ uint32_t len:14;
+ uint32_t gen:1; /* Generation */
+ uint32_t pad1:1;
+ uint32_t dtype:1; /* Descriptor type */
+ uint32_t pad2:1;
+ uint32_t offload_pos:14; /* Offloading position */
+
+ uint32_t hlen:10; /* Header len */
+ uint32_t offload_mode:2; /* Offloading mode */
+ uint32_t eop:1; /* End of packet */
+ uint32_t compreq:1; /* Completion request */
+ uint32_t pad3:1;
+ uint32_t vtag_mode:1; /* VLAN tag insertion mode */
+ uint32_t vtag:16; /* VLAN tag */
+} __packed;
+
+/* Offloading modes */
+#define VMXNET3_OM_NONE 0
+#define VMXNET3_OM_CSUM 2
+#define VMXNET3_OM_TSO 3
+
+struct vmxnet3_txcompdesc {
+ uint32_t eop_idx:12; /* EOP index in Tx ring */
+ uint32_t pad1:20;
+
+ uint32_t pad2:32;
+ uint32_t pad3:32;
+
+ uint32_t rsvd:24;
+ uint32_t type:7;
+ uint32_t gen:1;
+} __packed;
+
+struct vmxnet3_rxdesc {
+ uint64_t addr;
+
+ uint32_t len:14;
+ uint32_t btype:1; /* Buffer type */
+ uint32_t dtype:1; /* Descriptor type */
+ uint32_t rsvd:15;
+ uint32_t gen:1;
+
+ uint32_t pad1:32;
+} __packed;
+
+/* Buffer types */
+#define VMXNET3_BTYPE_HEAD 0 /* Head only */
+#define VMXNET3_BTYPE_BODY 1 /* Body only */
+
+struct vmxnet3_rxcompdesc {
+ uint32_t rxd_idx:12; /* Rx descriptor index */
+ uint32_t pad1:2;
+ uint32_t eop:1; /* End of packet */
+ uint32_t sop:1; /* Start of packet */
+ uint32_t qid:10;
+ uint32_t rss_type:4;
+ uint32_t no_csum:1; /* No checksum calculated */
+ uint32_t pad2:1;
+
+ uint32_t rss_hash:32; /* RSS hash value */
+
+ uint32_t len:14;
+ uint32_t error:1;
+ uint32_t vlan:1; /* 802.1Q VLAN frame */
+ uint32_t vtag:16; /* VLAN tag */
+
+ uint32_t csum:16;
+ uint32_t csum_ok:1; /* TCP/UDP checksum ok */
+ uint32_t udp:1;
+ uint32_t tcp:1;
+ uint32_t ipcsum_ok:1; /* IP checksum OK */
+ uint32_t ipv6:1;
+ uint32_t ipv4:1;
+ uint32_t fragment:1; /* IP fragment */
+ uint32_t fcs:1; /* Frame CRC correct */
+ uint32_t type:7;
+ uint32_t gen:1;
+} __packed;
+
+#define VMXNET3_REV1_MAGIC 0XBABEFEE1
+
+#define VMXNET3_GOS_UNKNOWN 0x00
+#define VMXNET3_GOS_LINUX 0x04
+#define VMXNET3_GOS_WINDOWS 0x08
+#define VMXNET3_GOS_SOLARIS 0x0C
+#define VMXNET3_GOS_FREEBSD 0x10
+#define VMXNET3_GOS_PXE 0x14
+
+#define VMXNET3_GOS_32BIT 0x01
+#define VMXNET3_GOS_64BIT 0x02
+
+#define VMXNET3_MAX_TX_QUEUES 8
+#define VMXNET3_MAX_RX_QUEUES 16
+#define VMXNET3_MAX_INTRS \
+ (VMXNET3_MAX_TX_QUEUES + VMXNET3_MAX_RX_QUEUES + 1)
+
+#define VMXNET3_ICTRL_DISABLE_ALL 0x01
+
+#define VMXNET3_RXMODE_UCAST 0x01
+#define VMXNET3_RXMODE_MCAST 0x02
+#define VMXNET3_RXMODE_BCAST 0x04
+#define VMXNET3_RXMODE_ALLMULTI 0x08
+#define VMXNET3_RXMODE_PROMISC 0x10
+
+#define VMXNET3_EVENT_RQERROR 0x01
+#define VMXNET3_EVENT_TQERROR 0x02
+#define VMXNET3_EVENT_LINK 0x04
+#define VMXNET3_EVENT_DIC 0x08
+#define VMXNET3_EVENT_DEBUG 0x10
+
+#define VMXNET3_MIN_MTU 60
+#define VMXNET3_MAX_MTU 9000
+
+/* Interrupt mask mode. */
+#define VMXNET3_IMM_AUTO 0x00
+#define VMXNET3_IMM_ACTIVE 0x01
+#define VMXNET3_IMM_LAZY 0x02
+
+/* Interrupt type. */
+#define VMXNET3_IT_AUTO 0x00
+#define VMXNET3_IT_LEGACY 0x01
+#define VMXNET3_IT_MSI 0x02
+#define VMXNET3_IT_MSIX 0x03
+
+struct vmxnet3_driver_shared {
+ uint32_t magic;
+ uint32_t pad1;
+
+ /* Misc. control */
+ uint32_t version; /* Driver version */
+ uint32_t guest; /* Guest OS */
+ uint32_t vmxnet3_revision; /* Supported VMXNET3 revision */
+ uint32_t upt_version; /* Supported UPT version */
+ uint64_t upt_features;
+ uint64_t driver_data;
+ uint64_t queue_shared;
+ uint32_t driver_data_len;
+ uint32_t queue_shared_len;
+ uint32_t mtu;
+ uint16_t nrxsg_max;
+ uint8_t ntxqueue;
+ uint8_t nrxqueue;
+ uint32_t reserved1[4];
+
+ /* Interrupt control */
+ uint8_t automask;
+ uint8_t nintr;
+ uint8_t evintr;
+ uint8_t modlevel[VMXNET3_MAX_INTRS];
+ uint32_t ictrl;
+ uint32_t reserved2[2];
+
+ /* Receive filter parameters */
+ uint32_t rxmode;
+ uint16_t mcast_tablelen;
+ uint16_t pad2;
+ uint64_t mcast_table;
+ uint32_t vlan_filter[4096 / 32];
+
+ struct {
+ uint32_t version;
+ uint32_t len;
+ uint64_t paddr;
+ } rss, pm, plugin;
+
+ uint32_t event;
+ uint32_t reserved3[5];
+} __packed;
+
+struct vmxnet3_txq_shared {
+ /* Control */
+ uint32_t npending;
+ uint32_t intr_threshold;
+ uint64_t reserved1;
+
+ /* Config */
+ uint64_t cmd_ring;
+ uint64_t data_ring;
+ uint64_t comp_ring;
+ uint64_t driver_data;
+ uint64_t reserved2;
+ uint32_t cmd_ring_len;
+ uint32_t data_ring_len;
+ uint32_t comp_ring_len;
+ uint32_t driver_data_len;
+ uint8_t intr_idx;
+ uint8_t pad1[7];
+
+ /* Queue status */
+ uint8_t stopped;
+ uint8_t pad2[3];
+ uint32_t error;
+
+ struct UPT1_TxStats stats;
+
+ uint8_t pad3[88];
+} __packed;
+
+struct vmxnet3_rxq_shared {
+ uint8_t update_rxhead;
+ uint8_t pad1[7];
+ uint64_t reserved1;
+
+ uint64_t cmd_ring[2];
+ uint64_t comp_ring;
+ uint64_t driver_data;
+ uint64_t reserved2;
+ uint32_t cmd_ring_len[2];
+ uint32_t comp_ring_len;
+ uint32_t driver_data_len;
+ uint8_t intr_idx;
+ uint8_t pad2[7];
+
+ uint8_t stopped;
+ uint8_t pad3[3];
+ uint32_t error;
+
+ struct UPT1_RxStats stats;
+
+ uint8_t pad4[88];
+} __packed;
+
+#endif /* _IF_VMXREG_H */
diff --git a/sys/dev/vmware/vmxnet3/if_vmxvar.h b/sys/dev/vmware/vmxnet3/if_vmxvar.h
new file mode 100644
index 0000000..90fe761
--- /dev/null
+++ b/sys/dev/vmware/vmxnet3/if_vmxvar.h
@@ -0,0 +1,319 @@
+/*-
+ * Copyright (c) 2013 Tsubai Masanari
+ * Copyright (c) 2013 Bryan Venteicher <bryanv@FreeBSD.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _IF_VMXVAR_H
+#define _IF_VMXVAR_H
+
+struct vmxnet3_softc;
+
+struct vmxnet3_dma_alloc {
+ bus_addr_t dma_paddr;
+ caddr_t dma_vaddr;
+ bus_dma_tag_t dma_tag;
+ bus_dmamap_t dma_map;
+ bus_size_t dma_size;
+};
+
+/*
+ * The number of Rx/Tx queues this driver supports.
+ */
+#define VMXNET3_RX_QUEUES 1
+#define VMXNET3_TX_QUEUES 1
+
+/*
+ * The number of Rx rings in each Rx queue.
+ */
+#define VMXNET3_RXRINGS_PERQ 2
+
+/*
+ * The maximum number of descriptors in each Rx/Tx ring.
+ */
+#define VMXNET3_MAX_TX_NDESC 512
+#define VMXNET3_MAX_RX_NDESC 256
+#define VMXNET3_MAX_TX_NCOMPDESC VMXNET3_MAX_TX_NDESC
+#define VMXNET3_MAX_RX_NCOMPDESC \
+ (VMXNET3_MAX_RX_NDESC * VMXNET3_RXRINGS_PERQ)
+
+/*
+ * The maximum number of Rx segments we accept. When LRO is enabled,
+ * this allows us to receive the maximum sized frame with one MCLBYTES
+ * cluster followed by 16 MJUMPAGESIZE clusters.
+ */
+#define VMXNET3_MAX_RX_SEGS 17
+
+struct vmxnet3_txbuf {
+ bus_dmamap_t vtxb_dmamap;
+ struct mbuf *vtxb_m;
+};
+
+struct vmxnet3_txring {
+ struct vmxnet3_txbuf *vxtxr_txbuf;
+ u_int vxtxr_head;
+ u_int vxtxr_next;
+ u_int vxtxr_ndesc;
+ int vxtxr_gen;
+ bus_dma_tag_t vxtxr_txtag;
+ struct vmxnet3_txdesc *vxtxr_txd;
+ struct vmxnet3_dma_alloc vxtxr_dma;
+};
+
+/*
+ * Number of free descriptors in a Tx command ring.  The "- 1"
+ * reserves one slot so a full ring can be distinguished from an
+ * empty one; a negative intermediate result means the free region
+ * wraps around the end of the ring.
+ */
+static inline int
+VMXNET3_TXRING_AVAIL(struct vmxnet3_txring *txr)
+{
+	int avail = txr->vxtxr_next - txr->vxtxr_head - 1;
+	return (avail < 0 ? txr->vxtxr_ndesc + avail : avail);
+}
+
+struct vmxnet3_rxbuf {
+ bus_dmamap_t vrxb_dmamap;
+ struct mbuf *vrxb_m;
+};
+
+struct vmxnet3_rxring {
+ struct vmxnet3_rxbuf *vxrxr_rxbuf;
+ struct vmxnet3_rxdesc *vxrxr_rxd;
+ u_int vxrxr_fill;
+ u_int vxrxr_ndesc;
+ int vxrxr_gen;
+ int vxrxr_rid;
+ bus_dma_tag_t vxrxr_rxtag;
+ struct vmxnet3_dma_alloc vxrxr_dma;
+ bus_dmamap_t vxrxr_spare_dmap;
+};
+
+/*
+ * Advance an Rx ring's fill index, wrapping at the end of the ring
+ * and toggling the generation bit when the wrap occurs.
+ */
+static inline void
+vmxnet3_rxr_increment_fill(struct vmxnet3_rxring *rxr)
+{
+
+	if (++rxr->vxrxr_fill == rxr->vxrxr_ndesc) {
+		rxr->vxrxr_fill = 0;
+		rxr->vxrxr_gen ^= 1;
+	}
+}
+
+struct vmxnet3_comp_ring {
+ union {
+ struct vmxnet3_txcompdesc *txcd;
+ struct vmxnet3_rxcompdesc *rxcd;
+ } vxcr_u;
+ u_int vxcr_next;
+ u_int vxcr_ndesc;
+ int vxcr_gen;
+ struct vmxnet3_dma_alloc vxcr_dma;
+};
+
+struct vmxnet3_txq_stats {
+ uint64_t vtxrs_full;
+ uint64_t vtxrs_offload_failed;
+};
+
+struct vmxnet3_txqueue {
+ struct mtx vxtxq_mtx;
+ struct vmxnet3_softc *vxtxq_sc;
+ int vxtxq_id;
+ int vxtxq_intr_idx;
+ int vxtxq_watchdog;
+ struct vmxnet3_txring vxtxq_cmd_ring;
+ struct vmxnet3_comp_ring vxtxq_comp_ring;
+ struct vmxnet3_txq_stats vxtxq_stats;
+ struct vmxnet3_txq_shared *vxtxq_ts;
+ struct sysctl_oid_list *vxtxq_sysctl;
+ char vxtxq_name[16];
+};
+
+#define VMXNET3_TXQ_LOCK(_txq) mtx_lock(&(_txq)->vxtxq_mtx)
+#define VMXNET3_TXQ_TRYLOCK(_txq) mtx_trylock(&(_txq)->vxtxq_mtx)
+#define VMXNET3_TXQ_UNLOCK(_txq) mtx_unlock(&(_txq)->vxtxq_mtx)
+#define VMXNET3_TXQ_LOCK_ASSERT(_txq) \
+ mtx_assert(&(_txq)->vxtxq_mtx, MA_OWNED)
+#define VMXNET3_TXQ_LOCK_ASSERT_NOTOWNED(_txq) \
+ mtx_assert(&(_txq)->vxtxq_mtx, MA_NOTOWNED)
+
+struct vmxnet3_rxq_stats {
+
+};
+
+struct vmxnet3_rxqueue {
+ struct mtx vxrxq_mtx;
+ struct vmxnet3_softc *vxrxq_sc;
+ int vxrxq_id;
+ int vxrxq_intr_idx;
+ struct vmxnet3_rxring vxrxq_cmd_ring[VMXNET3_RXRINGS_PERQ];
+ struct vmxnet3_comp_ring vxrxq_comp_ring;
+ struct vmxnet3_rxq_stats vxrxq_stats;
+ struct vmxnet3_rxq_shared *vxrxq_rs;
+ struct sysctl_oid_list *vxrxq_sysctl;
+ char vxrxq_name[16];
+};
+
+#define VMXNET3_RXQ_LOCK(_rxq) mtx_lock(&(_rxq)->vxrxq_mtx)
+#define VMXNET3_RXQ_UNLOCK(_rxq) mtx_unlock(&(_rxq)->vxrxq_mtx)
+#define VMXNET3_RXQ_LOCK_ASSERT(_rxq) \
+ mtx_assert(&(_rxq)->vxrxq_mtx, MA_OWNED)
+#define VMXNET3_RXQ_LOCK_ASSERT_NOTOWNED(_rxq) \
+ mtx_assert(&(_rxq)->vxrxq_mtx, MA_NOTOWNED)
+
+struct vmxnet3_statistics {
+ uint32_t vmst_collapsed;
+ uint32_t vmst_mgetcl_failed;
+ uint32_t vmst_mbuf_load_failed;
+
+};
+
+struct vmxnet3_interrupt {
+ struct resource *vmxi_irq;
+ int vmxi_rid;
+ void *vmxi_handler;
+};
+
+struct vmxnet3_softc {
+ device_t vmx_dev;
+ struct ifnet *vmx_ifp;
+ struct vmxnet3_driver_shared *vmx_ds;
+ uint32_t vmx_flags;
+#define VMXNET3_FLAG_NO_MSIX 0x0001
+
+ struct vmxnet3_rxqueue *vmx_rxq;
+ struct vmxnet3_txqueue *vmx_txq;
+
+ struct resource *vmx_res0;
+ bus_space_tag_t vmx_iot0;
+ bus_space_handle_t vmx_ioh0;
+ struct resource *vmx_res1;
+ bus_space_tag_t vmx_iot1;
+ bus_space_handle_t vmx_ioh1;
+ struct resource *vmx_msix_res;
+
+ int vmx_link_active;
+ int vmx_link_speed;
+ int vmx_if_flags;
+ int vmx_ntxqueues;
+ int vmx_nrxqueues;
+ int vmx_ntxdescs;
+ int vmx_nrxdescs;
+ int vmx_max_rxsegs;
+ int vmx_rx_max_chain;
+
+ struct vmxnet3_statistics vmx_stats;
+
+ int vmx_intr_type;
+ int vmx_intr_mask_mode;
+ int vmx_event_intr_idx;
+ int vmx_nintrs;
+ struct vmxnet3_interrupt vmx_intrs[VMXNET3_MAX_INTRS];
+
+ struct mtx vmx_mtx;
+ uint8_t *vmx_mcast;
+ void *vmx_qs;
+ struct callout vmx_tick;
+ struct vmxnet3_dma_alloc vmx_ds_dma;
+ struct vmxnet3_dma_alloc vmx_qs_dma;
+ struct vmxnet3_dma_alloc vmx_mcast_dma;
+ struct ifmedia vmx_media;
+ eventhandler_tag vmx_vlan_attach;
+ eventhandler_tag vmx_vlan_detach;
+ uint8_t vmx_vlan_filter[4096/32];
+ uint8_t vmx_lladdr[ETHER_ADDR_LEN];
+};
+
+#define VMXNET3_CORE_LOCK_INIT(_sc, _name) \
+ mtx_init(&(_sc)->vmx_mtx, _name, "VMXNET3 Lock", MTX_DEF)
+#define VMXNET3_CORE_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->vmx_mtx)
+#define VMXNET3_CORE_LOCK(_sc) mtx_lock(&(_sc)->vmx_mtx)
+#define VMXNET3_CORE_UNLOCK(_sc) mtx_unlock(&(_sc)->vmx_mtx)
+#define VMXNET3_CORE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->vmx_mtx, MA_OWNED)
+#define VMXNET3_CORE_LOCK_ASSERT_NOTOWNED(_sc) \
+ mtx_assert(&(_sc)->vmx_mtx, MA_NOTOWNED)
+
+/*
+ * Our driver version we report to the hypervisor; we just keep
+ * this value constant.
+ */
+#define VMXNET3_DRIVER_VERSION 0x00010000
+
+/*
+ * Convert the FreeBSD version into something the hypervisor
+ * understands. This is apparently what VMware's driver reports
+ * so mimic it even though it probably is not required.
+ */
+#define VMXNET3_GUEST_OS_VERSION \
+ (((__FreeBSD_version / 100000) << 14) | \
+ (((__FreeBSD_version / 1000) % 100) << 6 ) | \
+ (((__FreeBSD_version / 100) % 10) << 30) | \
+ ((__FreeBSD_version % 100) << 22))
+
+/*
+ * Max descriptors per Tx packet. We must limit the size of any
+ * TSO packet based on the number of segments.
+ */
+#define VMXNET3_TX_MAXSEGS 32
+#define VMXNET3_TSO_MAXSIZE 65550
+
+/*
+ * Maximum supported Tx segment size. The length field in the
+ * Tx descriptor is 14 bits.
+ */
+#define VMXNET3_TX_MAXSEGSIZE (1 << 14)
+
+/*
+ * Predetermined size of the multicast MACs filter table. If the
+ * number of multicast addresses exceeds this size, then the
+ * ALL_MULTI mode is used instead.
+ */
+#define VMXNET3_MULTICAST_MAX 32
+
+/*
+ * Our Tx watchdog timeout.
+ */
+#define VMXNET3_WATCHDOG_TIMEOUT 5
+
+/*
+ * IP protocols that we can perform Tx checksum offloading of.
+ */
+#define VMXNET3_CSUM_OFFLOAD (CSUM_TCP | CSUM_UDP)
+#define VMXNET3_CSUM_OFFLOAD_IPV6 (CSUM_TCP_IPV6 | CSUM_UDP_IPV6)
+
+#define VMXNET3_CSUM_ALL_OFFLOAD \
+ (VMXNET3_CSUM_OFFLOAD | VMXNET3_CSUM_OFFLOAD_IPV6 | CSUM_TSO)
+
+/*
+ * Compat macros to keep this driver compiling on old releases.
+ */
+
+#if !defined(SYSCTL_ADD_UQUAD)
+#define SYSCTL_ADD_UQUAD SYSCTL_ADD_QUAD
+#endif
+
+#if !defined(IFCAP_TXCSUM_IPV6)
+#define IFCAP_TXCSUM_IPV6 0
+#endif
+
+#if !defined(IFCAP_RXCSUM_IPV6)
+#define IFCAP_RXCSUM_IPV6 0
+#endif
+
+#if !defined(CSUM_TCP_IPV6)
+#define CSUM_TCP_IPV6 0
+#endif
+
+#if !defined(CSUM_UDP_IPV6)
+#define CSUM_UDP_IPV6 0
+#endif
+
+#endif /* _IF_VMXVAR_H */
diff --git a/sys/fs/tmpfs/tmpfs_vfsops.c b/sys/fs/tmpfs/tmpfs_vfsops.c
index 4d55f51..57b9902 100644
--- a/sys/fs/tmpfs/tmpfs_vfsops.c
+++ b/sys/fs/tmpfs/tmpfs_vfsops.c
@@ -47,6 +47,8 @@ __FBSDID("$FreeBSD$");
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
+#include <sys/proc.h>
+#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/stat.h>
#include <sys/systm.h>
@@ -138,6 +140,7 @@ tmpfs_mount(struct mount *mp)
sizeof(struct tmpfs_dirent) + sizeof(struct tmpfs_node));
struct tmpfs_mount *tmp;
struct tmpfs_node *root;
+ struct thread *td = curthread;
int error;
/* Size counters. */
u_quad_t pages;
@@ -150,6 +153,9 @@ tmpfs_mount(struct mount *mp)
struct vattr va;
+ if (!prison_allow(td->td_ucred, PR_ALLOW_MOUNT_TMPFS))
+ return (EPERM);
+
if (vfs_filteropt(mp->mnt_optnew, tmpfs_opts))
return (EINVAL);
@@ -420,4 +426,4 @@ struct vfsops tmpfs_vfsops = {
.vfs_statfs = tmpfs_statfs,
.vfs_fhtovp = tmpfs_fhtovp,
};
-VFS_SET(tmpfs_vfsops, tmpfs, 0);
+VFS_SET(tmpfs_vfsops, tmpfs, VFCF_JAIL);
diff --git a/sys/geom/geom_disk.c b/sys/geom/geom_disk.c
index 60cff73..732b35d 100644
--- a/sys/geom/geom_disk.c
+++ b/sys/geom/geom_disk.c
@@ -460,6 +460,12 @@ g_disk_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp, struct g
if (dp->d_getattr(bp) == 0)
sbuf_printf(sb, "%s<lunid>%s</lunid>\n",
indent, buf);
+ bp->bio_attribute = "GEOM::lunname";
+ bp->bio_length = DISK_IDENT_SIZE;
+ bp->bio_data = buf;
+ if (dp->d_getattr(bp) == 0)
+ sbuf_printf(sb, "%s<lunname>%s</lunname>\n",
+ indent, buf);
g_destroy_bio(bp);
g_free(buf);
} else
diff --git a/sys/kern/kern_jail.c b/sys/kern/kern_jail.c
index d70a936..6451825 100644
--- a/sys/kern/kern_jail.c
+++ b/sys/kern/kern_jail.c
@@ -206,6 +206,7 @@ static char *pr_allow_names[] = {
"allow.mount.nullfs",
"allow.mount.zfs",
"allow.mount.procfs",
+ "allow.mount.tmpfs",
};
const size_t pr_allow_names_size = sizeof(pr_allow_names);
@@ -221,6 +222,7 @@ static char *pr_allow_nonames[] = {
"allow.mount.nonullfs",
"allow.mount.nozfs",
"allow.mount.noprocfs",
+ "allow.mount.notmpfs",
};
const size_t pr_allow_nonames_size = sizeof(pr_allow_nonames);
@@ -4228,6 +4230,10 @@ SYSCTL_PROC(_security_jail, OID_AUTO, mount_procfs_allowed,
CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
NULL, PR_ALLOW_MOUNT_PROCFS, sysctl_jail_default_allow, "I",
"Processes in jail can mount the procfs file system");
+SYSCTL_PROC(_security_jail, OID_AUTO, mount_tmpfs_allowed,
+ CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
+ NULL, PR_ALLOW_MOUNT_TMPFS, sysctl_jail_default_allow, "I",
+ "Processes in jail can mount the tmpfs file system");
SYSCTL_PROC(_security_jail, OID_AUTO, mount_zfs_allowed,
CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
NULL, PR_ALLOW_MOUNT_ZFS, sysctl_jail_default_allow, "I",
@@ -4380,6 +4386,8 @@ SYSCTL_JAIL_PARAM(_allow_mount, nullfs, CTLTYPE_INT | CTLFLAG_RW,
"B", "Jail may mount the nullfs file system");
SYSCTL_JAIL_PARAM(_allow_mount, procfs, CTLTYPE_INT | CTLFLAG_RW,
"B", "Jail may mount the procfs file system");
+SYSCTL_JAIL_PARAM(_allow_mount, tmpfs, CTLTYPE_INT | CTLFLAG_RW,
+ "B", "Jail may mount the tmpfs file system");
SYSCTL_JAIL_PARAM(_allow_mount, zfs, CTLTYPE_INT | CTLFLAG_RW,
"B", "Jail may mount the zfs file system");
diff --git a/sys/kern/kern_mbuf.c b/sys/kern/kern_mbuf.c
index df9b854..85d62ea 100644
--- a/sys/kern/kern_mbuf.c
+++ b/sys/kern/kern_mbuf.c
@@ -410,18 +410,14 @@ mb_ctor_mbuf(void *mem, int size, void *arg, int how)
{
struct mbuf *m;
struct mb_args *args;
-#ifdef MAC
int error;
-#endif
int flags;
short type;
#ifdef INVARIANTS
trash_ctor(mem, size, arg, how);
#endif
- m = (struct mbuf *)mem;
args = (struct mb_args *)arg;
- flags = args->flags;
type = args->type;
/*
@@ -431,32 +427,12 @@ mb_ctor_mbuf(void *mem, int size, void *arg, int how)
if (type == MT_NOINIT)
return (0);
- m->m_next = NULL;
- m->m_nextpkt = NULL;
- m->m_len = 0;
- m->m_flags = flags;
- m->m_type = type;
- if (flags & M_PKTHDR) {
- m->m_data = m->m_pktdat;
- m->m_pkthdr.rcvif = NULL;
- m->m_pkthdr.header = NULL;
- m->m_pkthdr.len = 0;
- m->m_pkthdr.csum_flags = 0;
- m->m_pkthdr.csum_data = 0;
- m->m_pkthdr.tso_segsz = 0;
- m->m_pkthdr.ether_vtag = 0;
- m->m_pkthdr.flowid = 0;
- m->m_pkthdr.fibnum = 0;
- SLIST_INIT(&m->m_pkthdr.tags);
-#ifdef MAC
- /* If the label init fails, fail the alloc */
- error = mac_mbuf_init(m, how);
- if (error)
- return (error);
-#endif
- } else
- m->m_data = m->m_dat;
- return (0);
+ m = (struct mbuf *)mem;
+ flags = args->flags;
+
+ error = m_init(m, NULL, size, how, type, flags);
+
+ return (error);
}
/*
@@ -571,6 +547,7 @@ mb_ctor_clust(void *mem, int size, void *arg, int how)
m->m_ext.ext_arg2 = NULL;
m->m_ext.ext_size = size;
m->m_ext.ext_type = type;
+ m->m_ext.ext_flags = 0;
m->m_ext.ref_cnt = refcnt;
}
@@ -656,34 +633,14 @@ mb_ctor_pack(void *mem, int size, void *arg, int how)
#ifdef INVARIANTS
trash_ctor(m->m_ext.ext_buf, MCLBYTES, arg, how);
#endif
- m->m_next = NULL;
- m->m_nextpkt = NULL;
- m->m_data = m->m_ext.ext_buf;
- m->m_len = 0;
- m->m_flags = (flags | M_EXT);
- m->m_type = type;
-
- if (flags & M_PKTHDR) {
- m->m_pkthdr.rcvif = NULL;
- m->m_pkthdr.len = 0;
- m->m_pkthdr.header = NULL;
- m->m_pkthdr.csum_flags = 0;
- m->m_pkthdr.csum_data = 0;
- m->m_pkthdr.tso_segsz = 0;
- m->m_pkthdr.ether_vtag = 0;
- m->m_pkthdr.flowid = 0;
- m->m_pkthdr.fibnum = 0;
- SLIST_INIT(&m->m_pkthdr.tags);
-#ifdef MAC
- /* If the label init fails, fail the alloc */
- error = mac_mbuf_init(m, how);
- if (error)
- return (error);
-#endif
- }
+
+ error = m_init(m, NULL, size, how, type, flags);
+
/* m_ext is already initialized. */
+ m->m_data = m->m_ext.ext_buf;
+ m->m_flags = (flags | M_EXT);
- return (0);
+ return (error);
}
int
diff --git a/sys/kern/kern_physio.c b/sys/kern/kern_physio.c
index ab9c344..88cd0cf 100644
--- a/sys/kern/kern_physio.c
+++ b/sys/kern/kern_physio.c
@@ -54,6 +54,36 @@ physio(struct cdev *dev, struct uio *uio, int ioflag)
dev->si_iosize_max = DFLTPHYS;
}
+ /*
+ * If the driver does not want I/O to be split, that means that we
+ * need to reject any requests that will not fit into one buffer.
+ */
+ if ((dev->si_flags & SI_NOSPLIT) &&
+ ((uio->uio_resid > dev->si_iosize_max) ||
+ (uio->uio_resid > MAXPHYS) ||
+ (uio->uio_iovcnt > 1))) {
+ /*
+ * Tell the user why his I/O was rejected.
+ */
+ if (uio->uio_resid > dev->si_iosize_max)
+ printf("%s: request size %zd > si_iosize_max=%d, "
+ "cannot split request\n", devtoname(dev),
+ uio->uio_resid, dev->si_iosize_max);
+
+ if (uio->uio_resid > MAXPHYS)
+ printf("%s: request size %zd > MAXPHYS=%d, "
+ "cannot split request\n", devtoname(dev),
+ uio->uio_resid, MAXPHYS);
+
+ if (uio->uio_iovcnt > 1)
+ printf("%s: request vectors=%d > 1, "
+ "cannot split request\n", devtoname(dev),
+ uio->uio_iovcnt);
+
+ error = EFBIG;
+ goto doerror;
+ }
+
for (i = 0; i < uio->uio_iovcnt; i++) {
while (uio->uio_iov[i].iov_len) {
bp->b_flags = 0;
@@ -83,6 +113,17 @@ physio(struct cdev *dev, struct uio *uio, int ioflag)
*/
iolen = ((vm_offset_t) bp->b_data) & PAGE_MASK;
if ((bp->b_bcount + iolen) > bp->b_kvasize) {
+ /*
+ * This device does not want I/O to be split.
+ */
+ if (dev->si_flags & SI_NOSPLIT) {
+ printf("%s: request ptr %#jx is not "
+ "on a page boundary, cannot split "
+ "request\n", devtoname(dev),
+ (uintmax_t)bp->b_data);
+ error = EFBIG;
+ goto doerror;
+ }
bp->b_bcount = bp->b_kvasize;
if (iolen != 0)
bp->b_bcount -= PAGE_SIZE;
diff --git a/sys/kern/uipc_mbuf.c b/sys/kern/uipc_mbuf.c
index 8395aac..f02e8ce 100644
--- a/sys/kern/uipc_mbuf.c
+++ b/sys/kern/uipc_mbuf.c
@@ -267,6 +267,7 @@ m_extadd(struct mbuf *mb, caddr_t buf, u_int size,
mb->m_ext.ext_arg1 = arg1;
mb->m_ext.ext_arg2 = arg2;
mb->m_ext.ext_type = type;
+ mb->m_ext.ext_flags = 0;
return (0);
}
@@ -342,6 +343,7 @@ mb_free_ext(struct mbuf *m)
m->m_ext.ref_cnt = NULL;
m->m_ext.ext_size = 0;
m->m_ext.ext_type = 0;
+ m->m_ext.ext_flags = 0;
m->m_flags &= ~M_EXT;
uma_zfree(zone_mbuf, m);
}
@@ -368,6 +370,7 @@ mb_dupcl(struct mbuf *n, struct mbuf *m)
n->m_ext.ext_size = m->m_ext.ext_size;
n->m_ext.ref_cnt = m->m_ext.ref_cnt;
n->m_ext.ext_type = m->m_ext.ext_type;
+ n->m_ext.ext_flags = m->m_ext.ext_flags;
n->m_flags |= M_EXT;
n->m_flags |= m->m_flags & M_RDONLY;
}
diff --git a/sys/modules/Makefile b/sys/modules/Makefile
index b95c77b..5d55bae 100644
--- a/sys/modules/Makefile
+++ b/sys/modules/Makefile
@@ -342,6 +342,7 @@ SUBDIR= \
${_viawd} \
vkbd \
${_vmm} \
+ ${_vmware} \
${_vpo} \
vr \
vte \
@@ -368,6 +369,7 @@ SUBDIR= \
.if ${MACHINE_CPUARCH} == "i386" || ${MACHINE_CPUARCH} == "amd64"
_filemon= filemon
+_vmware= vmware
.endif
.if ${MACHINE_CPUARCH} != "powerpc" && ${MACHINE_CPUARCH} != "arm" && \
diff --git a/sys/modules/rdma/Makefile b/sys/modules/rdma/Makefile
index 83edf09..f5d7255 100644
--- a/sys/modules/rdma/Makefile
+++ b/sys/modules/rdma/Makefile
@@ -1,9 +1,9 @@
# $FreeBSD$
-SUBDIR= addr
-SUBDIR+= cma
-SUBDIR+= iwcm
-SUBDIR+= core
+#SUBDIR= addr
+#SUBDIR+= cma
+#SUBDIR+= iwcm
+#SUBDIR+= core
SUBDIR+= krping
.include <bsd.subdir.mk>
diff --git a/sys/modules/vmware/Makefile b/sys/modules/vmware/Makefile
new file mode 100644
index 0000000..695d344
--- /dev/null
+++ b/sys/modules/vmware/Makefile
@@ -0,0 +1,28 @@
+#
+# $FreeBSD$
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+#
+
+SUBDIR= vmxnet3
+
+.include <bsd.subdir.mk>
diff --git a/sys/modules/vmware/vmxnet3/Makefile b/sys/modules/vmware/vmxnet3/Makefile
new file mode 100644
index 0000000..b9608e0
--- /dev/null
+++ b/sys/modules/vmware/vmxnet3/Makefile
@@ -0,0 +1,44 @@
+#
+# $FreeBSD$
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+#
+
+.PATH: ${.CURDIR}/../../../dev/vmware/vmxnet3
+
+KMOD= if_vmx
+SRCS= if_vmx.c
+SRCS+= bus_if.h device_if.h pci_if.h opt_inet.h opt_inet6.h
+
+.if !defined(KERNBUILDDIR)
+.if ${MK_INET_SUPPORT} != "no"
+opt_inet.h:
+ @echo "#define INET 1" > ${.TARGET}
+.endif
+
+.if ${MK_INET6_SUPPORT} != "no"
+opt_inet6.h:
+ @echo "#define INET6 1" > ${.TARGET}
+.endif
+.endif
+
+.include <bsd.kmod.mk>
diff --git a/sys/net/pfil.c b/sys/net/pfil.c
index 9939d72..44373ee 100644
--- a/sys/net/pfil.c
+++ b/sys/net/pfil.c
@@ -52,11 +52,9 @@ static struct mtx pfil_global_lock;
MTX_SYSINIT(pfil_heads_lock, &pfil_global_lock, "pfil_head_list lock",
MTX_DEF);
-static int pfil_list_add(pfil_list_t *, struct packet_filter_hook *, int);
-
-static int pfil_list_remove(pfil_list_t *,
- int (*)(void *, struct mbuf **, struct ifnet *, int, struct inpcb *),
- void *);
+static struct packet_filter_hook *pfil_chain_get(int, struct pfil_head *);
+static int pfil_chain_add(pfil_chain_t *, struct packet_filter_hook *, int);
+static int pfil_chain_remove(pfil_chain_t *, pfil_func_t, void *);
LIST_HEAD(pfilheadhead, pfil_head);
VNET_DEFINE(struct pfilheadhead, pfil_head_list);
@@ -65,7 +63,7 @@ VNET_DEFINE(struct rmlock, pfil_lock);
#define V_pfil_lock VNET(pfil_lock)
/*
- * pfil_run_hooks() runs the specified packet filter hooks.
+ * pfil_run_hooks() runs the specified packet filter hook chain.
*/
int
pfil_run_hooks(struct pfil_head *ph, struct mbuf **mp, struct ifnet *ifp,
@@ -78,8 +76,8 @@ pfil_run_hooks(struct pfil_head *ph, struct mbuf **mp, struct ifnet *ifp,
PFIL_RLOCK(ph, &rmpt);
KASSERT(ph->ph_nhooks >= 0, ("Pfil hook count dropped < 0"));
- for (pfh = pfil_hook_get(dir, ph); pfh != NULL;
- pfh = TAILQ_NEXT(pfh, pfil_link)) {
+ for (pfh = pfil_chain_get(dir, ph); pfh != NULL;
+ pfh = TAILQ_NEXT(pfh, pfil_chain)) {
if (pfh->pfil_func != NULL) {
rv = (*pfh->pfil_func)(pfh->pfil_arg, &m, ifp, dir,
inp);
@@ -92,6 +90,18 @@ pfil_run_hooks(struct pfil_head *ph, struct mbuf **mp, struct ifnet *ifp,
return (rv);
}
+static struct packet_filter_hook *
+pfil_chain_get(int dir, struct pfil_head *ph)
+{
+
+ if (dir == PFIL_IN)
+ return (TAILQ_FIRST(&ph->ph_in));
+ else if (dir == PFIL_OUT)
+ return (TAILQ_FIRST(&ph->ph_out));
+ else
+ return (NULL);
+}
+
/*
* pfil_try_rlock() acquires rm reader lock for specified head
* if this is immediately possible.
@@ -153,6 +163,7 @@ pfil_wowned(struct pfil_head *ph)
return (PFIL_WOWNED(ph));
}
+
/*
* pfil_head_register() registers a pfil_head with the packet filter hook
* mechanism.
@@ -162,11 +173,11 @@ pfil_head_register(struct pfil_head *ph)
{
struct pfil_head *lph;
- PFIL_LIST_LOCK();
+ PFIL_HEADLIST_LOCK();
LIST_FOREACH(lph, &V_pfil_head_list, ph_list) {
if (ph->ph_type == lph->ph_type &&
ph->ph_un.phu_val == lph->ph_un.phu_val) {
- PFIL_LIST_UNLOCK();
+ PFIL_HEADLIST_UNLOCK();
return (EEXIST);
}
}
@@ -175,7 +186,7 @@ pfil_head_register(struct pfil_head *ph)
TAILQ_INIT(&ph->ph_in);
TAILQ_INIT(&ph->ph_out);
LIST_INSERT_HEAD(&V_pfil_head_list, ph, ph_list);
- PFIL_LIST_UNLOCK();
+ PFIL_HEADLIST_UNLOCK();
return (0);
}
@@ -189,12 +200,12 @@ pfil_head_unregister(struct pfil_head *ph)
{
struct packet_filter_hook *pfh, *pfnext;
- PFIL_LIST_LOCK();
+ PFIL_HEADLIST_LOCK();
LIST_REMOVE(ph, ph_list);
- PFIL_LIST_UNLOCK();
- TAILQ_FOREACH_SAFE(pfh, &ph->ph_in, pfil_link, pfnext)
+ PFIL_HEADLIST_UNLOCK();
+ TAILQ_FOREACH_SAFE(pfh, &ph->ph_in, pfil_chain, pfnext)
free(pfh, M_IFADDR);
- TAILQ_FOREACH_SAFE(pfh, &ph->ph_out, pfil_link, pfnext)
+ TAILQ_FOREACH_SAFE(pfh, &ph->ph_out, pfil_chain, pfnext)
free(pfh, M_IFADDR);
PFIL_LOCK_DESTROY(ph);
return (0);
@@ -208,11 +219,11 @@ pfil_head_get(int type, u_long val)
{
struct pfil_head *ph;
- PFIL_LIST_LOCK();
+ PFIL_HEADLIST_LOCK();
LIST_FOREACH(ph, &V_pfil_head_list, ph_list)
if (ph->ph_type == type && ph->ph_un.phu_val == val)
break;
- PFIL_LIST_UNLOCK();
+ PFIL_HEADLIST_UNLOCK();
return (ph);
}
@@ -225,8 +236,7 @@ pfil_head_get(int type, u_long val)
* PFIL_WAITOK OK to call malloc with M_WAITOK.
*/
int
-pfil_add_hook(int (*func)(void *, struct mbuf **, struct ifnet *, int,
- struct inpcb *), void *arg, int flags, struct pfil_head *ph)
+pfil_add_hook(pfil_func_t func, void *arg, int flags, struct pfil_head *ph)
{
struct packet_filter_hook *pfh1 = NULL;
struct packet_filter_hook *pfh2 = NULL;
@@ -252,7 +262,7 @@ pfil_add_hook(int (*func)(void *, struct mbuf **, struct ifnet *, int,
if (flags & PFIL_IN) {
pfh1->pfil_func = func;
pfh1->pfil_arg = arg;
- err = pfil_list_add(&ph->ph_in, pfh1, flags & ~PFIL_OUT);
+ err = pfil_chain_add(&ph->ph_in, pfh1, flags & ~PFIL_OUT);
if (err)
goto locked_error;
ph->ph_nhooks++;
@@ -260,10 +270,10 @@ pfil_add_hook(int (*func)(void *, struct mbuf **, struct ifnet *, int,
if (flags & PFIL_OUT) {
pfh2->pfil_func = func;
pfh2->pfil_arg = arg;
- err = pfil_list_add(&ph->ph_out, pfh2, flags & ~PFIL_IN);
+ err = pfil_chain_add(&ph->ph_out, pfh2, flags & ~PFIL_IN);
if (err) {
if (flags & PFIL_IN)
- pfil_list_remove(&ph->ph_in, func, arg);
+ pfil_chain_remove(&ph->ph_in, func, arg);
goto locked_error;
}
ph->ph_nhooks++;
@@ -282,22 +292,21 @@ error:
/*
* pfil_remove_hook removes a specific function from the packet filter hook
- * list.
+ * chain.
*/
int
-pfil_remove_hook(int (*func)(void *, struct mbuf **, struct ifnet *, int,
- struct inpcb *), void *arg, int flags, struct pfil_head *ph)
+pfil_remove_hook(pfil_func_t func, void *arg, int flags, struct pfil_head *ph)
{
int err = 0;
PFIL_WLOCK(ph);
if (flags & PFIL_IN) {
- err = pfil_list_remove(&ph->ph_in, func, arg);
+ err = pfil_chain_remove(&ph->ph_in, func, arg);
if (err == 0)
ph->ph_nhooks--;
}
if ((err == 0) && (flags & PFIL_OUT)) {
- err = pfil_list_remove(&ph->ph_out, func, arg);
+ err = pfil_chain_remove(&ph->ph_out, func, arg);
if (err == 0)
ph->ph_nhooks--;
}
@@ -305,15 +314,18 @@ pfil_remove_hook(int (*func)(void *, struct mbuf **, struct ifnet *, int,
return (err);
}
+/*
+ * Internal: Add a new pfil hook into a hook chain.
+ */
static int
-pfil_list_add(pfil_list_t *list, struct packet_filter_hook *pfh1, int flags)
+pfil_chain_add(pfil_chain_t *chain, struct packet_filter_hook *pfh1, int flags)
{
struct packet_filter_hook *pfh;
/*
* First make sure the hook is not already there.
*/
- TAILQ_FOREACH(pfh, list, pfil_link)
+ TAILQ_FOREACH(pfh, chain, pfil_chain)
if (pfh->pfil_func == pfh1->pfil_func &&
pfh->pfil_arg == pfh1->pfil_arg)
return (EEXIST);
@@ -323,26 +335,23 @@ pfil_list_add(pfil_list_t *list, struct packet_filter_hook *pfh1, int flags)
* the same path is followed in or out of the kernel.
*/
if (flags & PFIL_IN)
- TAILQ_INSERT_HEAD(list, pfh1, pfil_link);
+ TAILQ_INSERT_HEAD(chain, pfh1, pfil_chain);
else
- TAILQ_INSERT_TAIL(list, pfh1, pfil_link);
+ TAILQ_INSERT_TAIL(chain, pfh1, pfil_chain);
return (0);
}
/*
- * pfil_list_remove is an internal function that takes a function off the
- * specified list.
+ * Internal: Remove a pfil hook from a hook chain.
*/
static int
-pfil_list_remove(pfil_list_t *list,
- int (*func)(void *, struct mbuf **, struct ifnet *, int, struct inpcb *),
- void *arg)
+pfil_chain_remove(pfil_chain_t *chain, pfil_func_t func, void *arg)
{
struct packet_filter_hook *pfh;
- TAILQ_FOREACH(pfh, list, pfil_link)
+ TAILQ_FOREACH(pfh, chain, pfil_chain)
if (pfh->pfil_func == func && pfh->pfil_arg == arg) {
- TAILQ_REMOVE(list, pfh, pfil_link);
+ TAILQ_REMOVE(chain, pfh, pfil_chain);
free(pfh, M_IFADDR);
return (0);
}
@@ -369,7 +378,8 @@ static int
vnet_pfil_uninit(const void *unused)
{
- /* XXX should panic if list is not empty */
+ KASSERT(LIST_EMPTY(&V_pfil_head_list),
+ ("%s: pfil_head_list %p not empty", __func__, &V_pfil_head_list));
PFIL_LOCK_DESTROY_REAL(&V_pfil_lock);
return (0);
}
diff --git a/sys/net/pfil.h b/sys/net/pfil.h
index 9cdb422..c9a1b65 100644
--- a/sys/net/pfil.h
+++ b/sys/net/pfil.h
@@ -43,15 +43,18 @@ struct mbuf;
struct ifnet;
struct inpcb;
+typedef int (*pfil_func_t)(void *, struct mbuf **, struct ifnet *, int,
+ struct inpcb *);
+
/*
* The packet filter hooks are designed for anything to call them to
- * possibly intercept the packet.
+ * possibly intercept the packet. Multiple filter hooks are chained
+ * together and called one after another in the specified order.
*/
struct packet_filter_hook {
- TAILQ_ENTRY(packet_filter_hook) pfil_link;
- int (*pfil_func)(void *, struct mbuf **, struct ifnet *, int,
- struct inpcb *);
- void *pfil_arg;
+ TAILQ_ENTRY(packet_filter_hook) pfil_chain;
+ pfil_func_t pfil_func;
+ void *pfil_arg;
};
#define PFIL_IN 0x00000001
@@ -59,55 +62,62 @@ struct packet_filter_hook {
#define PFIL_WAITOK 0x00000004
#define PFIL_ALL (PFIL_IN|PFIL_OUT)
-typedef TAILQ_HEAD(pfil_list, packet_filter_hook) pfil_list_t;
+typedef TAILQ_HEAD(pfil_chain, packet_filter_hook) pfil_chain_t;
#define PFIL_TYPE_AF 1 /* key is AF_* type */
#define PFIL_TYPE_IFNET 2 /* key is ifnet pointer */
#define PFIL_FLAG_PRIVATE_LOCK 0x01 /* Personal lock instead of global */
+/*
+ * A pfil head is created by each protocol or packet intercept point.
+ * Each packet is then run through the hook chain for inspection.
+ */
struct pfil_head {
- pfil_list_t ph_in;
- pfil_list_t ph_out;
- int ph_type;
- int ph_nhooks;
+ pfil_chain_t ph_in;
+ pfil_chain_t ph_out;
+ int ph_type;
+ int ph_nhooks;
#if defined( __linux__ ) || defined( _WIN32 )
- rwlock_t ph_mtx;
+ rwlock_t ph_mtx;
#else
struct rmlock *ph_plock; /* Pointer to the used lock */
- struct rmlock ph_lock; /* Private lock storage */
- int flags;
+ struct rmlock ph_lock; /* Private lock storage */
+ int flags;
#endif
union {
- u_long phu_val;
- void *phu_ptr;
+ u_long phu_val;
+ void *phu_ptr;
} ph_un;
-#define ph_af ph_un.phu_val
-#define ph_ifnet ph_un.phu_ptr
+#define ph_af ph_un.phu_val
+#define ph_ifnet ph_un.phu_ptr
LIST_ENTRY(pfil_head) ph_list;
};
-int pfil_add_hook(int (*func)(void *, struct mbuf **, struct ifnet *,
- int, struct inpcb *), void *, int, struct pfil_head *);
-int pfil_remove_hook(int (*func)(void *, struct mbuf **, struct ifnet *,
- int, struct inpcb *), void *, int, struct pfil_head *);
+/* Public functions for pfil hook management by packet filters. */
+struct pfil_head *pfil_head_get(int, u_long);
+int pfil_add_hook(pfil_func_t, void *, int, struct pfil_head *);
+int pfil_remove_hook(pfil_func_t, void *, int, struct pfil_head *);
+#define PFIL_HOOKED(p) ((p)->ph_nhooks > 0)
+
+/* Public functions to run the packet inspection by protocols. */
int pfil_run_hooks(struct pfil_head *, struct mbuf **, struct ifnet *,
int, struct inpcb *inp);
-struct rm_priotracker; /* Do not require including rmlock header */
-int pfil_try_rlock(struct pfil_head *, struct rm_priotracker *);
-void pfil_rlock(struct pfil_head *, struct rm_priotracker *);
-void pfil_runlock(struct pfil_head *, struct rm_priotracker *);
-void pfil_wlock(struct pfil_head *);
-void pfil_wunlock(struct pfil_head *);
-int pfil_wowned(struct pfil_head *ph);
-
+/* Public functions for pfil head management by protocols. */
int pfil_head_register(struct pfil_head *);
int pfil_head_unregister(struct pfil_head *);
-struct pfil_head *pfil_head_get(int, u_long);
-
-#define PFIL_HOOKED(p) ((p)->ph_nhooks > 0)
+/* Public pfil locking functions for self managed locks by packet filters. */
+struct rm_priotracker; /* Do not require including rmlock header */
+int pfil_try_rlock(struct pfil_head *, struct rm_priotracker *);
+void pfil_rlock(struct pfil_head *, struct rm_priotracker *);
+void pfil_runlock(struct pfil_head *, struct rm_priotracker *);
+void pfil_wlock(struct pfil_head *);
+void pfil_wunlock(struct pfil_head *);
+int pfil_wowned(struct pfil_head *ph);
+
+/* Internal pfil locking functions. */
#define PFIL_LOCK_INIT_REAL(l, t) \
rm_init_flags(l, "PFil " t " rmlock", RM_RECURSE)
#define PFIL_LOCK_DESTROY_REAL(l) \
@@ -123,25 +133,16 @@ struct pfil_head *pfil_head_get(int, u_long);
if ((p)->flags & PFIL_FLAG_PRIVATE_LOCK) \
PFIL_LOCK_DESTROY_REAL((p)->ph_plock); \
} while (0)
+
#define PFIL_TRY_RLOCK(p, t) rm_try_rlock((p)->ph_plock, (t))
#define PFIL_RLOCK(p, t) rm_rlock((p)->ph_plock, (t))
#define PFIL_WLOCK(p) rm_wlock((p)->ph_plock)
#define PFIL_RUNLOCK(p, t) rm_runlock((p)->ph_plock, (t))
#define PFIL_WUNLOCK(p) rm_wunlock((p)->ph_plock)
#define PFIL_WOWNED(p) rm_wowned((p)->ph_plock)
-#define PFIL_LIST_LOCK() mtx_lock(&pfil_global_lock)
-#define PFIL_LIST_UNLOCK() mtx_unlock(&pfil_global_lock)
-
-static __inline struct packet_filter_hook *
-pfil_hook_get(int dir, struct pfil_head *ph)
-{
-
- if (dir == PFIL_IN)
- return (TAILQ_FIRST(&ph->ph_in));
- else if (dir == PFIL_OUT)
- return (TAILQ_FIRST(&ph->ph_out));
- else
- return (NULL);
-}
+
+/* Internal locking macros for global/vnet pfil_head_list. */
+#define PFIL_HEADLIST_LOCK() mtx_lock(&pfil_global_lock)
+#define PFIL_HEADLIST_UNLOCK() mtx_unlock(&pfil_global_lock)
#endif /* _NET_PFIL_H_ */
diff --git a/sys/netpfil/ipfw/ip_dummynet.c b/sys/netpfil/ipfw/ip_dummynet.c
index 429f2f1..4de2156 100644
--- a/sys/netpfil/ipfw/ip_dummynet.c
+++ b/sys/netpfil/ipfw/ip_dummynet.c
@@ -82,13 +82,15 @@ dummynet(void *arg)
{
(void)arg; /* UNUSED */
- taskqueue_enqueue(dn_tq, &dn_task);
+ taskqueue_enqueue_fast(dn_tq, &dn_task);
}
void
dn_reschedule(void)
{
- callout_reset(&dn_timeout, 1, dummynet, NULL);
+
+ callout_reset_sbt(&dn_timeout, tick_sbt, 0, dummynet, NULL,
+ C_HARDCLOCK | C_DIRECT_EXEC);
}
/*----- end of callout hooks -----*/
@@ -2159,12 +2161,12 @@ ip_dn_init(void)
DN_LOCK_INIT();
TASK_INIT(&dn_task, 0, dummynet_task, curvnet);
- dn_tq = taskqueue_create("dummynet", M_WAITOK,
+ dn_tq = taskqueue_create_fast("dummynet", M_WAITOK,
taskqueue_thread_enqueue, &dn_tq);
taskqueue_start_threads(&dn_tq, 1, PI_NET, "dummynet");
callout_init(&dn_timeout, CALLOUT_MPSAFE);
- callout_reset(&dn_timeout, 1, dummynet, NULL);
+ dn_reschedule();
/* Initialize curr_time adjustment mechanics. */
getmicrouptime(&dn_cfg.prev_t);
diff --git a/sys/netpfil/ipfw/ip_fw2.c b/sys/netpfil/ipfw/ip_fw2.c
index 6317013..128afad 100644
--- a/sys/netpfil/ipfw/ip_fw2.c
+++ b/sys/netpfil/ipfw/ip_fw2.c
@@ -142,6 +142,8 @@ VNET_DEFINE(int, verbose_limit);
/* layer3_chain contains the list of rules for layer 3 */
VNET_DEFINE(struct ip_fw_chain, layer3_chain);
+VNET_DEFINE(int, ipfw_nat_ready) = 0;
+
ipfw_nat_t *ipfw_nat_ptr = NULL;
struct cfg_nat *(*lookup_nat_ptr)(struct nat_list *, int);
ipfw_nat_cfg_t *ipfw_nat_cfg_ptr;
diff --git a/sys/netpfil/ipfw/ip_fw_nat.c b/sys/netpfil/ipfw/ip_fw_nat.c
index 84852db..155eddd 100644
--- a/sys/netpfil/ipfw/ip_fw_nat.c
+++ b/sys/netpfil/ipfw/ip_fw_nat.c
@@ -53,8 +53,7 @@ __FBSDID("$FreeBSD$");
#include <machine/in_cksum.h> /* XXX for in_cksum */
-static VNET_DEFINE(eventhandler_tag, ifaddr_event_tag);
-#define V_ifaddr_event_tag VNET(ifaddr_event_tag)
+static eventhandler_tag ifaddr_event_tag;
static void
ifaddr_change(void *arg __unused, struct ifnet *ifp)
@@ -63,6 +62,8 @@ ifaddr_change(void *arg __unused, struct ifnet *ifp)
struct ifaddr *ifa;
struct ip_fw_chain *chain;
+ KASSERT(curvnet == ifp->if_vnet,
+ ("curvnet(%p) differs from iface vnet(%p)", curvnet, ifp->if_vnet));
chain = &V_layer3_chain;
IPFW_WLOCK(chain);
/* Check every nat entry... */
@@ -589,11 +590,38 @@ ipfw_nat_get_log(struct sockopt *sopt)
return(0);
}
+static int
+vnet_ipfw_nat_init(const void *arg __unused)
+{
+
+ V_ipfw_nat_ready = 1;
+ return (0);
+}
+
+static int
+vnet_ipfw_nat_uninit(const void *arg __unused)
+{
+ struct cfg_nat *ptr, *ptr_temp;
+ struct ip_fw_chain *chain;
+
+ chain = &V_layer3_chain;
+ IPFW_WLOCK(chain);
+ LIST_FOREACH_SAFE(ptr, &chain->nat, _next, ptr_temp) {
+ LIST_REMOVE(ptr, _next);
+ del_redir_spool_cfg(ptr, &ptr->redir_chain);
+ LibAliasUninit(ptr->lib);
+ free(ptr, M_IPFW);
+ }
+ flush_nat_ptrs(chain, -1 /* flush all */);
+ V_ipfw_nat_ready = 0;
+ IPFW_WUNLOCK(chain);
+ return (0);
+}
+
static void
ipfw_nat_init(void)
{
- IPFW_WLOCK(&V_layer3_chain);
/* init ipfw hooks */
ipfw_nat_ptr = ipfw_nat;
lookup_nat_ptr = lookup_nat;
@@ -601,28 +629,16 @@ ipfw_nat_init(void)
ipfw_nat_del_ptr = ipfw_nat_del;
ipfw_nat_get_cfg_ptr = ipfw_nat_get_cfg;
ipfw_nat_get_log_ptr = ipfw_nat_get_log;
- IPFW_WUNLOCK(&V_layer3_chain);
- V_ifaddr_event_tag = EVENTHANDLER_REGISTER(
- ifaddr_event, ifaddr_change,
+
+ ifaddr_event_tag = EVENTHANDLER_REGISTER(ifaddr_event, ifaddr_change,
NULL, EVENTHANDLER_PRI_ANY);
}
static void
ipfw_nat_destroy(void)
{
- struct cfg_nat *ptr, *ptr_temp;
- struct ip_fw_chain *chain;
- chain = &V_layer3_chain;
- IPFW_WLOCK(chain);
- LIST_FOREACH_SAFE(ptr, &chain->nat, _next, ptr_temp) {
- LIST_REMOVE(ptr, _next);
- del_redir_spool_cfg(ptr, &ptr->redir_chain);
- LibAliasUninit(ptr->lib);
- free(ptr, M_IPFW);
- }
- EVENTHANDLER_DEREGISTER(ifaddr_event, V_ifaddr_event_tag);
- flush_nat_ptrs(chain, -1 /* flush all */);
+ EVENTHANDLER_DEREGISTER(ifaddr_event, ifaddr_event_tag);
/* deregister ipfw_nat */
ipfw_nat_ptr = NULL;
lookup_nat_ptr = NULL;
@@ -630,7 +646,6 @@ ipfw_nat_destroy(void)
ipfw_nat_del_ptr = NULL;
ipfw_nat_get_cfg_ptr = NULL;
ipfw_nat_get_log_ptr = NULL;
- IPFW_WUNLOCK(chain);
}
static int
@@ -640,11 +655,9 @@ ipfw_nat_modevent(module_t mod, int type, void *unused)
switch (type) {
case MOD_LOAD:
- ipfw_nat_init();
break;
case MOD_UNLOAD:
- ipfw_nat_destroy();
break;
default:
@@ -660,8 +673,25 @@ static moduledata_t ipfw_nat_mod = {
0
};
-DECLARE_MODULE(ipfw_nat, ipfw_nat_mod, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY);
+/* Define startup order. */
+#define IPFW_NAT_SI_SUB_FIREWALL SI_SUB_PROTO_IFATTACHDOMAIN
+#define IPFW_NAT_MODEVENT_ORDER (SI_ORDER_ANY - 255)
+#define IPFW_NAT_MODULE_ORDER (IPFW_NAT_MODEVENT_ORDER + 1)
+#define IPFW_NAT_VNET_ORDER (IPFW_NAT_MODEVENT_ORDER + 2)
+
+DECLARE_MODULE(ipfw_nat, ipfw_nat_mod, IPFW_NAT_SI_SUB_FIREWALL, SI_ORDER_ANY);
MODULE_DEPEND(ipfw_nat, libalias, 1, 1, 1);
MODULE_DEPEND(ipfw_nat, ipfw, 2, 2, 2);
MODULE_VERSION(ipfw_nat, 1);
+
+SYSINIT(ipfw_nat_init, IPFW_NAT_SI_SUB_FIREWALL, IPFW_NAT_MODULE_ORDER,
+ ipfw_nat_init, NULL);
+VNET_SYSINIT(vnet_ipfw_nat_init, IPFW_NAT_SI_SUB_FIREWALL, IPFW_NAT_VNET_ORDER,
+ vnet_ipfw_nat_init, NULL);
+
+SYSUNINIT(ipfw_nat_destroy, IPFW_NAT_SI_SUB_FIREWALL, IPFW_NAT_MODULE_ORDER,
+ ipfw_nat_destroy, NULL);
+VNET_SYSUNINIT(vnet_ipfw_nat_uninit, IPFW_NAT_SI_SUB_FIREWALL,
+ IPFW_NAT_VNET_ORDER, vnet_ipfw_nat_uninit, NULL);
+
/* end of file */
diff --git a/sys/netpfil/ipfw/ip_fw_private.h b/sys/netpfil/ipfw/ip_fw_private.h
index a41cdf5..a8d7eea 100644
--- a/sys/netpfil/ipfw/ip_fw_private.h
+++ b/sys/netpfil/ipfw/ip_fw_private.h
@@ -327,9 +327,11 @@ extern struct cfg_nat *(*lookup_nat_ptr)(struct nat_list *, int);
typedef int ipfw_nat_t(struct ip_fw_args *, struct cfg_nat *, struct mbuf *);
typedef int ipfw_nat_cfg_t(struct sockopt *);
-extern ipfw_nat_t *ipfw_nat_ptr;
-#define IPFW_NAT_LOADED (ipfw_nat_ptr != NULL)
+VNET_DECLARE(int, ipfw_nat_ready);
+#define V_ipfw_nat_ready VNET(ipfw_nat_ready)
+#define IPFW_NAT_LOADED (V_ipfw_nat_ready)
+extern ipfw_nat_t *ipfw_nat_ptr;
extern ipfw_nat_cfg_t *ipfw_nat_cfg_ptr;
extern ipfw_nat_cfg_t *ipfw_nat_del_ptr;
extern ipfw_nat_cfg_t *ipfw_nat_get_cfg_ptr;
diff --git a/sys/ofed/include/linux/net.h b/sys/ofed/include/linux/net.h
index f47acf9..f84dee2 100644
--- a/sys/ofed/include/linux/net.h
+++ b/sys/ofed/include/linux/net.h
@@ -44,7 +44,7 @@ static inline int
sock_getname(struct socket *so, struct sockaddr *addr, int *sockaddr_len,
int peer)
{
- struct sockaddr **nam;
+ struct sockaddr *nam;
int error;
nam = NULL;
@@ -52,15 +52,15 @@ sock_getname(struct socket *so, struct sockaddr *addr, int *sockaddr_len,
if ((so->so_state & (SS_ISCONNECTED|SS_ISCONFIRMING)) == 0)
return (-ENOTCONN);
- error = (*so->so_proto->pr_usrreqs->pru_peeraddr)(so, nam);
+ error = (*so->so_proto->pr_usrreqs->pru_peeraddr)(so, &nam);
} else
- error = (*so->so_proto->pr_usrreqs->pru_sockaddr)(so, nam);
+ error = (*so->so_proto->pr_usrreqs->pru_sockaddr)(so, &nam);
if (error)
return (-error);
- *addr = **nam;
+ *addr = *nam;
*sockaddr_len = addr->sa_len;
- free(*nam, M_SONAME);
+ free(nam, M_SONAME);
return (0);
}
diff --git a/sys/powerpc/powermac/kiic.c b/sys/powerpc/powermac/kiic.c
index 0b13190..7a60ba2 100644
--- a/sys/powerpc/powermac/kiic.c
+++ b/sys/powerpc/powermac/kiic.c
@@ -420,7 +420,7 @@ kiic_transfer(device_t dev, struct iic_msg *msgs, uint32_t nmsgs)
device_printf(sc->sc_dev, "I2C error\n");
sc->sc_flags = 0;
mtx_unlock(&sc->sc_mutex);
- return (-1);
+ return (EIO);
}
}
diff --git a/sys/sys/conf.h b/sys/sys/conf.h
index fcce859..e9a2f55 100644
--- a/sys/sys/conf.h
+++ b/sys/sys/conf.h
@@ -62,6 +62,7 @@ struct cdev {
#define SI_DUMPDEV 0x0080 /* is kernel dumpdev */
#define SI_CLONELIST 0x0200 /* on a clone list */
#define SI_UNMAPPED 0x0400 /* can handle unmapped I/O */
+#define SI_NOSPLIT 0x0800 /* I/O should not be split up */
struct timespec si_atime;
struct timespec si_ctime;
struct timespec si_mtime;
diff --git a/sys/sys/jail.h b/sys/sys/jail.h
index a934aac..a82a499 100644
--- a/sys/sys/jail.h
+++ b/sys/sys/jail.h
@@ -227,7 +227,8 @@ struct prison_racct {
#define PR_ALLOW_MOUNT_NULLFS 0x0100
#define PR_ALLOW_MOUNT_ZFS 0x0200
#define PR_ALLOW_MOUNT_PROCFS 0x0400
-#define PR_ALLOW_ALL 0x07ff
+#define PR_ALLOW_MOUNT_TMPFS 0x0800
+#define PR_ALLOW_ALL 0x0fff
/*
* OSD methods
diff --git a/sys/sys/mbuf.h b/sys/sys/mbuf.h
index f7bf303..cafbbdf 100644
--- a/sys/sys/mbuf.h
+++ b/sys/sys/mbuf.h
@@ -138,16 +138,19 @@ struct pkthdr {
/*
* Description of external storage mapped into mbuf; valid only if M_EXT is
* set.
+ * Size ILP32: 28
+ * LP64: 48
*/
struct m_ext {
+ volatile u_int *ref_cnt; /* pointer to ref count info */
caddr_t ext_buf; /* start of buffer */
+ uint32_t ext_size; /* size of buffer, for ext_free */
+ uint32_t ext_type:8, /* type of external storage */
+ ext_flags:24; /* external storage mbuf flags */
void (*ext_free) /* free routine if not the usual */
(void *, void *);
void *ext_arg1; /* optional argument pointer */
void *ext_arg2; /* optional argument pointer */
- u_int ext_size; /* size of buffer, for ext_free */
- volatile u_int *ref_cnt; /* pointer to ref count info */
- int ext_type; /* type of external storage */
};
/*
@@ -269,7 +272,7 @@ struct mbuf {
M_PROTOFLAGS|M_HASHTYPEBITS)
/*
- * External buffer types: identify ext_buf type.
+ * External mbuf storage buffer types.
*/
#define EXT_CLUSTER 1 /* mbuf cluster */
#define EXT_SFBUF 2 /* sendfile(2)'s sf_bufs */
@@ -278,10 +281,48 @@ struct mbuf {
#define EXT_JUMBO16 5 /* jumbo cluster 16184 bytes */
#define EXT_PACKET 6 /* mbuf+cluster from packet zone */
#define EXT_MBUF 7 /* external mbuf reference (M_IOVEC) */
-#define EXT_NET_DRV 100 /* custom ext_buf provided by net driver(s) */
-#define EXT_MOD_TYPE 200 /* custom module's ext_buf type */
-#define EXT_DISPOSABLE 300 /* can throw this buffer away w/page flipping */
-#define EXT_EXTREF 400 /* has externally maintained ref_cnt ptr */
+
+#define EXT_VENDOR1 224 /* for vendor-internal use */
+#define EXT_VENDOR2 225 /* for vendor-internal use */
+#define EXT_VENDOR3 226 /* for vendor-internal use */
+#define EXT_VENDOR4 227 /* for vendor-internal use */
+
+#define EXT_EXP1 244 /* for experimental use */
+#define EXT_EXP2 245 /* for experimental use */
+#define EXT_EXP3 246 /* for experimental use */
+#define EXT_EXP4 247 /* for experimental use */
+
+#define EXT_NET_DRV 252 /* custom ext_buf provided by net driver(s) */
+#define EXT_MOD_TYPE 253 /* custom module's ext_buf type */
+#define EXT_DISPOSABLE 254 /* can throw this buffer away w/page flipping */
+#define EXT_EXTREF 255 /* has externally maintained ref_cnt ptr */
+
+/*
+ * Flags for external mbuf buffer types.
+ * NB: limited to the lower 24 bits.
+ */
+#define EXT_FLAG_EMBREF 0x000001 /* embedded ref_cnt, notyet */
+#define EXT_FLAG_EXTREF 0x000002 /* external ref_cnt, notyet */
+#define EXT_FLAG_NOFREE 0x000010 /* don't free mbuf to pool, notyet */
+
+#define EXT_FLAG_VENDOR1 0x010000 /* for vendor-internal use */
+#define EXT_FLAG_VENDOR2 0x020000 /* for vendor-internal use */
+#define EXT_FLAG_VENDOR3 0x040000 /* for vendor-internal use */
+#define EXT_FLAG_VENDOR4 0x080000 /* for vendor-internal use */
+
+#define EXT_FLAG_EXP1 0x100000 /* for experimental use */
+#define EXT_FLAG_EXP2 0x200000 /* for experimental use */
+#define EXT_FLAG_EXP3 0x400000 /* for experimental use */
+#define EXT_FLAG_EXP4 0x800000 /* for experimental use */
+
+/*
+ * EXT flag description for use with printf(9) %b identifier.
+ */
+#define EXT_FLAG_BITS \
+ "\20\1EXT_FLAG_EMBREF\2EXT_FLAG_EXTREF\5EXT_FLAG_NOFREE" \
+ "\21EXT_FLAG_VENDOR1\22EXT_FLAG_VENDOR2\23EXT_FLAG_VENDOR3" \
+ "\24EXT_FLAG_VENDOR4\25EXT_FLAG_EXP1\26EXT_FLAG_EXP2\27EXT_FLAG_EXP3" \
+ "\30EXT_FLAG_EXP4"
/*
* Flags indicating hw checksum support and sw checksum requirements. This
@@ -605,6 +646,7 @@ m_cljset(struct mbuf *m, void *cl, int type)
m->m_ext.ext_free = m->m_ext.ext_arg1 = m->m_ext.ext_arg2 = NULL;
m->m_ext.ext_size = size;
m->m_ext.ext_type = type;
+ m->m_ext.ext_flags = 0;
m->m_ext.ref_cnt = uma_find_refcnt(zone, cl);
m->m_flags |= M_EXT;
diff --git a/sys/sys/mount.h b/sys/sys/mount.h
index a953dae..1a835b7 100644
--- a/sys/sys/mount.h
+++ b/sys/sys/mount.h
@@ -628,6 +628,7 @@ struct vfsops {
vfs_susp_clean_t *vfs_susp_clean;
vfs_notify_lowervp_t *vfs_reclaim_lowervp;
vfs_notify_lowervp_t *vfs_unlink_lowervp;
+ vfs_mount_t *vfs_spare[6]; /* spares for ABI compat */
};
vfs_statfs_t __vfs_statfs;
diff --git a/sys/sys/param.h b/sys/sys/param.h
index d5905fe..67f4835 100644
--- a/sys/sys/param.h
+++ b/sys/sys/param.h
@@ -58,7 +58,7 @@
* in the range 5 to 9.
*/
#undef __FreeBSD_version
-#define __FreeBSD_version 1000048 /* Master, propagated to newvers */
+#define __FreeBSD_version 1000049 /* Master, propagated to newvers */
/*
* __FreeBSD_kernel__ indicates that this system uses the kernel of FreeBSD,
diff --git a/usr.sbin/jail/jail.8 b/usr.sbin/jail/jail.8
index b96cfef..88d69f9 100644
--- a/usr.sbin/jail/jail.8
+++ b/usr.sbin/jail/jail.8
@@ -25,7 +25,7 @@
.\"
.\" $FreeBSD$
.\"
-.Dd September 15, 2012
+.Dd August 23, 2013
.Dt JAIL 8
.Os
.Sh NAME
@@ -545,6 +545,14 @@ This permission is effective only together with
and if
.Va enforce_statfs
is set to a value lower than 2.
+.It Va allow.mount.tmpfs
+privileged users inside the jail will be able to mount and unmount the
+tmpfs file system.
+This permission is effective only together with
+.Va allow.mount
+and if
+.Va enforce_statfs
+is set to a value lower than 2.
.It Va allow.mount.zfs
privileged users inside the jail will be able to mount and unmount the
ZFS file system.
OpenPOWER on IntegriCloud