author     attilio <attilio@FreeBSD.org>  2013-03-07 23:43:03 +0000
committer  attilio <attilio@FreeBSD.org>  2013-03-07 23:43:03 +0000
commit     640e058da3cee2710fd66a67abbb548aad74104e (patch)
tree       d7c35ea5ace438f4006e9f03068543a6f001ad74
parent     1be810ec7308568f15799ceb7f6ad7162994537c (diff)
parent     8f87944624f4ab56ac387d7b3d251630641df1f6 (diff)
MFC
-rw-r--r--  UPDATING | 13
-rw-r--r--  cddl/contrib/opensolaris/cmd/zdb/zdb.c | 70
-rw-r--r--  cddl/contrib/opensolaris/tools/ctf/cvt/output.c | 4
-rw-r--r--  contrib/openpam/doc/man/openpam_straddch.3 | 21
-rw-r--r--  contrib/openpam/lib/openpam_readline.c | 6
-rw-r--r--  contrib/openpam/lib/openpam_readword.c | 7
-rw-r--r--  crypto/openssh/FREEBSD-upgrade | 1
-rw-r--r--  crypto/openssh/config.h | 51
-rw-r--r--  etc/portsnap.conf | 1
-rw-r--r--  lib/libc/string/Makefile.inc | 3
-rw-r--r--  lib/libstand/Makefile | 3
-rw-r--r--  lib/libutil/gr_util.c | 137
-rw-r--r--  lib/libutil/libutil.h | 2
-rw-r--r--  lib/libyaml/Makefile | 1
-rw-r--r--  lib/libyaml/libbsdyml.3 (renamed from sbin/mount_ext2fs/mount_ext2fs.8) | 71
-rw-r--r--  sbin/ipfw/ipfw2.c | 17
-rw-r--r--  sbin/mount_cd9660/mount_cd9660.8 | 6
-rw-r--r--  sbin/mount_cd9660/mount_cd9660.c | 6
-rw-r--r--  sbin/mount_ext2fs/Makefile | 14
-rw-r--r--  sbin/mount_ext2fs/mount_ext2fs.c | 125
-rw-r--r--  sbin/mount_msdosfs/mount_msdosfs.c | 7
-rw-r--r--  sbin/mount_nfs/mount_nfs.c | 13
-rw-r--r--  sbin/mount_nullfs/mount_nullfs.c | 5
-rw-r--r--  sbin/mount_reiserfs/Makefile | 13
-rw-r--r--  sbin/mount_reiserfs/mount_reiserfs.8 | 90
-rw-r--r--  sbin/mount_reiserfs/mount_reiserfs.c | 108
-rw-r--r--  sbin/mount_std/Makefile | 23
-rw-r--r--  sbin/mount_std/mount_std.8 | 167
-rw-r--r--  sbin/mount_std/mount_std.c | 160
-rw-r--r--  sbin/mount_udf/Makefile | 1
-rw-r--r--  sbin/mount_udf/mount_udf.c | 46
-rw-r--r--  sbin/mount_unionfs/mount_unionfs.c | 5
-rw-r--r--  share/man/man4/eventtimers.4 | 8
-rw-r--r--  share/man/man4/zyd.4 | 3
-rw-r--r--  share/man/man7/development.7 | 8
-rw-r--r--  share/man/man7/ports.7 | 14
-rw-r--r--  share/man/man9/Makefile | 8
-rw-r--r--  share/man/man9/condvar.9 | 32
-rw-r--r--  share/man/man9/sleep.9 | 42
-rw-r--r--  share/man/man9/sleepqueue.9 | 26
-rw-r--r--  share/man/man9/timeout.9 | 35
-rw-r--r--  sys/amd64/conf/GENERIC | 5
-rw-r--r--  sys/amd64/conf/NOTES | 7
-rw-r--r--  sys/arm/arm/swtch.S | 5
-rw-r--r--  sys/arm/arm/vm_machdep.c | 16
-rw-r--r--  sys/arm/broadcom/bcm2835/bcm2835_dma.c | 2
-rw-r--r--  sys/arm/include/frame.h | 6
-rw-r--r--  sys/cam/ctl/ctl.c | 6
-rw-r--r--  sys/cddl/contrib/opensolaris/uts/common/fs/zfs/bpobj.c | 8
-rw-r--r--  sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu.c | 2
-rw-r--r--  sys/cddl/contrib/opensolaris/uts/intel/dtrace/fasttrap_isa.c | 9
-rw-r--r--  sys/conf/files.amd64 | 10
-rw-r--r--  sys/conf/files.i386 | 10
-rw-r--r--  sys/conf/options | 3
-rw-r--r--  sys/contrib/altq/altq/altq_hfsc.c | 99
-rw-r--r--  sys/contrib/altq/altq/altq_hfsc.h | 18
-rw-r--r--  sys/dev/drm2/drmP.h | 10
-rw-r--r--  sys/dev/drm2/drm_drv.c | 21
-rw-r--r--  sys/dev/drm2/drm_gem.c | 6
-rw-r--r--  sys/dev/drm2/drm_global.c | 110
-rw-r--r--  sys/dev/drm2/drm_global.h | 56
-rw-r--r--  sys/dev/drm2/drm_mm.c | 37
-rw-r--r--  sys/dev/drm2/drm_mm.h | 2
-rw-r--r--  sys/dev/drm2/ttm/ttm_agp_backend.c | 145
-rw-r--r--  sys/dev/drm2/ttm/ttm_bo.c | 1820
-rw-r--r--  sys/dev/drm2/ttm/ttm_bo_api.h | 740
-rw-r--r--  sys/dev/drm2/ttm/ttm_bo_driver.h | 1018
-rw-r--r--  sys/dev/drm2/ttm/ttm_bo_manager.c | 157
-rw-r--r--  sys/dev/drm2/ttm/ttm_bo_util.c | 658
-rw-r--r--  sys/dev/drm2/ttm/ttm_bo_vm.c | 492
-rw-r--r--  sys/dev/drm2/ttm/ttm_execbuf_util.c | 230
-rw-r--r--  sys/dev/drm2/ttm/ttm_execbuf_util.h | 109
-rw-r--r--  sys/dev/drm2/ttm/ttm_lock.c | 340
-rw-r--r--  sys/dev/drm2/ttm/ttm_lock.h | 228
-rw-r--r--  sys/dev/drm2/ttm/ttm_memory.c | 471
-rw-r--r--  sys/dev/drm2/ttm/ttm_memory.h | 149
-rw-r--r--  sys/dev/drm2/ttm/ttm_module.h | 37
-rw-r--r--  sys/dev/drm2/ttm/ttm_object.c | 455
-rw-r--r--  sys/dev/drm2/ttm/ttm_object.h | 271
-rw-r--r--  sys/dev/drm2/ttm/ttm_page_alloc.c | 900
-rw-r--r--  sys/dev/drm2/ttm/ttm_page_alloc.h | 103
-rw-r--r--  sys/dev/drm2/ttm/ttm_page_alloc_dma.c | 1134
-rw-r--r--  sys/dev/drm2/ttm/ttm_placement.h | 93
-rw-r--r--  sys/dev/drm2/ttm/ttm_tt.c | 370
-rw-r--r--  sys/dev/hwpmc/hwpmc_soft.c | 21
-rw-r--r--  sys/dev/ixgbe/LICENSE | 2
-rw-r--r--  sys/dev/ixgbe/ixgbe.c | 134
-rw-r--r--  sys/dev/ixgbe/ixgbe_82598.c | 60
-rw-r--r--  sys/dev/ixgbe/ixgbe_82599.c | 382
-rw-r--r--  sys/dev/ixgbe/ixgbe_82599.h | 9
-rw-r--r--  sys/dev/ixgbe/ixgbe_api.c | 102
-rw-r--r--  sys/dev/ixgbe/ixgbe_api.h | 6
-rw-r--r--  sys/dev/ixgbe/ixgbe_common.c | 276
-rw-r--r--  sys/dev/ixgbe/ixgbe_common.h | 23
-rw-r--r--  sys/dev/ixgbe/ixgbe_mbx.h | 14
-rw-r--r--  sys/dev/ixgbe/ixgbe_osdep.h | 5
-rw-r--r--  sys/dev/ixgbe/ixgbe_phy.c | 55
-rw-r--r--  sys/dev/ixgbe/ixgbe_phy.h | 22
-rw-r--r--  sys/dev/ixgbe/ixgbe_type.h | 51
-rw-r--r--  sys/dev/ixgbe/ixgbe_vf.c | 125
-rw-r--r--  sys/dev/ixgbe/ixgbe_vf.h | 11
-rw-r--r--  sys/dev/ixgbe/ixgbe_x540.c | 10
-rw-r--r--  sys/dev/ixgbe/ixgbe_x540.h | 4
-rw-r--r--  sys/dev/ixgbe/ixv.c | 8
-rw-r--r--  sys/dev/nvme/nvme.c | 8
-rw-r--r--  sys/dev/nvme/nvme_ctrlr.c | 4
-rw-r--r--  sys/dev/oce/oce_hw.c | 8
-rw-r--r--  sys/dev/oce/oce_hw.h | 152
-rw-r--r--  sys/dev/oce/oce_if.c | 240
-rw-r--r--  sys/dev/oce/oce_if.h | 47
-rw-r--r--  sys/dev/oce/oce_mbox.c | 429
-rw-r--r--  sys/dev/oce/oce_queue.c | 61
-rw-r--r--  sys/dev/oce/oce_sysctl.c | 52
-rw-r--r--  sys/dev/oce/oce_util.c | 5
-rw-r--r--  sys/dev/random/randomdev_soft.c | 7
-rw-r--r--  sys/dev/sound/pci/hda/hdaa_patches.c | 17
-rw-r--r--  sys/dev/sound/pci/hda/hdac.h | 5
-rw-r--r--  sys/dev/sound/pci/hda/hdacc.c | 6
-rw-r--r--  sys/dev/syscons/syscons.c | 58
-rw-r--r--  sys/dev/syscons/syscons.h | 2
-rw-r--r--  sys/dev/virtio/block/virtio_blk.c | 49
-rw-r--r--  sys/geom/label/g_label_ntfs.c | 6
-rw-r--r--  sys/geom/part/g_part.c | 1
-rw-r--r--  sys/geom/part/g_part.h | 1
-rw-r--r--  sys/geom/part/g_part_mbr.c | 1
-rw-r--r--  sys/geom/raid/md_ddf.c | 5
-rw-r--r--  sys/i386/conf/GENERIC | 5
-rw-r--r--  sys/i386/conf/NOTES | 7
-rw-r--r--  sys/kern/kern_clock.c | 3
-rw-r--r--  sys/kern/kern_event.c | 87
-rw-r--r--  sys/kern/kern_resource.c | 17
-rw-r--r--  sys/kern/kern_time.c | 99
-rw-r--r--  sys/kern/kern_timeout.c | 38
-rw-r--r--  sys/kern/subr_log.c | 23
-rw-r--r--  sys/kern/sys_generic.c | 94
-rw-r--r--  sys/modules/drm2/drm2/Makefile | 17
-rw-r--r--  sys/modules/uart/Makefile | 1
-rw-r--r--  sys/net/route.c | 4
-rw-r--r--  sys/netinet/siftr.c | 2
-rw-r--r--  sys/sparc64/pci/psycho.c | 2
-rw-r--r--  sys/sys/diskmbr.h | 1
-rw-r--r--  sys/sys/param.h | 2
-rw-r--r--  sys/sys/pmckern.h | 9
-rw-r--r--  tools/tools/cxgbetool/cxgbetool.c | 1
-rw-r--r--  usr.bin/c89/c89.c | 2
-rw-r--r--  usr.bin/c99/c99.c | 2
-rw-r--r--  usr.sbin/bhyve/acpi.c | 16
-rw-r--r--  usr.sbin/bhyve/pci_virtio_block.c | 17
-rw-r--r--  usr.sbin/bhyve/pci_virtio_net.c | 15
-rw-r--r--  usr.sbin/pkg/Makefile | 6
-rw-r--r--  usr.sbin/pkg/config.c | 428
-rw-r--r--  usr.sbin/pkg/config.h | 52
-rw-r--r--  usr.sbin/pkg/pkg.c | 186
-rw-r--r--  usr.sbin/rtadvd/rtadvd.c | 2
154 files changed, 13408 insertions(+), 1955 deletions(-)
diff --git a/UPDATING b/UPDATING
index 50e6452..99af0ed 100644
--- a/UPDATING
+++ b/UPDATING
@@ -26,6 +26,19 @@ NOTE TO PEOPLE WHO THINK THAT FreeBSD 10.x IS SLOW:
disable the most expensive debugging functionality run
"ln -s 'abort:false,junk:false' /etc/malloc.conf".)
+20130304:
+ Recent commits to callout(9) changed the size of struct callout,
+ so the kernel binary interface (KBI) is likely broken. Also, some
+ functions in the callout(9)/sleep(9)/sleepqueue(9)/condvar(9) KPIs
+ were replaced by macros. Kernel modules that use these interfaces
+ will no longer load and must be rebuilt.
+
+ The ctl device has been re-enabled in GENERIC for i386 and amd64,
+ but does not initialize by default (because of the new CTL_DISABLE
+ option) to save memory. To re-enable it, remove the CTL_DISABLE
+ option from the kernel config file or set kern.cam.ctl.disable=0
+ in /boot/loader.conf.
+
20130301:
The ctl device has been disabled in GENERIC for i386 and amd64.
This was done due to the extra memory being allocated at system
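For reference, the re-enable recipe from the UPDATING entry above amounts to one of the following (a minimal sketch; the CTL_DISABLE option and the kern.cam.ctl.disable tunable are named in the entry itself):

    # Kernel config: delete this line and rebuild to re-enable ctl
    options         CTL_DISABLE

    # Or keep the option and flip the tunable in /boot/loader.conf
    kern.cam.ctl.disable=0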
diff --git a/cddl/contrib/opensolaris/cmd/zdb/zdb.c b/cddl/contrib/opensolaris/cmd/zdb/zdb.c
index 0238c65..2f0e658 100644
--- a/cddl/contrib/opensolaris/cmd/zdb/zdb.c
+++ b/cddl/contrib/opensolaris/cmd/zdb/zdb.c
@@ -1189,7 +1189,7 @@ dump_bpobj_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
}
static void
-dump_bpobj(bpobj_t *bpo, char *name)
+dump_bpobj(bpobj_t *bpo, char *name, int indent)
{
char bytes[32];
char comp[32];
@@ -1199,31 +1199,56 @@ dump_bpobj(bpobj_t *bpo, char *name)
return;
zdb_nicenum(bpo->bpo_phys->bpo_bytes, bytes);
- if (bpo->bpo_havesubobj) {
+ if (bpo->bpo_havesubobj && bpo->bpo_phys->bpo_subobjs != 0) {
zdb_nicenum(bpo->bpo_phys->bpo_comp, comp);
zdb_nicenum(bpo->bpo_phys->bpo_uncomp, uncomp);
- (void) printf("\n %s: %llu local blkptrs, %llu subobjs, "
- "%s (%s/%s comp)\n",
- name, (u_longlong_t)bpo->bpo_phys->bpo_num_blkptrs,
+ (void) printf(" %*s: object %llu, %llu local blkptrs, "
+ "%llu subobjs, %s (%s/%s comp)\n",
+ indent * 8, name,
+ (u_longlong_t)bpo->bpo_object,
+ (u_longlong_t)bpo->bpo_phys->bpo_num_blkptrs,
(u_longlong_t)bpo->bpo_phys->bpo_num_subobjs,
bytes, comp, uncomp);
+
+ for (uint64_t i = 0; i < bpo->bpo_phys->bpo_num_subobjs; i++) {
+ uint64_t subobj;
+ bpobj_t subbpo;
+ int error;
+ VERIFY0(dmu_read(bpo->bpo_os,
+ bpo->bpo_phys->bpo_subobjs,
+ i * sizeof (subobj), sizeof (subobj), &subobj, 0));
+ error = bpobj_open(&subbpo, bpo->bpo_os, subobj);
+ if (error != 0) {
+ (void) printf("ERROR %u while trying to open "
+ "subobj id %llu\n",
+ error, (u_longlong_t)subobj);
+ continue;
+ }
+ dump_bpobj(&subbpo, "subobj", indent + 1);
+ }
} else {
- (void) printf("\n %s: %llu blkptrs, %s\n",
- name, (u_longlong_t)bpo->bpo_phys->bpo_num_blkptrs, bytes);
+ (void) printf(" %*s: object %llu, %llu blkptrs, %s\n",
+ indent * 8, name,
+ (u_longlong_t)bpo->bpo_object,
+ (u_longlong_t)bpo->bpo_phys->bpo_num_blkptrs,
+ bytes);
}
if (dump_opt['d'] < 5)
return;
- (void) printf("\n");
- (void) bpobj_iterate_nofree(bpo, dump_bpobj_cb, NULL, NULL);
+ if (indent == 0) {
+ (void) bpobj_iterate_nofree(bpo, dump_bpobj_cb, NULL, NULL);
+ (void) printf("\n");
+ }
}
static void
dump_deadlist(dsl_deadlist_t *dl)
{
dsl_deadlist_entry_t *dle;
+ uint64_t unused;
char bytes[32];
char comp[32];
char uncomp[32];
@@ -1242,14 +1267,24 @@ dump_deadlist(dsl_deadlist_t *dl)
(void) printf("\n");
+ /* force the tree to be loaded */
+ dsl_deadlist_space_range(dl, 0, UINT64_MAX, &unused, &unused, &unused);
+
for (dle = avl_first(&dl->dl_tree); dle;
dle = AVL_NEXT(&dl->dl_tree, dle)) {
- (void) printf(" mintxg %llu -> obj %llu\n",
- (longlong_t)dle->dle_mintxg,
- (longlong_t)dle->dle_bpobj.bpo_object);
+ if (dump_opt['d'] >= 5) {
+ char buf[128];
+ (void) snprintf(buf, sizeof (buf), "mintxg %llu -> obj %llu",
+ (longlong_t)dle->dle_mintxg,
+ (longlong_t)dle->dle_bpobj.bpo_object);
- if (dump_opt['d'] >= 5)
- dump_bpobj(&dle->dle_bpobj, "");
+ dump_bpobj(&dle->dle_bpobj, buf, 0);
+ } else {
+ (void) printf("mintxg %llu -> obj %llu\n",
+ (longlong_t)dle->dle_mintxg,
+ (longlong_t)dle->dle_bpobj.bpo_object);
+
+ }
}
}
@@ -1272,7 +1307,7 @@ fuid_table_destroy()
* print uid or gid information.
* For normal POSIX id just the id is printed in decimal format.
* For CIFS files with FUID the fuid is printed in hex followed by
- * the doman-rid string.
+ * the domain-rid string.
*/
static void
print_idstr(uint64_t id, const char *id_type)
@@ -2529,10 +2564,11 @@ dump_zpool(spa_t *spa)
if (dump_opt['d'] || dump_opt['i']) {
dump_dir(dp->dp_meta_objset);
if (dump_opt['d'] >= 3) {
- dump_bpobj(&spa->spa_deferred_bpobj, "Deferred frees");
+ dump_bpobj(&spa->spa_deferred_bpobj,
+ "Deferred frees", 0);
if (spa_version(spa) >= SPA_VERSION_DEADLISTS) {
dump_bpobj(&spa->spa_dsl_pool->dp_free_bpobj,
- "Pool snapshot frees");
+ "Pool snapshot frees", 0);
}
if (spa_feature_is_active(spa,
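The dump_opt['d'] thresholds in the zdb.c hunks above count repeated -d flags on the zdb command line, so the new per-entry bpobj dump for deadlists is reached with five of them (a sketch; the pool name is hypothetical):

    # dump_opt['d'] >= 5: each deadlist entry is printed as a bpobj header
    zdb -ddddd tank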
diff --git a/cddl/contrib/opensolaris/tools/ctf/cvt/output.c b/cddl/contrib/opensolaris/tools/ctf/cvt/output.c
index bfe5b7e..af79769 100644
--- a/cddl/contrib/opensolaris/tools/ctf/cvt/output.c
+++ b/cddl/contrib/opensolaris/tools/ctf/cvt/output.c
@@ -363,6 +363,7 @@ sort_iidescs(Elf *elf, const char *file, tdata_t *td, int fuzzymatch,
for (i = 0; i < nent; i++) {
GElf_Sym sym;
+ char *bname;
iidesc_t **tolist;
GElf_Sym ssym;
iidesc_match_t smatch;
@@ -377,7 +378,8 @@ sort_iidescs(Elf *elf, const char *file, tdata_t *td, int fuzzymatch,
switch (GELF_ST_TYPE(sym.st_info)) {
case STT_FILE:
- match.iim_file = match.iim_name;
+ bname = strrchr(match.iim_name, '/');
+ match.iim_file = bname == NULL ? match.iim_name : bname + 1;
continue;
case STT_OBJECT:
tolist = iiburst->iib_objts;
diff --git a/contrib/openpam/doc/man/openpam_straddch.3 b/contrib/openpam/doc/man/openpam_straddch.3
index c555824..ea11c93 100644
--- a/contrib/openpam/doc/man/openpam_straddch.3
+++ b/contrib/openpam/doc/man/openpam_straddch.3
@@ -34,7 +34,7 @@
.\"
.\" $Id$
.\"
-.Dd May 26, 2012
+.Dd March 3, 2013
.Dt OPENPAM_STRADDCH 3
.Os
.Sh NAME
@@ -73,6 +73,21 @@ and
argument point to variables used to hold the size
of the buffer and the length of the string it contains, respectively.
.Pp
+The final argument,
+.Fa ch ,
+is the character that should be appended to the string.
+If
+.Fa ch
+is 0, nothing is appended, but a new buffer is still allocated if
+.Fa str
+is NULL.
+This can be used to
+.Do
+bootstrap
+.Dc
+the string.
+.Pp
If a new buffer is allocated or an existing buffer is reallocated to
make room for the additional character,
.Fa str
@@ -91,7 +106,9 @@ If the
function is successful, it increments the
integer variable pointed to by
.Fa len
-and returns 0.
+(unless
+.Fa ch
+was 0) and returns 0.
Otherwise, it leaves the variables pointed to by
.Fa str ,
.Fa size
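The "bootstrap" behaviour documented above is easiest to see in code. Below is a minimal sketch (not part of the commit; link with -lpam on FreeBSD, error handling abbreviated). The prototype int openpam_straddch(char **str, size_t *size, size_t *len, int ch) matches the call sites in the openpam_readline.c and openpam_readword.c hunks that follow:

    #include <security/pam_appl.h>
    #include <security/openpam.h>
    #include <stdio.h>
    #include <stdlib.h>

    int
    main(void)
    {
            char *str = NULL;
            size_t size = 0, len = 0;

            /* ch == 0: allocates a buffer but appends nothing */
            if (openpam_straddch(&str, &size, &len, 0) != 0)
                    return (1);
            /* each later call appends one character, growing the buffer */
            for (const char *p = "hello"; *p != '\0'; p++)
                    if (openpam_straddch(&str, &size, &len, *p) != 0)
                            return (1);
            printf("%s (len %zu, size %zu)\n", str, len, size);
            free(str);
            return (0);
    }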
diff --git a/contrib/openpam/lib/openpam_readline.c b/contrib/openpam/lib/openpam_readline.c
index 014acfb..047ab8369 100644
--- a/contrib/openpam/lib/openpam_readline.c
+++ b/contrib/openpam/lib/openpam_readline.c
@@ -62,11 +62,9 @@ openpam_readline(FILE *f, int *lineno, size_t *lenp)
size_t len, size;
int ch;
- if ((line = malloc(size = MIN_LINE_LENGTH)) == NULL) {
- openpam_log(PAM_LOG_ERROR, "malloc(): %m");
+ line = NULL;
+ if (openpam_straddch(&line, &size, &len, 0) != 0)
return (NULL);
- }
- len = 0;
for (;;) {
ch = fgetc(f);
/* strip comment */
diff --git a/contrib/openpam/lib/openpam_readword.c b/contrib/openpam/lib/openpam_readword.c
index 74a4d46..1c0e9b6 100644
--- a/contrib/openpam/lib/openpam_readword.c
+++ b/contrib/openpam/lib/openpam_readword.c
@@ -86,13 +86,8 @@ openpam_readword(FILE *f, int *lineno, size_t *lenp)
/* begin quote */
quote = ch;
/* edge case: empty quoted string */
- if (word == NULL && (word = malloc(1)) == NULL) {
- openpam_log(PAM_LOG_ERROR, "malloc(): %m");
- errno = ENOMEM;
+ if (openpam_straddch(&word, &size, &len, 0) != 0)
return (NULL);
- }
- *word = '\0';
- size = 1;
} else if (ch == quote && !escape) {
/* end quote */
quote = 0;
diff --git a/crypto/openssh/FREEBSD-upgrade b/crypto/openssh/FREEBSD-upgrade
index 03b7dbf..0d88fb7 100644
--- a/crypto/openssh/FREEBSD-upgrade
+++ b/crypto/openssh/FREEBSD-upgrade
@@ -43,6 +43,7 @@
7) Run configure with the appropriate arguments:
$ ./configure --prefix=/usr --sysconfdir=/etc/ssh \
+ --disable-lastlog --disable-utmp --disable-wtmp \
--with-pam --with-tcp-wrappers --with-libedit \
--with-ssl-engine
diff --git a/crypto/openssh/config.h b/crypto/openssh/config.h
index 49f4ec3..782708b 100644
--- a/crypto/openssh/config.h
+++ b/crypto/openssh/config.h
@@ -17,6 +17,9 @@
/* Define if your resolver libs need this for getrrsetbyname */
/* #undef BIND_8_COMPAT */
+/* The system has incomplete BSM API */
+/* #undef BROKEN_BSM_API */
+
/* Define if cmsg_type is not passed correctly */
/* #undef BROKEN_CMSG_TYPE */
@@ -97,7 +100,7 @@
/* #undef DISABLE_FD_PASSING */
/* Define if you don't want to use lastlog */
-/* #undef DISABLE_LASTLOG */
+#define DISABLE_LASTLOG 1
/* Define if you don't want to use your system's login() call */
/* #undef DISABLE_LOGIN */
@@ -307,7 +310,7 @@
#define HAVE_DECL__GETSHORT 0
/* Define if you have /dev/ptmx */
-#define HAVE_DEV_PTMX 1
+/* #undef HAVE_DEV_PTMX */
/* Define if you have /dev/ptc */
/* #undef HAVE_DEV_PTS_AND_PTC */
@@ -316,7 +319,7 @@
#define HAVE_DIRENT_H 1
/* Define to 1 if you have the `dirfd' function. */
-/* #undef HAVE_DIRFD */
+#define HAVE_DIRFD 1
/* Define to 1 if you have the `dirname' function. */
#define HAVE_DIRNAME 1
@@ -501,6 +504,9 @@
/* Define if HEADER.ad exists in arpa/nameser.h */
#define HAVE_HEADER_AD 1
+/* Define to 1 if you have the `HMAC_CTX_init' function. */
+#define HAVE_HMAC_CTX_INIT 1
+
/* Define if you have ut_host in utmp.h */
/* #undef HAVE_HOST_IN_UTMP */
@@ -552,6 +558,9 @@
/* Define to 1 if you have the <lastlog.h> header file. */
/* #undef HAVE_LASTLOG_H */
+/* Define if you want ldns support */
+/* #undef HAVE_LDNS */
+
/* Define to 1 if you have the <libaudit.h> header file. */
/* #undef HAVE_LIBAUDIT_H */
@@ -594,10 +603,19 @@
/* Define to 1 if you have the <limits.h> header file. */
#define HAVE_LIMITS_H 1
+/* Define to 1 if you have the <linux/audit.h> header file. */
+/* #undef HAVE_LINUX_AUDIT_H */
+
+/* Define to 1 if you have the <linux/filter.h> header file. */
+/* #undef HAVE_LINUX_FILTER_H */
+
/* Define to 1 if you have the <linux/if_tun.h> header file. */
/* #undef HAVE_LINUX_IF_TUN_H */
-/* Define if your libraries define login() */
+/* Define to 1 if you have the <linux/seccomp.h> header file. */
+/* #undef HAVE_LINUX_SECCOMP_H */
+
+/* Define to 1 if you have the `login' function. */
/* #undef HAVE_LOGIN */
/* Define to 1 if you have the <login_cap.h> header file. */
@@ -805,6 +823,9 @@
/* Define to 1 if you have the `setgroups' function. */
#define HAVE_SETGROUPS 1
+/* Define to 1 if you have the `setlinebuf' function. */
+#define HAVE_SETLINEBUF 1
+
/* Define to 1 if you have the `setlogin' function. */
#define HAVE_SETLOGIN 1
@@ -931,6 +952,9 @@
/* Define to 1 if you have the `strmode' function. */
#define HAVE_STRMODE 1
+/* Define to 1 if you have the `strnlen' function. */
+#define HAVE_STRNLEN 1
+
/* Define to 1 if you have the `strnvis' function. */
/* #undef HAVE_STRNVIS */
@@ -1172,7 +1196,7 @@
/* #undef HAVE_VHANGUP */
/* Define to 1 if you have the <vis.h> header file. */
-#define HAVE_VIS_H 1
+/* #undef HAVE_VIS_H */
/* Define to 1 if you have the `vsnprintf' function. */
#define HAVE_VSNPRINTF 1
@@ -1351,15 +1375,21 @@
/* Sandbox using setrlimit(2) */
#define SANDBOX_RLIMIT 1
+/* Sandbox using seccomp filter */
+/* #undef SANDBOX_SECCOMP_FILTER */
+
+/* setrlimit RLIMIT_FSIZE works */
+/* #undef SANDBOX_SKIP_RLIMIT_FSIZE */
+
/* Sandbox using systrace(4) */
/* #undef SANDBOX_SYSTRACE */
+/* Specify the system call convention in use */
+/* #undef SECCOMP_AUDIT_ARCH */
+
/* Define if your platform breaks doing a seteuid before a setuid */
/* #undef SETEUID_BREAKS_SETUID */
-/* The size of `char', as computed by sizeof. */
-#define SIZEOF_CHAR 1
-
/* The size of `int', as computed by sizeof. */
#define SIZEOF_INT 4
@@ -1500,6 +1530,11 @@
/* Define if xauth is found in your path */
/* #undef XAUTH_PATH */
+/* Enable large inode numbers on Mac OS X 10.5. */
+#ifndef _DARWIN_USE_64_BIT_INODE
+# define _DARWIN_USE_64_BIT_INODE 1
+#endif
+
/* Number of bits in a file offset, on hosts where this is settable. */
/* #undef _FILE_OFFSET_BITS */
diff --git a/etc/portsnap.conf b/etc/portsnap.conf
index c209445..eca429f 100644
--- a/etc/portsnap.conf
+++ b/etc/portsnap.conf
@@ -30,6 +30,5 @@ KEYPRINT=9b5feee6d69f170e3dd0a2c8e469ddbd64f13f978f2f3aede40c98633216c330
# REFUSE korean polish portuguese russian ukrainian vietnamese
# List of INDEX files to build and the DESCRIBE file to use for each
-INDEX INDEX-7 DESCRIBE.7
INDEX INDEX-8 DESCRIBE.8
INDEX INDEX-9 DESCRIBE.9
diff --git a/lib/libc/string/Makefile.inc b/lib/libc/string/Makefile.inc
index f98dc92..b997b7b 100644
--- a/lib/libc/string/Makefile.inc
+++ b/lib/libc/string/Makefile.inc
@@ -46,7 +46,8 @@ MLINKS+=strcasecmp.3 strncasecmp.3 \
strcasecmp.3 strcasecmp_l.3 \
strcasecmp.3 strncasecmp_l.3
MLINKS+=strcat.3 strncat.3
-MLINKS+=strchr.3 strrchr.3
+MLINKS+=strchr.3 strrchr.3 \
+ strchr.3 strchrnul.3
MLINKS+=strcmp.3 strncmp.3
MLINKS+=strcoll.3 strcoll_l.3
MLINKS+=strcpy.3 stpcpy.3 \
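The new MLINK points strchrnul(3) at the strchr(3) page: strchrnul() behaves like strchr() except that, when the character is absent, it returns a pointer to the terminating NUL rather than NULL. A quick sketch (not from the commit):

    #include <stdio.h>
    #include <string.h>

    int
    main(void)
    {
            const char *s = "key=value";

            /* points at '=', or at the trailing '\0' if absent */
            char *p = strchrnul(s, '=');
            printf("key length: %td\n", p - s);     /* prints 3 */
            return (0);
    }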
diff --git a/lib/libstand/Makefile b/lib/libstand/Makefile
index cfc39e9..2886e92 100644
--- a/lib/libstand/Makefile
+++ b/lib/libstand/Makefile
@@ -67,6 +67,9 @@ SRCS+= divsi3.S
.else
# Compiler support functions
.PATH: ${.CURDIR}/../../contrib/compiler-rt/lib/
+# __clzsi2 and __ctzsi2 for various builtin functions
+SRCS+= clzsi2.c ctzsi2.c
+# Divide and modulus functions called by the compiler
SRCS+= divmoddi4.c divmodsi4.c divdi3.c divsi3.c moddi3.c modsi3.c
SRCS+= udivmoddi4.c udivmodsi4.c udivdi3.c udivsi3.c umoddi3.c umodsi3.c
diff --git a/lib/libutil/gr_util.c b/lib/libutil/gr_util.c
index 3f7e199..8df3999 100644
--- a/lib/libutil/gr_util.c
+++ b/lib/libutil/gr_util.c
@@ -49,6 +49,8 @@ static char group_dir[PATH_MAX];
static char group_file[PATH_MAX];
static char tempname[PATH_MAX];
static int initialized;
+static size_t grmemlen(const struct group *, const char *, int *);
+static struct group *grcopy(const struct group *gr, struct group *newgr, const char *, int ndx);
/*
* Initialize statics
@@ -429,90 +431,121 @@ gr_make(const struct group *gr)
struct group *
gr_dup(const struct group *gr)
{
+ return (gr_add(gr, NULL));
+}
+/*
+ * Add a new member name to a struct group.
+ */
+struct group *
+gr_add(const struct group *gr, const char *newmember)
+{
struct group *newgr;
- char *dst;
size_t len;
- int ndx;
int num_mem;
- /* Calculate size of the group. */
- len = sizeof(*newgr);
- if (gr->gr_name != NULL)
- len += strlen(gr->gr_name) + 1;
- if (gr->gr_passwd != NULL)
- len += strlen(gr->gr_passwd) + 1;
- if (gr->gr_mem != NULL) {
- for (num_mem = 0; gr->gr_mem[num_mem] != NULL; num_mem++)
- len += strlen(gr->gr_mem[num_mem]) + 1;
- len += (num_mem + 1) * sizeof(*gr->gr_mem);
- } else
- num_mem = -1;
+ num_mem = 0;
+ len = grmemlen(gr, newmember, &num_mem);
/* Create new group and copy old group into it. */
if ((newgr = malloc(len)) == NULL)
return (NULL);
- /* point new gr_mem to end of struct + 1 */
- if (gr->gr_mem != NULL)
+ return (grcopy(gr, newgr, newmember, num_mem));
+}
+
+/* It is safer to walk the pointers given at gr_mem since there is no
+ * guarantee the gr_mem + strings are continguous in the given struct group
+ * but compact the new group into the following form.
+ *
+ * The new struct is laid out like this in memory. The example given is
+ * for a group with two members only.
+ *
+ * {
+ * (char *name)
+ * (char *passwd)
+ * (int gid)
+ * (gr_mem * newgrp + sizeof(struct group) + sizeof(**)) points to gr_mem area
+ * gr_mem area
+ * (member1 *)
+ * (member2 *)
+ * (NULL)
+ * (name string)
+ * (passwd string)
+ * (member1 string)
+ * (member2 string)
+ * }
+ */
+/*
+ * Copy the guts of a group plus given name to a preallocated group struct
+ */
+static struct group *
+grcopy(const struct group *gr, struct group *newgr, const char *name, int ndx)
+{
+ char *dst;
+ int i;
+
+ if (name != NULL)
+ ndx++;
+ /* point new gr_mem to end of struct + 1 if there are names */
+ if (ndx != 0)
newgr->gr_mem = (char **)(newgr + 1);
else
newgr->gr_mem = NULL;
/* point dst after the end of all the gr_mem pointers in newgr */
- dst = (char *)&newgr->gr_mem[num_mem + 1];
+ dst = (char *)&newgr->gr_mem[ndx + 1];
if (gr->gr_name != NULL) {
newgr->gr_name = dst;
dst = stpcpy(dst, gr->gr_name) + 1;
- } else {
+ } else
newgr->gr_name = NULL;
- }
if (gr->gr_passwd != NULL) {
newgr->gr_passwd = dst;
dst = stpcpy(dst, gr->gr_passwd) + 1;
- } else {
+ } else
newgr->gr_passwd = NULL;
- }
newgr->gr_gid = gr->gr_gid;
- if (gr->gr_mem != NULL) {
- for (ndx = 0; ndx < num_mem; ndx++) {
- newgr->gr_mem[ndx] = dst;
- dst = stpcpy(dst, gr->gr_mem[ndx]) + 1;
+ if (ndx != 0) {
+ for (i = 0; gr->gr_mem[i] != NULL; i++) {
+ newgr->gr_mem[i] = dst;
+ dst = stpcpy(dst, gr->gr_mem[i]) + 1;
+ }
+ if (name != NULL) {
+ newgr->gr_mem[i++] = dst;
+ dst = stpcpy(dst, name) + 1;
}
- newgr->gr_mem[ndx] = NULL;
+ newgr->gr_mem[i] = NULL;
}
return (newgr);
}
/*
- * Add a new member name to a struct group.
+ * Calculate length of a struct group + given name
*/
-struct group *
-gr_add(struct group *gr, char *newmember)
+static size_t
+grmemlen(const struct group *gr, const char *name, int *num_mem)
{
- size_t mlen;
- int num_mem=0;
- char **members;
- struct group *newgr;
-
- if (newmember == NULL)
- return(gr_dup(gr));
+ size_t len;
+ int i;
+ if (gr == NULL)
+ return (0);
+ /* Calculate size of the group. */
+ len = sizeof(*gr);
+ if (gr->gr_name != NULL)
+ len += strlen(gr->gr_name) + 1;
+ if (gr->gr_passwd != NULL)
+ len += strlen(gr->gr_passwd) + 1;
if (gr->gr_mem != NULL) {
- for (num_mem = 0; gr->gr_mem[num_mem] != NULL; num_mem++) {
- if (strcmp(gr->gr_mem[num_mem], newmember) == 0) {
- errno = EEXIST;
- return (NULL);
- }
+ for (i = 0; gr->gr_mem[i] != NULL; i++) {
+ len += strlen(gr->gr_mem[i]) + 1;
+ len += sizeof(*gr->gr_mem);
}
+ *num_mem = i;
}
- /* Allocate enough for current pointers + 1 more and NULL marker */
- mlen = (num_mem + 2) * sizeof(*gr->gr_mem);
- if ((members = malloc(mlen)) == NULL)
- return (NULL);
- memcpy(members, gr->gr_mem, num_mem * sizeof(*gr->gr_mem));
- members[num_mem++] = newmember;
- members[num_mem] = NULL;
- gr->gr_mem = members;
- newgr = gr_dup(gr);
- free(members);
- return (newgr);
+ if (name != NULL) {
+ len += strlen(name) + 1;
+ if (gr->gr_mem == NULL)
+ len += sizeof(*gr->gr_mem);
+ }
+ return(len);
}
/*
diff --git a/lib/libutil/libutil.h b/lib/libutil/libutil.h
index b1b2405..b8b9836 100644
--- a/lib/libutil/libutil.h
+++ b/lib/libutil/libutil.h
@@ -167,7 +167,7 @@ int gr_copy(int __ffd, int _tfd, const struct group *_gr,
struct group *
gr_dup(const struct group *_gr);
struct group *
- gr_add(struct group *_gr, char *_newmember);
+ gr_add(const struct group *_gr, const char *_newmember);
int gr_equal(const struct group *_gr1, const struct group *_gr2);
void gr_fini(void);
int gr_init(const char *_dir, const char *_master);
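A minimal sketch (not from the commit) of the constified gr_add(3) interface above, paired with gr_make(3) to print the result; the group and member names are hypothetical, and the program links with -lutil:

    #include <sys/types.h>
    #include <grp.h>
    #include <libutil.h>
    #include <stdio.h>
    #include <stdlib.h>

    int
    main(void)
    {
            struct group *gr, *newgr;
            char *line;

            if ((gr = getgrnam("wheel")) == NULL)
                    return (1);
            /* returns a freshly allocated, compacted copy with one new member */
            if ((newgr = gr_add(gr, "newuser")) == NULL)
                    return (1);
            if ((line = gr_make(newgr)) != NULL) {
                    printf("%s\n", line);   /* group(5)-style line */
                    free(line);
            }
            free(newgr);
            return (0);
    }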
diff --git a/lib/libyaml/Makefile b/lib/libyaml/Makefile
index 076eb54..5ecec78 100644
--- a/lib/libyaml/Makefile
+++ b/lib/libyaml/Makefile
@@ -7,6 +7,7 @@ SHLIB_MAJOR= 0
INCS= bsdyml.h
SRCS= api.c dumper.c emitter.c loader.c \
parser.c reader.c scanner.c writer.c
+MAN= libbsdyml.3
.PATH: ${LIBYAML}/src ${LIBYAML}/include
CLEANFILES= bsdyml.h
diff --git a/sbin/mount_ext2fs/mount_ext2fs.8 b/lib/libyaml/libbsdyml.3
index 91a315e..927c47c 100644
--- a/sbin/mount_ext2fs/mount_ext2fs.8
+++ b/lib/libyaml/libbsdyml.3
@@ -1,5 +1,5 @@
-.\" Copyright (c) 1993, 1994
-.\" The Regents of the University of California. All rights reserved.
+.\" Copyright (c) 2013 Baptiste Daroussin <bapt@FreeBSD.org>
+.\" All rights reserved.
.\"
.\" Redistribution and use in source and binary forms, with or without
.\" modification, are permitted provided that the following conditions
@@ -9,14 +9,11 @@
.\" 2. Redistributions in binary form must reproduce the above copyright
.\" notice, this list of conditions and the following disclaimer in the
.\" documentation and/or other materials provided with the distribution.
-.\" 4. Neither the name of the University nor the names of its contributors
-.\" may be used to endorse or promote products derived from this software
-.\" without specific prior written permission.
.\"
-.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -27,46 +24,38 @@
.\"
.\" $FreeBSD$
.\"
-.Dd January 31, 1996
-.Dt MOUNT_EXT2FS 8
+.Dd March 05, 2013
+.Dt LIBBSDYML 3
.Os
.Sh NAME
-.Nm mount_ext2fs
-.Nd mount an ext2fs file system
+.Nm libbsdyml
+.Nd LibYAML library for parsing and emitting YAML
.Sh SYNOPSIS
-.Nm
-.Op Fl o Ar options
-.Ar special
-.Ar node
+.In bsdyml.h
.Sh DESCRIPTION
The
.Nm
-utility attaches an ext2fs file system
-.Ar special
-device on to the file system tree at the point
-.Ar node .
-.Pp
-This command is normally executed by
-.Xr mount 8
-at boot time.
+library is a verbatim copy of LibYAML version 0.1.4.
.Pp
-The options are as follows:
-.Bl -tag -width indent
-.It Fl o
-Options are specified with a
-.Fl o
-flag followed by a comma separated string of options.
-See the
-.Xr mount 8
-man page for possible options and their meanings.
-.El
-.Sh SEE ALSO
-.Xr mount 2 ,
-.Xr unmount 2 ,
-.Xr fstab 5 ,
-.Xr mount 8
-.Sh HISTORY
The
.Nm
-utility first appeared in
-.Fx 2.2 .
+library is intended to be used within the
+.Fx
+base system only.
+Use of the
+.Nm
+library for other purposes is unsupported and discouraged.
+.Pp
+To avoid confusion and autoconfiguration issues for 3rd party software,
+the library and its include file have been renamed to
+.Nm
+rather than retaining the original LibYAML names.
+.Sh SEE ALSO
+For full documentation, please see the LibYAML webpage at
+.Pa http://pyyaml.org/wiki/LibYAML .
+.Sh AUTHORS
+.An -nosplit
+The original LibYAML was written by
+.An Kirill Simonov Aq xi@resolvent.net .
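As orientation for the renamed library, a trivial parser loop follows (a sketch assuming the library links as -lbsdyml and exposes the stock LibYAML 0.1.4 API under the bsdyml.h name shown in the Makefile hunk above):

    #include <stdio.h>
    #include <string.h>
    #include <bsdyml.h>     /* renamed copy of yaml.h */

    int
    main(void)
    {
            const char *doc = "key: value\n";
            yaml_parser_t parser;
            yaml_event_t event;
            int done = 0;

            if (!yaml_parser_initialize(&parser))
                    return (1);
            yaml_parser_set_input_string(&parser,
                (const unsigned char *)doc, strlen(doc));
            while (!done) {
                    if (!yaml_parser_parse(&parser, &event))
                            break;  /* parse error */
                    if (event.type == YAML_SCALAR_EVENT)
                            printf("scalar: %s\n",
                                (const char *)event.data.scalar.value);
                    done = (event.type == YAML_STREAM_END_EVENT);
                    yaml_event_delete(&event);
            }
            yaml_parser_delete(&parser);
            return (0);
    }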
diff --git a/sbin/ipfw/ipfw2.c b/sbin/ipfw/ipfw2.c
index 5d79ece..ae5d2aa 100644
--- a/sbin/ipfw/ipfw2.c
+++ b/sbin/ipfw/ipfw2.c
@@ -3083,9 +3083,14 @@ chkarg:
} else {
len = sizeof(c->max_log);
if (sysctlbyname("net.inet.ip.fw.verbose_limit",
- &c->max_log, &len, NULL, 0) == -1)
+ &c->max_log, &len, NULL, 0) == -1) {
+ if (co.test_only) {
+ c->max_log = 0;
+ break;
+ }
errx(1, "sysctlbyname(\"%s\")",
"net.inet.ip.fw.verbose_limit");
+ }
}
}
break;
@@ -3986,9 +3991,13 @@ ipfw_table_handler(int ac, char *av[])
mask = 0; // XXX uninitialized ?
len = sizeof(tables_max);
if (sysctlbyname("net.inet.ip.fw.tables_max", &tables_max, &len,
- NULL, 0) == -1)
- errx(1, "Can't determine maximum number of ipfw tables. "
- "Perhaps you forgot to load ipfw module?");
+ NULL, 0) == -1) {
+ if (co.test_only)
+ tables_max = 128; /* Old conservative default */
+ else
+ errx(1, "Can't determine maximum number of ipfw tables."
+ " Perhaps you forgot to load ipfw module?");
+ }
memset(&xent, 0, sizeof(xent));
diff --git a/sbin/mount_cd9660/mount_cd9660.8 b/sbin/mount_cd9660/mount_cd9660.8
index 909af40..b471686 100644
--- a/sbin/mount_cd9660/mount_cd9660.8
+++ b/sbin/mount_cd9660/mount_cd9660.8
@@ -32,7 +32,7 @@
.\" @(#)mount_cd9660.8 8.3 (Berkeley) 3/27/94
.\" $FreeBSD$
.\"
-.Dd October 3, 2005
+.Dd March 5, 2013
.Dt MOUNT_CD9660 8
.Os
.Sh NAME
@@ -80,7 +80,7 @@ See the
man page for possible options and their meanings.
The following cd9660 specific options are available:
.Pp
-.Bl -tag -width "nostrictjoliet" -compact
+.Bl -tag -width "brokenjoliet" -compact
.It Cm extatt
Same as
.Fl e .
@@ -93,7 +93,7 @@ Same as
.It Cm norrip
Same as
.Fl r .
-.It Cm nostrictjoliet
+.It Cm brokenjoliet
Same as
.Fl b .
.El
diff --git a/sbin/mount_cd9660/mount_cd9660.c b/sbin/mount_cd9660/mount_cd9660.c
index 2215966..7ea6064 100644
--- a/sbin/mount_cd9660/mount_cd9660.c
+++ b/sbin/mount_cd9660/mount_cd9660.c
@@ -83,7 +83,7 @@ main(int argc, char **argv)
{
struct iovec *iov;
int iovlen;
- int ch, mntflags, opts;
+ int ch, mntflags;
char *dev, *dir, *p, *val, mntpath[MAXPATHLEN];
int verbose;
int ssector; /* starting sector, 0 for 1st session */
@@ -91,7 +91,7 @@ main(int argc, char **argv)
iov = NULL;
iovlen = 0;
- mntflags = opts = verbose = 0;
+ mntflags = verbose = 0;
ssector = -1;
while ((ch = getopt(argc, argv, "begjo:rs:vC:")) != -1)
@@ -109,7 +109,7 @@ main(int argc, char **argv)
build_iovec(&iov, &iovlen, "nojoliet", NULL, (size_t)-1);
break;
case 'o':
- getmntopts(optarg, mopts, &mntflags, &opts);
+ getmntopts(optarg, mopts, &mntflags, NULL);
p = strchr(optarg, '=');
val = NULL;
if (p != NULL) {
diff --git a/sbin/mount_ext2fs/Makefile b/sbin/mount_ext2fs/Makefile
deleted file mode 100644
index 08a0c3c..0000000
--- a/sbin/mount_ext2fs/Makefile
+++ /dev/null
@@ -1,14 +0,0 @@
-# @(#)Makefile 8.3 (Berkeley) 3/27/94
-# $FreeBSD$
-
-PROG= mount_ext2fs
-SRCS= mount_ext2fs.c getmntopts.c
-MAN= mount_ext2fs.8
-
-WARNS?= 2
-MOUNT= ${.CURDIR}/../mount
-CFLAGS+= -I${MOUNT}
-
-.PATH: ${MOUNT}
-
-.include <bsd.prog.mk>
diff --git a/sbin/mount_ext2fs/mount_ext2fs.c b/sbin/mount_ext2fs/mount_ext2fs.c
deleted file mode 100644
index 8753997..0000000
--- a/sbin/mount_ext2fs/mount_ext2fs.c
+++ /dev/null
@@ -1,125 +0,0 @@
-/*-
- * Copyright (c) 1993, 1994
- * The Regents of the University of California. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 4. Neither the name of the University nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-#ifndef lint
-static const char copyright[] =
-"@(#) Copyright (c) 1993, 1994\n\
- The Regents of the University of California. All rights reserved.\n";
-#endif /* not lint */
-
-#ifndef lint
-/*
-static char sccsid[] = "@(#)mount_lfs.c 8.3 (Berkeley) 3/27/94";
-*/
-static const char rcsid[] =
- "$FreeBSD$";
-#endif /* not lint */
-
-#include <sys/param.h>
-#include <sys/mount.h>
-#include <sys/uio.h>
-
-#include <err.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sysexits.h>
-#include <unistd.h>
-
-#include "mntopts.h"
-
-static void usage(void);
-
-int
-main(int argc, char *argv[])
-{
- struct iovec *iov;
- int ch, iovlen;
- char *fs_name, *fspec, mntpath[MAXPATHLEN];
- char *fstype;
-
- fstype = strrchr(argv[0], '_');
- if (fstype == NULL)
- errx(EX_USAGE, "argv[0] must end in _fstype");
- else
- ++fstype;
-
- iov = NULL;
- iovlen = 0;
- while ((ch = getopt(argc, argv, "o:")) != -1)
- switch (ch) {
- case 'o': {
- char *p = NULL;
- char *val = strdup("");
- p = strchr(optarg, '=');
- if (p != NULL) {
- free(val);
- *p = '\0';
- val = p + 1;
- }
- build_iovec(&iov, &iovlen, optarg, val, strlen(val)+1);
- }
- break;
- case '?':
- default:
- usage();
- }
- argc -= optind;
- argv += optind;
-
- if (argc != 2)
- usage();
-
- fspec = argv[0]; /* the name of the device file */
- fs_name = argv[1]; /* the mount point */
-
- /*
- * Resolve the mountpoint with realpath(3) and remove unnecessary
- * slashes from the devicename if there are any.
- */
- if (checkpath(fs_name, mntpath) != 0)
- err(EX_USAGE, "%s", mntpath);
- (void)rmslashes(fspec, fspec);
-
- build_iovec(&iov, &iovlen, "fstype", fstype, strlen(fstype) + 1);
- build_iovec(&iov, &iovlen, "fspath", mntpath, strlen(mntpath) + 1);
- build_iovec(&iov, &iovlen, "from", fspec, strlen(fspec) + 1);
-
- if (nmount(iov, iovlen, 0) < 0)
- err(EX_OSERR, "%s", fspec);
- return (0);
-}
-
-static void
-usage()
-{
- (void)fprintf(stderr,
- "usage: mount_ext2fs [-o options] special node\n");
- exit(EX_USAGE);
-}
diff --git a/sbin/mount_msdosfs/mount_msdosfs.c b/sbin/mount_msdosfs/mount_msdosfs.c
index 3da673d..8814fdd 100644
--- a/sbin/mount_msdosfs/mount_msdosfs.c
+++ b/sbin/mount_msdosfs/mount_msdosfs.c
@@ -69,7 +69,7 @@ main(int argc, char **argv)
struct iovec *iov = NULL;
int iovlen = 0;
struct stat sb;
- int c, mntflags, set_gid, set_uid, set_mask, set_dirmask;
+ int c, set_gid, set_uid, set_mask, set_dirmask;
char *dev, *dir, mntpath[MAXPATHLEN], *csp;
char fstype[] = "msdosfs";
char errmsg[255] = {0};
@@ -78,9 +78,8 @@ main(int argc, char **argv)
mode_t mask = 0, dirmask = 0;
uid_t uid = 0;
gid_t gid = 0;
- getmnt_silent = 1;
- mntflags = set_gid = set_uid = set_mask = set_dirmask = 0;
+ set_gid = set_uid = set_mask = set_dirmask = 0;
while ((c = getopt(argc, argv, "sl9u:g:m:M:o:L:D:W:")) != -1) {
switch (c) {
@@ -219,7 +218,7 @@ main(int argc, char **argv)
build_iovec_argf(&iov, &iovlen, "mask", "%u", mask);
build_iovec_argf(&iov, &iovlen, "dirmask", "%u", dirmask);
- if (nmount(iov, iovlen, mntflags) < 0) {
+ if (nmount(iov, iovlen, 0) < 0) {
if (errmsg[0])
err(1, "%s: %s", dev, errmsg);
else
diff --git a/sbin/mount_nfs/mount_nfs.c b/sbin/mount_nfs/mount_nfs.c
index d71e952..bd016f3 100644
--- a/sbin/mount_nfs/mount_nfs.c
+++ b/sbin/mount_nfs/mount_nfs.c
@@ -130,7 +130,7 @@ enum tryret {
TRYRET_LOCALERR /* Local failure. */
};
-static int fallback_mount(struct iovec *iov, int iovlen, int mntflags);
+static int fallback_mount(struct iovec *iov, int iovlen);
static int sec_name_to_num(char *sec);
static char *sec_num_to_name(int num);
static int getnfsargs(char *, struct iovec **iov, int *iovlen);
@@ -149,13 +149,12 @@ main(int argc, char *argv[])
{
int c;
struct iovec *iov;
- int mntflags, num, iovlen;
+ int num, iovlen;
int osversion;
char *name, *p, *spec, *fstype;
char mntpath[MAXPATHLEN], errmsg[255];
char hostname[MAXHOSTNAMELEN + 1], *gssname, gssn[MAXHOSTNAMELEN + 50];
- mntflags = 0;
iov = NULL;
iovlen = 0;
memset(errmsg, 0, sizeof(errmsg));
@@ -427,10 +426,10 @@ main(int argc, char *argv[])
*/
osversion = getosreldate();
if (osversion >= 702100) {
- if (nmount(iov, iovlen, mntflags))
+ if (nmount(iov, iovlen, 0))
err(1, "%s, %s", mntpath, errmsg);
} else {
- if (fallback_mount(iov, iovlen, mntflags))
+ if (fallback_mount(iov, iovlen))
err(1, "%s, %s", mntpath, errmsg);
}
@@ -473,7 +472,7 @@ copyopt(struct iovec **newiov, int *newiovlen,
* parameters. It should be eventually be removed.
*/
static int
-fallback_mount(struct iovec *iov, int iovlen, int mntflags)
+fallback_mount(struct iovec *iov, int iovlen)
{
struct nfs_args args = {
.version = NFS_ARGSVERSION,
@@ -663,7 +662,7 @@ fallback_mount(struct iovec *iov, int iovlen, int mntflags)
copyopt(&newiov, &newiovlen, iov, iovlen, "fspath");
copyopt(&newiov, &newiovlen, iov, iovlen, "errmsg");
- return nmount(newiov, newiovlen, mntflags);
+ return nmount(newiov, newiovlen, 0);
}
static int
diff --git a/sbin/mount_nullfs/mount_nullfs.c b/sbin/mount_nullfs/mount_nullfs.c
index aaf66e5..e08599f 100644
--- a/sbin/mount_nullfs/mount_nullfs.c
+++ b/sbin/mount_nullfs/mount_nullfs.c
@@ -68,12 +68,11 @@ main(int argc, char *argv[])
char source[MAXPATHLEN];
char target[MAXPATHLEN];
char errmsg[255];
- int ch, mntflags, iovlen;
+ int ch, iovlen;
char nullfs[] = "nullfs";
iov = NULL;
iovlen = 0;
- mntflags = 0;
errmsg[0] = '\0';
while ((ch = getopt(argc, argv, "o:")) != -1)
switch(ch) {
@@ -111,7 +110,7 @@ main(int argc, char *argv[])
build_iovec(&iov, &iovlen, "fspath", source, (size_t)-1);
build_iovec(&iov, &iovlen, "target", target, (size_t)-1);
build_iovec(&iov, &iovlen, "errmsg", errmsg, sizeof(errmsg));
- if (nmount(iov, iovlen, mntflags) < 0) {
+ if (nmount(iov, iovlen, 0) < 0) {
if (errmsg[0] != 0)
err(1, "%s: %s", source, errmsg);
else
diff --git a/sbin/mount_reiserfs/Makefile b/sbin/mount_reiserfs/Makefile
deleted file mode 100644
index 686629f..0000000
--- a/sbin/mount_reiserfs/Makefile
+++ /dev/null
@@ -1,13 +0,0 @@
-# $FreeBSD$
-
-PROG = mount_reiserfs
-SRCS = mount_reiserfs.c getmntopts.c
-MAN = mount_reiserfs.8
-
-# mount_reiserfs needs mntopts.h and getmntopts.c from src/sbin/mount/
-MOUNT ?= ${.CURDIR}/../mount
-CFLAGS += -I${MOUNT}
-
-.PATH: ${MOUNT}
-
-.include <bsd.prog.mk>
diff --git a/sbin/mount_reiserfs/mount_reiserfs.8 b/sbin/mount_reiserfs/mount_reiserfs.8
deleted file mode 100644
index f246475..0000000
--- a/sbin/mount_reiserfs/mount_reiserfs.8
+++ /dev/null
@@ -1,90 +0,0 @@
-.\"
-.\" Copyright (c) 1993,1994 Christopher G. Demetriou
-.\" Copyright (c) 1999 Semen Ustimenko
-.\" Copyright (c) 2005 Jean-Sébastien Pédron
-.\" All rights reserved.
-.\"
-.\" Redistribution and use in source and binary forms, with or without
-.\" modification, are permitted provided that the following conditions
-.\" are met:
-.\" 1. Redistributions of source code must retain the above copyright
-.\" notice, this list of conditions and the following disclaimer.
-.\" 2. Redistributions in binary form must reproduce the above copyright
-.\" notice, this list of conditions and the following disclaimer in the
-.\" documentation and/or other materials provided with the distribution.
-.\" 3. All advertising materials mentioning features or use of this software
-.\" must display the following acknowledgment:
-.\" This product includes software developed by Christopher G. Demetriou.
-.\" 3. The name of the author may not be used to endorse or promote products
-.\" derived from this software without specific prior written permission
-.\"
-.\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-.\" IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-.\" OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-.\" IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-.\" INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-.\" NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-.\" DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-.\" THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-.\" (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-.\" THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-.\"
-.\" $FreeBSD$
-.\"
-.Dd February 3, 2005
-.Dt MOUNT_REISERFS 8
-.Os
-.Sh NAME
-.Nm mount_reiserfs
-.Nd "mount a ReiserFS file system"
-.Sh SYNOPSIS
-.Nm
-.Ar special
-.Ar node
-.Sh DESCRIPTION
-The
-.Nm
-utility attaches the ReiserFS file system residing on the device
-.Ar special
-to the global file system namespace at the location
-indicated by
-.Ar node .
-.Pp
-This command is normally executed by
-.Xr mount 8
-at boot time, but can be used by any user to mount a
-ReiserFS file system on any directory that they own (provided,
-of course, that they have appropriate access to the device that
-contains the file system).
-.Sh EXAMPLES
-To mount a ReiserFS volume located in
-.Pa /dev/ad1s1 :
-.Pp
-.Dl "mount_reiserfs /dev/ad1s1 /mnt"
-.Sh SEE ALSO
-.Xr mount 2 ,
-.Xr unmount 2 ,
-.Xr fstab 5 ,
-.Xr mount 8
-.Sh HISTORY
-The
-.Nm
-utility first appeared in
-.Fx 6.0 .
-.Sh AUTHORS
-.An -nosplit
-The ReiserFS kernel implementation was written by
-.An Hans Reiser
-.Pq Pa http://www.namesys.com/ ,
-and ported to
-.Fx
-by
-.An Jean-S\['e]bastien P\['e]dron Aq dumbbell@FreeBSD.org .
-.Pp
-The
-.Nm
-utility and manual were written by
-.An Jean-S\['e]bastien P\['e]dron Aq dumbbell@FreeBSD.org .
-.Sh CAVEATS
-This utility is primarily used for read access to a ReiserFS volume.
-Writing to a volume is currently unsupported.
diff --git a/sbin/mount_reiserfs/mount_reiserfs.c b/sbin/mount_reiserfs/mount_reiserfs.c
deleted file mode 100644
index bf62526..0000000
--- a/sbin/mount_reiserfs/mount_reiserfs.c
+++ /dev/null
@@ -1,108 +0,0 @@
-/*-
- * Copyright (c) 2005 Jean-Sébastien Pédron
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * $FreeBSD$
- */
-
-#include <sys/param.h>
-#include <sys/mount.h>
-#include <sys/uio.h>
-
-#include <err.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sysexits.h>
-#include <unistd.h>
-
-#include "mntopts.h"
-
-struct mntopt mopts[] = {
- MOPT_STDOPTS,
- MOPT_END
-};
-
-void usage(void);
-
-int
-main(int argc, char *argv[])
-{
- struct iovec *iov;
- int ch, mntflags, iovlen;
- char *dev, *dir, mntpath[MAXPATHLEN];
- char fstype[] = "reiserfs";
-
- mntflags = 0;
- while ((ch = getopt(argc, argv, "o:")) != -1) {
- switch(ch) {
- case 'o':
- getmntopts(optarg, mopts, &mntflags, 0);
- break;
- case '?':
- default:
- usage();
- }
- }
- argc -= optind;
- argv += optind;
-
- if (argc != 2)
- usage();
-
- dev = argv[0];
- dir = argv[1];
-
- /*
- * Resolve the mountpoint with realpath(3) and remove unnecessary
- * slashes from the devicename if there are any.
- */
- if (checkpath(dir, mntpath) != 0)
- err(EX_USAGE, "%s", mntpath);
- (void)rmslashes(dev, dev);
-
- /* Read-only support for now */
- mntflags |= MNT_RDONLY;
-
- /* Prepare the options vector for nmount(). build_iovec() is declared
- * in mntopts.h. */
- iov = NULL;
- iovlen = 0;
- build_iovec(&iov, &iovlen, "fstype", fstype, (size_t)-1);
- build_iovec(&iov, &iovlen, "fspath", mntpath, (size_t)-1);
- build_iovec(&iov, &iovlen, "from", dev, (size_t)-1);
-
- if (nmount(iov, iovlen, mntflags) < 0)
- err(EX_OSERR, "%s", dev);
-
- exit(0);
-}
-
-void
-usage(void)
-{
- fprintf(stderr,
- "usage: mount_reiserfs [-o options] special node\n");
- exit(EX_USAGE);
-}
diff --git a/sbin/mount_std/Makefile b/sbin/mount_std/Makefile
deleted file mode 100644
index 44ff9ef..0000000
--- a/sbin/mount_std/Makefile
+++ /dev/null
@@ -1,23 +0,0 @@
-# @(#)Makefile 8.2 (Berkeley) 3/27/94
-# $FreeBSD$
-
-PROG= mount_std
-SRCS= mount_std.c getmntopts.c
-MAN= mount_std.8
-MLINKS= mount_std.8 mount_devfs.8 \
- mount_std.8 mount_fdescfs.8 \
- mount_std.8 mount_linprocfs.8 \
- mount_std.8 mount_procfs.8
-
-MOUNT= ${.CURDIR}/../mount
-CFLAGS+= -I${MOUNT}
-WARNS?= 3
-
-.PATH: ${MOUNT}
-
-LINKS= ${BINDIR}/mount_std ${BINDIR}/mount_devfs \
- ${BINDIR}/mount_std ${BINDIR}/mount_fdescfs \
- ${BINDIR}/mount_std ${BINDIR}/mount_linprocfs \
- ${BINDIR}/mount_std ${BINDIR}/mount_procfs
-
-.include <bsd.prog.mk>
diff --git a/sbin/mount_std/mount_std.8 b/sbin/mount_std/mount_std.8
deleted file mode 100644
index 0aeeba8..0000000
--- a/sbin/mount_std/mount_std.8
+++ /dev/null
@@ -1,167 +0,0 @@
-.\"
-.\" Copyright (c) 1992, 1993, 1994
-.\" The Regents of the University of California. All rights reserved.
-.\" All rights reserved.
-.\"
-.\" This code is derived from software donated to Berkeley by
-.\" Jan-Simon Pendry.
-.\"
-.\" Redistribution and use in source and binary forms, with or without
-.\" modification, are permitted provided that the following conditions
-.\" are met:
-.\" 1. Redistributions of source code must retain the above copyright
-.\" notice, this list of conditions and the following disclaimer.
-.\" 2. Redistributions in binary form must reproduce the above copyright
-.\" notice, this list of conditions and the following disclaimer in the
-.\" documentation and/or other materials provided with the distribution.
-.\" 4. Neither the name of the University nor the names of its contributors
-.\" may be used to endorse or promote products derived from this software
-.\" without specific prior written permission.
-.\"
-.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
-.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
-.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
-.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
-.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
-.\" SUCH DAMAGE.
-.\"
-.\" $FreeBSD$
-.\"
-.Dd November 26, 2004
-.Dt MOUNT_STD 8
-.Os
-.Sh NAME
-.Nm mount_std ,
-.Nm mount_devfs ,
-.Nm mount_fdescfs ,
-.Nm mount_linprocfs ,
-.Nm mount_procfs
-.Nd mount
-.Dq standard
-file systems
-.Sh SYNOPSIS
-.Nm mount_ Ns Ar fsname
-.Op Fl o Ar options
-.Ar "fs"
-.Ar mount_point
-.Sh DESCRIPTION
-The
-.Nm
-utility is a generic mechanism for attaching ``standard'' file systems to
-the file system.
-The
-.Nm
-utility currently supports the following file systems:
-.Nm devfs ,
-.Nm fdescfs ,
-.Nm linprocfs
-and
-.Nm procfs .
-A ``standard'' file system is one which:
-.Bl -enum -offset indent
-.It
-accepts only the standard
-.Fl o
-options
-.Dq ro ,
-.Dq rw ,
-.Dq noexec ,
-.Dq nosuid ,
-and
-.Dq union .
-.It
-has a kernel file system module name the same as its user-visible name.
-.It
-requires no other special processing on the part of the
-.Nm
-utility.
-.El
-.Pp
-The options are as follows:
-.Bl -tag -width indent
-.It Fl o
-Options are specified with a
-.Fl o
-flag followed by a comma separated string of options.
-See the
-.Xr mount 8
-man page for possible options and their meanings.
-.El
-.Pp
-The
-.Nm
-utility examines its zeroth command-line argument (the name by which
-it was called) to determine the type of file system to be mounted.
-If
-it is called by a name which does not end in
-.Dq Li _ Ns Ar fsname ,
-.Nm
-will assume (for compatibility
-with
-.Xr mount 8 )
-that the zeroth argument contains only the name of the file system type.
-The
-.Nm
-utility is normally installed with appropriate links to commands for
-the distributed file systems which can be mounted in this way;
-for information on the function of each file system, see the manual page
-for that specific
-.Nm mount_ Ns Ar fsname
-utility.
-.Pp
-Refer to the following manual pages for detailed information
-on these file systems:
-.Xr devfs 5 ,
-.Xr fdescfs 5 ,
-.Xr linprocfs 5
-and
-.Xr procfs 5 .
-.Sh DIAGNOSTICS
-.Bl -diag
-.It argv[0] must end in _fsname
-The
-.Nm
-utility was called with a zeroth argument of
-.Dq Li mount_std .
-.It %s file system not available
-The specified file system type was not present in the kernel and no
-loadable module for it was found.
-.El
-.Sh SEE ALSO
-.Xr mount 2 ,
-.Xr unmount 2 ,
-.Xr getvfsbyname 3 ,
-.Xr devfs 5 ,
-.Xr fdescfs 5 ,
-.Xr fstab 5 ,
-.Xr linprocfs 5 ,
-.Xr procfs 5 ,
-.Xr mount 8
-.Sh HISTORY
-The
-.Nm
-utility first appeared in
-.Fx 2.2 .
-Loadable file system modules first appeared in
-.Fx 2.0 .
-The
-.Dq fdescfs
-and
-.Dq procfs
-file system types first appeared in
-.Fx 2.0 ;
-the
-.Dq devfs
-file system type first appeared in
-.Fx 2.2 ;
-the
-.Dq linprocfs
-file system type first appeared in
-.Fx 4.0 .
-.Sh CAVEATS
-None of the ``standard'' file systems may be NFS-exported.
diff --git a/sbin/mount_std/mount_std.c b/sbin/mount_std/mount_std.c
deleted file mode 100644
index c7f1643..0000000
--- a/sbin/mount_std/mount_std.c
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
- * Copyright (c) 1990, 1992 Jan-Simon Pendry
- * Copyright (c) 1992, 1993, 1994
- * The Regents of the University of California. All rights reserved.
- *
- * This code is derived from software contributed to Berkeley by
- * Jan-Simon Pendry.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 4. Neither the name of the University nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-#ifndef lint
-static const char copyright[] =
-"@(#) Copyright (c) 1992, 1993, 1994\n\
- The Regents of the University of California. All rights reserved.\n";
-#endif /* not lint */
-
-#include <sys/cdefs.h>
-__FBSDID("$FreeBSD$");
-
-#include <sys/param.h>
-#include <sys/mount.h>
-#include <sys/uio.h>
-
-#include <err.h>
-#include <errno.h>
-#include <stdio.h>
-#include <signal.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sysexits.h>
-#include <unistd.h>
-
-#include "mntopts.h"
-
-static struct mntopt mopts[] = {
- MOPT_STDOPTS,
- MOPT_END
-};
-
-static char *fsname;
-static volatile sig_atomic_t caughtsig;
-
-static void usage(void) __dead2;
-
-static void
-catchsig(int s __unused)
-{
- caughtsig = 1;
-}
-
-int
-main(int argc, char *argv[])
-{
- int ch, mntflags;
- char mntpath[MAXPATHLEN];
- struct iovec iov[4];
- int error;
-
- /*
- * XXX
- * mount(8) calls the mount programs with an argv[0] which is
- * /just/ the file system name. So, if there is no underscore
- * in argv[0], we assume that we are being called from mount(8)
- * and that argv[0] is thus the name of the file system type.
- */
- fsname = strrchr(argv[0], '_');
- if (fsname) {
- if (strcmp(fsname, "_std") == 0)
- errx(EX_USAGE, "argv[0] must end in _fsname");
- fsname++;
- } else {
- fsname = argv[0];
- }
-
- mntflags = 0;
- while ((ch = getopt(argc, argv, "o:")) != -1)
- switch (ch) {
- case 'o':
- getmntopts(optarg, mopts, &mntflags, 0);
- break;
- case '?':
- default:
- usage();
- }
- argc -= optind;
- argv += optind;
-
- if (argc != 2)
- usage();
-
- /* resolve the mountpoint with realpath(3) */
- if (checkpath(argv[1], mntpath) != 0)
- err(EX_USAGE, "%s", mntpath);
-
- iov[0].iov_base = "fstype";
- iov[0].iov_len = sizeof("fstype");
- iov[1].iov_base = fsname;
- iov[1].iov_len = strlen(iov[1].iov_base) + 1;
- iov[2].iov_base = "fspath";
- iov[2].iov_len = sizeof("fspath");
- iov[3].iov_base = mntpath;
- iov[3].iov_len = strlen(mntpath) + 1;
-
- /*
- * nmount(2) would kill us with SIGSYS if the kernel doesn't have it.
- * This design bug is inconvenient. We must catch the signal and not
- * just ignore it because of a plain bug: nmount(2) would return
- * EINVAL instead of the correct ENOSYS if the kernel doesn't have it
- * and we don't let the signal kill us. EINVAL is too ambiguous.
- * This bug in 4.4BSD-Lite1 was fixed in 4.4BSD-Lite2 but is still in
- * FreeBSD-5.0.
- */
- signal(SIGSYS, catchsig);
- error = nmount(iov, 4, mntflags);
- signal(SIGSYS, SIG_DFL);
-
- /*
- * Try with the old mount syscall in the case
- * this file system has not been converted yet,
- * or the user didn't recompile his kernel.
- */
- if (error && (errno == EOPNOTSUPP || errno == ENOSYS || caughtsig))
- error = mount(fsname, mntpath, mntflags, NULL);
-
- if (error)
- err(EX_OSERR, NULL);
- exit(0);
-}
-
-void
-usage(void)
-{
- (void)fprintf(stderr,
- "usage: mount_%s [-o options] what_to_mount mount_point\n",
- fsname);
- exit(EX_USAGE);
-}
diff --git a/sbin/mount_udf/Makefile b/sbin/mount_udf/Makefile
index c5351fd..06ad9a7 100644
--- a/sbin/mount_udf/Makefile
+++ b/sbin/mount_udf/Makefile
@@ -9,7 +9,6 @@ LDADD= -lkiconv
MOUNT= ${.CURDIR}/../mount
CFLAGS+= -I${MOUNT} -I${.CURDIR}/../../sys
.PATH: ${MOUNT}
-WARNS?= 1
# Needs to be dynamically linked for optional dlopen() access to
# userland libiconv
diff --git a/sbin/mount_udf/mount_udf.c b/sbin/mount_udf/mount_udf.c
index 8ee1286..01fa44f 100644
--- a/sbin/mount_udf/mount_udf.c
+++ b/sbin/mount_udf/mount_udf.c
@@ -73,18 +73,19 @@ void usage(void);
int
main(int argc, char **argv)
{
- struct iovec iov[12];
- int ch, i, mntflags, opts, udf_flags;
- char *dev, *dir, mntpath[MAXPATHLEN];
- char *cs_disk, *cs_local;
- int verbose;
+ char mntpath[MAXPATHLEN];
+ char fstype[] = "udf";
+ struct iovec *iov;
+ char *cs_disk, *cs_local, *dev, *dir;
+ int ch, i, iovlen, mntflags, udf_flags, verbose;
- i = mntflags = opts = udf_flags = verbose = 0;
+ i = iovlen = mntflags = udf_flags = verbose = 0;
cs_disk = cs_local = NULL;
+ iov = NULL;
while ((ch = getopt(argc, argv, "o:vC:")) != -1)
switch (ch) {
case 'o':
- getmntopts(optarg, mopts, &mntflags, &opts);
+ getmntopts(optarg, mopts, &mntflags, NULL);
break;
case 'v':
verbose++;
@@ -120,32 +121,13 @@ main(int argc, char **argv)
*/
mntflags |= MNT_RDONLY;
- iov[i].iov_base = "fstype";
- iov[i++].iov_len = sizeof("fstype");
- iov[i].iov_base = "udf";
- iov[i].iov_len = strlen(iov[i].iov_base) + 1;
- i++;
- iov[i].iov_base = "fspath";
- iov[i++].iov_len = sizeof("fspath");
- iov[i].iov_base = mntpath;
- iov[i++].iov_len = strlen(mntpath) + 1;
- iov[i].iov_base = "from";
- iov[i++].iov_len = sizeof("from");
- iov[i].iov_base = dev;
- iov[i++].iov_len = strlen(dev) + 1;
- iov[i].iov_base = "flags";
- iov[i++].iov_len = sizeof("flags");
- iov[i].iov_base = &udf_flags;
- iov[i++].iov_len = sizeof(udf_flags);
+ build_iovec(&iov, &iovlen, "fstype", fstype, (size_t)-1);
+ build_iovec(&iov, &iovlen, "fspath", mntpath, (size_t)-1);
+ build_iovec(&iov, &iovlen, "from", dev, (size_t)-1);
+ build_iovec(&iov, &iovlen, "flags", &udf_flags, sizeof(udf_flags));
if (udf_flags & UDFMNT_KICONV) {
- iov[i].iov_base = "cs_disk";
- iov[i++].iov_len = sizeof("cs_disk");
- iov[i].iov_base = cs_disk;
- iov[i++].iov_len = strlen(cs_disk) + 1;
- iov[i].iov_base = "cs_local";
- iov[i++].iov_len = sizeof("cs_local");
- iov[i].iov_base = cs_local;
- iov[i++].iov_len = strlen(cs_local) + 1;
+ build_iovec(&iov, &iovlen, "cs_disk", cs_disk, (size_t)-1);
+ build_iovec(&iov, &iovlen, "cs_local", cs_local, (size_t)-1);
}
-	if (nmount(iov, i, mntflags) < 0)
+	if (nmount(iov, iovlen, mntflags) < 0)
err(1, "%s", dev);
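The build_iovec() helper used in the conversions above (declared in
sbin/mount's mntopts.h) appends one name/value pair to a growing iovec
array that is later handed to nmount(2); a length of (size_t)-1 means
"use strlen(value) + 1".  A minimal sketch of the pattern under that
assumption, with a hypothetical file system type and paths:

	struct iovec *iov = NULL;
	int iovlen = 0;
	char fstype[] = "examplefs";	/* hypothetical fstype */
	char fspath[] = "/mnt";
	char from[] = "/dev/da0s1";

	build_iovec(&iov, &iovlen, "fstype", fstype, (size_t)-1);
	build_iovec(&iov, &iovlen, "fspath", fspath, (size_t)-1);
	build_iovec(&iov, &iovlen, "from", from, (size_t)-1);
	if (nmount(iov, iovlen, MNT_RDONLY) < 0)
		err(1, "nmount");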
diff --git a/sbin/mount_unionfs/mount_unionfs.c b/sbin/mount_unionfs/mount_unionfs.c
index c42bf04..1026ecf 100644
--- a/sbin/mount_unionfs/mount_unionfs.c
+++ b/sbin/mount_unionfs/mount_unionfs.c
@@ -129,7 +129,7 @@ int
main(int argc, char *argv[])
{
struct iovec *iov;
- int ch, mntflags, iovlen;
+ int ch, iovlen;
char source [MAXPATHLEN], target[MAXPATHLEN], errmsg[255];
char uid_str[20], gid_str[20];
char fstype[] = "unionfs";
@@ -137,7 +137,6 @@ main(int argc, char *argv[])
iov = NULL;
iovlen = 0;
- mntflags = 0;
memset(errmsg, 0, sizeof(errmsg));
while ((ch = getopt(argc, argv, "bo:")) != -1) {
@@ -190,7 +189,7 @@ main(int argc, char *argv[])
build_iovec(&iov, &iovlen, "from", target, (size_t)-1);
build_iovec(&iov, &iovlen, "errmsg", errmsg, sizeof(errmsg));
- if (nmount(iov, iovlen, mntflags))
+ if (nmount(iov, iovlen, 0))
err(EX_OSERR, "%s: %s", source, errmsg);
exit(0);
}
diff --git a/share/man/man4/eventtimers.4 b/share/man/man4/eventtimers.4
index 4db4420..656c8e0 100644
--- a/share/man/man4/eventtimers.4
+++ b/share/man/man4/eventtimers.4
@@ -143,14 +143,6 @@ By default this options is disabled.
If chosen timer is per-CPU
and runs in periodic mode, this option has no effect - all interrupts are
always generating.
-.It Va kern.eventtimer.activetick
-makes each CPU to receive all kinds of timer interrupts when they are busy.
-Disabling it allows to skip some
-.Fn hardclock
-calls in some cases.
-By default this options is enabled.
-If chosen timer is per-CPU, this option has no effect - all interrupts are
-always generating, as timer reprogramming is too expensive for that case.
.El
.Sh SEE ALSO
.Xr apic 4 ,
diff --git a/share/man/man4/zyd.4 b/share/man/man4/zyd.4
index ba8f573..5a1f0a5 100644
--- a/share/man/man4/zyd.4
+++ b/share/man/man4/zyd.4
@@ -32,7 +32,7 @@
.\" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
.\" THE POSSIBILITY OF SUCH DAMAGE.
.\"
-.Dd October 2, 2009
+.Dd March 7, 2013
.Dt ZYD 4
.Os
.Sh NAME
@@ -121,6 +121,7 @@ driver:
.It X-Micro XWL-11GUZX
.It Yakumo QuickWLAN USB
.It Zonet ZEW2501
+.It ZyXEL ZyAIR G-202
.It ZyXEL ZyAIR G-220
.El
.Sh EXAMPLES
diff --git a/share/man/man7/development.7 b/share/man/man7/development.7
index 3da1858..58e6892 100644
--- a/share/man/man7/development.7
+++ b/share/man/man7/development.7
@@ -23,7 +23,7 @@
.\"
.\" $FreeBSD$
.\"
-.Dd September 19, 2012
+.Dd March 7, 2013
.Dt DEVELOPMENT 7
.Os
.Sh NAME
@@ -109,9 +109,9 @@ your clients automatically pick up the changes.
.Bd -literal -offset 4n
mkdir /FreeBSD
cd /FreeBSD
-svn co svn://svn.freebsd.org/ports/head ports
-svn co svn://svn.freebsd.org/doc/head doc
-svn co svn://svn.freebsd.org/base/head src
+svn co https://svn.freebsd.org/ports/head ports
+svn co https://svn.freebsd.org/doc/head doc
+svn co https://svn.freebsd.org/base/head src
cd /usr
rm -rf src
ln -s /FreeBSD/src src
diff --git a/share/man/man7/ports.7 b/share/man/man7/ports.7
index 35a2231..1243655 100644
--- a/share/man/man7/ports.7
+++ b/share/man/man7/ports.7
@@ -490,27 +490,17 @@ single file
.Bl -tag -width ".Pa /usr/ports/Mk/bsd.port.mk" -compact
.It Pa /usr/ports
The default ports directory
-.No ( Fx
-and
-.Ox ) .
-.It Pa /usr/pkgsrc
-The default ports directory
-.Pq Nx .
.It Pa /usr/ports/Mk/bsd.port.mk
The big Kahuna.
.El
.Sh SEE ALSO
.Xr make 1 ,
-.Xr pkg_add 1 ,
-.Xr pkg_create 1 ,
-.Xr pkg_delete 1 ,
-.Xr pkg_info 1 ,
-.Xr pkg_version 1
+.Xr pkg 8 ,
+.Xr portsnap 8
.Pp
The following are part of the ports collection:
.Pp
.Xr portaudit 1 ,
-.Xr portcheckout 1 ,
.Xr portlint 1
.Rs
.%B "The FreeBSD Handbook"
diff --git a/share/man/man9/Makefile b/share/man/man9/Makefile
index 8e96d64..fa7cc8a 100644
--- a/share/man/man9/Makefile
+++ b/share/man/man9/Makefile
@@ -1196,9 +1196,13 @@ MLINKS+=signal.9 cursig.9 \
signal.9 SIG_STOPSIGMASK.9 \
signal.9 trapsignal.9
MLINKS+=sleep.9 msleep.9 \
+ sleep.9 msleep_sbt.9 \
sleep.9 msleep_spin.9 \
+ sleep.9 msleep_spin_sbt.9 \
sleep.9 pause.9 \
+ sleep.9 pause_sbt.9 \
sleep.9 tsleep.9 \
+ sleep.9 tsleep_sbt.9 \
sleep.9 wakeup.9 \
sleep.9 wakeup_one.9
MLINKS+=sleepqueue.9 init_sleepqueues.9 \
@@ -1213,6 +1217,7 @@ MLINKS+=sleepqueue.9 init_sleepqueues.9 \
sleepqueue.9 sleepq_release.9 \
sleepqueue.9 sleepq_remove.9 \
sleepqueue.9 sleepq_set_timeout.9 \
+ sleepqueue.9 sleepq_set_timeout_sbt.9 \
sleepqueue.9 sleepq_signal.9 \
sleepqueue.9 sleepq_timedwait.9 \
sleepqueue.9 sleepq_timedwait_sig.9 \
@@ -1335,6 +1340,9 @@ MLINKS+=timeout.9 callout.9 \
timeout.9 callout_init_rw.9 \
timeout.9 callout_pending.9 \
timeout.9 callout_reset.9 \
+ timeout.9 callout_reset_sbt.9 \
+ timeout.9 callout_reset_on.9 \
+ timeout.9 callout_reset_sbt_on.9 \
timeout.9 callout_schedule.9 \
timeout.9 callout_stop.9 \
timeout.9 untimeout.9
diff --git a/share/man/man9/condvar.9 b/share/man/man9/condvar.9
index 7b89d46..4a4e874 100644
--- a/share/man/man9/condvar.9
+++ b/share/man/man9/condvar.9
@@ -26,7 +26,7 @@
.\"
.\" $FreeBSD$
.\"
-.Dd June 5, 2007
+.Dd February 19, 2013
.Dt CONDVAR 9
.Os
.Sh NAME
@@ -37,7 +37,9 @@
.Nm cv_wait_sig ,
.Nm cv_wait_unlock ,
.Nm cv_timedwait ,
+.Nm cv_timedwait_sbt ,
.Nm cv_timedwait_sig ,
+.Nm cv_timedwait_sig_sbt ,
.Nm cv_signal ,
.Nm cv_broadcast ,
.Nm cv_broadcastpri ,
@@ -60,7 +62,13 @@
.Ft int
.Fn cv_timedwait "struct cv *cvp" "lock" "int timo"
.Ft int
+.Fn cv_timedwait_sbt "struct cv *cvp" "lock" "sbintime_t sbt" \
+"sbintime_t pr" "int flags"
+.Ft int
.Fn cv_timedwait_sig "struct cv *cvp" "lock" "int timo"
+.Ft int
+.Fn cv_timedwait_sig_sbt "struct cv *cvp" "lock" "sbintime_t sbt" \
+"sbintime_t pr" "int flags"
.Ft void
.Fn cv_signal "struct cv *cvp"
.Ft void
@@ -191,6 +199,25 @@ if a signal is caught, or 0 if signaled via
.Fn cv_signal
or
.Fn cv_broadcast .
+.Pp
+The
+.Fn cv_timedwait_sbt
+and
+.Fn cv_timedwait_sig_sbt
+functions take an
+.Fa sbt
+argument instead of
+.Fa timo ,
+allowing the relative or absolute unblock time to be specified with
+higher resolution in the form of
+.Vt sbintime_t .
+The
+.Fa pr
+parameter specifies the desired absolute event precision, and the
+.Fa flags
+parameter passes additional
+.Fn callout_reset_sbt
+flags.
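As an illustrative sketch (the sc_* softc fields and the 100ms/25ms
values are hypothetical, not taken from the patch), a driver could wait
on a condition variable with a timeout while allowing the wakeup to be
coalesced with nearby timer events:

	int error;

	mtx_lock(&sc->sc_mtx);
	while (sc->sc_ready == 0) {
		error = cv_timedwait_sbt(&sc->sc_cv, &sc->sc_mtx,
		    100 * SBT_1MS, 25 * SBT_1MS, 0);
		if (error == EWOULDBLOCK)
			break;		/* timed out */
	}
	mtx_unlock(&sc->sc_mtx);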
.Sh RETURN VALUES
If successful,
.Fn cv_wait_sig ,
@@ -230,4 +257,5 @@ Timeout expired.
.Xr rwlock 9 ,
.Xr sema 9 ,
.Xr sleep 9 ,
-.Xr sx 9
+.Xr sx 9 ,
+.Xr timeout 9
diff --git a/share/man/man9/sleep.9 b/share/man/man9/sleep.9
index e65a189..4ac9d45 100644
--- a/share/man/man9/sleep.9
+++ b/share/man/man9/sleep.9
@@ -25,14 +25,18 @@
.\"
.\" $FreeBSD$
.\"
-.Dd December 12, 2009
+.Dd February 19, 2013
.Dt SLEEP 9
.Os
.Sh NAME
.Nm msleep ,
+.Nm msleep_sbt ,
.Nm msleep_spin ,
+.Nm msleep_spin_sbt ,
.Nm pause ,
+.Nm pause_sbt ,
.Nm tsleep ,
+.Nm tsleep_sbt ,
.Nm wakeup
.Nd wait for events
.Sh SYNOPSIS
@@ -42,11 +46,23 @@
.Ft int
.Fn msleep "void *chan" "struct mtx *mtx" "int priority" "const char *wmesg" "int timo"
.Ft int
+.Fn msleep_sbt "void *chan" "struct mtx *mtx" "int priority" \
+"const char *wmesg" "sbintime_t sbt" "sbintime_t pr" "int flags"
+.Ft int
.Fn msleep_spin "void *chan" "struct mtx *mtx" "const char *wmesg" "int timo"
+.Ft int
+.Fn msleep_spin_sbt "void *chan" "struct mtx *mtx" "const char *wmesg" \
+"sbintime_t sbt" "sbintime_t pr" "int flags"
.Ft void
.Fn pause "const char *wmesg" "int timo"
+.Ft void
+.Fn pause_sbt "const char *wmesg" "sbintime_t sbt" "sbintime_t pr" \
+ "int flags"
.Ft int
.Fn tsleep "void *chan" "int priority" "const char *wmesg" "int timo"
+.Ft int
+.Fn tsleep_sbt "void *chan" "int priority" "const char *wmesg" \
+"sbintime_t sbt" "sbintime_t pr" "int flags"
.Ft void
.Fn wakeup "void *chan"
.Ft void
@@ -148,6 +164,27 @@ If the timeout expires,
then the sleep function will return
.Er EWOULDBLOCK .
.Pp
+The
+.Fn msleep_sbt ,
+.Fn msleep_spin_sbt ,
+.Fn pause_sbt ,
+and
+.Fn tsleep_sbt
+functions take an
+.Fa sbt
+parameter instead of
+.Fa timo ,
+allowing the relative or absolute wakeup time to be specified with
+higher resolution in the form of
+.Vt sbintime_t .
+The
+.Fa pr
+parameter specifies the desired absolute event precision, and the
+.Fa flags
+parameter passes additional
+.Fn callout_reset_sbt
+flags.
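As an illustrative sketch (names and values hypothetical), a fixed 10ms
delay can be expressed directly, with an explicit 1ms precision so the
wakeup may be batched with nearby timer events:

	pause_sbt("delay", 10 * SBT_1MS, SBT_1MS, 0);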
+.Pp
Several of the sleep functions including
.Fn msleep ,
.Fn msleep_spin ,
@@ -301,7 +338,8 @@ A non-zero timeout was specified and the timeout expired.
.Xr mi_switch 9 ,
.Xr mtx_sleep 9 ,
.Xr rw_sleep 9 ,
-.Xr sx_sleep 9
+.Xr sx_sleep 9 ,
+.Xr timeout 9
.Sh HISTORY
The functions
.Fn sleep
diff --git a/share/man/man9/sleepqueue.9 b/share/man/man9/sleepqueue.9
index 22971d8..8557b9f 100644
--- a/share/man/man9/sleepqueue.9
+++ b/share/man/man9/sleepqueue.9
@@ -23,7 +23,7 @@
.\"
.\" $FreeBSD$
.\"
-.Dd January 8, 2010
+.Dd February 19, 2013
.Dt SLEEPQUEUE 9
.Os
.Sh NAME
@@ -41,6 +41,7 @@
.Nm sleepq_remove ,
.Nm sleepq_signal ,
.Nm sleepq_set_timeout ,
+.Nm sleepq_set_timeout_sbt ,
.Nm sleepq_sleepcnt ,
.Nm sleepq_timedwait ,
.Nm sleepq_timedwait_sig ,
@@ -79,6 +80,9 @@
.Fn sleepq_signal "void *wchan" "int flags" "int pri" "int queue"
.Ft void
.Fn sleepq_set_timeout "void *wchan" "int timo"
+.Ft void
+.Fn sleepq_set_timeout_sbt "void *wchan" "sbintime_t sbt" \
+"sbintime_t pr" "int flags"
.Ft u_int
.Fn sleepq_sleepcnt "void *wchan" "int queue"
.Ft int
@@ -231,6 +235,23 @@ The
.Fa timo
parameter should specify the timeout value in ticks.
.Pp
+The
+.Fn sleepq_set_timeout_sbt
+function takes an
+.Fa sbt
+argument instead of
+.Fa timo ,
+allowing the relative or absolute wakeup time to be specified with
+higher resolution in the form of
+.Vt sbintime_t .
+The
+.Fa pr
+parameter specifies the desired absolute event precision, and the
+.Fa flags
+parameter passes additional
+.Fn callout_reset_sbt
+flags.
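A rough sketch of a timed sleep using the sbt interface (wchan stands
for any suitable wait channel address; the 100ms/1ms values are
illustrative):

	int error;

	sleepq_lock(wchan);
	sleepq_add(wchan, NULL, "example", SLEEPQ_SLEEP, 0);
	sleepq_set_timeout_sbt(wchan, 100 * SBT_1MS, SBT_1MS, 0);
	error = sleepq_timedwait(wchan, 0);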
+.Pp
The current thread may be marked interruptible by calling
.Fn sleepq_catch_signals
with
@@ -400,4 +421,5 @@ than manipulating sleep queues directly.
.Xr condvar 9 ,
.Xr runqueue 9 ,
.Xr scheduler 9 ,
-.Xr sleep 9
+.Xr sleep 9 ,
+.Xr timeout 9
diff --git a/share/man/man9/timeout.9 b/share/man/man9/timeout.9
index c671467..c63c201 100644
--- a/share/man/man9/timeout.9
+++ b/share/man/man9/timeout.9
@@ -29,7 +29,7 @@
.\"
.\" $FreeBSD$
.\"
-.Dd November 1, 2012
+.Dd February 19, 2013
.Dt TIMEOUT 9
.Os
.Sh NAME
@@ -44,6 +44,9 @@
.Nm callout_reset ,
.Nm callout_reset_on ,
.Nm callout_reset_curcpu ,
+.Nm callout_reset_sbt ,
+.Nm callout_reset_sbt_on ,
+.Nm callout_reset_sbt_curcpu ,
.Nm callout_schedule ,
.Nm callout_schedule_on ,
.Nm callout_schedule_curcpu ,
@@ -82,6 +85,9 @@ struct callout_handle handle = CALLOUT_HANDLE_INITIALIZER(&handle);
.Fn callout_reset_on "struct callout *c" "int ticks" "timeout_t *func" \
"void *arg" "int cpu"
.Ft int
+.Fn callout_reset_sbt_on "struct callout *c" "sbintime_t sbt" \
+"sbintime_t pr" "timeout_t *func" "void *arg" "int cpu" "int flags"
+.Ft int
.Fn callout_reset_curcpu "struct callout *c" "int ticks" "timeout_t *func" \
"void *arg"
.Ft int
@@ -326,6 +332,33 @@ and
.Fn callout_schedule
but take an extra parameter specifying the target CPU for the callout.
.Pp
+The function
+.Fn callout_reset_sbt_on
+provides higher time resolution, taking a relative or absolute time and a
+precision instead of a relative tick count.
+If the specified time is in the past, it is silently adjusted to the present
+so that the handler runs as soon as possible.
+.Pp
+The following
+.Fa flags
+may be specified:
+.Bl -tag -width ".Dv C_DIRECT_EXEC"
+.It Dv C_ABSOLUTE
+Interpret the
+.Fa sbt
+argument as the absolute time of the event since boot; otherwise it is
+treated as a relative time.
+.It Dv C_DIRECT_EXEC
+Run the handler directly from hardware interrupt context instead of the
+softclock swi; this is faster, but handlers may use only spin mutexes for
+locking, and they must be fast because they run with absolute priority.
+.It Fn C_PREL
+Specifies the relative event time precision as the binary logarithm of the
+time interval divided by the acceptable deviation: 1 for 1/2, 2 for 1/4, etc.
+A smaller value allows more events to be aggregated into one timer
+interrupt, reducing processing overhead and power consumption.
+.El
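For instance (an illustrative sketch with a hypothetical softc and
handler), a 500ms callout that tolerates 1/8 of the interval as
deviation can be armed with the C_PREL flag instead of an explicit
precision argument:

	/* sc->sc_callout was set up earlier with callout_init(). */
	callout_reset_sbt(&sc->sc_callout, 500 * SBT_1MS, 0,
	    example_timer, sc, C_PREL(3));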
+.Pp
The functions
.Fn callout_reset_curcpu
and
diff --git a/sys/amd64/conf/GENERIC b/sys/amd64/conf/GENERIC
index 3988195..577cf4b 100644
--- a/sys/amd64/conf/GENERIC
+++ b/sys/amd64/conf/GENERIC
@@ -138,7 +138,10 @@ device sa # Sequential Access (tape etc)
device cd # CD
device pass # Passthrough device (direct ATA/SCSI access)
device ses # Enclosure Services (SES and SAF-TE)
-#device ctl # CAM Target Layer
+device ctl # CAM Target Layer
+options CTL_DISABLE # Disable CTL by default to save memory.
+ # Re-enable with kern.cam.ctl.disable=0 in
+ # /boot/loader.conf
# RAID controllers interfaced to the SCSI subsystem
device amr # AMI MegaRAID
diff --git a/sys/amd64/conf/NOTES b/sys/amd64/conf/NOTES
index a4371f7..7a41464 100644
--- a/sys/amd64/conf/NOTES
+++ b/sys/amd64/conf/NOTES
@@ -442,6 +442,13 @@ options SAFE_RNDTEST # enable rndtest support
#
# VirtIO support
+#
+# The virtio entry provides a generic bus for use by the device drivers.
+# It must be combined with an interface that communicates with the host.
+# Multiple such interfaces are defined by the VirtIO specification, but
+# FreeBSD currently supports only PCI.  Therefore, virtio_pci must be
+# statically compiled in or loaded as a module for the device drivers to
+# function.
+#
device virtio # Generic VirtIO bus (required)
device virtio_pci # VirtIO PCI Interface
device vtnet # VirtIO Ethernet device
diff --git a/sys/arm/arm/swtch.S b/sys/arm/arm/swtch.S
index 4c422e8..4257557 100644
--- a/sys/arm/arm/swtch.S
+++ b/sys/arm/arm/swtch.S
@@ -211,10 +211,12 @@ ENTRY(cpu_throw)
GET_PCPU(r6)
str r7, [r6, #PC_CURPCB]
+ add sp, sp, #4;
ldmfd sp!, {r4-r7, pc}
ENTRY(cpu_switch)
stmfd sp!, {r4-r7, lr}
+ sub sp, sp, #4;
mov r6, r2 /* Save the mutex */
.Lswitch_resume:
@@ -488,6 +490,7 @@ ENTRY(cpu_switch)
* Pull the registers that got pushed when either savectx() or
* cpu_switch() was called and return.
*/
+ add sp, sp, #4;
ldmfd sp!, {r4-r7, pc}
#ifdef DIAGNOSTIC
.Lswitch_bogons:
@@ -501,6 +504,7 @@ ENTRY(cpu_switch)
#endif
ENTRY(savectx)
stmfd sp!, {r4-r7, lr}
+ sub sp, sp, #4
/*
* r0 = pcb
*/
@@ -528,6 +532,7 @@ ENTRY(savectx)
bl _C_LABEL(vfp_store)
1:
#endif /* ARM_VFP_SUPPORT */
+ add sp, sp, #4;
ldmfd sp!, {r4-r7, pc}
ENTRY(fork_trampoline)
diff --git a/sys/arm/arm/vm_machdep.c b/sys/arm/arm/vm_machdep.c
index 97b8b76..29a213f 100644
--- a/sys/arm/arm/vm_machdep.c
+++ b/sys/arm/arm/vm_machdep.c
@@ -73,6 +73,12 @@ __FBSDID("$FreeBSD$");
#include <machine/md_var.h>
+/*
+ * struct switchframe must be a multiple of 8 for correct stack alignment
+ */
+CTASSERT(sizeof(struct switchframe) == 24);
+CTASSERT(sizeof(struct trapframe) == 76);
+
#ifndef NSFBUFS
#define NSFBUFS (512 + maxusers * 16)
#endif
@@ -131,8 +137,8 @@ cpu_fork(register struct thread *td1, register struct proc *p2,
pcb2->un_32.pcb32_sp = td2->td_kstack +
USPACE_SVC_STACK_TOP - sizeof(*pcb2);
pmap_activate(td2);
- td2->td_frame = tf =
- (struct trapframe *)pcb2->un_32.pcb32_sp - 1;
+ td2->td_frame = tf = (struct trapframe *)STACKALIGN(
+ pcb2->un_32.pcb32_sp - sizeof(struct trapframe));
*tf = *td1->td_frame;
sf = (struct switchframe *)tf - 1;
sf->sf_r4 = (u_int)fork_return;
@@ -142,6 +148,8 @@ cpu_fork(register struct thread *td1, register struct proc *p2,
tf->tf_r0 = 0;
tf->tf_r1 = 0;
pcb2->un_32.pcb32_sp = (u_int)sf;
+ KASSERT((pcb2->un_32.pcb32_sp & 7) == 0,
+ ("cpu_fork: Incorrect stack alignment"));
/* Setup to release spin count in fork_exit(). */
td2->td_md.md_spinlock_count = 1;
@@ -345,6 +353,8 @@ cpu_set_upcall(struct thread *td, struct thread *td0)
tf->tf_r0 = 0;
td->td_pcb->un_32.pcb32_sp = (u_int)sf;
td->td_pcb->un_32.pcb32_und_sp = td->td_kstack + USPACE_UNDEF_STACK_TOP;
+ KASSERT((td->td_pcb->un_32.pcb32_sp & 7) == 0,
+ ("cpu_set_upcall: Incorrect stack alignment"));
/* Setup to release spin count in fork_exit(). */
td->td_md.md_spinlock_count = 1;
@@ -438,6 +448,8 @@ cpu_set_fork_handler(struct thread *td, void (*func)(void *), void *arg)
sf->sf_r4 = (u_int)func;
sf->sf_r5 = (u_int)arg;
td->td_pcb->un_32.pcb32_sp = (u_int)sf;
+ KASSERT((td->td_pcb->un_32.pcb32_sp & 7) == 0,
+ ("cpu_set_fork_handler: Incorrect stack alignment"));
}
/*
diff --git a/sys/arm/broadcom/bcm2835/bcm2835_dma.c b/sys/arm/broadcom/bcm2835/bcm2835_dma.c
index 5e1c9dc..429ba7c 100644
--- a/sys/arm/broadcom/bcm2835/bcm2835_dma.c
+++ b/sys/arm/broadcom/bcm2835/bcm2835_dma.c
@@ -199,6 +199,7 @@ bcm_dma_reset(device_t dev, int ch)
/* Reset control block */
cb = sc->sc_dma_ch[ch].cb;
bzero(cb, sizeof(cb));
+ cb->info = INFO_WAIT_RESP;
}
static int
@@ -615,6 +616,7 @@ bcm_dma_intr(void *arg)
debug & DEBUG_ERROR_MASK, ch->ch);
bus_write_4(sc->sc_mem, BCM_DMA_DEBUG(ch->ch),
debug & DEBUG_ERROR_MASK);
+ bcm_dma_reset(sc->sc_dev, ch->ch);
}
if (cs & CS_INT) {
diff --git a/sys/arm/include/frame.h b/sys/arm/include/frame.h
index a24eccc..09ba55f 100644
--- a/sys/arm/include/frame.h
+++ b/sys/arm/include/frame.h
@@ -138,10 +138,14 @@ typedef struct irqframe {
} irqframe_t;
/*
- * Switch frame
+ * Switch frame.
+ *
+ * It is important that this structure is a multiple of 8 bytes so that
+ * the stack is correctly aligned when we create new threads.
*/
struct switchframe {
+ u_int pad; /* Used to pad the struct to a multiple of 8-bytes */
u_int sf_r4;
u_int sf_r5;
u_int sf_r6;
diff --git a/sys/cam/ctl/ctl.c b/sys/cam/ctl/ctl.c
index 768332e..1884c7b 100644
--- a/sys/cam/ctl/ctl.c
+++ b/sys/cam/ctl/ctl.c
@@ -78,6 +78,8 @@ __FBSDID("$FreeBSD$");
#include <cam/ctl/ctl_scsi_all.h>
#include <cam/ctl/ctl_error.h>
+#include "opt_ctl.h"
+
struct ctl_softc *control_softc = NULL;
/*
@@ -317,7 +319,11 @@ static int persis_offset;
static uint8_t ctl_pause_rtr;
static int ctl_is_single;
static int index_to_aps_page;
+#ifdef CTL_DISABLE
+int ctl_disable = 1;
+#else
int ctl_disable = 0;
+#endif
SYSCTL_NODE(_kern_cam, OID_AUTO, ctl, CTLFLAG_RD, 0, "CAM Target Layer");
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, disable, CTLFLAG_RDTUN, &ctl_disable, 0,
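A minimal sketch of the tunable-plus-sysctl idiom used above, with a
hypothetical knob name; in this vintage of the tree a TUNABLE_INT() is
typically paired with the CTLFLAG_RDTUN sysctl so the loader value is
fetched at boot:

	static int example_disable = 0;
	TUNABLE_INT("kern.example.disable", &example_disable);
	SYSCTL_INT(_kern, OID_AUTO, example_disable, CTLFLAG_RDTUN,
	    &example_disable, 0, "Disable the example subsystem at boot");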
diff --git a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/bpobj.c b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/bpobj.c
index 1920da4..3e55663 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/bpobj.c
+++ b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/bpobj.c
@@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012 by Delphix. All rights reserved.
+ * Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/bpobj.h>
@@ -414,6 +414,12 @@ bpobj_enqueue_subobj(bpobj_t *bpo, uint64_t subobj, dmu_tx_t *tx)
VERIFY3U(0, ==, dmu_buf_hold(bpo->bpo_os, subsubobjs,
0, FTAG, &subdb, 0));
+ /*
+ * Make sure that we are not asking dmu_write()
+ * to write more data than we have in our buffer.
+ */
+ VERIFY3U(subdb->db_size, >=,
+ numsubsub * sizeof (subobj));
dmu_write(bpo->bpo_os, bpo->bpo_phys->bpo_subobjs,
bpo->bpo_phys->bpo_num_subobjs * sizeof (subobj),
numsubsub * sizeof (subobj), subdb->db_data, tx);
diff --git a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu.c b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu.c
index fc606d5..60c5c7f 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu.c
+++ b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu.c
@@ -1711,7 +1711,7 @@ dmu_object_info_from_dnode(dnode_t *dn, dmu_object_info_t *doi)
doi->doi_checksum = dn->dn_checksum;
doi->doi_compress = dn->dn_compress;
doi->doi_physical_blocks_512 = (DN_USED_BYTES(dnp) + 256) >> 9;
- doi->doi_max_offset = (dnp->dn_maxblkid + 1) * dn->dn_datablksz;
+ doi->doi_max_offset = (dn->dn_maxblkid + 1) * dn->dn_datablksz;
doi->doi_fill_count = 0;
for (int i = 0; i < dnp->dn_nblkptr; i++)
doi->doi_fill_count += dnp->dn_blkptr[i].blk_fill;
diff --git a/sys/cddl/contrib/opensolaris/uts/intel/dtrace/fasttrap_isa.c b/sys/cddl/contrib/opensolaris/uts/intel/dtrace/fasttrap_isa.c
index d0db808..65991af 100644
--- a/sys/cddl/contrib/opensolaris/uts/intel/dtrace/fasttrap_isa.c
+++ b/sys/cddl/contrib/opensolaris/uts/intel/dtrace/fasttrap_isa.c
@@ -1034,6 +1034,7 @@ fasttrap_pid_probe(struct reg *rp)
#endif
PROC_LOCK(p);
+ _PHOLD(p);
pid = p->p_pid;
#if defined(sun)
pid_mtx = &cpu_core[CPU->cpu_id].cpuc_pid_lock;
@@ -1059,6 +1060,7 @@ fasttrap_pid_probe(struct reg *rp)
#if defined(sun)
mutex_exit(pid_mtx);
#endif
+ _PRELE(p);
PROC_UNLOCK(p);
return (-1);
}
@@ -1732,7 +1734,6 @@ fasttrap_pid_probe(struct reg *rp)
ASSERT(i <= sizeof (scratch));
-
#if defined(sun)
if (fasttrap_copyout(scratch, (char *)addr, i)) {
#else
@@ -1794,7 +1795,11 @@ done:
}
rp->r_rip = new_pc;
- set_regs(curthread, rp);
+
+ PROC_LOCK(p);
+ proc_write_regs(curthread, rp);
+ _PRELE(p);
+ PROC_UNLOCK(p);
return (0);
}
diff --git a/sys/conf/files.amd64 b/sys/conf/files.amd64
index 131a1e7..5e659b9 100644
--- a/sys/conf/files.amd64
+++ b/sys/conf/files.amd64
@@ -393,11 +393,11 @@ dev/virtio/virtio.c optional virtio
dev/virtio/virtqueue.c optional virtio
dev/virtio/virtio_bus_if.m optional virtio
dev/virtio/virtio_if.m optional virtio
-dev/virtio/pci/virtio_pci.c optional virtio_pci virtio pci
-dev/virtio/network/if_vtnet.c optional vtnet virtio
-dev/virtio/block/virtio_blk.c optional virtio_blk virtio
-dev/virtio/balloon/virtio_balloon.c optional virtio_balloon virtio
-dev/virtio/scsi/virtio_scsi.c optional virtio_scsi virtio scbus
+dev/virtio/pci/virtio_pci.c optional virtio_pci
+dev/virtio/network/if_vtnet.c optional vtnet
+dev/virtio/block/virtio_blk.c optional virtio_blk
+dev/virtio/balloon/virtio_balloon.c optional virtio_balloon
+dev/virtio/scsi/virtio_scsi.c optional virtio_scsi
isa/syscons_isa.c optional sc
isa/vga_isa.c optional vga
kern/kern_clocksource.c standard
diff --git a/sys/conf/files.i386 b/sys/conf/files.i386
index fd536af..f4c595a 100644
--- a/sys/conf/files.i386
+++ b/sys/conf/files.i386
@@ -374,11 +374,11 @@ dev/virtio/virtio.c optional virtio
dev/virtio/virtqueue.c optional virtio
dev/virtio/virtio_bus_if.m optional virtio
dev/virtio/virtio_if.m optional virtio
-dev/virtio/pci/virtio_pci.c optional virtio_pci virtio pci
-dev/virtio/network/if_vtnet.c optional vtnet virtio
-dev/virtio/block/virtio_blk.c optional virtio_blk virtio
-dev/virtio/balloon/virtio_balloon.c optional virtio_balloon virtio
-dev/virtio/scsi/virtio_scsi.c optional virtio_scsi virtio scbus
+dev/virtio/pci/virtio_pci.c optional virtio_pci
+dev/virtio/network/if_vtnet.c optional vtnet
+dev/virtio/block/virtio_blk.c optional virtio_blk
+dev/virtio/balloon/virtio_balloon.c optional virtio_balloon
+dev/virtio/scsi/virtio_scsi.c optional virtio_scsi
i386/acpica/acpi_machdep.c optional acpi
acpi_wakecode.o optional acpi \
dependency "$S/i386/acpica/acpi_wakecode.S assym.s" \
diff --git a/sys/conf/options b/sys/conf/options
index 75d0c97..ec9271d 100644
--- a/sys/conf/options
+++ b/sys/conf/options
@@ -329,6 +329,9 @@ SCSI_PT_DEFAULT_TIMEOUT opt_pt.h
# Options used only in cam/scsi/scsi_ses.c
SES_ENABLE_PASSTHROUGH opt_ses.h
+# Options used only in cam/ctl
+CTL_DISABLE opt_ctl.h
+
# Options used in dev/sym/ (Symbios SCSI driver).
SYM_SETUP_LP_PROBE_MAP opt_sym.h #-Low Priority Probe Map (bits)
# Allows the ncr to take precedence
diff --git a/sys/contrib/altq/altq/altq_hfsc.c b/sys/contrib/altq/altq/altq_hfsc.c
index bced145..f436bd5 100644
--- a/sys/contrib/altq/altq/altq_hfsc.c
+++ b/sys/contrib/altq/altq/altq_hfsc.c
@@ -104,14 +104,10 @@ static void update_ed(struct hfsc_class *, int);
static void update_d(struct hfsc_class *, int);
static void init_vf(struct hfsc_class *, int);
static void update_vf(struct hfsc_class *, int, u_int64_t);
-static ellist_t *ellist_alloc(void);
-static void ellist_destroy(ellist_t *);
static void ellist_insert(struct hfsc_class *);
static void ellist_remove(struct hfsc_class *);
static void ellist_update(struct hfsc_class *);
-struct hfsc_class *ellist_get_mindl(ellist_t *, u_int64_t);
-static actlist_t *actlist_alloc(void);
-static void actlist_destroy(actlist_t *);
+struct hfsc_class *hfsc_get_mindl(struct hfsc_if *, u_int64_t);
static void actlist_insert(struct hfsc_class *);
static void actlist_remove(struct hfsc_class *);
static void actlist_update(struct hfsc_class *);
@@ -204,12 +200,7 @@ hfsc_add_altq(struct pf_altq *a)
if (hif == NULL)
return (ENOMEM);
- hif->hif_eligible = ellist_alloc();
- if (hif->hif_eligible == NULL) {
- free(hif, M_DEVBUF);
- return (ENOMEM);
- }
-
+ TAILQ_INIT(&hif->hif_eligible);
hif->hif_ifq = &ifp->if_snd;
/* keep the state in pf_altq */
@@ -230,8 +221,6 @@ hfsc_remove_altq(struct pf_altq *a)
(void)hfsc_clear_interface(hif);
(void)hfsc_class_destroy(hif->hif_rootclass);
- ellist_destroy(hif->hif_eligible);
-
free(hif, M_DEVBUF);
return (0);
@@ -408,9 +397,7 @@ hfsc_class_create(struct hfsc_if *hif, struct service_curve *rsc,
if (cl->cl_q == NULL)
goto err_ret;
- cl->cl_actc = actlist_alloc();
- if (cl->cl_actc == NULL)
- goto err_ret;
+ TAILQ_INIT(&cl->cl_actc);
if (qlimit == 0)
qlimit = 50; /* use default */
@@ -544,8 +531,6 @@ hfsc_class_create(struct hfsc_if *hif, struct service_curve *rsc,
return (cl);
err_ret:
- if (cl->cl_actc != NULL)
- actlist_destroy(cl->cl_actc);
if (cl->cl_red != NULL) {
#ifdef ALTQ_RIO
if (q_is_rio(cl->cl_q))
@@ -620,8 +605,6 @@ hfsc_class_destroy(struct hfsc_class *cl)
IFQ_UNLOCK(cl->cl_hif->hif_ifq);
splx(s);
- actlist_destroy(cl->cl_actc);
-
if (cl->cl_red != NULL) {
#ifdef ALTQ_RIO
if (q_is_rio(cl->cl_q))
@@ -774,7 +757,7 @@ hfsc_dequeue(struct ifaltq *ifq, int op)
* find the class with the minimum deadline among
* the eligible classes.
*/
- if ((cl = ellist_get_mindl(hif->hif_eligible, cur_time))
+ if ((cl = hfsc_get_mindl(hif, cur_time))
!= NULL) {
realtime = 1;
} else {
@@ -994,7 +977,7 @@ init_vf(struct hfsc_class *cl, int len)
go_active = 0;
if (go_active) {
- max_cl = actlist_last(cl->cl_parent->cl_actc);
+ max_cl = TAILQ_LAST(&cl->cl_parent->cl_actc, acthead);
if (max_cl != NULL) {
/*
* set vt to the average of the min and max
@@ -1159,12 +1142,12 @@ update_cfmin(struct hfsc_class *cl)
struct hfsc_class *p;
u_int64_t cfmin;
- if (TAILQ_EMPTY(cl->cl_actc)) {
+ if (TAILQ_EMPTY(&cl->cl_actc)) {
cl->cl_cfmin = 0;
return;
}
cfmin = HT_INFINITY;
- TAILQ_FOREACH(p, cl->cl_actc, cl_actlist) {
+ TAILQ_FOREACH(p, &cl->cl_actc, cl_actlist) {
if (p->cl_f == 0) {
cl->cl_cfmin = 0;
return;
@@ -1184,22 +1167,6 @@ update_cfmin(struct hfsc_class *cl)
* there is one eligible list per interface.
*/
-static ellist_t *
-ellist_alloc(void)
-{
- ellist_t *head;
-
- head = malloc(sizeof(ellist_t), M_DEVBUF, M_WAITOK);
- TAILQ_INIT(head);
- return (head);
-}
-
-static void
-ellist_destroy(ellist_t *head)
-{
- free(head, M_DEVBUF);
-}
-
static void
ellist_insert(struct hfsc_class *cl)
{
@@ -1207,13 +1174,13 @@ ellist_insert(struct hfsc_class *cl)
struct hfsc_class *p;
/* check the last entry first */
- if ((p = TAILQ_LAST(hif->hif_eligible, _eligible)) == NULL ||
+ if ((p = TAILQ_LAST(&hif->hif_eligible, elighead)) == NULL ||
p->cl_e <= cl->cl_e) {
- TAILQ_INSERT_TAIL(hif->hif_eligible, cl, cl_ellist);
+ TAILQ_INSERT_TAIL(&hif->hif_eligible, cl, cl_ellist);
return;
}
- TAILQ_FOREACH(p, hif->hif_eligible, cl_ellist) {
+ TAILQ_FOREACH(p, &hif->hif_eligible, cl_ellist) {
if (cl->cl_e < p->cl_e) {
TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
return;
@@ -1227,7 +1194,7 @@ ellist_remove(struct hfsc_class *cl)
{
struct hfsc_if *hif = cl->cl_hif;
- TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
+ TAILQ_REMOVE(&hif->hif_eligible, cl, cl_ellist);
}
static void
@@ -1245,11 +1212,11 @@ ellist_update(struct hfsc_class *cl)
return;
/* check the last entry */
- last = TAILQ_LAST(hif->hif_eligible, _eligible);
+ last = TAILQ_LAST(&hif->hif_eligible, elighead);
ASSERT(last != NULL);
if (last->cl_e <= cl->cl_e) {
- TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
- TAILQ_INSERT_TAIL(hif->hif_eligible, cl, cl_ellist);
+ TAILQ_REMOVE(&hif->hif_eligible, cl, cl_ellist);
+ TAILQ_INSERT_TAIL(&hif->hif_eligible, cl, cl_ellist);
return;
}
@@ -1259,7 +1226,7 @@ ellist_update(struct hfsc_class *cl)
*/
while ((p = TAILQ_NEXT(p, cl_ellist)) != NULL) {
if (cl->cl_e < p->cl_e) {
- TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
+ TAILQ_REMOVE(&hif->hif_eligible, cl, cl_ellist);
TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
return;
}
@@ -1269,11 +1236,11 @@ ellist_update(struct hfsc_class *cl)
/* find the class with the minimum deadline among the eligible classes */
struct hfsc_class *
-ellist_get_mindl(ellist_t *head, u_int64_t cur_time)
+hfsc_get_mindl(struct hfsc_if *hif, u_int64_t cur_time)
{
struct hfsc_class *p, *cl = NULL;
- TAILQ_FOREACH(p, head, cl_ellist) {
+ TAILQ_FOREACH(p, &hif->hif_eligible, cl_ellist) {
if (p->cl_e > cur_time)
break;
if (cl == NULL || p->cl_d < cl->cl_d)
@@ -1287,34 +1254,20 @@ ellist_get_mindl(ellist_t *head, u_int64_t cur_time)
* by their virtual time.
* each intermediate class has one active children list.
*/
-static actlist_t *
-actlist_alloc(void)
-{
- actlist_t *head;
-
- head = malloc(sizeof(actlist_t), M_DEVBUF, M_WAITOK);
- TAILQ_INIT(head);
- return (head);
-}
static void
-actlist_destroy(actlist_t *head)
-{
- free(head, M_DEVBUF);
-}
-static void
actlist_insert(struct hfsc_class *cl)
{
struct hfsc_class *p;
/* check the last entry first */
- if ((p = TAILQ_LAST(cl->cl_parent->cl_actc, _active)) == NULL
+ if ((p = TAILQ_LAST(&cl->cl_parent->cl_actc, acthead)) == NULL
|| p->cl_vt <= cl->cl_vt) {
- TAILQ_INSERT_TAIL(cl->cl_parent->cl_actc, cl, cl_actlist);
+ TAILQ_INSERT_TAIL(&cl->cl_parent->cl_actc, cl, cl_actlist);
return;
}
- TAILQ_FOREACH(p, cl->cl_parent->cl_actc, cl_actlist) {
+ TAILQ_FOREACH(p, &cl->cl_parent->cl_actc, cl_actlist) {
if (cl->cl_vt < p->cl_vt) {
TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
return;
@@ -1326,7 +1279,7 @@ actlist_insert(struct hfsc_class *cl)
static void
actlist_remove(struct hfsc_class *cl)
{
- TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
+ TAILQ_REMOVE(&cl->cl_parent->cl_actc, cl, cl_actlist);
}
static void
@@ -1344,11 +1297,11 @@ actlist_update(struct hfsc_class *cl)
return;
/* check the last entry */
- last = TAILQ_LAST(cl->cl_parent->cl_actc, _active);
+ last = TAILQ_LAST(&cl->cl_parent->cl_actc, acthead);
ASSERT(last != NULL);
if (last->cl_vt <= cl->cl_vt) {
- TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
- TAILQ_INSERT_TAIL(cl->cl_parent->cl_actc, cl, cl_actlist);
+ TAILQ_REMOVE(&cl->cl_parent->cl_actc, cl, cl_actlist);
+ TAILQ_INSERT_TAIL(&cl->cl_parent->cl_actc, cl, cl_actlist);
return;
}
@@ -1358,7 +1311,7 @@ actlist_update(struct hfsc_class *cl)
*/
while ((p = TAILQ_NEXT(p, cl_actlist)) != NULL) {
if (cl->cl_vt < p->cl_vt) {
- TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
+ TAILQ_REMOVE(&cl->cl_parent->cl_actc, cl, cl_actlist);
TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
return;
}
@@ -1371,7 +1324,7 @@ actlist_firstfit(struct hfsc_class *cl, u_int64_t cur_time)
{
struct hfsc_class *p;
- TAILQ_FOREACH(p, cl->cl_actc, cl_actlist) {
+ TAILQ_FOREACH(p, &cl->cl_actc, cl_actlist) {
if (p->cl_f <= cur_time)
return (p);
}
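The pattern applied throughout this file replaces separately malloc'd
TAILQ heads with heads embedded in their owning structures, removing a
needless allocation and pointer indirection; a standalone sketch of the
idiom (names are illustrative):

	struct child {
		TAILQ_ENTRY(child) link;
	};
	struct parent {
		/* Named head so TAILQ_LAST() can reference the type. */
		TAILQ_HEAD(childhead, child) children;
	};

	static void
	parent_init(struct parent *p)
	{
		TAILQ_INIT(&p->children);	/* cannot fail */
	}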
diff --git a/sys/contrib/altq/altq/altq_hfsc.h b/sys/contrib/altq/altq/altq_hfsc.h
index 91ba3d1..d04b378 100644
--- a/sys/contrib/altq/altq/altq_hfsc.h
+++ b/sys/contrib/altq/altq/altq_hfsc.h
@@ -218,16 +218,6 @@ struct runtime_sc {
u_int64_t ism2; /* scaled inverse-slope of the 2nd segment */
};
-/* for TAILQ based ellist and actlist implementation */
-struct hfsc_class;
-typedef TAILQ_HEAD(_eligible, hfsc_class) ellist_t;
-typedef TAILQ_ENTRY(hfsc_class) elentry_t;
-typedef TAILQ_HEAD(_active, hfsc_class) actlist_t;
-typedef TAILQ_ENTRY(hfsc_class) actentry_t;
-#define ellist_first(s) TAILQ_FIRST(s)
-#define actlist_first(s) TAILQ_FIRST(s)
-#define actlist_last(s) TAILQ_LAST(s, _active)
-
struct hfsc_class {
u_int cl_id; /* class id (just for debug) */
u_int32_t cl_handle; /* class handle */
@@ -277,10 +267,10 @@ struct hfsc_class {
u_int cl_vtperiod; /* vt period sequence no */
u_int cl_parentperiod; /* parent's vt period seqno */
int cl_nactive; /* number of active children */
- actlist_t *cl_actc; /* active children list */
- actentry_t cl_actlist; /* active children list entry */
- elentry_t cl_ellist; /* eligible list entry */
+ TAILQ_HEAD(acthead, hfsc_class) cl_actc; /* active children list */
+ TAILQ_ENTRY(hfsc_class) cl_actlist; /* active children list entry */
+ TAILQ_ENTRY(hfsc_class) cl_ellist; /* eligible list entry */
struct {
struct pktcntr xmit_cnt;
@@ -304,7 +294,7 @@ struct hfsc_if {
u_int hif_packets; /* # of packets in the tree */
u_int hif_classid; /* class id sequence number */
- ellist_t *hif_eligible; /* eligible list */
+ TAILQ_HEAD(elighead, hfsc_class) hif_eligible; /* eligible list */
#ifdef ALTQ3_CLFIER_COMPAT
struct acc_classifier hif_classifier;
diff --git a/sys/dev/drm2/drmP.h b/sys/dev/drm2/drmP.h
index 9b721a8..32c710a 100644
--- a/sys/dev/drm2/drmP.h
+++ b/sys/dev/drm2/drmP.h
@@ -228,6 +228,7 @@ typedef void irqreturn_t;
#define IRQ_NONE /* nothing */
#define unlikely(x) __builtin_expect(!!(x), 0)
+#define likely(x) __builtin_expect(!!(x), 1)
#define container_of(ptr, type, member) ({ \
__typeof( ((type *)0)->member ) *__mptr = (ptr); \
(type *)( (char *)__mptr - offsetof(type,member) );})
@@ -905,6 +906,7 @@ struct drm_device {
struct drm_minor *control; /**< Control node for card */
struct drm_minor *primary; /**< render type primary screen head */
+ void *drm_ttm_bo;
struct unrhdr *drw_unrhdr;
/* RB tree of drawable infos */
RB_HEAD(drawable_tree, bsd_drm_drawable_info) drw_head;
@@ -1301,10 +1303,14 @@ void drm_gem_release(struct drm_device *dev, struct drm_file *file_priv);
int drm_gem_create_mmap_offset(struct drm_gem_object *obj);
void drm_gem_free_mmap_offset(struct drm_gem_object *obj);
-int drm_gem_mmap_single(struct cdev *kdev, vm_ooffset_t *offset, vm_size_t size,
- struct vm_object **obj_res, int nprot);
+int drm_gem_mmap_single(struct drm_device *dev, vm_ooffset_t *offset,
+ vm_size_t size, struct vm_object **obj_res, int nprot);
void drm_gem_pager_dtr(void *obj);
+struct ttm_bo_device;
+int ttm_bo_mmap_single(struct ttm_bo_device *bdev, vm_ooffset_t *offset,
+ vm_size_t size, struct vm_object **obj_res, int nprot);
+
void drm_device_lock_mtx(struct drm_device *dev);
void drm_device_unlock_mtx(struct drm_device *dev);
int drm_device_sleep_mtx(struct drm_device *dev, void *chan, int flags,
diff --git a/sys/dev/drm2/drm_drv.c b/sys/dev/drm2/drm_drv.c
index 483aff5..c45bda1 100644
--- a/sys/dev/drm2/drm_drv.c
+++ b/sys/dev/drm2/drm_drv.c
@@ -58,6 +58,8 @@ static int drm_load(struct drm_device *dev);
static void drm_unload(struct drm_device *dev);
static drm_pci_id_list_t *drm_find_description(int vendor, int device,
drm_pci_id_list_t *idlist);
+static int drm_mmap_single(struct cdev *kdev, vm_ooffset_t *offset,
+ vm_size_t size, struct vm_object **obj_res, int nprot);
static int
drm_modevent(module_t mod, int type, void *data)
@@ -187,7 +189,7 @@ static struct cdevsw drm_cdevsw = {
.d_ioctl = drm_ioctl,
.d_poll = drm_poll,
.d_mmap = drm_mmap,
- .d_mmap_single = drm_gem_mmap_single,
+ .d_mmap_single = drm_mmap_single,
.d_name = "drm",
.d_flags = D_TRACKCLOSE
};
@@ -955,6 +957,23 @@ drm_add_busid_modesetting(struct drm_device *dev, struct sysctl_ctx_list *ctx,
return (0);
}
+static int
+drm_mmap_single(struct cdev *kdev, vm_ooffset_t *offset, vm_size_t size,
+ struct vm_object **obj_res, int nprot)
+{
+ struct drm_device *dev;
+
+ dev = drm_get_device_from_kdev(kdev);
+ if ((dev->driver->driver_features & DRIVER_GEM) != 0) {
+ return (drm_gem_mmap_single(dev, offset, size, obj_res, nprot));
+ } else if (dev->drm_ttm_bo != NULL) {
+ return (ttm_bo_mmap_single(dev->drm_ttm_bo, offset, size,
+ obj_res, nprot));
+ } else {
+ return (ENODEV);
+ }
+}
+
#if DRM_LINUX
#include <sys/sysproto.h>
diff --git a/sys/dev/drm2/drm_gem.c b/sys/dev/drm2/drm_gem.c
index f2c3e08..a792839 100644
--- a/sys/dev/drm2/drm_gem.c
+++ b/sys/dev/drm2/drm_gem.c
@@ -441,16 +441,12 @@ drm_gem_free_mmap_offset(struct drm_gem_object *obj)
}
int
-drm_gem_mmap_single(struct cdev *kdev, vm_ooffset_t *offset, vm_size_t size,
+drm_gem_mmap_single(struct drm_device *dev, vm_ooffset_t *offset, vm_size_t size,
struct vm_object **obj_res, int nprot)
{
- struct drm_device *dev;
struct drm_gem_object *gem_obj;
struct vm_object *vm_obj;
- dev = drm_get_device_from_kdev(kdev);
- if ((dev->driver->driver_features & DRIVER_GEM) == 0)
- return (ENODEV);
DRM_LOCK(dev);
gem_obj = drm_gem_object_from_offset(dev, *offset);
if (gem_obj == NULL) {
diff --git a/sys/dev/drm2/drm_global.c b/sys/dev/drm2/drm_global.c
new file mode 100644
index 0000000..992d061
--- /dev/null
+++ b/sys/dev/drm2/drm_global.c
@@ -0,0 +1,110 @@
+/**************************************************************************
+ *
+ * Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm_global.h>
+
+MALLOC_DEFINE(M_DRM_GLOBAL, "drm_global", "DRM Global Items");
+
+struct drm_global_item {
+ struct sx mutex;
+ void *object;
+ int refcount;
+};
+
+static struct drm_global_item glob[DRM_GLOBAL_NUM];
+
+void drm_global_init(void)
+{
+ int i;
+
+ for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
+ struct drm_global_item *item = &glob[i];
+ sx_init(&item->mutex, "drmgi");
+ item->object = NULL;
+ item->refcount = 0;
+ }
+}
+
+void drm_global_release(void)
+{
+ int i;
+ for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
+ struct drm_global_item *item = &glob[i];
+ MPASS(item->object == NULL);
+ MPASS(item->refcount == 0);
+ sx_destroy(&item->mutex);
+ }
+}
+
+int drm_global_item_ref(struct drm_global_reference *ref)
+{
+ int ret;
+ struct drm_global_item *item = &glob[ref->global_type];
+ void *object;
+
+ sx_xlock(&item->mutex);
+ if (item->refcount == 0) {
+ item->object = malloc(ref->size, M_DRM_GLOBAL,
+ M_WAITOK | M_ZERO);
+
+ ref->object = item->object;
+ ret = ref->init(ref);
+ if (unlikely(ret != 0))
+ goto out_err;
+
+ }
+ ++item->refcount;
+ ref->object = item->object;
+ object = item->object;
+ sx_xunlock(&item->mutex);
+ return 0;
+out_err:
+ sx_xunlock(&item->mutex);
+ item->object = NULL;
+ return ret;
+}
+
+void drm_global_item_unref(struct drm_global_reference *ref)
+{
+ struct drm_global_item *item = &glob[ref->global_type];
+
+ sx_xlock(&item->mutex);
+ MPASS(item->refcount != 0);
+ MPASS(ref->object == item->object);
+ if (--item->refcount == 0) {
+ ref->release(ref);
+ item->object = NULL;
+ }
+ sx_xunlock(&item->mutex);
+}
diff --git a/sys/dev/drm2/drm_global.h b/sys/dev/drm2/drm_global.h
new file mode 100644
index 0000000..f45b015
--- /dev/null
+++ b/sys/dev/drm2/drm_global.h
@@ -0,0 +1,56 @@
+/**************************************************************************
+ *
+ * Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+/* $FreeBSD$ */
+
+#ifndef _DRM_GLOBAL_H_
+#define _DRM_GLOBAL_H_
+enum drm_global_types {
+ DRM_GLOBAL_TTM_MEM = 0,
+ DRM_GLOBAL_TTM_BO,
+ DRM_GLOBAL_TTM_OBJECT,
+ DRM_GLOBAL_NUM
+};
+
+struct drm_global_reference {
+ enum drm_global_types global_type;
+ size_t size;
+ void *object;
+ int (*init) (struct drm_global_reference *);
+ void (*release) (struct drm_global_reference *);
+};
+
+extern void drm_global_init(void);
+extern void drm_global_release(void);
+extern int drm_global_item_ref(struct drm_global_reference *ref);
+extern void drm_global_item_unref(struct drm_global_reference *ref);
+
+MALLOC_DECLARE(M_DRM_GLOBAL);
+
+#endif
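A hypothetical consumer of this interface fills in a
drm_global_reference with callbacks and a size, then takes and drops the
shared item; per drm_global.c above, drm_global_item_ref() allocates and
initializes the object on first use, and the last
drm_global_item_unref() releases it:

	static int
	example_init(struct drm_global_reference *ref)
	{
		/* ref->object points at ref->size zeroed bytes. */
		return (0);
	}

	static void
	example_release(struct drm_global_reference *ref)
	{
	}

	static struct drm_global_reference example_ref = {
		.global_type = DRM_GLOBAL_TTM_MEM,
		.size = sizeof(int),
		.init = example_init,
		.release = example_release,
	};

	error = drm_global_item_ref(&example_ref);
	if (error == 0) {
		/* ... use example_ref.object ... */
		drm_global_item_unref(&example_ref);
	}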
diff --git a/sys/dev/drm2/drm_mm.c b/sys/dev/drm2/drm_mm.c
index cb35fc0..3617b05 100644
--- a/sys/dev/drm2/drm_mm.c
+++ b/sys/dev/drm2/drm_mm.c
@@ -561,3 +561,40 @@ void drm_mm_takedown(struct drm_mm * mm)
KASSERT(mm->num_unused == 0, ("num_unused != 0"));
}
+
+void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
+{
+ struct drm_mm_node *entry;
+ unsigned long total_used = 0, total_free = 0, total = 0;
+ unsigned long hole_start, hole_end, hole_size;
+
+ hole_start = drm_mm_hole_node_start(&mm->head_node);
+ hole_end = drm_mm_hole_node_end(&mm->head_node);
+ hole_size = hole_end - hole_start;
+ if (hole_size)
+ printf("%s 0x%08lx-0x%08lx: %8lu: free\n",
+ prefix, hole_start, hole_end,
+ hole_size);
+ total_free += hole_size;
+
+ drm_mm_for_each_node(entry, mm) {
+ printf("%s 0x%08lx-0x%08lx: %8lu: used\n",
+ prefix, entry->start, entry->start + entry->size,
+ entry->size);
+ total_used += entry->size;
+
+ if (entry->hole_follows) {
+ hole_start = drm_mm_hole_node_start(entry);
+ hole_end = drm_mm_hole_node_end(entry);
+ hole_size = hole_end - hole_start;
+ printf("%s 0x%08lx-0x%08lx: %8lu: free\n",
+ prefix, hole_start, hole_end,
+ hole_size);
+ total_free += hole_size;
+ }
+ }
+ total = total_free + total_used;
+
+ printf("%s total: %lu, used %lu free %lu\n", prefix, total,
+ total_used, total_free);
+}
diff --git a/sys/dev/drm2/drm_mm.h b/sys/dev/drm2/drm_mm.h
index 7b173af..7150e24 100644
--- a/sys/dev/drm2/drm_mm.h
+++ b/sys/dev/drm2/drm_mm.h
@@ -182,4 +182,6 @@ void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
int drm_mm_scan_add_block(struct drm_mm_node *node);
int drm_mm_scan_remove_block(struct drm_mm_node *node);
+void drm_mm_debug_table(struct drm_mm *mm, const char *prefix);
+
#endif
diff --git a/sys/dev/drm2/ttm/ttm_agp_backend.c b/sys/dev/drm2/ttm/ttm_agp_backend.c
new file mode 100644
index 0000000..48f2193
--- /dev/null
+++ b/sys/dev/drm2/ttm/ttm_agp_backend.c
@@ -0,0 +1,145 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ * Keith Packard.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/ttm/ttm_module.h>
+#include <dev/drm2/ttm/ttm_bo_driver.h>
+#include <dev/drm2/ttm/ttm_page_alloc.h>
+#ifdef TTM_HAS_AGP
+#include <dev/drm2/ttm/ttm_placement.h>
+
+struct ttm_agp_backend {
+ struct ttm_tt ttm;
+ struct agp_memory *mem;
+ device_t bridge;
+};
+
+MALLOC_DEFINE(M_TTM_AGP, "ttm_agp", "TTM AGP Backend");
+
+static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
+{
+ struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
+ struct drm_mm_node *node = bo_mem->mm_node;
+ struct agp_memory *mem;
+ int ret, cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
+ unsigned i;
+
+ mem = agp_alloc_memory(agp_be->bridge, AGP_USER_MEMORY, ttm->num_pages);
+ if (unlikely(mem == NULL))
+ return -ENOMEM;
+
+ mem->page_count = 0;
+ for (i = 0; i < ttm->num_pages; i++) {
+ vm_page_t page = ttm->pages[i];
+
+ if (!page)
+ page = ttm->dummy_read_page;
+
+ mem->pages[mem->page_count++] = page;
+ }
+ agp_be->mem = mem;
+
+ mem->is_flushed = 1;
+ mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY;
+
+ ret = agp_bind_memory(mem, node->start);
+ if (ret)
+ pr_err("AGP Bind memory failed\n");
+
+ return ret;
+}
+
+static int ttm_agp_unbind(struct ttm_tt *ttm)
+{
+ struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
+
+ if (agp_be->mem) {
+ if (agp_be->mem->is_bound)
+ return agp_unbind_memory(agp_be->mem);
+ agp_free_memory(agp_be->mem);
+ agp_be->mem = NULL;
+ }
+ return 0;
+}
+
+static void ttm_agp_destroy(struct ttm_tt *ttm)
+{
+ struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
+
+ if (agp_be->mem)
+ ttm_agp_unbind(ttm);
+ ttm_tt_fini(ttm);
+ free(agp_be, M_TTM_AGP);
+}
+
+static struct ttm_backend_func ttm_agp_func = {
+ .bind = ttm_agp_bind,
+ .unbind = ttm_agp_unbind,
+ .destroy = ttm_agp_destroy,
+};
+
+struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
+ device_t bridge,
+ unsigned long size, uint32_t page_flags,
+ vm_page_t dummy_read_page)
+{
+ struct ttm_agp_backend *agp_be;
+
+ agp_be = malloc(sizeof(*agp_be), M_TTM_AGP, M_WAITOK | M_ZERO);
+
+ agp_be->mem = NULL;
+ agp_be->bridge = bridge;
+ agp_be->ttm.func = &ttm_agp_func;
+
+ if (ttm_tt_init(&agp_be->ttm, bdev, size, page_flags, dummy_read_page)) {
+ return NULL;
+ }
+
+ return &agp_be->ttm;
+}
+
+int ttm_agp_tt_populate(struct ttm_tt *ttm)
+{
+ if (ttm->state != tt_unpopulated)
+ return 0;
+
+ return ttm_pool_populate(ttm);
+}
+
+void ttm_agp_tt_unpopulate(struct ttm_tt *ttm)
+{
+ ttm_pool_unpopulate(ttm);
+}
+
+#endif
diff --git a/sys/dev/drm2/ttm/ttm_bo.c b/sys/dev/drm2/ttm/ttm_bo.c
new file mode 100644
index 0000000..12e5131
--- /dev/null
+++ b/sys/dev/drm2/ttm/ttm_bo.c
@@ -0,0 +1,1820 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/ttm/ttm_module.h>
+#include <dev/drm2/ttm/ttm_bo_driver.h>
+#include <dev/drm2/ttm/ttm_placement.h>
+
+#define TTM_ASSERT_LOCKED(param)
+#define TTM_DEBUG(fmt, arg...)
+#define TTM_BO_HASH_ORDER 13
+
+static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
+static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
+static void ttm_bo_global_kobj_release(struct ttm_bo_global *glob);
+
+MALLOC_DEFINE(M_TTM_BO, "ttm_bo", "TTM Buffer Objects");
+
+static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
+{
+ int i;
+
+ for (i = 0; i <= TTM_PL_PRIV5; i++)
+ if (flags & (1 << i)) {
+ *mem_type = i;
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+
+ printf(" has_type: %d\n", man->has_type);
+ printf(" use_type: %d\n", man->use_type);
+ printf(" flags: 0x%08X\n", man->flags);
+ printf(" gpu_offset: 0x%08lX\n", man->gpu_offset);
+ printf(" size: %ju\n", (uintmax_t)man->size);
+ printf(" available_caching: 0x%08X\n", man->available_caching);
+ printf(" default_caching: 0x%08X\n", man->default_caching);
+ if (mem_type != TTM_PL_SYSTEM)
+ (*man->func->debug)(man, TTM_PFX);
+}
+
+static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement)
+{
+ int i, ret, mem_type;
+
+ printf("No space for %p (%lu pages, %luK, %luM)\n",
+ bo, bo->mem.num_pages, bo->mem.size >> 10,
+ bo->mem.size >> 20);
+ for (i = 0; i < placement->num_placement; i++) {
+ ret = ttm_mem_type_from_flags(placement->placement[i],
+ &mem_type);
+ if (ret)
+ return;
+ printf(" placement[%d]=0x%08X (%d)\n",
+ i, placement->placement[i], mem_type);
+ ttm_mem_type_debug(bo->bdev, mem_type);
+ }
+}
+
+#if 0
+static ssize_t ttm_bo_global_show(struct ttm_bo_global *glob,
+ char *buffer)
+{
+
+ return snprintf(buffer, PAGE_SIZE, "%lu\n",
+ (unsigned long) atomic_read(&glob->bo_count));
+}
+#endif
+
+static inline uint32_t ttm_bo_type_flags(unsigned type)
+{
+ return 1 << (type);
+}
+
+static void ttm_bo_release_list(struct ttm_buffer_object *bo)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ size_t acc_size = bo->acc_size;
+
+ MPASS(atomic_read(&bo->list_kref) == 0);
+ MPASS(atomic_read(&bo->kref) == 0);
+ MPASS(atomic_read(&bo->cpu_writers) == 0);
+ MPASS(bo->sync_obj == NULL);
+ MPASS(bo->mem.mm_node == NULL);
+ MPASS(list_empty(&bo->lru));
+ MPASS(list_empty(&bo->ddestroy));
+
+ if (bo->ttm)
+ ttm_tt_destroy(bo->ttm);
+ atomic_dec(&bo->glob->bo_count);
+ if (bo->destroy)
+ bo->destroy(bo);
+ else {
+ free(bo, M_TTM_BO);
+ }
+ ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
+}
+
+int
+ttm_bo_wait_unreserved_locked(struct ttm_buffer_object *bo, bool interruptible)
+{
+ const char *wmsg;
+ int flags, ret;
+
+ ret = 0;
+ if (interruptible) {
+ flags = PCATCH;
+ wmsg = "ttbowi";
+ } else {
+ flags = 0;
+ wmsg = "ttbowu";
+ }
+ while (!ttm_bo_is_reserved(bo)) {
+ ret = -msleep(bo, &bo->glob->lru_lock, flags, wmsg, 0);
+ if (ret != 0)
+ break;
+ }
+ return (ret);
+}
+
+void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_mem_type_manager *man;
+
+ MPASS(ttm_bo_is_reserved(bo));
+
+ if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
+
+ MPASS(list_empty(&bo->lru));
+
+ man = &bdev->man[bo->mem.mem_type];
+ list_add_tail(&bo->lru, &man->lru);
+ refcount_acquire(&bo->list_kref);
+
+ if (bo->ttm != NULL) {
+ list_add_tail(&bo->swap, &bo->glob->swap_lru);
+ refcount_acquire(&bo->list_kref);
+ }
+ }
+}
+
+int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
+{
+ int put_count = 0;
+
+ if (!list_empty(&bo->swap)) {
+ list_del_init(&bo->swap);
+ ++put_count;
+ }
+ if (!list_empty(&bo->lru)) {
+ list_del_init(&bo->lru);
+ ++put_count;
+ }
+
+ /*
+ * TODO: Add a driver hook to delete from
+ * driver-specific LRU's here.
+ */
+
+ return put_count;
+}
+
+int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
+ bool interruptible,
+ bool no_wait, bool use_sequence, uint32_t sequence)
+{
+ int ret;
+
+ while (unlikely(atomic_read(&bo->reserved) != 0)) {
+ /**
+ * Deadlock avoidance for multi-bo reserving.
+ */
+ if (use_sequence && bo->seq_valid) {
+ /**
+ * We've already reserved this one.
+ */
+ if (unlikely(sequence == bo->val_seq))
+ return -EDEADLK;
+ /**
+ * Already reserved by a thread that will not back
+ * off for us. We need to back off.
+ */
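+			/*
+			 * The unsigned subtraction below is a wraparound-
+			 * safe serial-number comparison.  Illustrative
+			 * values: with bo->val_seq == UINT32_MAX and
+			 * sequence == 2, 2 - UINT32_MAX == 3 < (1 << 31),
+			 * so the holder is treated as older and we back off.
+			 */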
+ if (unlikely(sequence - bo->val_seq < (1 << 31)))
+ return -EAGAIN;
+ }
+
+ if (no_wait)
+ return -EBUSY;
+
+ ret = ttm_bo_wait_unreserved_locked(bo, interruptible);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ atomic_set(&bo->reserved, 1);
+ if (use_sequence) {
+ /**
+ * Wake up waiters that may need to recheck for deadlock,
+ * if we decreased the sequence number.
+ */
+ if (unlikely((bo->val_seq - sequence < (1 << 31))
+ || !bo->seq_valid))
+ wakeup(bo);
+
+ bo->val_seq = sequence;
+ bo->seq_valid = true;
+ } else {
+ bo->seq_valid = false;
+ }
+
+ return 0;
+}
+
+void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
+ bool never_free)
+{
+ u_int old;
+
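+	/*
+	 * atomic_fetchadd_int() returns the value held before the
+	 * subtraction, so old <= count means this call dropped the last
+	 * list reference.
+	 */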
+ old = atomic_fetchadd_int(&bo->list_kref, -count);
+ if (old <= count) {
+ if (never_free)
+			panic("ttm_bo_list_ref_sub");
+ ttm_bo_release_list(bo);
+ }
+}
+
+int ttm_bo_reserve(struct ttm_buffer_object *bo,
+ bool interruptible,
+ bool no_wait, bool use_sequence, uint32_t sequence)
+{
+ struct ttm_bo_global *glob = bo->glob;
+ int put_count = 0;
+ int ret;
+
+ mtx_lock(&glob->lru_lock);
+ ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
+ sequence);
+ if (likely(ret == 0))
+ put_count = ttm_bo_del_from_lru(bo);
+ mtx_unlock(&glob->lru_lock);
+
+ ttm_bo_list_ref_sub(bo, put_count, true);
+
+ return ret;
+}
+
+void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo)
+{
+ ttm_bo_add_to_lru(bo);
+ atomic_set(&bo->reserved, 0);
+ wakeup(bo);
+}
+
+void ttm_bo_unreserve(struct ttm_buffer_object *bo)
+{
+ struct ttm_bo_global *glob = bo->glob;
+
+ mtx_lock(&glob->lru_lock);
+ ttm_bo_unreserve_locked(bo);
+ mtx_unlock(&glob->lru_lock);
+}
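+
+/*
+ * Typical caller pattern (illustrative sketch only):
+ *
+ *	if (ttm_bo_reserve(bo, true, false, false, 0) == 0) {
+ *		... operate on the reserved bo ...
+ *		ttm_bo_unreserve(bo);
+ *	}
+ */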
+
+/*
+ * Call bo->mutex locked.
+ */
+static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_bo_global *glob = bo->glob;
+ int ret = 0;
+ uint32_t page_flags = 0;
+
+ TTM_ASSERT_LOCKED(&bo->mutex);
+ bo->ttm = NULL;
+
+ if (bdev->need_dma32)
+ page_flags |= TTM_PAGE_FLAG_DMA32;
+
+ switch (bo->type) {
+ case ttm_bo_type_device:
+ if (zero_alloc)
+ page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
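+		/* FALLTHROUGH */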
+ case ttm_bo_type_kernel:
+ bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
+ page_flags, glob->dummy_read_page);
+ if (unlikely(bo->ttm == NULL))
+ ret = -ENOMEM;
+ break;
+ case ttm_bo_type_sg:
+ bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
+ page_flags | TTM_PAGE_FLAG_SG,
+ glob->dummy_read_page);
+ if (unlikely(bo->ttm == NULL)) {
+ ret = -ENOMEM;
+ break;
+ }
+ bo->ttm->sg = bo->sg;
+ break;
+ default:
+ printf("[TTM] Illegal buffer object type\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
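+/*
+ * Move the buffer contents to @mem.  A ttm is created and bound first when
+ * the destination type is not TTM_MEMTYPE_FLAG_FIXED; the cheapest
+ * applicable path is then used: a ttm-to-ttm move, the driver's move hook,
+ * or the memcpy fallback.
+ */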
+static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *mem,
+ bool evict, bool interruptible,
+ bool no_wait_gpu)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
+ bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
+ struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
+ struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
+ int ret = 0;
+
+ if (old_is_pci || new_is_pci ||
+ ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
+ ret = ttm_mem_io_lock(old_man, true);
+ if (unlikely(ret != 0))
+ goto out_err;
+ ttm_bo_unmap_virtual_locked(bo);
+ ttm_mem_io_unlock(old_man);
+ }
+
+ /*
+ * Create and bind a ttm if required.
+ */
+
+ if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
+ if (bo->ttm == NULL) {
+ bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
+ ret = ttm_bo_add_ttm(bo, zero);
+ if (ret)
+ goto out_err;
+ }
+
+ ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
+ if (ret)
+ goto out_err;
+
+ if (mem->mem_type != TTM_PL_SYSTEM) {
+ ret = ttm_tt_bind(bo->ttm, mem);
+ if (ret)
+ goto out_err;
+ }
+
+ if (bo->mem.mem_type == TTM_PL_SYSTEM) {
+ if (bdev->driver->move_notify)
+ bdev->driver->move_notify(bo, mem);
+ bo->mem = *mem;
+ mem->mm_node = NULL;
+ goto moved;
+ }
+ }
+
+ if (bdev->driver->move_notify)
+ bdev->driver->move_notify(bo, mem);
+
+ if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
+ !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
+ ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem);
+ else if (bdev->driver->move)
+ ret = bdev->driver->move(bo, evict, interruptible,
+ no_wait_gpu, mem);
+ else
+ ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem);
+
+ if (ret) {
+ if (bdev->driver->move_notify) {
+ struct ttm_mem_reg tmp_mem = *mem;
+ *mem = bo->mem;
+ bo->mem = tmp_mem;
+ bdev->driver->move_notify(bo, mem);
+ bo->mem = *mem;
+ }
+
+ goto out_err;
+ }
+
+moved:
+ if (bo->evicted) {
+ ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
+ if (ret)
+ printf("[TTM] Can not flush read caches\n");
+ bo->evicted = false;
+ }
+
+ if (bo->mem.mm_node) {
+ bo->offset = (bo->mem.start << PAGE_SHIFT) +
+ bdev->man[bo->mem.mem_type].gpu_offset;
+ bo->cur_placement = bo->mem.placement;
+ } else
+ bo->offset = 0;
+
+ return 0;
+
+out_err:
+ new_man = &bdev->man[bo->mem.mem_type];
+ if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
+ ttm_tt_unbind(bo->ttm);
+ ttm_tt_destroy(bo->ttm);
+ bo->ttm = NULL;
+ }
+
+ return ret;
+}
+
+/**
+ * Called with the bo::reserved lock held.
+ * Will release GPU memory type usage on destruction.
+ * This is the place to put in driver specific hooks to release
+ * driver private resources.
+ * Will release the bo::reserved lock.
+ */
+
+static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
+{
+ if (bo->bdev->driver->move_notify)
+ bo->bdev->driver->move_notify(bo, NULL);
+
+ if (bo->ttm) {
+ ttm_tt_unbind(bo->ttm);
+ ttm_tt_destroy(bo->ttm);
+ bo->ttm = NULL;
+ }
+ ttm_bo_mem_put(bo, &bo->mem);
+
+ atomic_set(&bo->reserved, 0);
+	wakeup(bo);
+
+ /*
+ * Since the final reference to this bo may not be dropped by
+ * the current task we have to put a memory barrier here to make
+ * sure the changes done in this function are always visible.
+ *
+ * This function only needs protection against the final kref_put.
+ */
+ mb();
+}
+
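+/*
+ * If the bo can be reserved without waiting and is idle, release its
+ * memory type resources immediately; otherwise take a list reference and
+ * queue the bo on bdev->ddestroy so the delayed-destroy task retires it.
+ */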
+static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_bo_global *glob = bo->glob;
+ struct ttm_bo_driver *driver = bdev->driver;
+ void *sync_obj = NULL;
+ int put_count;
+ int ret;
+
+ mtx_lock(&glob->lru_lock);
+ ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+
+ mtx_lock(&bdev->fence_lock);
+ (void) ttm_bo_wait(bo, false, false, true);
+ if (!ret && !bo->sync_obj) {
+ mtx_unlock(&bdev->fence_lock);
+ put_count = ttm_bo_del_from_lru(bo);
+
+ mtx_unlock(&glob->lru_lock);
+ ttm_bo_cleanup_memtype_use(bo);
+
+ ttm_bo_list_ref_sub(bo, put_count, true);
+
+ return;
+ }
+ if (bo->sync_obj)
+ sync_obj = driver->sync_obj_ref(bo->sync_obj);
+ mtx_unlock(&bdev->fence_lock);
+
+ if (!ret) {
+ atomic_set(&bo->reserved, 0);
+ wakeup(bo);
+ }
+
+ refcount_acquire(&bo->list_kref);
+ list_add_tail(&bo->ddestroy, &bdev->ddestroy);
+ mtx_unlock(&glob->lru_lock);
+
+ if (sync_obj) {
+ driver->sync_obj_flush(sync_obj);
+ driver->sync_obj_unref(&sync_obj);
+ }
+ taskqueue_enqueue_timeout(taskqueue_thread, &bdev->wq,
+ ((hz / 100) < 1) ? 1 : hz / 100);
+}
+
+/**
+ * ttm_bo_cleanup_refs_and_unlock
+ * If the bo is idle, remove it from the delayed-destroy and lru lists
+ * and unreference it.  If not idle, do nothing.
+ *
+ * Must be called with lru_lock and reservation held, this function
+ * will drop both before returning.
+ *
+ * @interruptible Any sleeps should occur interruptibly.
+ * @no_wait_gpu Never wait for gpu. Return -EBUSY instead.
+ */
+
+static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
+ bool interruptible,
+ bool no_wait_gpu)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_bo_driver *driver = bdev->driver;
+ struct ttm_bo_global *glob = bo->glob;
+ int put_count;
+ int ret;
+
+ mtx_lock(&bdev->fence_lock);
+ ret = ttm_bo_wait(bo, false, false, true);
+
+ if (ret && !no_wait_gpu) {
+ void *sync_obj;
+
+ /*
+ * Take a reference to the fence and unreserve,
+ * at this point the buffer should be dead, so
+ * no new sync objects can be attached.
+ */
+ sync_obj = driver->sync_obj_ref(bo->sync_obj);
+ mtx_unlock(&bdev->fence_lock);
+
+ atomic_set(&bo->reserved, 0);
+ wakeup(bo);
+ mtx_unlock(&glob->lru_lock);
+
+ ret = driver->sync_obj_wait(sync_obj, false, interruptible);
+ driver->sync_obj_unref(&sync_obj);
+ if (ret)
+ return ret;
+
+ /*
+ * remove sync_obj with ttm_bo_wait, the wait should be
+ * finished, and no new wait object should have been added.
+ */
+ mtx_lock(&bdev->fence_lock);
+ ret = ttm_bo_wait(bo, false, false, true);
+ mtx_unlock(&bdev->fence_lock);
+ if (ret)
+ return ret;
+
+ mtx_lock(&glob->lru_lock);
+ ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+
+ /*
+ * We raced, and lost, someone else holds the reservation now,
+ * and is probably busy in ttm_bo_cleanup_memtype_use.
+ *
+ * Even if it's not the case, because we finished waiting any
+ * delayed destruction would succeed, so just return success
+ * here.
+ */
+ if (ret) {
+ mtx_unlock(&glob->lru_lock);
+ return 0;
+ }
+ } else
+ mtx_unlock(&bdev->fence_lock);
+
+ if (ret || unlikely(list_empty(&bo->ddestroy))) {
+ atomic_set(&bo->reserved, 0);
+ wakeup(bo);
+ mtx_unlock(&glob->lru_lock);
+ return ret;
+ }
+
+ put_count = ttm_bo_del_from_lru(bo);
+ list_del_init(&bo->ddestroy);
+ ++put_count;
+
+ mtx_unlock(&glob->lru_lock);
+ ttm_bo_cleanup_memtype_use(bo);
+
+ ttm_bo_list_ref_sub(bo, put_count, true);
+
+ return 0;
+}
+
+/**
+ * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
+ * encountered buffers.
+ */
+
+static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
+{
+ struct ttm_bo_global *glob = bdev->glob;
+ struct ttm_buffer_object *entry = NULL;
+ int ret = 0;
+
+ mtx_lock(&glob->lru_lock);
+ if (list_empty(&bdev->ddestroy))
+ goto out_unlock;
+
+ entry = list_first_entry(&bdev->ddestroy,
+ struct ttm_buffer_object, ddestroy);
+ refcount_acquire(&entry->list_kref);
+
+ for (;;) {
+ struct ttm_buffer_object *nentry = NULL;
+
+ if (entry->ddestroy.next != &bdev->ddestroy) {
+ nentry = list_first_entry(&entry->ddestroy,
+ struct ttm_buffer_object, ddestroy);
+ refcount_acquire(&nentry->list_kref);
+ }
+
+ ret = ttm_bo_reserve_locked(entry, false, !remove_all, false, 0);
+ if (!ret)
+ ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
+ !remove_all);
+ else
+ mtx_unlock(&glob->lru_lock);
+
+ if (refcount_release(&entry->list_kref))
+ ttm_bo_release_list(entry);
+ entry = nentry;
+
+ if (ret || !entry)
+ goto out;
+
+ mtx_lock(&glob->lru_lock);
+ if (list_empty(&entry->ddestroy))
+ break;
+ }
+
+out_unlock:
+ mtx_unlock(&glob->lru_lock);
+out:
+ if (entry && refcount_release(&entry->list_kref))
+ ttm_bo_release_list(entry);
+ return ret;
+}
+
+static void ttm_bo_delayed_workqueue(void *arg, int pending __unused)
+{
+ struct ttm_bo_device *bdev = arg;
+
+ if (ttm_bo_delayed_delete(bdev, false)) {
+ taskqueue_enqueue_timeout(taskqueue_thread, &bdev->wq,
+ ((hz / 100) < 1) ? 1 : hz / 100);
+ }
+}
+
+static void ttm_bo_release(struct ttm_buffer_object *bo)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
+
+ rw_wlock(&bdev->vm_lock);
+ if (likely(bo->vm_node != NULL)) {
+ RB_REMOVE(ttm_bo_device_buffer_objects,
+ &bdev->addr_space_rb, bo);
+ drm_mm_put_block(bo->vm_node);
+ bo->vm_node = NULL;
+ }
+ rw_wunlock(&bdev->vm_lock);
+ ttm_mem_io_lock(man, false);
+ ttm_mem_io_free_vm(bo);
+ ttm_mem_io_unlock(man);
+ ttm_bo_cleanup_refs_or_queue(bo);
+ if (refcount_release(&bo->list_kref))
+ ttm_bo_release_list(bo);
+}
+
+void ttm_bo_unref(struct ttm_buffer_object **p_bo)
+{
+ struct ttm_buffer_object *bo = *p_bo;
+
+ *p_bo = NULL;
+ if (refcount_release(&bo->kref))
+ ttm_bo_release(bo);
+}
+
+int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
+{
+ int pending;
+
+ taskqueue_cancel_timeout(taskqueue_thread, &bdev->wq, &pending);
+ if (pending)
+ taskqueue_drain_timeout(taskqueue_thread, &bdev->wq);
+ return (pending);
+}
+
+void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
+{
+ if (resched) {
+ taskqueue_enqueue_timeout(taskqueue_thread, &bdev->wq,
+ ((hz / 100) < 1) ? 1 : hz / 100);
+ }
+}
+
+static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
+ bool no_wait_gpu)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_mem_reg evict_mem;
+ struct ttm_placement placement;
+ int ret = 0;
+
+ mtx_lock(&bdev->fence_lock);
+ ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
+ mtx_unlock(&bdev->fence_lock);
+
+ if (unlikely(ret != 0)) {
+ if (ret != -ERESTART) {
+ printf("[TTM] Failed to expire sync object before buffer eviction\n");
+ }
+ goto out;
+ }
+
+ MPASS(ttm_bo_is_reserved(bo));
+
+ evict_mem = bo->mem;
+ evict_mem.mm_node = NULL;
+ evict_mem.bus.io_reserved_vm = false;
+ evict_mem.bus.io_reserved_count = 0;
+
+ placement.fpfn = 0;
+ placement.lpfn = 0;
+ placement.num_placement = 0;
+ placement.num_busy_placement = 0;
+ bdev->driver->evict_flags(bo, &placement);
+ ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
+ no_wait_gpu);
+ if (ret) {
+ if (ret != -ERESTART) {
+ printf("[TTM] Failed to find memory space for buffer 0x%p eviction\n",
+ bo);
+ ttm_bo_mem_space_debug(bo, &placement);
+ }
+ goto out;
+ }
+
+ ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
+ no_wait_gpu);
+ if (ret) {
+ if (ret != -ERESTART)
+ printf("[TTM] Buffer eviction failed\n");
+ ttm_bo_mem_put(bo, &evict_mem);
+ goto out;
+ }
+ bo->evicted = true;
+out:
+ return ret;
+}
+
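+/*
+ * Evict the first buffer on @mem_type's LRU list that can be reserved
+ * without waiting; buffers already on the delayed-destroy list are
+ * retired through ttm_bo_cleanup_refs_and_unlock() instead.
+ */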
+static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
+ uint32_t mem_type,
+ bool interruptible,
+ bool no_wait_gpu)
+{
+ struct ttm_bo_global *glob = bdev->glob;
+ struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+ struct ttm_buffer_object *bo;
+ int ret = -EBUSY, put_count;
+
+ mtx_lock(&glob->lru_lock);
+ list_for_each_entry(bo, &man->lru, lru) {
+ ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+ if (!ret)
+ break;
+ }
+
+ if (ret) {
+ mtx_unlock(&glob->lru_lock);
+ return ret;
+ }
+
+ refcount_acquire(&bo->list_kref);
+
+ if (!list_empty(&bo->ddestroy)) {
+ ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
+ no_wait_gpu);
+ if (refcount_release(&bo->list_kref))
+ ttm_bo_release_list(bo);
+ return ret;
+ }
+
+ put_count = ttm_bo_del_from_lru(bo);
+ mtx_unlock(&glob->lru_lock);
+
+ MPASS(ret == 0);
+
+ ttm_bo_list_ref_sub(bo, put_count, true);
+
+ ret = ttm_bo_evict(bo, interruptible, no_wait_gpu);
+ ttm_bo_unreserve(bo);
+
+ if (refcount_release(&bo->list_kref))
+ ttm_bo_release_list(bo);
+ return ret;
+}
+
+void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];
+
+ if (mem->mm_node)
+ (*man->func->put_node)(man, mem);
+}
+
+/**
+ * Repeatedly evict memory from the LRU for @mem_type until we create enough
+ * space, or we've evicted everything and there isn't enough space.
+ */
+static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
+ uint32_t mem_type,
+ struct ttm_placement *placement,
+ struct ttm_mem_reg *mem,
+ bool interruptible,
+ bool no_wait_gpu)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+ int ret;
+
+ do {
+ ret = (*man->func->get_node)(man, bo, placement, mem);
+ if (unlikely(ret != 0))
+ return ret;
+ if (mem->mm_node)
+ break;
+ ret = ttm_mem_evict_first(bdev, mem_type,
+ interruptible, no_wait_gpu);
+ if (unlikely(ret != 0))
+ return ret;
+ } while (1);
+ if (mem->mm_node == NULL)
+ return -ENOMEM;
+ mem->mem_type = mem_type;
+ return 0;
+}
+
+static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
+ uint32_t cur_placement,
+ uint32_t proposed_placement)
+{
+ uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
+ uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;
+
+ /**
+ * Keep current caching if possible.
+ */
+
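+	/*
+	 * Illustrative example: a bo that is currently TTM_PL_FLAG_CACHED
+	 * and whose proposed flags allow WC | CACHED keeps the CACHED bit,
+	 * so no cache-attribute transition is required.
+	 */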
+ if ((cur_placement & caching) != 0)
+ result |= (cur_placement & caching);
+ else if ((man->default_caching & caching) != 0)
+ result |= man->default_caching;
+ else if ((TTM_PL_FLAG_CACHED & caching) != 0)
+ result |= TTM_PL_FLAG_CACHED;
+ else if ((TTM_PL_FLAG_WC & caching) != 0)
+ result |= TTM_PL_FLAG_WC;
+ else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
+ result |= TTM_PL_FLAG_UNCACHED;
+
+ return result;
+}
+
+static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
+ uint32_t mem_type,
+ uint32_t proposed_placement,
+ uint32_t *masked_placement)
+{
+ uint32_t cur_flags = ttm_bo_type_flags(mem_type);
+
+ if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
+ return false;
+
+ if ((proposed_placement & man->available_caching) == 0)
+ return false;
+
+ cur_flags |= (proposed_placement & man->available_caching);
+
+ *masked_placement = cur_flags;
+ return true;
+}
+
+/**
+ * Creates space for memory region @mem according to its type.
+ *
+ * This function first searches for free space in compatible memory types in
+ * the priority order defined by the driver. If free space isn't found, then
+ * ttm_bo_mem_force_space is attempted in priority order to evict and find
+ * space.
+ */
+int ttm_bo_mem_space(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement,
+ struct ttm_mem_reg *mem,
+ bool interruptible,
+ bool no_wait_gpu)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_mem_type_manager *man;
+ uint32_t mem_type = TTM_PL_SYSTEM;
+ uint32_t cur_flags = 0;
+ bool type_found = false;
+ bool type_ok = false;
+ bool has_erestartsys = false;
+ int i, ret;
+
+ mem->mm_node = NULL;
+ for (i = 0; i < placement->num_placement; ++i) {
+ ret = ttm_mem_type_from_flags(placement->placement[i],
+ &mem_type);
+ if (ret)
+ return ret;
+ man = &bdev->man[mem_type];
+
+ type_ok = ttm_bo_mt_compatible(man,
+ mem_type,
+ placement->placement[i],
+ &cur_flags);
+
+ if (!type_ok)
+ continue;
+
+ cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
+ cur_flags);
+ /*
+		 * Merge the access and other non-mapping-related flag bits
+		 * from the memory placement flags into the current flags.
+ */
+ ttm_flag_masked(&cur_flags, placement->placement[i],
+ ~TTM_PL_MASK_MEMTYPE);
+
+ if (mem_type == TTM_PL_SYSTEM)
+ break;
+
+ if (man->has_type && man->use_type) {
+ type_found = true;
+ ret = (*man->func->get_node)(man, bo, placement, mem);
+ if (unlikely(ret))
+ return ret;
+ }
+ if (mem->mm_node)
+ break;
+ }
+
+ if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
+ mem->mem_type = mem_type;
+ mem->placement = cur_flags;
+ return 0;
+ }
+
+ if (!type_found)
+ return -EINVAL;
+
+ for (i = 0; i < placement->num_busy_placement; ++i) {
+ ret = ttm_mem_type_from_flags(placement->busy_placement[i],
+ &mem_type);
+ if (ret)
+ return ret;
+ man = &bdev->man[mem_type];
+ if (!man->has_type)
+ continue;
+ if (!ttm_bo_mt_compatible(man,
+ mem_type,
+ placement->busy_placement[i],
+ &cur_flags))
+ continue;
+
+ cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
+ cur_flags);
+ /*
+		 * Merge the access and other non-mapping-related flag bits
+		 * from the memory placement flags into the current flags.
+ */
+ ttm_flag_masked(&cur_flags, placement->busy_placement[i],
+ ~TTM_PL_MASK_MEMTYPE);
+
+ if (mem_type == TTM_PL_SYSTEM) {
+ mem->mem_type = mem_type;
+ mem->placement = cur_flags;
+ mem->mm_node = NULL;
+ return 0;
+ }
+
+ ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
+ interruptible, no_wait_gpu);
+ if (ret == 0 && mem->mm_node) {
+ mem->placement = cur_flags;
+ return 0;
+ }
+ if (ret == -ERESTART)
+ has_erestartsys = true;
+ }
+ ret = (has_erestartsys) ? -ERESTART : -ENOMEM;
+ return ret;
+}
+
+static
+int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement,
+ bool interruptible,
+ bool no_wait_gpu)
+{
+ int ret = 0;
+ struct ttm_mem_reg mem;
+ struct ttm_bo_device *bdev = bo->bdev;
+
+ MPASS(ttm_bo_is_reserved(bo));
+
+ /*
+ * FIXME: It's possible to pipeline buffer moves.
+ * Have the driver move function wait for idle when necessary,
+ * instead of doing it here.
+ */
+ mtx_lock(&bdev->fence_lock);
+ ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
+ mtx_unlock(&bdev->fence_lock);
+ if (ret)
+ return ret;
+ mem.num_pages = bo->num_pages;
+ mem.size = mem.num_pages << PAGE_SHIFT;
+ mem.page_alignment = bo->mem.page_alignment;
+ mem.bus.io_reserved_vm = false;
+ mem.bus.io_reserved_count = 0;
+ /*
+ * Determine where to move the buffer.
+ */
+ ret = ttm_bo_mem_space(bo, placement, &mem,
+ interruptible, no_wait_gpu);
+ if (ret)
+ goto out_unlock;
+ ret = ttm_bo_handle_move_mem(bo, &mem, false,
+ interruptible, no_wait_gpu);
+out_unlock:
+ if (ret && mem.mm_node)
+ ttm_bo_mem_put(bo, &mem);
+ return ret;
+}
+
+static int ttm_bo_mem_compat(struct ttm_placement *placement,
+ struct ttm_mem_reg *mem)
+{
+ int i;
+
+ if (mem->mm_node && placement->lpfn != 0 &&
+ (mem->start < placement->fpfn ||
+ mem->start + mem->num_pages > placement->lpfn))
+ return -1;
+
+ for (i = 0; i < placement->num_placement; i++) {
+ if ((placement->placement[i] & mem->placement &
+ TTM_PL_MASK_CACHING) &&
+ (placement->placement[i] & mem->placement &
+ TTM_PL_MASK_MEM))
+ return i;
+ }
+ return -1;
+}
+
+int ttm_bo_validate(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement,
+ bool interruptible,
+ bool no_wait_gpu)
+{
+ int ret;
+
+ MPASS(ttm_bo_is_reserved(bo));
+ /* Check that range is valid */
+ if (placement->lpfn || placement->fpfn)
+ if (placement->fpfn > placement->lpfn ||
+ (placement->lpfn - placement->fpfn) < bo->num_pages)
+ return -EINVAL;
+ /*
+ * Check whether we need to move buffer.
+ */
+ ret = ttm_bo_mem_compat(placement, &bo->mem);
+ if (ret < 0) {
+ ret = ttm_bo_move_buffer(bo, placement, interruptible,
+ no_wait_gpu);
+ if (ret)
+ return ret;
+ } else {
+ /*
+		 * Merge the access and other non-mapping-related flag bits
+		 * from the compatible memory placement flags into the active
+		 * flags.
+ */
+ ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
+ ~TTM_PL_MASK_MEMTYPE);
+ }
+ /*
+ * We might need to add a TTM.
+ */
+ if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
+ ret = ttm_bo_add_ttm(bo, true);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+int ttm_bo_check_placement(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement)
+{
+ MPASS(!((placement->fpfn || placement->lpfn) &&
+ (bo->mem.num_pages > (placement->lpfn - placement->fpfn))));
+
+ return 0;
+}
+
+int ttm_bo_init(struct ttm_bo_device *bdev,
+ struct ttm_buffer_object *bo,
+ unsigned long size,
+ enum ttm_bo_type type,
+ struct ttm_placement *placement,
+ uint32_t page_alignment,
+ bool interruptible,
+ struct vm_object *persistent_swap_storage,
+ size_t acc_size,
+ struct sg_table *sg,
+ void (*destroy) (struct ttm_buffer_object *))
+{
+ int ret = 0;
+ unsigned long num_pages;
+ struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
+
+ ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
+ if (ret) {
+ printf("[TTM] Out of kernel memory\n");
+ if (destroy)
+ (*destroy)(bo);
+ else
+ free(bo, M_TTM_BO);
+ return -ENOMEM;
+ }
+
+ num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ if (num_pages == 0) {
+ printf("[TTM] Illegal buffer object size\n");
+ if (destroy)
+ (*destroy)(bo);
+ else
+ free(bo, M_TTM_BO);
+ ttm_mem_global_free(mem_glob, acc_size);
+ return -EINVAL;
+ }
+ bo->destroy = destroy;
+
+ refcount_init(&bo->kref, 1);
+ refcount_init(&bo->list_kref, 1);
+ atomic_set(&bo->cpu_writers, 0);
+ atomic_set(&bo->reserved, 1);
+ INIT_LIST_HEAD(&bo->lru);
+ INIT_LIST_HEAD(&bo->ddestroy);
+ INIT_LIST_HEAD(&bo->swap);
+ INIT_LIST_HEAD(&bo->io_reserve_lru);
+ bo->bdev = bdev;
+ bo->glob = bdev->glob;
+ bo->type = type;
+ bo->num_pages = num_pages;
+ bo->mem.size = num_pages << PAGE_SHIFT;
+ bo->mem.mem_type = TTM_PL_SYSTEM;
+ bo->mem.num_pages = bo->num_pages;
+ bo->mem.mm_node = NULL;
+ bo->mem.page_alignment = page_alignment;
+ bo->mem.bus.io_reserved_vm = false;
+ bo->mem.bus.io_reserved_count = 0;
+ bo->priv_flags = 0;
+ bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
+ bo->seq_valid = false;
+ bo->persistent_swap_storage = persistent_swap_storage;
+ bo->acc_size = acc_size;
+ bo->sg = sg;
+ atomic_inc(&bo->glob->bo_count);
+
+ ret = ttm_bo_check_placement(bo, placement);
+ if (unlikely(ret != 0))
+ goto out_err;
+
+ /*
+ * For ttm_bo_type_device buffers, allocate
+ * address space from the device.
+ */
+ if (bo->type == ttm_bo_type_device ||
+ bo->type == ttm_bo_type_sg) {
+ ret = ttm_bo_setup_vm(bo);
+ if (ret)
+ goto out_err;
+ }
+
+ ret = ttm_bo_validate(bo, placement, interruptible, false);
+ if (ret)
+ goto out_err;
+
+ ttm_bo_unreserve(bo);
+ return 0;
+
+out_err:
+ ttm_bo_unreserve(bo);
+ ttm_bo_unref(&bo);
+
+ return ret;
+}
+
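+/*
+ * Worked example (illustrative, 4K pages): for bo_size = 1MB, npages is
+ * 256, so the accounted size is ttm_round_pot(struct_size) +
+ * PAGE_ALIGN(256 * sizeof(void *)) + ttm_round_pot(sizeof(struct ttm_tt)).
+ */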
+size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
+ unsigned long bo_size,
+ unsigned struct_size)
+{
+ unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
+ size_t size = 0;
+
+ size += ttm_round_pot(struct_size);
+ size += PAGE_ALIGN(npages * sizeof(void *));
+ size += ttm_round_pot(sizeof(struct ttm_tt));
+ return size;
+}
+
+size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
+ unsigned long bo_size,
+ unsigned struct_size)
+{
+ unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
+ size_t size = 0;
+
+ size += ttm_round_pot(struct_size);
+ size += PAGE_ALIGN(npages * sizeof(void *));
+ size += PAGE_ALIGN(npages * sizeof(dma_addr_t));
+ size += ttm_round_pot(sizeof(struct ttm_dma_tt));
+ return size;
+}
+
+int ttm_bo_create(struct ttm_bo_device *bdev,
+ unsigned long size,
+ enum ttm_bo_type type,
+ struct ttm_placement *placement,
+ uint32_t page_alignment,
+ bool interruptible,
+ struct vm_object *persistent_swap_storage,
+ struct ttm_buffer_object **p_bo)
+{
+ struct ttm_buffer_object *bo;
+ size_t acc_size;
+ int ret;
+
+ bo = malloc(sizeof(*bo), M_TTM_BO, M_WAITOK | M_ZERO);
+ acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
+ ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
+ interruptible, persistent_swap_storage, acc_size,
+ NULL, NULL);
+ if (likely(ret == 0))
+ *p_bo = bo;
+
+ return ret;
+}
+
+static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
+ unsigned mem_type, bool allow_errors)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+ struct ttm_bo_global *glob = bdev->glob;
+ int ret;
+
+ /*
+ * Can't use standard list traversal since we're unlocking.
+ */
+
+ mtx_lock(&glob->lru_lock);
+ while (!list_empty(&man->lru)) {
+ mtx_unlock(&glob->lru_lock);
+ ret = ttm_mem_evict_first(bdev, mem_type, false, false);
+ if (ret) {
+ if (allow_errors) {
+ return ret;
+ } else {
+ printf("[TTM] Cleanup eviction failed\n");
+ }
+ }
+ mtx_lock(&glob->lru_lock);
+ }
+ mtx_unlock(&glob->lru_lock);
+ return 0;
+}
+
+int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
+{
+ struct ttm_mem_type_manager *man;
+ int ret = -EINVAL;
+
+ if (mem_type >= TTM_NUM_MEM_TYPES) {
+ printf("[TTM] Illegal memory type %d\n", mem_type);
+ return ret;
+ }
+ man = &bdev->man[mem_type];
+
+ if (!man->has_type) {
+ printf("[TTM] Trying to take down uninitialized memory manager type %u\n",
+ mem_type);
+ return ret;
+ }
+
+ man->use_type = false;
+ man->has_type = false;
+
+ ret = 0;
+ if (mem_type > 0) {
+ ttm_bo_force_list_clean(bdev, mem_type, false);
+
+ ret = (*man->func->takedown)(man);
+ }
+
+ return ret;
+}
+
+int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+
+ if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
+ printf("[TTM] Illegal memory manager memory type %u\n", mem_type);
+ return -EINVAL;
+ }
+
+ if (!man->has_type) {
+ printf("[TTM] Memory type %u has not been initialized\n", mem_type);
+ return 0;
+ }
+
+ return ttm_bo_force_list_clean(bdev, mem_type, true);
+}
+
+int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
+ unsigned long p_size)
+{
+ int ret = -EINVAL;
+ struct ttm_mem_type_manager *man;
+
+ MPASS(type < TTM_NUM_MEM_TYPES);
+ man = &bdev->man[type];
+ MPASS(!man->has_type);
+ man->io_reserve_fastpath = true;
+ man->use_io_reserve_lru = false;
+ sx_init(&man->io_reserve_mutex, "ttmman");
+ INIT_LIST_HEAD(&man->io_reserve_lru);
+
+ ret = bdev->driver->init_mem_type(bdev, type, man);
+ if (ret)
+ return ret;
+ man->bdev = bdev;
+
+ ret = 0;
+ if (type != TTM_PL_SYSTEM) {
+ ret = (*man->func->init)(man, p_size);
+ if (ret)
+ return ret;
+ }
+ man->has_type = true;
+ man->use_type = true;
+ man->size = p_size;
+
+ INIT_LIST_HEAD(&man->lru);
+
+ return 0;
+}
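+
+/*
+ * Illustrative driver usage (hypothetical vram_pages value):
+ *
+ *	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, vram_pages);
+ */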
+
+static void ttm_bo_global_kobj_release(struct ttm_bo_global *glob)
+{
+
+ ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
+ vm_page_free(glob->dummy_read_page);
+ free(glob, M_DRM_GLOBAL);
+}
+
+void ttm_bo_global_release(struct drm_global_reference *ref)
+{
+ struct ttm_bo_global *glob = ref->object;
+
+ if (refcount_release(&glob->kobj_ref))
+ ttm_bo_global_kobj_release(glob);
+}
+
+int ttm_bo_global_init(struct drm_global_reference *ref)
+{
+ struct ttm_bo_global_ref *bo_ref =
+ container_of(ref, struct ttm_bo_global_ref, ref);
+ struct ttm_bo_global *glob = ref->object;
+ int ret;
+
+ sx_init(&glob->device_list_mutex, "ttmdlm");
+ mtx_init(&glob->lru_lock, "ttmlru", NULL, MTX_DEF);
+ glob->mem_glob = bo_ref->mem_glob;
+ glob->dummy_read_page = vm_page_alloc_contig(NULL, 0,
+ VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ,
+ 1, 0, VM_MAX_ADDRESS, PAGE_SIZE, 0, VM_MEMATTR_UNCACHEABLE);
+
+ if (unlikely(glob->dummy_read_page == NULL)) {
+ ret = -ENOMEM;
+ goto out_no_drp;
+ }
+
+ INIT_LIST_HEAD(&glob->swap_lru);
+ INIT_LIST_HEAD(&glob->device_list);
+
+ ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
+ ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
+ if (unlikely(ret != 0)) {
+ printf("[TTM] Could not register buffer object swapout\n");
+ goto out_no_shrink;
+ }
+
+ atomic_set(&glob->bo_count, 0);
+
+ refcount_init(&glob->kobj_ref, 1);
+ return (0);
+
+out_no_shrink:
+ vm_page_free(glob->dummy_read_page);
+out_no_drp:
+ free(glob, M_DRM_GLOBAL);
+ return ret;
+}
+
+int ttm_bo_device_release(struct ttm_bo_device *bdev)
+{
+ int ret = 0;
+ unsigned i = TTM_NUM_MEM_TYPES;
+ struct ttm_mem_type_manager *man;
+ struct ttm_bo_global *glob = bdev->glob;
+
+ while (i--) {
+ man = &bdev->man[i];
+ if (man->has_type) {
+ man->use_type = false;
+ if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
+ ret = -EBUSY;
+ printf("[TTM] DRM memory manager type %d is not clean\n",
+ i);
+ }
+ man->has_type = false;
+ }
+ }
+
+ sx_xlock(&glob->device_list_mutex);
+ list_del(&bdev->device_list);
+ sx_xunlock(&glob->device_list_mutex);
+
+ if (taskqueue_cancel_timeout(taskqueue_thread, &bdev->wq, NULL))
+ taskqueue_drain_timeout(taskqueue_thread, &bdev->wq);
+
+ while (ttm_bo_delayed_delete(bdev, true))
+ ;
+
+ mtx_lock(&glob->lru_lock);
+ if (list_empty(&bdev->ddestroy))
+ TTM_DEBUG("Delayed destroy list was clean\n");
+
+ if (list_empty(&bdev->man[0].lru))
+ TTM_DEBUG("Swap list was clean\n");
+ mtx_unlock(&glob->lru_lock);
+
+ MPASS(drm_mm_clean(&bdev->addr_space_mm));
+ rw_wlock(&bdev->vm_lock);
+ drm_mm_takedown(&bdev->addr_space_mm);
+ rw_wunlock(&bdev->vm_lock);
+
+ return ret;
+}
+
+int ttm_bo_device_init(struct ttm_bo_device *bdev,
+ struct ttm_bo_global *glob,
+ struct ttm_bo_driver *driver,
+ uint64_t file_page_offset,
+ bool need_dma32)
+{
+ int ret = -EINVAL;
+
+ rw_init(&bdev->vm_lock, "ttmvml");
+ bdev->driver = driver;
+
+ memset(bdev->man, 0, sizeof(bdev->man));
+
+ /*
+ * Initialize the system memory buffer type.
+ * Other types need to be driver / IOCTL initialized.
+ */
+ ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
+ if (unlikely(ret != 0))
+ goto out_no_sys;
+
+ RB_INIT(&bdev->addr_space_rb);
+ ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
+ if (unlikely(ret != 0))
+ goto out_no_addr_mm;
+
+ TIMEOUT_TASK_INIT(taskqueue_thread, &bdev->wq, 0,
+ ttm_bo_delayed_workqueue, bdev);
+ INIT_LIST_HEAD(&bdev->ddestroy);
+ bdev->dev_mapping = NULL;
+ bdev->glob = glob;
+ bdev->need_dma32 = need_dma32;
+ bdev->val_seq = 0;
+ mtx_init(&bdev->fence_lock, "ttmfence", NULL, MTX_DEF);
+ sx_xlock(&glob->device_list_mutex);
+ list_add_tail(&bdev->device_list, &glob->device_list);
+ sx_xunlock(&glob->device_list_mutex);
+
+ return 0;
+out_no_addr_mm:
+ ttm_bo_clean_mm(bdev, 0);
+out_no_sys:
+ return ret;
+}
+
+/*
+ * buffer object vm functions.
+ */
+
+bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+
+ if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
+ if (mem->mem_type == TTM_PL_SYSTEM)
+ return false;
+
+ if (man->flags & TTM_MEMTYPE_FLAG_CMA)
+ return false;
+
+ if (mem->placement & TTM_PL_FLAG_CACHED)
+ return false;
+ }
+ return true;
+}
+
+void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ /* off_t offset = (off_t)bo->addr_space_offset;XXXKIB */
+ /* off_t holelen = ((off_t)bo->mem.num_pages) << PAGE_SHIFT;XXXKIB */
+
+ if (!bdev->dev_mapping)
+ return;
+ /* unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1); XXXKIB */
+ ttm_mem_io_free_vm(bo);
+}
+
+void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
+
+ ttm_mem_io_lock(man, false);
+ ttm_bo_unmap_virtual_locked(bo);
+ ttm_mem_io_unlock(man);
+}
+
+static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+
+ /* The caller acquired bdev->vm_lock. */
+ RB_INSERT(ttm_bo_device_buffer_objects, &bdev->addr_space_rb, bo);
+}
+
+/**
+ * ttm_bo_setup_vm:
+ *
+ * @bo: the buffer to allocate address space for
+ *
+ * Allocate address space in the drm device so that applications
+ * can mmap the buffer and access the contents. This only
+ * applies to ttm_bo_type_device objects as others are not
+ * placed in the drm device address space.
+ */
+
+static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ int ret;
+
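+	/*
+	 * drm_mm_pre_get() preallocates free-space nodes so that the
+	 * drm_mm_get_block_atomic() call below does not have to allocate
+	 * while vm_lock is write-held; if the preallocated nodes are
+	 * exhausted by a racing thread, drop the lock and retry from here.
+	 */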
+retry_pre_get:
+ ret = drm_mm_pre_get(&bdev->addr_space_mm);
+ if (unlikely(ret != 0))
+ return ret;
+
+ rw_wlock(&bdev->vm_lock);
+ bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
+ bo->mem.num_pages, 0, 0);
+
+ if (unlikely(bo->vm_node == NULL)) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+
+ bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
+ bo->mem.num_pages, 0);
+
+ if (unlikely(bo->vm_node == NULL)) {
+ rw_wunlock(&bdev->vm_lock);
+ goto retry_pre_get;
+ }
+
+ ttm_bo_vm_insert_rb(bo);
+ rw_wunlock(&bdev->vm_lock);
+ bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;
+
+ return 0;
+out_unlock:
+ rw_wunlock(&bdev->vm_lock);
+ return ret;
+}
+
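+/*
+ * Called with bdev->fence_lock held.  The lock is dropped around the
+ * driver's sync_obj_wait() and sync_obj_unref() calls and re-taken before
+ * bo->sync_obj is examined again, which is why the loop re-checks the
+ * pointer on every pass.
+ */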
+int ttm_bo_wait(struct ttm_buffer_object *bo,
+ bool lazy, bool interruptible, bool no_wait)
+{
+ struct ttm_bo_driver *driver = bo->bdev->driver;
+ struct ttm_bo_device *bdev = bo->bdev;
+ void *sync_obj;
+ int ret = 0;
+
+ if (likely(bo->sync_obj == NULL))
+ return 0;
+
+ while (bo->sync_obj) {
+
+ if (driver->sync_obj_signaled(bo->sync_obj)) {
+ void *tmp_obj = bo->sync_obj;
+ bo->sync_obj = NULL;
+ clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
+ mtx_unlock(&bdev->fence_lock);
+ driver->sync_obj_unref(&tmp_obj);
+ mtx_lock(&bdev->fence_lock);
+ continue;
+ }
+
+ if (no_wait)
+ return -EBUSY;
+
+ sync_obj = driver->sync_obj_ref(bo->sync_obj);
+ mtx_unlock(&bdev->fence_lock);
+ ret = driver->sync_obj_wait(sync_obj,
+ lazy, interruptible);
+ if (unlikely(ret != 0)) {
+ driver->sync_obj_unref(&sync_obj);
+ mtx_lock(&bdev->fence_lock);
+ return ret;
+ }
+ mtx_lock(&bdev->fence_lock);
+ if (likely(bo->sync_obj == sync_obj)) {
+ void *tmp_obj = bo->sync_obj;
+ bo->sync_obj = NULL;
+ clear_bit(TTM_BO_PRIV_FLAG_MOVING,
+ &bo->priv_flags);
+ mtx_unlock(&bdev->fence_lock);
+ driver->sync_obj_unref(&sync_obj);
+ driver->sync_obj_unref(&tmp_obj);
+ mtx_lock(&bdev->fence_lock);
+ } else {
+ mtx_unlock(&bdev->fence_lock);
+ driver->sync_obj_unref(&sync_obj);
+ mtx_lock(&bdev->fence_lock);
+ }
+ }
+ return 0;
+}
+
+int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ int ret = 0;
+
+ /*
+ * Using ttm_bo_reserve makes sure the lru lists are updated.
+ */
+
+ ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
+ if (unlikely(ret != 0))
+ return ret;
+ mtx_lock(&bdev->fence_lock);
+ ret = ttm_bo_wait(bo, false, true, no_wait);
+ mtx_unlock(&bdev->fence_lock);
+ if (likely(ret == 0))
+ atomic_inc(&bo->cpu_writers);
+ ttm_bo_unreserve(bo);
+ return ret;
+}
+
+void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
+{
+ atomic_dec(&bo->cpu_writers);
+}
+
+/**
+ * A buffer object shrink method that tries to swap out the first
+ * buffer object on the bo_global::swap_lru list.
+ */
+
+static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
+{
+ struct ttm_bo_global *glob =
+ container_of(shrink, struct ttm_bo_global, shrink);
+ struct ttm_buffer_object *bo;
+ int ret = -EBUSY;
+ int put_count;
+ uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
+
+ mtx_lock(&glob->lru_lock);
+ list_for_each_entry(bo, &glob->swap_lru, swap) {
+ ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+ if (!ret)
+ break;
+ }
+
+ if (ret) {
+ mtx_unlock(&glob->lru_lock);
+ return ret;
+ }
+
+ refcount_acquire(&bo->list_kref);
+
+ if (!list_empty(&bo->ddestroy)) {
+ ret = ttm_bo_cleanup_refs_and_unlock(bo, false, false);
+ if (refcount_release(&bo->list_kref))
+ ttm_bo_release_list(bo);
+ return ret;
+ }
+
+ put_count = ttm_bo_del_from_lru(bo);
+ mtx_unlock(&glob->lru_lock);
+
+ ttm_bo_list_ref_sub(bo, put_count, true);
+
+ /**
+ * Wait for GPU, then move to system cached.
+ */
+
+ mtx_lock(&bo->bdev->fence_lock);
+ ret = ttm_bo_wait(bo, false, false, false);
+ mtx_unlock(&bo->bdev->fence_lock);
+
+ if (unlikely(ret != 0))
+ goto out;
+
+ if ((bo->mem.placement & swap_placement) != swap_placement) {
+ struct ttm_mem_reg evict_mem;
+
+ evict_mem = bo->mem;
+ evict_mem.mm_node = NULL;
+ evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
+ evict_mem.mem_type = TTM_PL_SYSTEM;
+
+ ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
+ false, false);
+ if (unlikely(ret != 0))
+ goto out;
+ }
+
+ ttm_bo_unmap_virtual(bo);
+
+ /**
+ * Swap out. Buffer will be swapped in again as soon as
+ * anyone tries to access a ttm page.
+ */
+
+ if (bo->bdev->driver->swap_notify)
+ bo->bdev->driver->swap_notify(bo);
+
+ ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
+out:
+
+ /**
+ * Unreserve without putting on LRU to avoid swapping out an
+ * already swapped buffer.
+ */
+
+ atomic_set(&bo->reserved, 0);
+ wakeup(bo);
+ if (refcount_release(&bo->list_kref))
+ ttm_bo_release_list(bo);
+ return ret;
+}
+
+void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
+{
+ while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
+ ;
+}
diff --git a/sys/dev/drm2/ttm/ttm_bo_api.h b/sys/dev/drm2/ttm/ttm_bo_api.h
new file mode 100644
index 0000000..4b16ebd
--- /dev/null
+++ b/sys/dev/drm2/ttm/ttm_bo_api.h
@@ -0,0 +1,740 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+/* $FreeBSD$ */
+
+#ifndef _TTM_BO_API_H_
+#define _TTM_BO_API_H_
+
+#include <dev/drm2/drmP.h>
+
+struct ttm_bo_device;
+
+struct drm_mm_node;
+
+/**
+ * struct ttm_placement
+ *
+ * @fpfn: first valid page frame number to put the object
+ * @lpfn: last valid page frame number to put the object
+ * @num_placement: number of preferred placements
+ * @placement: preferred placements
+ * @num_busy_placement: number of preferred placements when the buffer must
+ * be evicted
+ * @busy_placement: preferred placements when the buffer must be evicted
+ *
+ * Structure indicating the placement you request for an object.
+ */
+struct ttm_placement {
+ unsigned fpfn;
+ unsigned lpfn;
+ unsigned num_placement;
+ const uint32_t *placement;
+ unsigned num_busy_placement;
+ const uint32_t *busy_placement;
+};
+
+/**
+ * struct ttm_bus_placement
+ *
+ * @addr: mapped virtual address
+ * @base: bus base address
+ * @is_iomem: is this io memory?
+ * @size: size in byte
+ * @offset: offset from the base address
+ * @io_reserved_vm: The VM system has a refcount in @io_reserved_count
+ * @io_reserved_count: Refcounting the numbers of callers to ttm_mem_io_reserve
+ *
+ * Structure indicating the bus placement of an object.
+ */
+struct ttm_bus_placement {
+ void *addr;
+ unsigned long base;
+ unsigned long size;
+ unsigned long offset;
+ bool is_iomem;
+ bool io_reserved_vm;
+ uint64_t io_reserved_count;
+};
+
+/**
+ * struct ttm_mem_reg
+ *
+ * @mm_node: Memory manager node.
+ * @size: Requested size of memory region.
+ * @num_pages: Actual size of memory region in pages.
+ * @page_alignment: Page alignment.
+ * @placement: Placement flags.
+ * @bus: Placement on io bus accessible to the CPU
+ *
+ * Structure indicating the placement and space resources used by a
+ * buffer object.
+ */
+
+struct ttm_mem_reg {
+ void *mm_node;
+ unsigned long start;
+ unsigned long size;
+ unsigned long num_pages;
+ uint32_t page_alignment;
+ uint32_t mem_type;
+ uint32_t placement;
+ struct ttm_bus_placement bus;
+};
+
+/**
+ * enum ttm_bo_type
+ *
+ * @ttm_bo_type_device: These are 'normal' buffers that can
+ * be mmapped by user space. Each of these bos occupy a slot in the
+ * device address space, that can be used for normal vm operations.
+ *
+ * @ttm_bo_type_kernel: These buffers are like ttm_bo_type_device buffers,
+ * but they cannot be accessed from user-space. For kernel-only use.
+ *
+ * @ttm_bo_type_sg: Buffer made from dmabuf sg table shared with another
+ * driver.
+ */
+
+enum ttm_bo_type {
+ ttm_bo_type_device,
+ ttm_bo_type_kernel,
+ ttm_bo_type_sg
+};
+
+struct ttm_tt;
+
+/**
+ * struct ttm_buffer_object
+ *
+ * @bdev: Pointer to the buffer object device structure.
+ * @type: The bo type.
+ * @destroy: Destruction function. If NULL, free(9) is used.
+ * @num_pages: Actual number of pages.
+ * @addr_space_offset: Address space offset.
+ * @acc_size: Accounted size for this object.
+ * @kref: Reference count of this buffer object. When this refcount reaches
+ * zero, the object is put on the delayed delete list.
+ * @list_kref: List reference count of this buffer object. This member is
+ * used to avoid destruction while the buffer object is still on a list.
+ * Each lru list the object is on keeps one refcount, the delayed delete
+ * list keeps one, and kref != 0 keeps one. When this refcount reaches
+ * zero, the object is destroyed.
+ * @event_queue: Queue for processes waiting on buffer object status change.
+ * @mem: structure describing current placement.
+ * @persistent_swap_storage: Usually the swap storage is deleted for buffers
+ * pinned in physical memory. If this behaviour is not desired, this member
+ * holds a pointer to a persistent shmem object.
+ * @ttm: TTM structure holding system pages.
+ * @evicted: Whether the object was evicted without user-space knowing.
+ * @cpu_writes: For synchronization. Number of cpu writers.
+ * @lru: List head for the lru list.
+ * @ddestroy: List head for the delayed destroy list.
+ * @swap: List head for swap LRU list.
+ * @val_seq: Sequence of the validation holding the @reserved lock.
+ * Used to avoid starvation when many processes compete to validate the
+ * buffer. This member is protected by the bo_device::lru_lock.
+ * @seq_valid: The value of @val_seq is valid. This value is protected by
+ * the bo_device::lru_lock.
+ * @reserved: Deadlock-free lock used for synchronization state transitions.
+ * @sync_obj: Pointer to a synchronization object.
+ * @priv_flags: Flags describing buffer object internal state.
+ * @vm_rb: Rb node for the vm rb tree.
+ * @vm_node: Address space manager node.
+ * @offset: The current GPU offset, which can have different meanings
+ * depending on the memory type. For SYSTEM type memory, it should be 0.
+ * @cur_placement: Hint of current placement.
+ *
+ * Base class for TTM buffer object, that deals with data placement and CPU
+ * mappings. GPU mappings are really up to the driver, but for simpler GPUs
+ * the driver can usually use the placement offset @offset directly as the
+ * GPU virtual address. For drivers implementing multiple
+ * GPU memory manager contexts, the driver should manage the address space
+ * in these contexts separately and use these objects to get the correct
+ * placement and caching for these GPU maps. This makes it possible to use
+ * these objects for even quite elaborate memory management schemes.
+ * The destroy member, together with the API visibility of this object,
+ * makes it possible to derive driver specific types.
+ */
+
+struct ttm_buffer_object {
+ /**
+ * Members constant at init.
+ */
+
+ struct ttm_bo_global *glob;
+ struct ttm_bo_device *bdev;
+ enum ttm_bo_type type;
+ void (*destroy) (struct ttm_buffer_object *);
+ unsigned long num_pages;
+ uint64_t addr_space_offset;
+ size_t acc_size;
+
+ /**
+ * Members not needing protection.
+ */
+
+ u_int kref;
+ u_int list_kref;
+ /* wait_queue_head_t event_queue; */
+
+ /**
+ * Members protected by the bo::reserved lock.
+ */
+
+ struct ttm_mem_reg mem;
+ struct vm_object *persistent_swap_storage;
+ struct ttm_tt *ttm;
+ bool evicted;
+
+ /**
+ * Members protected by the bo::reserved lock only when written to.
+ */
+
+ atomic_t cpu_writers;
+
+ /**
+ * Members protected by the bdev::lru_lock.
+ */
+
+ struct list_head lru;
+ struct list_head ddestroy;
+ struct list_head swap;
+ struct list_head io_reserve_lru;
+ uint32_t val_seq;
+ bool seq_valid;
+
+ /**
+ * Members protected by the bdev::lru_lock
+ * only when written to.
+ */
+
+ atomic_t reserved;
+
+ /**
+ * Members protected by struct buffer_object_device::fence_lock
+ * In addition, setting sync_obj to anything else
+ * than NULL requires bo::reserved to be held. This allows for
+ * checking NULL while reserved but not holding the mentioned lock.
+ */
+
+ void *sync_obj;
+ unsigned long priv_flags;
+
+ /**
+ * Members protected by the bdev::vm_lock
+ */
+
+ RB_ENTRY(ttm_buffer_object) vm_rb;
+ struct drm_mm_node *vm_node;
+
+ /**
+ * Special members that are protected by the reserve lock
+ * and the bo::lock when written to. Can be read with
+ * either of these locks held.
+ */
+
+ unsigned long offset;
+ uint32_t cur_placement;
+
+ struct sg_table *sg;
+};
+
+/**
+ * struct ttm_bo_kmap_obj
+ *
+ * @virtual: The current kernel virtual address.
+ * @page: The page when kmap'ing a single page.
+ * @bo_kmap_type: Type of bo_kmap.
+ *
+ * Object describing a kernel mapping. Since a TTM bo may be located
+ * in various memory types with various caching policies, the
+ * mapping can either be an ioremap, a vmap, a kmap or part of a
+ * premapped region.
+ */
+
+#define TTM_BO_MAP_IOMEM_MASK 0x80
+struct ttm_bo_kmap_obj {
+ void *virtual;
+ struct vm_page *page;
+ struct sf_buf *sf;
+ int num_pages;
+ unsigned long size;
+ enum {
+ ttm_bo_map_iomap = 1 | TTM_BO_MAP_IOMEM_MASK,
+ ttm_bo_map_vmap = 2,
+ ttm_bo_map_kmap = 3,
+ ttm_bo_map_premapped = 4 | TTM_BO_MAP_IOMEM_MASK,
+ } bo_kmap_type;
+ struct ttm_buffer_object *bo;
+};
+
+/**
+ * ttm_bo_reference - reference a struct ttm_buffer_object
+ *
+ * @bo: The buffer object.
+ *
+ * Returns a refcounted pointer to a buffer object.
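+ *
+ * Illustrative use:
+ *	struct ttm_buffer_object *ref = ttm_bo_reference(bo);
+ *	...
+ *	ttm_bo_unref(&ref);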
+ */
+
+static inline struct ttm_buffer_object *
+ttm_bo_reference(struct ttm_buffer_object *bo)
+{
+ refcount_acquire(&bo->kref);
+ return bo;
+}
+
+/**
+ * ttm_bo_wait - wait for buffer idle.
+ *
+ * @bo: The buffer object.
+ * @interruptible: Use interruptible wait.
+ * @no_wait: Return immediately if buffer is busy.
+ *
+ * This function must be called with the bdev::fence_lock held, and makes
+ * sure any previous rendering to the buffer is completed.
+ * Note: It might be necessary to block validations before the
+ * wait by reserving the buffer.
+ * Returns -EBUSY if no_wait is true and the buffer is busy.
+ * Returns -ERESTARTSYS if interrupted by a signal.
+ */
+extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
+ bool interruptible, bool no_wait);
+/**
+ * ttm_bo_validate
+ *
+ * @bo: The buffer object.
+ * @placement: Proposed placement for the buffer object.
+ * @interruptible: Sleep interruptible if sleeping.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
+ *
+ * Changes placement and caching policy of the buffer object
+ * according proposed placement.
+ * Returns
+ * -EINVAL on invalid proposed placement.
+ * -ENOMEM on out-of-memory condition.
+ * -EBUSY if no_wait is true and buffer busy.
+ * -ERESTARTSYS if interrupted by a signal.
+ */
+extern int ttm_bo_validate(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement,
+ bool interruptible,
+ bool no_wait_gpu);
+
+/**
+ * ttm_bo_unref
+ *
+ * @bo: The buffer object.
+ *
+ * Unreference and clear a pointer to a buffer object.
+ */
+extern void ttm_bo_unref(struct ttm_buffer_object **bo);
+
+/**
+ * ttm_bo_list_ref_sub
+ *
+ * @bo: The buffer object.
+ * @count: The number of references with which to decrease @bo::list_kref;
+ * @never_free: The refcount should not reach zero with this operation.
+ *
+ * Release @count lru list references to this buffer object.
+ */
+extern void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
+ bool never_free);
+
+/**
+ * ttm_bo_add_to_lru
+ *
+ * @bo: The buffer object.
+ *
+ * Add this bo to the relevant mem type lru and, if it's backed by
+ * system pages (ttms), to the swap list.
+ * This function must be called with struct ttm_bo_global::lru_lock held, and
+ * is typically called immediately prior to unreserving a bo.
+ */
+extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
+
+/**
+ * ttm_bo_del_from_lru
+ *
+ * @bo: The buffer object.
+ *
+ * Remove this bo from all lru lists used to lookup and reserve an object.
+ * This function must be called with struct ttm_bo_global::lru_lock held,
+ * and is usually called immediately after the bo has been reserved to
+ * avoid recursive reservation from lru lists.
+ */
+extern int ttm_bo_del_from_lru(struct ttm_buffer_object *bo);
+
+/**
+ * ttm_bo_lock_delayed_workqueue
+ *
+ * Prevent the delayed workqueue from running.
+ * Returns
+ * True if the workqueue was queued at the time
+ */
+extern int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev);
+
+/**
+ * ttm_bo_unlock_delayed_workqueue
+ *
+ * Allows the delayed workqueue to run.
+ */
+extern void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev,
+ int resched);
+
+/**
+ * ttm_bo_synccpu_write_grab
+ *
+ * @bo: The buffer object:
+ * @no_wait: Return immediately if buffer is busy.
+ *
+ * Synchronizes a buffer object for CPU RW access. This means
+ * command submission that affects the buffer will return -EBUSY
+ * until ttm_bo_synccpu_write_release is called.
+ *
+ * Returns
+ * -EBUSY if the buffer is busy and no_wait is true.
+ * -ERESTARTSYS if interrupted by a signal.
+ */
+extern int
+ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait);
+
+/**
+ * ttm_bo_synccpu_write_release:
+ *
+ * @bo : The buffer object.
+ *
+ * Releases a synccpu lock.
+ */
+extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);
+
+/**
+ * ttm_bo_acc_size
+ *
+ * @bdev: Pointer to a ttm_bo_device struct.
+ * @bo_size: size of the buffer object in bytes.
+ * @struct_size: size of the structure holding the buffer object data
+ *
+ * Returns size to account for a buffer object
+ */
+size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
+ unsigned long bo_size,
+ unsigned struct_size);
+size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
+ unsigned long bo_size,
+ unsigned struct_size);
+
+/**
+ * ttm_bo_init
+ *
+ * @bdev: Pointer to a ttm_bo_device struct.
+ * @bo: Pointer to a ttm_buffer_object to be initialized.
+ * @size: Requested size of buffer object.
+ * @type: Requested type of buffer object.
+ * @flags: Initial placement flags.
+ * @page_alignment: Data alignment in pages.
+ * @interruptible: If needing to sleep to wait for GPU resources,
+ * sleep interruptible.
+ * @persistent_swap_storage: Usually the swap storage is deleted for buffers
+ * pinned in physical memory. If this behaviour is not desired, this member
+ * holds a pointer to a persistent shmem object. Typically, this would
+ * point to the shmem object backing a GEM object if TTM is used to back a
+ * GEM user interface.
+ * @acc_size: Accounted size for this object.
+ * @destroy: Destroy function. Use NULL for free(9).
+ *
+ * This function initializes a pre-allocated struct ttm_buffer_object.
+ * As this object may be part of a larger structure, this function,
+ * together with the @destroy function,
+ * enables driver-specific objects derived from a ttm_buffer_object.
+ * On successful return, the object kref and list_kref are set to 1.
+ * If a failure occurs, the function will call the @destroy function, or
+ * free(9) if @destroy is NULL. Thus, after a failure, dereferencing @bo is
+ * illegal and will likely cause memory corruption.
+ *
+ * Returns
+ * -ENOMEM: Out of memory.
+ * -EINVAL: Invalid placement flags.
+ * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
+ */
+
+extern int ttm_bo_init(struct ttm_bo_device *bdev,
+ struct ttm_buffer_object *bo,
+ unsigned long size,
+ enum ttm_bo_type type,
+ struct ttm_placement *placement,
+ uint32_t page_alignment,
+		       bool interruptible,
+ struct vm_object *persistent_swap_storage,
+ size_t acc_size,
+ struct sg_table *sg,
+ void (*destroy) (struct ttm_buffer_object *));
+
+/**
+ * ttm_bo_create
+ *
+ * @bdev: Pointer to a ttm_bo_device struct.
+ * @size: Requested size of buffer object.
+ * @type: Requested type of buffer object.
+ * @flags: Initial placement flags.
+ * @page_alignment: Data alignment in pages.
+ * @interruptible: If needing to sleep while waiting for GPU resources,
+ * sleep interruptible.
+ * @persistent_swap_storage: Usually the swap storage is deleted for buffers
+ * pinned in physical memory. If this behaviour is not desired, this member
+ * holds a pointer to a persistent shmem object. Typically, this would
+ * point to the shmem object backing a GEM object if TTM is used to back a
+ * GEM user interface.
+ * @p_bo: On successful completion *p_bo points to the created object.
+ *
+ * This function allocates a ttm_buffer_object, and then calls ttm_bo_init
+ * on that object. The destroy function is left NULL, so the object is
+ * freed with free(9).
+ * Returns
+ * -ENOMEM: Out of memory.
+ * -EINVAL: Invalid placement flags.
+ * -ERESTARTSYS: Interrupted by signal while waiting for resources.
+ */
+
+extern int ttm_bo_create(struct ttm_bo_device *bdev,
+ unsigned long size,
+ enum ttm_bo_type type,
+ struct ttm_placement *placement,
+ uint32_t page_alignment,
+ bool interruptible,
+ struct vm_object *persistent_swap_storage,
+ struct ttm_buffer_object **p_bo);
+
+/**
+ * ttm_bo_check_placement
+ *
+ * @bo: the buffer object.
+ * @placement: placements
+ *
+ * Performs minimal validity checking on an intended change of
+ * placement flags.
+ * Returns
+ * -EINVAL: Intended change is invalid or not allowed.
+ */
+extern int ttm_bo_check_placement(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement);
+
+/**
+ * ttm_bo_init_mm
+ *
+ * @bdev: Pointer to a ttm_bo_device struct.
+ * @mem_type: The memory type.
+ * @p_size: size managed area in pages.
+ *
+ * Initialize a manager for a given memory type.
+ * Note: if part of driver firstopen, it must be protected from a
+ * potentially racing lastclose.
+ * Returns:
+ * -EINVAL: invalid size or memory type.
+ * -ENOMEM: Not enough memory.
+ * May also return driver-specified errors.
+ */
+
+extern int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
+ unsigned long p_size);
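+
+/*
+ * Usage sketch (illustrative; vram_size is a hypothetical driver value):
+ * drivers call this once per managed memory type during load, e.g. for a
+ * VRAM-like type whose traits were described by the driver's
+ * init_mem_type() callback:
+ *
+ *	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, vram_size >> PAGE_SHIFT);
+ */
+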
+/**
+ * ttm_bo_clean_mm
+ *
+ * @bdev: Pointer to a ttm_bo_device struct.
+ * @mem_type: The memory type.
+ *
+ * Take down a manager for a given memory type after first walking
+ * the LRU list to evict any buffers left alive.
+ *
+ * Normally, this function is part of lastclose() or unload(), and at that
+ * point there shouldn't be any buffers left created by user-space, since
+ * they should have been removed by the file descriptor release() method.
+ * However, before this function is run, make sure to signal all sync objects,
+ * and verify that the delayed delete queue is empty. The driver must also
+ * make sure that there are no NO_EVICT buffers present in this memory type
+ * when the call is made.
+ *
+ * If this function is part of a VT switch, the caller must make sure that
+ * there are no applications currently validating buffers before this
+ * function is called. The caller can do that by first taking the
+ * struct ttm_bo_device::ttm_lock in write mode.
+ *
+ * Returns:
+ * -EINVAL: invalid or uninitialized memory type.
+ * -EBUSY: There are still buffers left in this memory type.
+ */
+
+extern int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type);
+
+/**
+ * ttm_bo_evict_mm
+ *
+ * @bdev: Pointer to a ttm_bo_device struct.
+ * @mem_type: The memory type.
+ *
+ * Evicts all buffers on the lru list of the memory type.
+ * This is normally part of a VT switch or an
+ * out-of-memory-space-due-to-fragmentation handler.
+ * The caller must make sure that there are no other processes
+ * currently validating buffers, and can do that by taking the
+ * struct ttm_bo_device::ttm_lock in write mode.
+ *
+ * Returns:
+ * -EINVAL: Invalid or uninitialized memory type.
+ * -ERESTARTSYS: The call was interrupted by a signal while waiting to
+ * evict a buffer.
+ */
+
+extern int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type);
+
+/**
+ * ttm_kmap_obj_virtual
+ *
+ * @map: A struct ttm_bo_kmap_obj returned from ttm_bo_kmap.
+ * @is_iomem: Pointer to a boolean that on return indicates whether the
+ * virtual map is io memory (true) or normal memory (false).
+ *
+ * Returns the virtual address of a buffer object area mapped by ttm_bo_kmap.
+ * If *is_iomem is true on return, the virtual address points to an io memory
+ * area that should only be accessed using the iowriteXX() and similar
+ * functions.
+ */
+
+static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map,
+ bool *is_iomem)
+{
+ *is_iomem = !!(map->bo_kmap_type & TTM_BO_MAP_IOMEM_MASK);
+ return map->virtual;
+}
+
+/**
+ * ttm_bo_kmap
+ *
+ * @bo: The buffer object.
+ * @start_page: The first page to map.
+ * @num_pages: Number of pages to map.
+ * @map: pointer to a struct ttm_bo_kmap_obj representing the map.
+ *
+ * Sets up a kernel virtual mapping of the data in the buffer object,
+ * using ioremap, vmap or kmap. The ttm_kmap_obj_virtual function can then be
+ * used to obtain a virtual address to the data.
+ *
+ * Returns
+ * -ENOMEM: Out of memory.
+ * -EINVAL: Invalid range.
+ */
+
+extern int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page,
+ unsigned long num_pages, struct ttm_bo_kmap_obj *map);
+
+/**
+ * ttm_bo_kunmap
+ *
+ * @map: Object describing the map to unmap.
+ *
+ * Unmaps a kernel map set up by ttm_bo_kmap.
+ */
+
+extern void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
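+
+/*
+ * Usage sketch (illustrative): a complete map / access / unmap cycle
+ * using ttm_bo_kmap(), ttm_kmap_obj_virtual() and ttm_bo_kunmap():
+ *
+ *	struct ttm_bo_kmap_obj map;
+ *	bool is_iomem;
+ *	void *virt;
+ *
+ *	ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
+ *	if (ret == 0) {
+ *		virt = ttm_kmap_obj_virtual(&map, &is_iomem);
+ *		if (!is_iomem)
+ *			memset(virt, 0, bo->num_pages << PAGE_SHIFT);
+ *		ttm_bo_kunmap(&map);
+ *	}
+ */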
+
+/**
+ * ttm_fbdev_mmap - mmap fbdev memory backed by a ttm buffer object.
+ *
+ * @vma: vma as input from the fbdev mmap method.
+ * @bo: The bo backing the address space. The address space will
+ * have the same size as the bo, and start at offset 0.
+ *
+ * This function is intended to be called by the fbdev mmap method
+ * if the fbdev address space is to be backed by a bo.
+ */
+
+/* XXXKIB
+extern int ttm_fbdev_mmap(struct vm_area_struct *vma,
+ struct ttm_buffer_object *bo);
+*/
+/**
+ * ttm_bo_mmap - mmap out of the ttm device address space.
+ *
+ * @filp: filp as input from the mmap method.
+ * @vma: vma as input from the mmap method.
+ * @bdev: Pointer to the ttm_bo_device with the address space manager.
+ *
+ * This function is intended to be called by the device mmap method
+ * if the device address space is to be backed by the bo manager.
+ */
+/* XXXKIB
+extern int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
+ struct ttm_bo_device *bdev);
+*/
+/**
+ * ttm_bo_io
+ *
+ * @bdev: Pointer to the struct ttm_bo_device.
+ * @filp: Pointer to the struct file attempting to read / write.
+ * @wbuf: User-space pointer to address of buffer to write. NULL on read.
+ * @rbuf: User-space pointer to address of buffer to read into.
+ * NULL on write.
+ * @count: Number of bytes to read / write.
+ * @f_pos: Pointer to current file position.
+ * @write: 1 for write, 0 for read.
+ *
+ * This function implements read / write into ttm buffer objects, and is
+ * intended to be called from the fops::read and fops::write methods.
+ * Returns:
+ * See man (2) write, man(2) read. In particular,
+ * the function may return -ERESTARTSYS if
+ * interrupted by a signal.
+ */
+
+extern ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
+ const char *wbuf, char *rbuf,
+ size_t count, off_t *f_pos, bool write);
+
+extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev);
+
+/**
+ * ttm_bo_is_reserved - return an indication if a ttm buffer object is reserved
+ *
+ * @bo: The buffer object to check.
+ *
+ * This function returns an indication of whether a bo is reserved, and should
+ * only be used for printing an error when a bo is unexpectedly unreserved due
+ * to incorrect API usage, since there's no guarantee that it is the caller
+ * that is holding the reservation.
+ */
+static inline bool ttm_bo_is_reserved(struct ttm_buffer_object *bo)
+{
+ return atomic_read(&bo->reserved);
+}
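+
+/*
+ * Sketch (illustrative): because the result cannot be attributed to the
+ * caller, limit its use to assertions in paths that require a
+ * reservation, e.g.:
+ *
+ *	MPASS(ttm_bo_is_reserved(bo));
+ */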
+
+#endif
diff --git a/sys/dev/drm2/ttm/ttm_bo_driver.h b/sys/dev/drm2/ttm/ttm_bo_driver.h
new file mode 100644
index 0000000..3f08976
--- /dev/null
+++ b/sys/dev/drm2/ttm/ttm_bo_driver.h
@@ -0,0 +1,1018 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 Vmware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+/* $FreeBSD$ */
+
+#ifndef _TTM_BO_DRIVER_H_
+#define _TTM_BO_DRIVER_H_
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/ttm/ttm_bo_api.h>
+#include <dev/drm2/ttm/ttm_memory.h>
+#include <dev/drm2/ttm/ttm_module.h>
+#include <dev/drm2/drm_global.h>
+#include <sys/rwlock.h>
+#include <sys/tree.h>
+
+struct ttm_backend_func {
+ /**
+ * struct ttm_backend_func member bind
+ *
+ * @ttm: Pointer to a struct ttm_tt.
+ * @bo_mem: Pointer to a struct ttm_mem_reg describing the
+ * memory type and location for binding.
+ *
+ * Bind the backend pages into the aperture in the location
+ * indicated by @bo_mem. This function should be able to handle
+ * differences between aperture and system page sizes.
+ */
+ int (*bind) (struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
+
+ /**
+ * struct ttm_backend_func member unbind
+ *
+ * @ttm: Pointer to a struct ttm_tt.
+ *
+ * Unbind previously bound backend pages. This function should be
+ * able to handle differences between aperture and system page sizes.
+ */
+ int (*unbind) (struct ttm_tt *ttm);
+
+ /**
+ * struct ttm_backend_func member destroy
+ *
+ * @ttm: Pointer to a struct ttm_tt.
+ *
+	 * Destroy the backend. This will be called back from
+	 * ttm_tt_destroy, so don't call ttm_tt_destroy from the callback
+	 * or you will create an infinite loop.
+ */
+ void (*destroy) (struct ttm_tt *ttm);
+};
+
+#define TTM_PAGE_FLAG_WRITE (1 << 3)
+#define TTM_PAGE_FLAG_SWAPPED (1 << 4)
+#define TTM_PAGE_FLAG_PERSISTENT_SWAP (1 << 5)
+#define TTM_PAGE_FLAG_ZERO_ALLOC (1 << 6)
+#define TTM_PAGE_FLAG_DMA32 (1 << 7)
+#define TTM_PAGE_FLAG_SG (1 << 8)
+
+enum ttm_caching_state {
+ tt_uncached,
+ tt_wc,
+ tt_cached
+};
+
+/**
+ * struct ttm_tt
+ *
+ * @bdev: Pointer to the current struct ttm_bo_device.
+ * @func: Pointer to a struct ttm_backend_func that describes
+ * the backend methods.
+ * @dummy_read_page: Page to map where the ttm_tt page array contains a NULL
+ * pointer.
+ * @pages: Array of pages backing the data.
+ * @page_flags: TTM_PAGE_FLAG_XX flags.
+ * @num_pages: Number of pages in the page array.
+ * @sg: Scatter / gather table for SG objects imported via dma-buf.
+ * @glob: Pointer to the buffer object global data.
+ * @swap_storage: Pointer to the vm object used for swap storage.
+ * @caching_state: The current caching state of the pages.
+ * @state: The current binding state of the pages.
+ *
+ * This is a structure holding the pages, caching- and aperture binding
+ * status for a buffer object that isn't backed by fixed (VRAM / AGP)
+ * memory.
+ */
+
+struct ttm_tt {
+ struct ttm_bo_device *bdev;
+ struct ttm_backend_func *func;
+ struct vm_page *dummy_read_page;
+ struct vm_page **pages;
+ uint32_t page_flags;
+ unsigned long num_pages;
+ struct sg_table *sg; /* for SG objects via dma-buf */
+ struct ttm_bo_global *glob;
+ struct vm_object *swap_storage;
+ enum ttm_caching_state caching_state;
+ enum {
+ tt_bound,
+ tt_unbound,
+ tt_unpopulated,
+ } state;
+};
+
+/**
+ * struct ttm_dma_tt
+ *
+ * @ttm: Base ttm_tt struct.
+ * @dma_address: The DMA (bus) addresses of the pages.
+ * @pages_list: Used by some page allocation backends.
+ *
+ * This is a structure holding the pages, caching- and aperture binding
+ * status for a buffer object that isn't backed by fixed (VRAM / AGP)
+ * memory.
+ */
+struct ttm_dma_tt {
+ struct ttm_tt ttm;
+ dma_addr_t *dma_address;
+ struct list_head pages_list;
+};
+
+#define TTM_MEMTYPE_FLAG_FIXED (1 << 0) /* Fixed (on-card) PCI memory */
+#define TTM_MEMTYPE_FLAG_MAPPABLE (1 << 1) /* Memory mappable */
+#define TTM_MEMTYPE_FLAG_CMA (1 << 3) /* Can't map aperture */
+
+struct ttm_mem_type_manager;
+
+struct ttm_mem_type_manager_func {
+ /**
+ * struct ttm_mem_type_manager member init
+ *
+ * @man: Pointer to a memory type manager.
+ * @p_size: Implementation dependent, but typically the size of the
+ * range to be managed in pages.
+ *
+ * Called to initialize a private range manager. The function is
+ * expected to initialize the man::priv member.
+ * Returns 0 on success, negative error code on failure.
+ */
+ int (*init)(struct ttm_mem_type_manager *man, unsigned long p_size);
+
+ /**
+ * struct ttm_mem_type_manager member takedown
+ *
+ * @man: Pointer to a memory type manager.
+ *
+ * Called to undo the setup done in init. All allocated resources
+ * should be freed.
+ */
+ int (*takedown)(struct ttm_mem_type_manager *man);
+
+ /**
+ * struct ttm_mem_type_manager member get_node
+ *
+ * @man: Pointer to a memory type manager.
+ * @bo: Pointer to the buffer object we're allocating space for.
+ * @placement: Placement details.
+ * @mem: Pointer to a struct ttm_mem_reg to be filled in.
+ *
+ * This function should allocate space in the memory type managed
+	 * This function should allocate space in the memory type managed
+	 * by @man. Placement details, if applicable, are given by
+	 * @placement. If successful, @mem::mm_node should be set to a
+	 * non-null value, @mem::start should be set to a value identifying
+	 * the beginning of the range allocated, and the function should
+	 * return zero.
+	 * If the memory region cannot accommodate the buffer object,
+	 * @mem::mm_node should be set to NULL, and the function should
+	 * return 0.
+	 * If a system error occurred, preventing the request from being
+	 * fulfilled, the function should return a negative error code.
+ *
+ * Note that @mem::mm_node will only be dereferenced by
+ * struct ttm_mem_type_manager functions and optionally by the driver,
+ * which has knowledge of the underlying type.
+ *
+ * This function may not be called from within atomic context, so
+ * an implementation can and must use either a mutex or a spinlock to
+ * protect any data structures managing the space.
+ */
+ int (*get_node)(struct ttm_mem_type_manager *man,
+ struct ttm_buffer_object *bo,
+ struct ttm_placement *placement,
+ struct ttm_mem_reg *mem);
+
+ /**
+ * struct ttm_mem_type_manager member put_node
+ *
+ * @man: Pointer to a memory type manager.
+	 * @mem: Pointer to a struct ttm_mem_reg describing the resources
+	 * to free.
+ *
+ * This function frees memory type resources previously allocated
+ * and that are identified by @mem::mm_node and @mem::start. May not
+ * be called from within atomic context.
+ */
+ void (*put_node)(struct ttm_mem_type_manager *man,
+ struct ttm_mem_reg *mem);
+
+ /**
+ * struct ttm_mem_type_manager member debug
+ *
+ * @man: Pointer to a memory type manager.
+ * @prefix: Prefix to be used in printout to identify the caller.
+ *
+ * This function is called to print out the state of the memory
+ * type manager to aid debugging of out-of-memory conditions.
+ * It may not be called from within atomic context.
+ */
+ void (*debug)(struct ttm_mem_type_manager *man, const char *prefix);
+};
+
+/**
+ * struct ttm_mem_type_manager
+ *
+ * @has_type: The memory type has been initialized.
+ * @use_type: The memory type is enabled.
+ * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
+ * managed by this memory type.
+ * @gpu_offset: If used, the GPU offset of the first managed page of
+ * fixed memory or the first managed location in an aperture.
+ * @size: Size of the managed region.
+ * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
+ * as defined in ttm_placement_common.h
+ * @default_caching: The default caching policy used for a buffer object
+ * placed in this memory type if the user doesn't provide one.
+ * @func: structure pointer implementing the range manager. See above
+ * @priv: Driver private closure for @func.
+ * @io_reserve_mutex: Mutex optionally protecting shared io_reserve structures
+ * @use_io_reserve_lru: Use an lru list to try to unreserve io_mem_regions
+ * reserved by the TTM vm system.
+ * @io_reserve_lru: Optional lru list for unreserving io mem regions.
+ * @io_reserve_fastpath: Only use bdev::driver::io_mem_reserve to obtain
+ * static information. bdev::driver::io_mem_free is never used.
+ * @lru: The lru list for this memory type.
+ *
+ * This structure is used to identify and manage memory types for a device.
+ * It's set up by the ttm_bo_driver::init_mem_type method.
+ */
+
+struct ttm_mem_type_manager {
+ struct ttm_bo_device *bdev;
+
+ /*
+ * No protection. Constant from start.
+ */
+
+ bool has_type;
+ bool use_type;
+ uint32_t flags;
+ unsigned long gpu_offset;
+ uint64_t size;
+ uint32_t available_caching;
+ uint32_t default_caching;
+ const struct ttm_mem_type_manager_func *func;
+ void *priv;
+ struct sx io_reserve_mutex;
+ bool use_io_reserve_lru;
+ bool io_reserve_fastpath;
+
+ /*
+ * Protected by @io_reserve_mutex:
+ */
+
+ struct list_head io_reserve_lru;
+
+ /*
+ * Protected by the global->lru_lock.
+ */
+
+ struct list_head lru;
+};
+
+/**
+ * struct ttm_bo_driver
+ *
+ * @create_ttm_backend_entry: Callback to create a struct ttm_backend.
+ * @invalidate_caches: Callback to invalidate read caches when a buffer object
+ * has been evicted.
+ * @init_mem_type: Callback to initialize a struct ttm_mem_type_manager
+ * structure.
+ * @evict_flags: Callback to obtain placement flags when a buffer is evicted.
+ * @move: Callback for a driver to hook in accelerated functions to
+ * move a buffer.
+ * If set to NULL, a potentially slow memcpy() move is used.
+ * @sync_obj_signaled: See ttm_fence_api.h
+ * @sync_obj_wait: See ttm_fence_api.h
+ * @sync_obj_flush: See ttm_fence_api.h
+ * @sync_obj_unref: See ttm_fence_api.h
+ * @sync_obj_ref: See ttm_fence_api.h
+ */
+
+struct ttm_bo_driver {
+ /**
+ * ttm_tt_create
+ *
+ * @bdev: pointer to a struct ttm_bo_device:
+	 * @size: Size of the data that needs backing.
+ * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
+ * @dummy_read_page: See struct ttm_bo_device.
+ *
+ * Create a struct ttm_tt to back data with system memory pages.
+ * No pages are actually allocated.
+ * Returns:
+ * NULL: Out of memory.
+ */
+ struct ttm_tt *(*ttm_tt_create)(struct ttm_bo_device *bdev,
+ unsigned long size,
+ uint32_t page_flags,
+ struct vm_page *dummy_read_page);
+
+ /**
+ * ttm_tt_populate
+ *
+ * @ttm: The struct ttm_tt to contain the backing pages.
+ *
+	 * Allocate all backing pages.
+ * Returns:
+ * -ENOMEM: Out of memory.
+ */
+ int (*ttm_tt_populate)(struct ttm_tt *ttm);
+
+ /**
+ * ttm_tt_unpopulate
+ *
+ * @ttm: The struct ttm_tt to contain the backing pages.
+ *
+	 * Free all backing pages.
+ */
+ void (*ttm_tt_unpopulate)(struct ttm_tt *ttm);
+
+ /**
+ * struct ttm_bo_driver member invalidate_caches
+ *
+ * @bdev: the buffer object device.
+ * @flags: new placement of the rebound buffer object.
+ *
+	 * A previously evicted buffer has been rebound in a
+ * potentially new location. Tell the driver that it might
+ * consider invalidating read (texture) caches on the next command
+ * submission as a consequence.
+ */
+
+ int (*invalidate_caches) (struct ttm_bo_device *bdev, uint32_t flags);
+ int (*init_mem_type) (struct ttm_bo_device *bdev, uint32_t type,
+ struct ttm_mem_type_manager *man);
+ /**
+ * struct ttm_bo_driver member evict_flags:
+ *
+ * @bo: the buffer object to be evicted
+ *
+ * Return the bo flags for a buffer which is not mapped to the hardware.
+ * These will be placed in proposed_flags so that when the move is
+ * finished, they'll end up in bo->mem.flags
+ */
+
+	void (*evict_flags) (struct ttm_buffer_object *bo,
+ struct ttm_placement *placement);
+ /**
+ * struct ttm_bo_driver member move:
+ *
+ * @bo: the buffer to move
+	 * @evict: whether this move is evicting the buffer from
+	 * the graphics address space
+	 * @interruptible: Use interruptible sleeps if possible when sleeping.
+	 * @no_wait_gpu: whether this should give up and return -EBUSY
+	 * if this move would require waiting for the GPU
+ * @new_mem: the new memory region receiving the buffer
+ *
+ * Move a buffer between two memory regions.
+ */
+ int (*move) (struct ttm_buffer_object *bo,
+ bool evict, bool interruptible,
+ bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem);
+
+ /**
+ * struct ttm_bo_driver_member verify_access
+ *
+ * @bo: Pointer to a buffer object.
+ * @filp: Pointer to a struct file trying to access the object.
+ * FreeBSD: use devfs_get_cdevpriv etc.
+ *
+ * Called from the map / write / read methods to verify that the
+ * caller is permitted to access the buffer object.
+ * This member may be set to NULL, which will refuse this kind of
+ * access for all buffer objects.
+ * This function should return 0 if access is granted, -EPERM otherwise.
+ */
+ int (*verify_access) (struct ttm_buffer_object *bo);
+
+ /**
+ * In case a driver writer dislikes the TTM fence objects,
+ * the driver writer can replace those with sync objects of
+ * his / her own. If it turns out that no driver writer is
+	 * using these, I suggest we remove these hooks and plug in
+ * fences directly. The bo driver needs the following functionality:
+ * See the corresponding functions in the fence object API
+ * documentation.
+ */
+
+ bool (*sync_obj_signaled) (void *sync_obj);
+ int (*sync_obj_wait) (void *sync_obj,
+ bool lazy, bool interruptible);
+ int (*sync_obj_flush) (void *sync_obj);
+ void (*sync_obj_unref) (void **sync_obj);
+ void *(*sync_obj_ref) (void *sync_obj);
+
+ /* hook to notify driver about a driver move so it
+ * can do tiling things */
+ void (*move_notify)(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *new_mem);
+ /* notify the driver we are taking a fault on this BO
+ * and have reserved it */
+ int (*fault_reserve_notify)(struct ttm_buffer_object *bo);
+
+ /**
+ * notify the driver that we're about to swap out this bo
+ */
+ void (*swap_notify) (struct ttm_buffer_object *bo);
+
+ /**
+	 * Driver callback invoked when mapping io memory (for bo_move_memcpy
+	 * for instance). TTM will take care to call io_mem_free whenever
+	 * the mapping is no longer in use. io_mem_reserve & io_mem_free
+ * are balanced.
+ */
+ int (*io_mem_reserve)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
+ void (*io_mem_free)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
+};
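+
+/*
+ * Sketch (illustrative; the mydrv_* callbacks are hypothetical): a
+ * minimal driver table for a device without accelerated moves. Leaving
+ * @move NULL selects the memcpy fallback:
+ *
+ *	static struct ttm_bo_driver mydrv_bo_driver = {
+ *		.ttm_tt_create = mydrv_ttm_tt_create,
+ *		.ttm_tt_populate = mydrv_ttm_tt_populate,
+ *		.ttm_tt_unpopulate = mydrv_ttm_tt_unpopulate,
+ *		.invalidate_caches = mydrv_invalidate_caches,
+ *		.init_mem_type = mydrv_init_mem_type,
+ *		.evict_flags = mydrv_evict_flags,
+ *		.move = NULL,
+ *		.verify_access = mydrv_verify_access,
+ *	};
+ */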
+
+/**
+ * struct ttm_bo_global_ref - Argument to initialize a struct ttm_bo_global.
+ */
+
+struct ttm_bo_global_ref {
+ struct drm_global_reference ref;
+ struct ttm_mem_global *mem_glob;
+};
+
+/**
+ * struct ttm_bo_global - Buffer object driver global data.
+ *
+ * @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
+ * @dummy_read_page: Pointer to a dummy page used for mapping requests
+ * of unpopulated pages.
+ * @shrink: A shrink callback object used for buffer object swap.
+ * @device_list_mutex: Mutex protecting the device list.
+ * This mutex is held while traversing the device list for pm options.
+ * @lru_lock: Spinlock protecting the bo subsystem lru lists.
+ * @device_list: List of buffer object devices.
+ * @swap_lru: Lru list of buffer objects used for swapping.
+ */
+
+struct ttm_bo_global {
+ u_int kobj_ref;
+
+ /**
+ * Constant after init.
+ */
+
+ struct ttm_mem_global *mem_glob;
+ struct vm_page *dummy_read_page;
+ struct ttm_mem_shrink shrink;
+ struct sx device_list_mutex;
+ struct mtx lru_lock;
+
+ /**
+ * Protected by device_list_mutex.
+ */
+ struct list_head device_list;
+
+ /**
+ * Protected by the lru_lock.
+ */
+ struct list_head swap_lru;
+
+ /**
+ * Internal protection.
+ */
+ atomic_t bo_count;
+};
+
+
+#define TTM_NUM_MEM_TYPES 8
+
+#define TTM_BO_PRIV_FLAG_MOVING 0 /* Buffer object is moving and needs
+ idling before CPU mapping */
+#define TTM_BO_PRIV_FLAG_MAX 1
+/**
+ * struct ttm_bo_device - Buffer object driver device-specific data.
+ *
+ * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
+ * @man: An array of mem_type_managers.
+ * @fence_lock: Protects the synchronizing members on *all* bos belonging
+ * to this device.
+ * @addr_space_mm: Range manager for the device address space.
+ * @ddestroy: List of buffer objects pending delayed destruction; this
+ * list and the buffer+device lru lists are protected by the global
+ * lru_lock.
+ * @val_seq: Current validation sequence.
+ * @dev_mapping: A pointer to the struct address_space representing the
+ * device address space.
+ * @wq: Work queue structure for the delayed delete workqueue.
+ *
+ */
+
+struct ttm_bo_device {
+
+ /*
+ * Constant after bo device init / atomic.
+ */
+ struct list_head device_list;
+ struct ttm_bo_global *glob;
+ struct ttm_bo_driver *driver;
+ struct rwlock vm_lock;
+ struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
+ struct mtx fence_lock;
+ /*
+ * Protected by the vm lock.
+ */
+ RB_HEAD(ttm_bo_device_buffer_objects, ttm_buffer_object) addr_space_rb;
+ struct drm_mm addr_space_mm;
+
+ /*
+	 * Protected by the global lru lock.
+ */
+ struct list_head ddestroy;
+ uint32_t val_seq;
+
+ /*
+	 * Protected by load / firstopen / lastclose / unload sync.
+ */
+
+ struct address_space *dev_mapping;
+
+ /*
+ * Internal protection.
+ */
+
+ struct timeout_task wq;
+
+ bool need_dma32;
+};
+
+/**
+ * ttm_flag_masked
+ *
+ * @old: Pointer to the result and original value.
+ * @new: New value of bits.
+ * @mask: Mask of bits to change.
+ *
+ * Convenience function to change a number of bits identified by a mask.
+ */
+
+static inline uint32_t
+ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
+{
+ *old ^= (*old ^ new) & mask;
+ return *old;
+}
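+
+/*
+ * Example: the bits selected by @mask take their value from @new, all
+ * other bits keep their old value.  With *old == 0x1010, new == 0x0101
+ * and mask == 0x0011, *old becomes 0x1001.
+ */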
+
+/**
+ * ttm_tt_init
+ *
+ * @ttm: The struct ttm_tt.
+ * @bdev: pointer to a struct ttm_bo_device:
+ * @size: Size of the data that needs backing.
+ * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
+ * @dummy_read_page: See struct ttm_bo_device.
+ *
+ * Create a struct ttm_tt to back data with system memory pages.
+ * No pages are actually allocated.
+ * Returns:
+ * -ENOMEM: Out of memory.
+ */
+extern int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
+ unsigned long size, uint32_t page_flags,
+ struct vm_page *dummy_read_page);
+extern int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
+ unsigned long size, uint32_t page_flags,
+ struct vm_page *dummy_read_page);
+
+/**
+ * ttm_tt_fini
+ *
+ * @ttm: the ttm_tt structure.
+ *
+ * Free memory of ttm_tt structure
+ */
+extern void ttm_tt_fini(struct ttm_tt *ttm);
+extern void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma);
+
+/**
+ * ttm_tt_bind:
+ *
+ * @ttm: The struct ttm_tt containing backing pages.
+ * @bo_mem: The struct ttm_mem_reg identifying the binding location.
+ *
+ * Bind the pages of @ttm to an aperture location identified by @bo_mem
+ */
+extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
+
+/**
+ * ttm_tt_destroy:
+ *
+ * @ttm: The struct ttm_tt.
+ *
+ * Unbind, unpopulate and destroy common struct ttm_tt.
+ */
+extern void ttm_tt_destroy(struct ttm_tt *ttm);
+
+/**
+ * ttm_tt_unbind:
+ *
+ * @ttm: The struct ttm_tt.
+ *
+ * Unbind a struct ttm_tt.
+ */
+extern void ttm_tt_unbind(struct ttm_tt *ttm);
+
+/**
+ * ttm_tt_swapin:
+ *
+ * @ttm: The struct ttm_tt.
+ *
+ * Swap in a previously swapped-out ttm_tt.
+ */
+extern int ttm_tt_swapin(struct ttm_tt *ttm);
+
+/**
+ * ttm_tt_cache_flush:
+ *
+ * @pages: An array of pointers to struct vm_page to flush.
+ * @num_pages: Number of pages to flush.
+ *
+ * Flush the data of the indicated pages from the cpu caches.
+ * This is used when changing caching attributes of the pages from
+ * cache-coherent.
+ */
+extern void ttm_tt_cache_flush(struct vm_page *pages[], unsigned long num_pages);
+
+/**
+ * ttm_tt_set_placement_caching:
+ *
+ * @ttm: A struct ttm_tt the backing pages of which will change caching policy.
+ * @placement: Flag indicating the desired caching policy.
+ *
+ * This function will change the caching policy of any default kernel
+ * mappings of the pages backing @ttm. If changing from cached to uncached
+ * or write-combined,
+ * all CPU caches will first be flushed to make sure the data of the pages
+ * hit RAM. This function may be very costly as it involves global TLB
+ * and cache flushes and potential page splitting / combining.
+ */
+extern int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
+extern int ttm_tt_swapout(struct ttm_tt *ttm,
+ struct vm_object *persistent_swap_storage);
+
+/*
+ * ttm_bo.c
+ */
+
+/**
+ * ttm_mem_reg_is_pci
+ *
+ * @bdev: Pointer to a struct ttm_bo_device.
+ * @mem: A valid struct ttm_mem_reg.
+ *
+ * Returns true if the memory described by @mem is PCI memory,
+ * false otherwise.
+ */
+extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem);
+
+/**
+ * ttm_bo_mem_space
+ *
+ * @bo: Pointer to a struct ttm_buffer_object, the data of which
+ * we want to allocate space for.
+ * @placement: Proposed new placement for the buffer object.
+ * @mem: A struct ttm_mem_reg.
+ * @interruptible: Sleep interruptibly when sleeping.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
+ *
+ * Allocate memory space for the buffer object pointed to by @bo, using
+ * the placement flags in @mem, potentially evicting other idle buffer objects.
+ * This function may sleep while waiting for space to become available.
+ * Returns:
+ * -EBUSY: No space available (only if no_wait_gpu == 1).
+ * -ENOMEM: Could not allocate memory for the buffer object, either due to
+ * fragmentation or concurrent allocators.
+ * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
+ */
+extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement,
+ struct ttm_mem_reg *mem,
+ bool interruptible,
+ bool no_wait_gpu);
+
+extern void ttm_bo_mem_put(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *mem);
+extern void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *mem);
+
+extern void ttm_bo_global_release(struct drm_global_reference *ref);
+extern int ttm_bo_global_init(struct drm_global_reference *ref);
+
+extern int ttm_bo_device_release(struct ttm_bo_device *bdev);
+
+/**
+ * ttm_bo_device_init
+ *
+ * @bdev: A pointer to a struct ttm_bo_device to initialize.
+ * @glob: A pointer to an initialized struct ttm_bo_global.
+ * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
+ * @file_page_offset: Offset into the device address space that is available
+ * for buffer data. This ensures compatibility with other users of the
+ * address space.
+ *
+ * Initializes a struct ttm_bo_device.
+ * Returns:
+ * !0: Failure.
+ */
+extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
+ struct ttm_bo_global *glob,
+ struct ttm_bo_driver *driver,
+ uint64_t file_page_offset, bool need_dma32);
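+
+/*
+ * Usage sketch (illustrative; sc, bo_global, file_page_offset and
+ * vram_pages are hypothetical driver state): the expected bring-up order
+ * during driver load, assuming the global references were obtained
+ * beforehand via drm_global_item_ref():
+ *
+ *	ret = ttm_bo_device_init(&sc->bdev, bo_global, &mydrv_bo_driver,
+ *	    file_page_offset, false);
+ *	if (ret == 0)
+ *		ret = ttm_bo_init_mm(&sc->bdev, TTM_PL_VRAM, vram_pages);
+ *
+ * ttm_bo_device_release() undoes the initialization on unload.
+ */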
+
+/**
+ * ttm_bo_unmap_virtual
+ *
+ * @bo: tear down the virtual mappings for this BO
+ */
+extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
+
+/**
+ * ttm_bo_unmap_virtual_locked
+ *
+ * @bo: tear down the virtual mappings for this BO
+ *
+ * The caller must take ttm_mem_io_lock before calling this function.
+ */
+extern void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo);
+
+extern int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo);
+extern void ttm_mem_io_free_vm(struct ttm_buffer_object *bo);
+extern int ttm_mem_io_lock(struct ttm_mem_type_manager *man,
+ bool interruptible);
+extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
+
+
+/**
+ * ttm_bo_reserve:
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @interruptible: Sleep interruptible if waiting.
+ * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
+ * @use_sequence: If @bo is already reserved, only sleep waiting for
+ * it to become unreserved if @sequence < (@bo)->sequence.
+ *
+ * Locks a buffer object for validation (or prevents other processes from
+ * locking it for validation) and removes it from the lru lists, while taking
+ * a number of measures to prevent deadlocks.
+ *
+ * Deadlocks may occur when two processes try to reserve multiple buffers in
+ * different order, either by will or as a result of a buffer being evicted
+ * to make room for a buffer already reserved. (Buffers are reserved before
+ * they are evicted). The following algorithm prevents such deadlocks from
+ * occurring:
+ * 1) Buffers are reserved with the lru spinlock held. Upon successful
+ * reservation they are removed from the lru list. This stops a reserved buffer
+ * from being evicted. However the lru spinlock is released between the time
+ * a buffer is selected for eviction and the time it is reserved.
+ * Therefore a check is made when a buffer is reserved for eviction, that it
+ * is still the first buffer in the lru list, before it is removed from the
+ * list. @check_lru == 1 forces this check. If it fails, the function returns
+ * -EINVAL, and the caller should then choose a new buffer to evict and repeat
+ * the procedure.
+ * 2) Processes attempting to reserve multiple buffers other than for eviction,
+ * (typically execbuf), should first obtain a unique 32-bit
+ * validation sequence number,
+ * and call this function with @use_sequence == 1 and @sequence == the unique
+ * sequence number. If upon call of this function, the buffer object is already
+ * reserved, the validation sequence is checked against the validation
+ * sequence of the process currently reserving the buffer,
+ * and if the current validation sequence is greater than that of the process
+ * holding the reservation, the function returns -EAGAIN. Otherwise it sleeps
+ * waiting for the buffer to become unreserved, after which it retries
+ * reserving.
+ * The caller should, when receiving an -EAGAIN error,
+ * release all its buffer reservations, wait for @bo to become unreserved, and
+ * then rerun the validation with the same validation sequence. This procedure
+ * will always guarantee that the process with the lowest validation sequence
+ * will eventually succeed, preventing both deadlocks and starvation.
+ *
+ * Returns:
+ * -EAGAIN: The reservation may cause a deadlock.
+ * Release all buffer reservations, wait for @bo to become unreserved and
+ * try again. (only if use_sequence == 1).
+ * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
+ * a signal. Release all buffer reservations and return to user-space.
+ * -EBUSY: The function needed to sleep, but @no_wait was true
+ * -EDEADLK: Bo already reserved using @sequence. This error code will only
+ * be returned if @use_sequence is set to true.
+ */
+extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
+ bool interruptible,
+ bool no_wait, bool use_sequence, uint32_t sequence);
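+
+/*
+ * Sketch (illustrative pseudocode) of the -EAGAIN backoff described
+ * above, as an execbuf-style caller would implement it:
+ *
+ *	seq = <unique 32-bit validation sequence>;
+ * restart:
+ *	for each bo on the validation list {
+ *		ret = ttm_bo_reserve(bo, true, false, true, seq);
+ *		if (ret == -EAGAIN) {
+ *			<unreserve everything reserved so far>;
+ *			<wait for the contended bo to become unreserved>;
+ *			goto restart;
+ *		}
+ *	}
+ */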
+
+
+/**
+ * ttm_bo_reserve_locked:
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @interruptible: Sleep interruptible if waiting.
+ * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
+ * @use_sequence: If @bo is already reserved, only sleep waiting for
+ * it to become unreserved if @sequence < (@bo)->sequence.
+ *
+ * Must be called with struct ttm_bo_global::lru_lock held,
+ * and will not remove reserved buffers from the lru lists.
+ * The function may release the LRU spinlock if it needs to sleep.
+ * Otherwise identical to ttm_bo_reserve.
+ *
+ * Returns:
+ * -EAGAIN: The reservation may cause a deadlock.
+ * Release all buffer reservations, wait for @bo to become unreserved and
+ * try again. (only if use_sequence == 1).
+ * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
+ * a signal. Release all buffer reservations and return to user-space.
+ * -EBUSY: The function needed to sleep, but @no_wait was true
+ * -EDEADLK: Bo already reserved using @sequence. This error code will only
+ * be returned if @use_sequence is set to true.
+ */
+extern int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
+ bool interruptible,
+ bool no_wait, bool use_sequence,
+ uint32_t sequence);
+
+/**
+ * ttm_bo_unreserve
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ *
+ * Unreserve a previous reservation of @bo.
+ */
+extern void ttm_bo_unreserve(struct ttm_buffer_object *bo);
+
+/**
+ * ttm_bo_unreserve_locked
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ *
+ * Unreserve a previous reservation of @bo.
+ * Needs to be called with struct ttm_bo_global::lru_lock held.
+ */
+extern void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo);
+
+/**
+ * ttm_bo_wait_unreserved_locked
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ *
+ * Wait for a struct ttm_buffer_object to become unreserved.
+ * This is typically used in the execbuf code to relax cpu-usage when
+ * backing off from a potential deadlock condition.
+ */
+extern int ttm_bo_wait_unreserved_locked(struct ttm_buffer_object *bo,
+ bool interruptible);
+
+/*
+ * ttm_bo_util.c
+ */
+
+/**
+ * ttm_bo_move_ttm
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @evict: 1: This is an eviction. Don't try to pipeline.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
+ * @new_mem: struct ttm_mem_reg indicating where to move.
+ *
+ * Optimized move function for a buffer object with both old and
+ * new placement backed by a TTM. The function will, if successful,
+ * free any old aperture space, and set (@new_mem)->mm_node to NULL,
+ * and update the (@bo)->mem placement flags. If unsuccessful, the old
+ * data remains untouched, and it's up to the caller to free the
+ * memory space indicated by @new_mem.
+ * Returns:
+ * !0: Failure.
+ */
+
+extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
+ bool evict, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem);
+
+/**
+ * ttm_bo_move_memcpy
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @evict: 1: This is an eviction. Don't try to pipeline.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
+ * @new_mem: struct ttm_mem_reg indicating where to move.
+ *
+ * Fallback move function for a mappable buffer object in mappable memory.
+ * The function will, if successful,
+ * free any old aperture space, and set (@new_mem)->mm_node to NULL,
+ * and update the (@bo)->mem placement flags. If unsuccessful, the old
+ * data remains untouched, and it's up to the caller to free the
+ * memory space indicated by @new_mem.
+ * Returns:
+ * !0: Failure.
+ */
+
+extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
+ bool evict, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem);
+
+/**
+ * ttm_bo_free_old_node
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ *
+ * Utility function to free an old placement after a successful move.
+ */
+extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
+
+/**
+ * ttm_bo_move_accel_cleanup.
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @sync_obj: A sync object that signals when moving is complete.
+ * @evict: This is an evict move. Don't return until the buffer is idle.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
+ * @new_mem: struct ttm_mem_reg indicating where to move.
+ *
+ * Accelerated move function to be called when an accelerated move
+ * has been scheduled. The function will create a new temporary buffer object
+ * representing the old placement, and put the sync object on both buffer
+ * objects. After that the newly created buffer object is unref'd to be
+ * destroyed when the move is complete. This will help pipeline
+ * buffer moves.
+ */
+
+extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
+ void *sync_obj,
+ bool evict, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem);
+/**
+ * ttm_io_prot
+ *
+ * @caching_flags: The TTM_PL_FLAG_* caching flags of the intended mapping.
+ *
+ * Utility function that returns the vm_memattr_t that should be used for
+ * setting up a PTE with the caching model indicated by @caching_flags.
+ */
+extern vm_memattr_t ttm_io_prot(uint32_t caching_flags);
+
+extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;
+
+#if (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
+#define TTM_HAS_AGP
+#include <linux/agp_backend.h>
+
+/**
+ * ttm_agp_tt_create
+ *
+ * @bdev: Pointer to a struct ttm_bo_device.
+ * @bridge: The agp bridge this device is sitting on.
+ * @size: Size of the data that needs backing.
+ * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
+ * @dummy_read_page: See struct ttm_bo_device.
+ *
+ * Create a TTM backend that uses the indicated AGP bridge as an aperture
+ * for TT memory. This function uses the linux agpgart interface to
+ * bind and unbind memory backing a ttm_tt.
+ */
+extern struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
+ struct agp_bridge_data *bridge,
+ unsigned long size, uint32_t page_flags,
+ struct vm_page *dummy_read_page);
+int ttm_agp_tt_populate(struct ttm_tt *ttm);
+void ttm_agp_tt_unpopulate(struct ttm_tt *ttm);
+#endif
+
+int ttm_bo_cmp_rb_tree_items(struct ttm_buffer_object *a,
+ struct ttm_buffer_object *b);
+
+RB_PROTOTYPE(ttm_bo_device_buffer_objects, ttm_buffer_object, vm_rb,
+ ttm_bo_cmp_rb_tree_items);
+
+#endif
diff --git a/sys/dev/drm2/ttm/ttm_bo_manager.c b/sys/dev/drm2/ttm/ttm_bo_manager.c
new file mode 100644
index 0000000..4686511
--- /dev/null
+++ b/sys/dev/drm2/ttm/ttm_bo_manager.c
@@ -0,0 +1,157 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/ttm/ttm_module.h>
+#include <dev/drm2/ttm/ttm_bo_driver.h>
+#include <dev/drm2/ttm/ttm_placement.h>
+#include <dev/drm2/drm_mm.h>
+
+/**
+ * Currently we use a spinlock for the lock, but a mutex *may* be
+ * more appropriate to reduce scheduling latency if the range manager
+ * ends up with very fragmented allocation patterns.
+ */
+
+struct ttm_range_manager {
+ struct drm_mm mm;
+ struct mtx lock;
+};
+
+MALLOC_DEFINE(M_TTM_RMAN, "ttm_rman", "TTM Range Manager");
+
+static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
+ struct ttm_buffer_object *bo,
+ struct ttm_placement *placement,
+ struct ttm_mem_reg *mem)
+{
+ struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
+ struct drm_mm *mm = &rman->mm;
+ struct drm_mm_node *node = NULL;
+ unsigned long lpfn;
+ int ret;
+
+ lpfn = placement->lpfn;
+ if (!lpfn)
+ lpfn = man->size;
+ do {
+ ret = drm_mm_pre_get(mm);
+ if (unlikely(ret))
+ return ret;
+
+ mtx_lock(&rman->lock);
+ node = drm_mm_search_free_in_range(mm,
+ mem->num_pages, mem->page_alignment,
+ placement->fpfn, lpfn, 1);
+ if (unlikely(node == NULL)) {
+ mtx_unlock(&rman->lock);
+ return 0;
+ }
+ node = drm_mm_get_block_atomic_range(node, mem->num_pages,
+ mem->page_alignment,
+ placement->fpfn,
+ lpfn);
+ mtx_unlock(&rman->lock);
+ } while (node == NULL);
+
+ mem->mm_node = node;
+ mem->start = node->start;
+ return 0;
+}
+
+static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
+ struct ttm_mem_reg *mem)
+{
+ struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
+
+ if (mem->mm_node) {
+ mtx_lock(&rman->lock);
+ drm_mm_put_block(mem->mm_node);
+ mtx_unlock(&rman->lock);
+ mem->mm_node = NULL;
+ }
+}
+
+static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
+ unsigned long p_size)
+{
+ struct ttm_range_manager *rman;
+ int ret;
+
+ rman = malloc(sizeof(*rman), M_TTM_RMAN, M_ZERO | M_WAITOK);
+ ret = drm_mm_init(&rman->mm, 0, p_size);
+ if (ret) {
+ free(rman, M_TTM_RMAN);
+ return ret;
+ }
+
+ mtx_init(&rman->lock, "ttmrman", NULL, MTX_DEF);
+ man->priv = rman;
+ return 0;
+}
+
+static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
+{
+ struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
+ struct drm_mm *mm = &rman->mm;
+
+ mtx_lock(&rman->lock);
+ if (drm_mm_clean(mm)) {
+ drm_mm_takedown(mm);
+ mtx_unlock(&rman->lock);
+ mtx_destroy(&rman->lock);
+ free(rman, M_TTM_RMAN);
+ man->priv = NULL;
+ return 0;
+ }
+ mtx_unlock(&rman->lock);
+ return -EBUSY;
+}
+
+static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
+ const char *prefix)
+{
+ struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
+
+ mtx_lock(&rman->lock);
+ drm_mm_debug_table(&rman->mm, prefix);
+ mtx_unlock(&rman->lock);
+}
+
+const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
+ ttm_bo_man_init,
+ ttm_bo_man_takedown,
+ ttm_bo_man_get_node,
+ ttm_bo_man_put_node,
+ ttm_bo_man_debug
+};
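+
+/*
+ * Usage sketch (illustrative): a driver selects this range manager from
+ * its init_mem_type() callback, e.g. for a VRAM-like memory type:
+ *
+ *	man->func = &ttm_bo_manager_func;
+ *	man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
+ *	man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
+ *	man->default_caching = TTM_PL_FLAG_WC;
+ *
+ * ttm_bo_init_mm() then invokes the init() member above with the size
+ * of the managed area in pages.
+ */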
diff --git a/sys/dev/drm2/ttm/ttm_bo_util.c b/sys/dev/drm2/ttm/ttm_bo_util.c
new file mode 100644
index 0000000..139d134
--- /dev/null
+++ b/sys/dev/drm2/ttm/ttm_bo_util.c
@@ -0,0 +1,658 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/ttm/ttm_bo_driver.h>
+#include <dev/drm2/ttm/ttm_placement.h>
+#include <sys/sf_buf.h>
+
+void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
+{
+ ttm_bo_mem_put(bo, &bo->mem);
+}
+
+int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
+ bool evict,
+ bool no_wait_gpu, struct ttm_mem_reg *new_mem)
+{
+ struct ttm_tt *ttm = bo->ttm;
+ struct ttm_mem_reg *old_mem = &bo->mem;
+ int ret;
+
+ if (old_mem->mem_type != TTM_PL_SYSTEM) {
+ ttm_tt_unbind(ttm);
+ ttm_bo_free_old_node(bo);
+ ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
+ TTM_PL_MASK_MEM);
+ old_mem->mem_type = TTM_PL_SYSTEM;
+ }
+
+ ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
+ if (unlikely(ret != 0))
+ return ret;
+
+ if (new_mem->mem_type != TTM_PL_SYSTEM) {
+ ret = ttm_tt_bind(ttm, new_mem);
+ if (unlikely(ret != 0))
+ return ret;
+ }
+
+ *old_mem = *new_mem;
+ new_mem->mm_node = NULL;
+
+ return 0;
+}
+
+int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
+{
+ if (likely(man->io_reserve_fastpath))
+ return 0;
+
+ if (interruptible) {
+ if (sx_xlock_sig(&man->io_reserve_mutex))
+ return (-EINTR);
+ else
+ return (0);
+ }
+
+ sx_xlock(&man->io_reserve_mutex);
+ return 0;
+}
+
+void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
+{
+ if (likely(man->io_reserve_fastpath))
+ return;
+
+ sx_xunlock(&man->io_reserve_mutex);
+}
+
+static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
+{
+ struct ttm_buffer_object *bo;
+
+ if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
+ return -EAGAIN;
+
+ bo = list_first_entry(&man->io_reserve_lru,
+ struct ttm_buffer_object,
+ io_reserve_lru);
+ list_del_init(&bo->io_reserve_lru);
+ ttm_bo_unmap_virtual_locked(bo);
+
+ return 0;
+}
+
+static int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+ int ret = 0;
+
+ if (!bdev->driver->io_mem_reserve)
+ return 0;
+ if (likely(man->io_reserve_fastpath))
+ return bdev->driver->io_mem_reserve(bdev, mem);
+
+ if (bdev->driver->io_mem_reserve &&
+ mem->bus.io_reserved_count++ == 0) {
+retry:
+ ret = bdev->driver->io_mem_reserve(bdev, mem);
+ if (ret == -EAGAIN) {
+ ret = ttm_mem_io_evict(man);
+ if (ret == 0)
+ goto retry;
+ }
+ }
+ return ret;
+}
+
+static void ttm_mem_io_free(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+
+ if (likely(man->io_reserve_fastpath))
+ return;
+
+ if (bdev->driver->io_mem_reserve &&
+ --mem->bus.io_reserved_count == 0 &&
+ bdev->driver->io_mem_free)
+ bdev->driver->io_mem_free(bdev, mem);
+
+}
+
+int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
+{
+ struct ttm_mem_reg *mem = &bo->mem;
+ int ret;
+
+ if (!mem->bus.io_reserved_vm) {
+ struct ttm_mem_type_manager *man =
+ &bo->bdev->man[mem->mem_type];
+
+ ret = ttm_mem_io_reserve(bo->bdev, mem);
+ if (unlikely(ret != 0))
+ return ret;
+ mem->bus.io_reserved_vm = true;
+ if (man->use_io_reserve_lru)
+ list_add_tail(&bo->io_reserve_lru,
+ &man->io_reserve_lru);
+ }
+ return 0;
+}
+
+void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
+{
+ struct ttm_mem_reg *mem = &bo->mem;
+
+ if (mem->bus.io_reserved_vm) {
+ mem->bus.io_reserved_vm = false;
+ list_del_init(&bo->io_reserve_lru);
+ ttm_mem_io_free(bo->bdev, mem);
+ }
+}
+
+static
+int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
+ void **virtual)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+ int ret;
+ void *addr;
+
+ *virtual = NULL;
+ (void) ttm_mem_io_lock(man, false);
+ ret = ttm_mem_io_reserve(bdev, mem);
+ ttm_mem_io_unlock(man);
+ if (ret || !mem->bus.is_iomem)
+ return ret;
+
+ if (mem->bus.addr) {
+ addr = mem->bus.addr;
+ } else {
+ addr = pmap_mapdev_attr(mem->bus.base + mem->bus.offset,
+ mem->bus.size, (mem->placement & TTM_PL_FLAG_WC) ?
+ VM_MEMATTR_WRITE_COMBINING : VM_MEMATTR_UNCACHEABLE);
+ if (!addr) {
+ (void) ttm_mem_io_lock(man, false);
+ ttm_mem_io_free(bdev, mem);
+ ttm_mem_io_unlock(man);
+ return -ENOMEM;
+ }
+ }
+ *virtual = addr;
+ return 0;
+}
+
+static
+void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
+ void *virtual)
+{
+ struct ttm_mem_type_manager *man;
+
+ man = &bdev->man[mem->mem_type];
+
+ if (virtual && mem->bus.addr == NULL)
+ pmap_unmapdev((vm_offset_t)virtual, mem->bus.size);
+ (void) ttm_mem_io_lock(man, false);
+ ttm_mem_io_free(bdev, mem);
+ ttm_mem_io_unlock(man);
+}
+
+static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
+{
+ uint32_t *dstP =
+ (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
+ uint32_t *srcP =
+ (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
+
+ int i;
+ for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
+ /* iowrite32(ioread32(srcP++), dstP++); */
+ *dstP++ = *srcP++;
+ return 0;
+}
+
+static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
+ unsigned long page,
+ vm_memattr_t prot)
+{
+ vm_page_t d = ttm->pages[page];
+ void *dst;
+
+ if (!d)
+ return -ENOMEM;
+
+ src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
+
+ /* XXXKIB can't sleep ? */
+ dst = pmap_mapdev_attr(VM_PAGE_TO_PHYS(d), PAGE_SIZE, prot);
+ if (!dst)
+ return -ENOMEM;
+
+ memcpy(dst, src, PAGE_SIZE);
+
+ pmap_unmapdev((vm_offset_t)dst, PAGE_SIZE);
+
+ return 0;
+}
+
+static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
+ unsigned long page,
+ vm_memattr_t prot)
+{
+ vm_page_t s = ttm->pages[page];
+ void *src;
+
+ if (!s)
+ return -ENOMEM;
+
+ dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
+ src = pmap_mapdev_attr(VM_PAGE_TO_PHYS(s), PAGE_SIZE, prot);
+ if (!src)
+ return -ENOMEM;
+
+ memcpy(dst, src, PAGE_SIZE);
+
+ pmap_unmapdev((vm_offset_t)src, PAGE_SIZE);
+
+ return 0;
+}
+
+int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
+ bool evict, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
+ struct ttm_tt *ttm = bo->ttm;
+ struct ttm_mem_reg *old_mem = &bo->mem;
+ struct ttm_mem_reg old_copy = *old_mem;
+ void *old_iomap;
+ void *new_iomap;
+ int ret;
+ unsigned long i;
+ unsigned long page;
+ unsigned long add = 0;
+ int dir;
+
+ ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
+ if (ret)
+ return ret;
+ ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
+ if (ret)
+ goto out;
+
+ if (old_iomap == NULL && new_iomap == NULL)
+ goto out2;
+ if (old_iomap == NULL && ttm == NULL)
+ goto out2;
+
+ if (ttm->state == tt_unpopulated) {
+ ret = ttm->bdev->driver->ttm_tt_populate(ttm);
+ if (ret)
+ goto out1;
+ }
+
+ add = 0;
+ dir = 1;
+
+ if ((old_mem->mem_type == new_mem->mem_type) &&
+ (new_mem->start < old_mem->start + old_mem->size)) {
+ dir = -1;
+ add = new_mem->num_pages - 1;
+ }
+
+ for (i = 0; i < new_mem->num_pages; ++i) {
+ page = i * dir + add;
+ if (old_iomap == NULL) {
+ vm_memattr_t prot = ttm_io_prot(old_mem->placement);
+ ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
+ prot);
+ } else if (new_iomap == NULL) {
+ vm_memattr_t prot = ttm_io_prot(new_mem->placement);
+ ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
+ prot);
+ } else
+ ret = ttm_copy_io_page(new_iomap, old_iomap, page);
+ if (ret)
+ goto out1;
+ }
+ mb();
+out2:
+ old_copy = *old_mem;
+ *old_mem = *new_mem;
+ new_mem->mm_node = NULL;
+
+ if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
+ ttm_tt_unbind(ttm);
+ ttm_tt_destroy(ttm);
+ bo->ttm = NULL;
+ }
+
+out1:
+ ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
+out:
+ ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
+ ttm_bo_mem_put(bo, &old_copy);
+ return ret;
+}
+
+MALLOC_DEFINE(M_TTM_TRANSF_OBJ, "ttm_transf_obj", "TTM Transfer Objects");
+
+static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
+{
+ free(bo, M_TTM_TRANSF_OBJ);
+}
+
+/**
+ * ttm_buffer_object_transfer
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
+ * holding the data of @bo with the old placement.
+ *
+ * This is a utility function that may be called after an accelerated move
+ * has been scheduled. A new buffer object is created as a placeholder for
+ * the old data while it's being copied. When that buffer object is idle,
+ * it can be destroyed, releasing the space of the old placement.
+ * Returns:
+ * !0: Failure.
+ */
+
+static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
+ struct ttm_buffer_object **new_obj)
+{
+ struct ttm_buffer_object *fbo;
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_bo_driver *driver = bdev->driver;
+
+ fbo = malloc(sizeof(*fbo), M_TTM_TRANSF_OBJ, M_ZERO | M_WAITOK);
+ *fbo = *bo;
+
+ /**
+ * Fix up members that we shouldn't copy directly:
+ * TODO: Explicit member copy would probably be better here.
+ */
+
+ INIT_LIST_HEAD(&fbo->ddestroy);
+ INIT_LIST_HEAD(&fbo->lru);
+ INIT_LIST_HEAD(&fbo->swap);
+ INIT_LIST_HEAD(&fbo->io_reserve_lru);
+ fbo->vm_node = NULL;
+ atomic_set(&fbo->cpu_writers, 0);
+
+ fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
+ refcount_init(&fbo->list_kref, 1);
+ refcount_init(&fbo->kref, 1);
+ fbo->destroy = &ttm_transfered_destroy;
+ fbo->acc_size = 0;
+
+ *new_obj = fbo;
+ return 0;
+}
+
+vm_memattr_t
+ttm_io_prot(uint32_t caching_flags)
+{
+#if defined(__i386__) || defined(__amd64__)
+ if (caching_flags & TTM_PL_FLAG_WC)
+ return (VM_MEMATTR_WRITE_COMBINING);
+ else
+ /*
+		 * We do not support the 80386; see the Linux source
+		 * for why it would require special handling here.
+ */
+ return (VM_MEMATTR_UNCACHEABLE);
+#else
+#error Port me
+#endif
+}
+
+static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
+ unsigned long offset,
+ unsigned long size,
+ struct ttm_bo_kmap_obj *map)
+{
+ struct ttm_mem_reg *mem = &bo->mem;
+
+ if (bo->mem.bus.addr) {
+ map->bo_kmap_type = ttm_bo_map_premapped;
+ map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
+ } else {
+ map->bo_kmap_type = ttm_bo_map_iomap;
+ map->virtual = pmap_mapdev_attr(bo->mem.bus.base +
+ bo->mem.bus.offset + offset, size,
+ (mem->placement & TTM_PL_FLAG_WC) ?
+ VM_MEMATTR_WRITE_COMBINING : VM_MEMATTR_UNCACHEABLE);
+ map->size = size;
+ }
+ return (!map->virtual) ? -ENOMEM : 0;
+}
+
+static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
+ unsigned long start_page,
+ unsigned long num_pages,
+ struct ttm_bo_kmap_obj *map)
+{
+ struct ttm_mem_reg *mem = &bo->mem;
+ vm_memattr_t prot;
+ struct ttm_tt *ttm = bo->ttm;
+ int i, ret;
+
+ MPASS(ttm != NULL);
+
+ if (ttm->state == tt_unpopulated) {
+ ret = ttm->bdev->driver->ttm_tt_populate(ttm);
+ if (ret)
+ return ret;
+ }
+
+ if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
+ /*
+ * We're mapping a single page, and the desired
+ * page protection is consistent with the bo.
+ */
+
+ map->bo_kmap_type = ttm_bo_map_kmap;
+ map->page = ttm->pages[start_page];
+ map->sf = sf_buf_alloc(map->page, 0);
+ map->virtual = (void *)sf_buf_kva(map->sf);
+ } else {
+ /*
+ * We need to use vmap to get the desired page protection
+ * or to make the buffer object look contiguous.
+ */
+ prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
+ VM_MEMATTR_WRITE_COMBINING :
+ ttm_io_prot(mem->placement);
+ map->bo_kmap_type = ttm_bo_map_vmap;
+ map->num_pages = num_pages;
+ map->virtual = (void *)kmem_alloc_nofault(kernel_map,
+ num_pages * PAGE_SIZE);
+ if (map->virtual != NULL) {
+ for (i = 0; i < num_pages; i++) {
+ /* XXXKIB hack */
+ pmap_page_set_memattr(ttm->pages[start_page +
+ i], prot);
+ }
+ pmap_qenter((vm_offset_t)map->virtual,
+ &ttm->pages[start_page], num_pages);
+ }
+ }
+ return (!map->virtual) ? -ENOMEM : 0;
+}
+
+int ttm_bo_kmap(struct ttm_buffer_object *bo,
+ unsigned long start_page, unsigned long num_pages,
+ struct ttm_bo_kmap_obj *map)
+{
+ struct ttm_mem_type_manager *man =
+ &bo->bdev->man[bo->mem.mem_type];
+ unsigned long offset, size;
+ int ret;
+
+ MPASS(list_empty(&bo->swap));
+ map->virtual = NULL;
+ map->bo = bo;
+ if (num_pages > bo->num_pages)
+ return -EINVAL;
+ if (start_page > bo->num_pages)
+ return -EINVAL;
+#if 0
+ if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
+ return -EPERM;
+#endif
+ (void) ttm_mem_io_lock(man, false);
+ ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
+ ttm_mem_io_unlock(man);
+ if (ret)
+ return ret;
+ if (!bo->mem.bus.is_iomem) {
+ return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
+ } else {
+ offset = start_page << PAGE_SHIFT;
+ size = num_pages << PAGE_SHIFT;
+ return ttm_bo_ioremap(bo, offset, size, map);
+ }
+}
+
+void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
+{
+ struct ttm_buffer_object *bo = map->bo;
+ struct ttm_mem_type_manager *man =
+ &bo->bdev->man[bo->mem.mem_type];
+
+ if (!map->virtual)
+ return;
+ switch (map->bo_kmap_type) {
+ case ttm_bo_map_iomap:
+ pmap_unmapdev((vm_offset_t)map->virtual, map->size);
+ break;
+ case ttm_bo_map_vmap:
+ pmap_qremove((vm_offset_t)(map->virtual), map->num_pages);
+ kmem_free(kernel_map, (vm_offset_t)map->virtual,
+ map->num_pages * PAGE_SIZE);
+ break;
+ case ttm_bo_map_kmap:
+ sf_buf_free(map->sf);
+ break;
+ case ttm_bo_map_premapped:
+ break;
+ default:
+ MPASS(0);
+ }
+ (void) ttm_mem_io_lock(man, false);
+ ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
+ ttm_mem_io_unlock(man);
+ map->virtual = NULL;
+ map->page = NULL;
+ map->sf = NULL;
+}
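+
+/*
+ * Usage sketch (illustrative only, compiled out): "example_copy_to_bo"
+ * and its callers are hypothetical.  Map a page range of a reserved
+ * buffer object into kernel virtual memory, access it with the CPU,
+ * then tear the mapping down again.
+ */
+#if 0
+static int
+example_copy_to_bo(struct ttm_buffer_object *bo, const void *src,
+    size_t len)
+{
+	struct ttm_bo_kmap_obj map;
+	bool is_iomem;
+	int ret;
+
+	/* The caller must have reserved the bo before mapping it. */
+	ret = ttm_bo_kmap(bo, 0, howmany(len, PAGE_SIZE), &map);
+	if (ret)
+		return (ret);
+	memcpy(ttm_kmap_obj_virtual(&map, &is_iomem), src, len);
+	ttm_bo_kunmap(&map);
+	return (0);
+}
+#endif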
+
+int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
+ void *sync_obj,
+ bool evict,
+ bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_bo_driver *driver = bdev->driver;
+ struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
+ struct ttm_mem_reg *old_mem = &bo->mem;
+ int ret;
+ struct ttm_buffer_object *ghost_obj;
+ void *tmp_obj = NULL;
+
+ mtx_lock(&bdev->fence_lock);
+ if (bo->sync_obj) {
+ tmp_obj = bo->sync_obj;
+ bo->sync_obj = NULL;
+ }
+ bo->sync_obj = driver->sync_obj_ref(sync_obj);
+ if (evict) {
+ ret = ttm_bo_wait(bo, false, false, false);
+ mtx_unlock(&bdev->fence_lock);
+ if (tmp_obj)
+ driver->sync_obj_unref(&tmp_obj);
+ if (ret)
+ return ret;
+
+ if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
+ (bo->ttm != NULL)) {
+ ttm_tt_unbind(bo->ttm);
+ ttm_tt_destroy(bo->ttm);
+ bo->ttm = NULL;
+ }
+ ttm_bo_free_old_node(bo);
+ } else {
+ /**
+ * This should help pipeline ordinary buffer moves.
+ *
+ * Hang old buffer memory on a new buffer object,
+ * and leave it to be released when the GPU
+ * operation has completed.
+ */
+
+ set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
+
+ /* ttm_buffer_object_transfer accesses bo->sync_obj */
+ ret = ttm_buffer_object_transfer(bo, &ghost_obj);
+ mtx_unlock(&bdev->fence_lock);
+ if (tmp_obj)
+ driver->sync_obj_unref(&tmp_obj);
+
+ if (ret)
+ return ret;
+
+ /**
+ * If we're not moving to fixed memory, the TTM object
+	 * needs to stay alive. Otherwise hang it on the ghost
+ * bo to be unbound and destroyed.
+ */
+
+ if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
+ ghost_obj->ttm = NULL;
+ else
+ bo->ttm = NULL;
+
+ ttm_bo_unreserve(ghost_obj);
+ ttm_bo_unref(&ghost_obj);
+ }
+
+ *old_mem = *new_mem;
+ new_mem->mm_node = NULL;
+
+ return 0;
+}
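+
+/*
+ * Illustrative driver-side flow (compiled out; "example_move" and
+ * "example_emit_gpu_copy" are hypothetical): a driver's move callback
+ * emits a GPU copy, obtains a sync object for it, and lets
+ * ttm_bo_move_accel_cleanup() either wait for the fence (eviction) or
+ * hang the old backing store on a ghost object until it signals.
+ */
+#if 0
+static int
+example_move(struct ttm_buffer_object *bo, bool evict,
+    bool interruptible, bool no_wait_gpu, struct ttm_mem_reg *new_mem)
+{
+	void *fence;
+
+	/* Hypothetical helper that queues the copy and returns a fence. */
+	fence = example_emit_gpu_copy(bo, new_mem);
+	return (ttm_bo_move_accel_cleanup(bo, fence, evict, no_wait_gpu,
+	    new_mem));
+}
+#endif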
diff --git a/sys/dev/drm2/ttm/ttm_bo_vm.c b/sys/dev/drm2/ttm/ttm_bo_vm.c
new file mode 100644
index 0000000..03e2f2b
--- /dev/null
+++ b/sys/dev/drm2/ttm/ttm_bo_vm.c
@@ -0,0 +1,492 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+/*
+ * Copyright (c) 2013 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * Portions of this software were developed by Konstantin Belousov
+ * <kib@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_vm.h"
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/ttm/ttm_module.h>
+#include <dev/drm2/ttm/ttm_bo_driver.h>
+#include <dev/drm2/ttm/ttm_placement.h>
+
+#include <vm/vm.h>
+#include <vm/vm_page.h>
+
+#define TTM_BO_VM_NUM_PREFAULT 16
+
+RB_GENERATE(ttm_bo_device_buffer_objects, ttm_buffer_object, vm_rb,
+ ttm_bo_cmp_rb_tree_items);
+
+int
+ttm_bo_cmp_rb_tree_items(struct ttm_buffer_object *a,
+ struct ttm_buffer_object *b)
+{
+
+ if (a->vm_node->start < b->vm_node->start) {
+ return (-1);
+ } else if (a->vm_node->start > b->vm_node->start) {
+ return (1);
+ } else {
+ return (0);
+ }
+}
+
+static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
+ unsigned long page_start,
+ unsigned long num_pages)
+{
+ unsigned long cur_offset;
+ struct ttm_buffer_object *bo;
+ struct ttm_buffer_object *best_bo = NULL;
+
+ RB_FOREACH(bo, ttm_bo_device_buffer_objects, &bdev->addr_space_rb) {
+ cur_offset = bo->vm_node->start;
+ if (page_start >= cur_offset) {
+ best_bo = bo;
+ if (page_start == cur_offset)
+ break;
+ }
+ }
+
+ if (unlikely(best_bo == NULL))
+ return NULL;
+
+ if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
+ (page_start + num_pages)))
+ return NULL;
+
+ return best_bo;
+}
+
+static int
+ttm_bo_vm_fault(vm_object_t vm_obj, vm_ooffset_t offset,
+ int prot, vm_page_t *mres)
+{
+
+ struct ttm_buffer_object *bo = vm_obj->handle;
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_tt *ttm = NULL;
+ vm_page_t m, oldm;
+ int ret;
+ int retval = VM_PAGER_OK;
+ struct ttm_mem_type_manager *man =
+ &bdev->man[bo->mem.mem_type];
+
+ vm_object_pip_add(vm_obj, 1);
+ oldm = *mres;
+ if (oldm != NULL) {
+ vm_page_lock(oldm);
+ vm_page_remove(oldm);
+ vm_page_unlock(oldm);
+ *mres = NULL;
+ } else
+ oldm = NULL;
+retry:
+ VM_OBJECT_UNLOCK(vm_obj);
+ m = NULL;
+
+reserve:
+ mtx_lock(&bo->glob->lru_lock);
+ ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
+ mtx_unlock(&bo->glob->lru_lock);
+ if (unlikely(ret != 0)) {
+ if (ret == -EBUSY) {
+ kern_yield(0);
+ goto reserve;
+ }
+ }
+
+ if (bdev->driver->fault_reserve_notify) {
+ ret = bdev->driver->fault_reserve_notify(bo);
+ switch (ret) {
+ case 0:
+ break;
+ case -EBUSY:
+ case -ERESTART:
+ case -EINTR:
+ kern_yield(0);
+ goto reserve;
+ default:
+ retval = VM_PAGER_ERROR;
+ goto out_unlock;
+ }
+ }
+
+ /*
+ * Wait for buffer data in transit, due to a pipelined
+ * move.
+ */
+
+ mtx_lock(&bdev->fence_lock);
+ if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
+ ret = ttm_bo_wait(bo, false, true, false);
+ mtx_unlock(&bdev->fence_lock);
+ if (unlikely(ret != 0)) {
+ retval = VM_PAGER_ERROR;
+ goto out_unlock;
+ }
+ } else
+ mtx_unlock(&bdev->fence_lock);
+
+ ret = ttm_mem_io_lock(man, true);
+ if (unlikely(ret != 0)) {
+ retval = VM_PAGER_ERROR;
+ goto out_unlock;
+ }
+ ret = ttm_mem_io_reserve_vm(bo);
+ if (unlikely(ret != 0)) {
+ retval = VM_PAGER_ERROR;
+ goto out_io_unlock;
+ }
+
+ /*
+ * Strictly, we're not allowed to modify vma->vm_page_prot here,
+ * since the mmap_sem is only held in read mode. However, we
+ * modify only the caching bits of vma->vm_page_prot and
+ * consider those bits protected by
+ * the bo->mutex, as we should be the only writers.
+ * There shouldn't really be any readers of these bits except
+ * within vm_insert_mixed()? fork?
+ *
+ * TODO: Add a list of vmas to the bo, and change the
+ * vma->vm_page_prot when the object changes caching policy, with
+ * the correct locks held.
+ */
+ if (!bo->mem.bus.is_iomem) {
+		/* Allocate all pages at once, the most common usage */
+ ttm = bo->ttm;
+ if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
+ retval = VM_PAGER_ERROR;
+ goto out_io_unlock;
+ }
+ }
+
+ if (bo->mem.bus.is_iomem) {
+ m = vm_phys_fictitious_to_vm_page(bo->mem.bus.base +
+ bo->mem.bus.offset + offset);
+ pmap_page_set_memattr(m, ttm_io_prot(bo->mem.placement));
+ } else {
+ ttm = bo->ttm;
+ m = ttm->pages[OFF_TO_IDX(offset)];
+ if (unlikely(!m)) {
+ retval = VM_PAGER_ERROR;
+ goto out_io_unlock;
+ }
+ pmap_page_set_memattr(m,
+ (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
+ VM_MEMATTR_WRITE_BACK : ttm_io_prot(bo->mem.placement));
+ }
+
+ VM_OBJECT_LOCK(vm_obj);
+ if ((m->flags & VPO_BUSY) != 0) {
+ vm_page_sleep(m, "ttmpbs");
+ ttm_mem_io_unlock(man);
+ ttm_bo_unreserve(bo);
+ goto retry;
+ }
+ m->valid = VM_PAGE_BITS_ALL;
+ *mres = m;
+ vm_page_lock(m);
+ vm_page_insert(m, vm_obj, OFF_TO_IDX(offset));
+ vm_page_unlock(m);
+ vm_page_busy(m);
+
+ if (oldm != NULL) {
+ vm_page_lock(oldm);
+ vm_page_free(oldm);
+ vm_page_unlock(oldm);
+ }
+
+out_io_unlock1:
+ ttm_mem_io_unlock(man);
+out_unlock1:
+ ttm_bo_unreserve(bo);
+ vm_object_pip_wakeup(vm_obj);
+ return (retval);
+
+out_io_unlock:
+ VM_OBJECT_LOCK(vm_obj);
+ goto out_io_unlock1;
+
+out_unlock:
+ VM_OBJECT_LOCK(vm_obj);
+ goto out_unlock1;
+}
+
+static int
+ttm_bo_vm_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
+ vm_ooffset_t foff, struct ucred *cred, u_short *color)
+{
+ struct ttm_buffer_object *bo = handle;
+
+ *color = 0;
+ (void)ttm_bo_reference(bo);
+ return (0);
+}
+
+static void
+ttm_bo_vm_dtor(void *handle)
+{
+ struct ttm_buffer_object *bo = handle;
+
+ ttm_bo_unref(&bo);
+}
+
+static struct cdev_pager_ops ttm_pager_ops = {
+ .cdev_pg_fault = ttm_bo_vm_fault,
+ .cdev_pg_ctor = ttm_bo_vm_ctor,
+ .cdev_pg_dtor = ttm_bo_vm_dtor
+};
+
+int
+ttm_bo_mmap_single(struct ttm_bo_device *bdev, vm_ooffset_t *offset, vm_size_t size,
+ struct vm_object **obj_res, int nprot)
+{
+ struct ttm_bo_driver *driver;
+ struct ttm_buffer_object *bo;
+ struct vm_object *vm_obj;
+ int ret;
+
+ rw_wlock(&bdev->vm_lock);
+ bo = ttm_bo_vm_lookup_rb(bdev, OFF_TO_IDX(*offset), OFF_TO_IDX(size));
+ if (likely(bo != NULL))
+ refcount_acquire(&bo->kref);
+ rw_wunlock(&bdev->vm_lock);
+
+ if (unlikely(bo == NULL)) {
+ printf("[TTM] Could not find buffer object to map\n");
+ return (EINVAL);
+ }
+
+ driver = bo->bdev->driver;
+ if (unlikely(!driver->verify_access)) {
+ ret = EPERM;
+ goto out_unref;
+ }
+ ret = -driver->verify_access(bo);
+ if (unlikely(ret != 0))
+ goto out_unref;
+
+ vm_obj = cdev_pager_allocate(bo, OBJT_MGTDEVICE, &ttm_pager_ops,
+ size, nprot, 0, curthread->td_ucred);
+ if (vm_obj == NULL) {
+ ret = EINVAL;
+ goto out_unref;
+ }
+ /*
+ * Note: We're transferring the bo reference to vm_obj->handle here.
+ */
+ *offset = 0;
+ *obj_res = vm_obj;
+ return 0;
+out_unref:
+ ttm_bo_unref(&bo);
+ return ret;
+}
+
+#if 0
+int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
+{
+ if (vma->vm_pgoff != 0)
+ return -EACCES;
+
+ vma->vm_ops = &ttm_bo_vm_ops;
+ vma->vm_private_data = ttm_bo_reference(bo);
+ vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
+ return 0;
+}
+
+ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
+ const char __user *wbuf, char __user *rbuf, size_t count,
+ loff_t *f_pos, bool write)
+{
+ struct ttm_buffer_object *bo;
+ struct ttm_bo_driver *driver;
+ struct ttm_bo_kmap_obj map;
+ unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
+ unsigned long kmap_offset;
+ unsigned long kmap_end;
+ unsigned long kmap_num;
+ size_t io_size;
+ unsigned int page_offset;
+ char *virtual;
+ int ret;
+ bool no_wait = false;
+ bool dummy;
+
+ read_lock(&bdev->vm_lock);
+ bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
+ if (likely(bo != NULL))
+ ttm_bo_reference(bo);
+ read_unlock(&bdev->vm_lock);
+
+ if (unlikely(bo == NULL))
+ return -EFAULT;
+
+ driver = bo->bdev->driver;
+ if (unlikely(!driver->verify_access)) {
+ ret = -EPERM;
+ goto out_unref;
+ }
+
+ ret = driver->verify_access(bo, filp);
+ if (unlikely(ret != 0))
+ goto out_unref;
+
+ kmap_offset = dev_offset - bo->vm_node->start;
+ if (unlikely(kmap_offset >= bo->num_pages)) {
+ ret = -EFBIG;
+ goto out_unref;
+ }
+
+ page_offset = *f_pos & ~PAGE_MASK;
+ io_size = bo->num_pages - kmap_offset;
+ io_size = (io_size << PAGE_SHIFT) - page_offset;
+ if (count < io_size)
+ io_size = count;
+
+ kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
+ kmap_num = kmap_end - kmap_offset + 1;
+
+ ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
+
+ switch (ret) {
+ case 0:
+ break;
+ case -EBUSY:
+ ret = -EAGAIN;
+ goto out_unref;
+ default:
+ goto out_unref;
+ }
+
+ ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
+ if (unlikely(ret != 0)) {
+ ttm_bo_unreserve(bo);
+ goto out_unref;
+ }
+
+ virtual = ttm_kmap_obj_virtual(&map, &dummy);
+ virtual += page_offset;
+
+ if (write)
+ ret = copy_from_user(virtual, wbuf, io_size);
+ else
+ ret = copy_to_user(rbuf, virtual, io_size);
+
+ ttm_bo_kunmap(&map);
+ ttm_bo_unreserve(bo);
+ ttm_bo_unref(&bo);
+
+ if (unlikely(ret != 0))
+ return -EFBIG;
+
+ *f_pos += io_size;
+
+ return io_size;
+out_unref:
+ ttm_bo_unref(&bo);
+ return ret;
+}
+
+ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
+ char __user *rbuf, size_t count, loff_t *f_pos,
+ bool write)
+{
+ struct ttm_bo_kmap_obj map;
+ unsigned long kmap_offset;
+ unsigned long kmap_end;
+ unsigned long kmap_num;
+ size_t io_size;
+ unsigned int page_offset;
+ char *virtual;
+ int ret;
+ bool no_wait = false;
+ bool dummy;
+
+ kmap_offset = (*f_pos >> PAGE_SHIFT);
+ if (unlikely(kmap_offset >= bo->num_pages))
+ return -EFBIG;
+
+ page_offset = *f_pos & ~PAGE_MASK;
+ io_size = bo->num_pages - kmap_offset;
+ io_size = (io_size << PAGE_SHIFT) - page_offset;
+ if (count < io_size)
+ io_size = count;
+
+ kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
+ kmap_num = kmap_end - kmap_offset + 1;
+
+ ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
+
+ switch (ret) {
+ case 0:
+ break;
+ case -EBUSY:
+ return -EAGAIN;
+ default:
+ return ret;
+ }
+
+ ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
+ if (unlikely(ret != 0)) {
+ ttm_bo_unreserve(bo);
+ return ret;
+ }
+
+ virtual = ttm_kmap_obj_virtual(&map, &dummy);
+ virtual += page_offset;
+
+ if (write)
+ ret = copy_from_user(virtual, wbuf, io_size);
+ else
+ ret = copy_to_user(rbuf, virtual, io_size);
+
+ ttm_bo_kunmap(&map);
+ ttm_bo_unreserve(bo);
+ ttm_bo_unref(&bo);
+
+ if (unlikely(ret != 0))
+ return ret;
+
+ *f_pos += io_size;
+
+ return io_size;
+}
+#endif
diff --git a/sys/dev/drm2/ttm/ttm_execbuf_util.c b/sys/dev/drm2/ttm/ttm_execbuf_util.c
new file mode 100644
index 0000000..ce5cf15
--- /dev/null
+++ b/sys/dev/drm2/ttm/ttm_execbuf_util.c
@@ -0,0 +1,230 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/ttm/ttm_execbuf_util.h>
+#include <dev/drm2/ttm/ttm_bo_driver.h>
+#include <dev/drm2/ttm/ttm_placement.h>
+
+static void ttm_eu_backoff_reservation_locked(struct list_head *list)
+{
+ struct ttm_validate_buffer *entry;
+
+ list_for_each_entry(entry, list, head) {
+ struct ttm_buffer_object *bo = entry->bo;
+ if (!entry->reserved)
+ continue;
+
+ if (entry->removed) {
+ ttm_bo_add_to_lru(bo);
+ entry->removed = false;
+
+ }
+ entry->reserved = false;
+ atomic_set(&bo->reserved, 0);
+ wakeup(bo);
+ }
+}
+
+static void ttm_eu_del_from_lru_locked(struct list_head *list)
+{
+ struct ttm_validate_buffer *entry;
+
+ list_for_each_entry(entry, list, head) {
+ struct ttm_buffer_object *bo = entry->bo;
+ if (!entry->reserved)
+ continue;
+
+ if (!entry->removed) {
+ entry->put_count = ttm_bo_del_from_lru(bo);
+ entry->removed = true;
+ }
+ }
+}
+
+static void ttm_eu_list_ref_sub(struct list_head *list)
+{
+ struct ttm_validate_buffer *entry;
+
+ list_for_each_entry(entry, list, head) {
+ struct ttm_buffer_object *bo = entry->bo;
+
+ if (entry->put_count) {
+ ttm_bo_list_ref_sub(bo, entry->put_count, true);
+ entry->put_count = 0;
+ }
+ }
+}
+
+static int ttm_eu_wait_unreserved_locked(struct list_head *list,
+ struct ttm_buffer_object *bo)
+{
+ int ret;
+
+ ttm_eu_del_from_lru_locked(list);
+ ret = ttm_bo_wait_unreserved_locked(bo, true);
+ if (unlikely(ret != 0))
+ ttm_eu_backoff_reservation_locked(list);
+ return ret;
+}
+
+
+void ttm_eu_backoff_reservation(struct list_head *list)
+{
+ struct ttm_validate_buffer *entry;
+ struct ttm_bo_global *glob;
+
+ if (list_empty(list))
+ return;
+
+ entry = list_first_entry(list, struct ttm_validate_buffer, head);
+ glob = entry->bo->glob;
+ mtx_lock(&glob->lru_lock);
+ ttm_eu_backoff_reservation_locked(list);
+ mtx_unlock(&glob->lru_lock);
+}
+
+/*
+ * Reserve buffers for validation.
+ *
+ * If a buffer in the list is marked for CPU access, we back off and
+ * wait for that buffer to become free for GPU access.
+ *
+ * If a buffer is reserved for another validation, the validator with
+ * the highest validation sequence backs off and waits for that buffer
+ * to become unreserved. This prevents deadlocks when validating multiple
+ * buffers in different orders.
+ */
+
+int ttm_eu_reserve_buffers(struct list_head *list)
+{
+ struct ttm_bo_global *glob;
+ struct ttm_validate_buffer *entry;
+ int ret;
+ uint32_t val_seq;
+
+ if (list_empty(list))
+ return 0;
+
+ list_for_each_entry(entry, list, head) {
+ entry->reserved = false;
+ entry->put_count = 0;
+ entry->removed = false;
+ }
+
+ entry = list_first_entry(list, struct ttm_validate_buffer, head);
+ glob = entry->bo->glob;
+
+ mtx_lock(&glob->lru_lock);
+retry_locked:
+ val_seq = entry->bo->bdev->val_seq++;
+
+ list_for_each_entry(entry, list, head) {
+ struct ttm_buffer_object *bo = entry->bo;
+
+retry_this_bo:
+ ret = ttm_bo_reserve_locked(bo, true, true, true, val_seq);
+ switch (ret) {
+ case 0:
+ break;
+ case -EBUSY:
+ ret = ttm_eu_wait_unreserved_locked(list, bo);
+ if (unlikely(ret != 0)) {
+ mtx_unlock(&glob->lru_lock);
+ ttm_eu_list_ref_sub(list);
+ return ret;
+ }
+ goto retry_this_bo;
+ case -EAGAIN:
+ ttm_eu_backoff_reservation_locked(list);
+ ttm_eu_list_ref_sub(list);
+ ret = ttm_bo_wait_unreserved_locked(bo, true);
+ if (unlikely(ret != 0)) {
+ mtx_unlock(&glob->lru_lock);
+ return ret;
+ }
+ goto retry_locked;
+ default:
+ ttm_eu_backoff_reservation_locked(list);
+ mtx_unlock(&glob->lru_lock);
+ ttm_eu_list_ref_sub(list);
+ return ret;
+ }
+
+ entry->reserved = true;
+ if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
+ ttm_eu_backoff_reservation_locked(list);
+ mtx_unlock(&glob->lru_lock);
+ ttm_eu_list_ref_sub(list);
+ return -EBUSY;
+ }
+ }
+
+ ttm_eu_del_from_lru_locked(list);
+ mtx_unlock(&glob->lru_lock);
+ ttm_eu_list_ref_sub(list);
+
+ return 0;
+}
+
+void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
+{
+ struct ttm_validate_buffer *entry;
+ struct ttm_buffer_object *bo;
+ struct ttm_bo_global *glob;
+ struct ttm_bo_device *bdev;
+ struct ttm_bo_driver *driver;
+
+ if (list_empty(list))
+ return;
+
+ bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
+ bdev = bo->bdev;
+ driver = bdev->driver;
+ glob = bo->glob;
+
+ mtx_lock(&glob->lru_lock);
+ mtx_lock(&bdev->fence_lock);
+
+ list_for_each_entry(entry, list, head) {
+ bo = entry->bo;
+ entry->old_sync_obj = bo->sync_obj;
+ bo->sync_obj = driver->sync_obj_ref(sync_obj);
+ ttm_bo_unreserve_locked(bo);
+ entry->reserved = false;
+ }
+ mtx_unlock(&bdev->fence_lock);
+ mtx_unlock(&glob->lru_lock);
+
+ list_for_each_entry(entry, list, head) {
+ if (entry->old_sync_obj)
+ driver->sync_obj_unref(&entry->old_sync_obj);
+ }
+}
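+
+/*
+ * Typical command-submission flow (compiled out sketch;
+ * "example_submit" and "example_emit_commands" are hypothetical):
+ * reserve every bo on the validation list, build and submit the
+ * command stream, then fence the buffers so they return to the LRU
+ * lists once the GPU is done with them.
+ */
+#if 0
+static int
+example_submit(struct list_head *val_list)
+{
+	void *fence;
+	int ret;
+
+	ret = ttm_eu_reserve_buffers(val_list);
+	if (ret)
+		return (ret);
+	/* Hypothetical: emit the command stream and create a fence. */
+	ret = example_emit_commands(val_list, &fence);
+	if (ret) {
+		ttm_eu_backoff_reservation(val_list);
+		return (ret);
+	}
+	ttm_eu_fence_buffer_objects(val_list, fence);
+	return (0);
+}
+#endif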
diff --git a/sys/dev/drm2/ttm/ttm_execbuf_util.h b/sys/dev/drm2/ttm/ttm_execbuf_util.h
new file mode 100644
index 0000000..68b0eba
--- /dev/null
+++ b/sys/dev/drm2/ttm/ttm_execbuf_util.h
@@ -0,0 +1,109 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+/* $FreeBSD$ */
+
+#ifndef _TTM_EXECBUF_UTIL_H_
+#define _TTM_EXECBUF_UTIL_H_
+
+#include <dev/drm2/ttm/ttm_bo_api.h>
+
+/**
+ * struct ttm_validate_buffer
+ *
+ * @head: list head for thread-private list.
+ * @bo: refcounted buffer object pointer.
+ * @reserved: Indicates whether @bo has been reserved for validation.
+ * @removed: Indicates whether @bo has been removed from lru lists.
+ * @put_count: Number of outstanding references on bo::list_kref.
+ * @old_sync_obj: Pointer to a sync object about to be unreferenced
+ */
+
+struct ttm_validate_buffer {
+ struct list_head head;
+ struct ttm_buffer_object *bo;
+ bool reserved;
+ bool removed;
+ int put_count;
+ void *old_sync_obj;
+};
+
+/**
+ * function ttm_eu_backoff_reservation
+ *
+ * @list: thread private list of ttm_validate_buffer structs.
+ *
+ * Undoes all buffer validation reservations for bos pointed to by
+ * the list entries.
+ */
+
+extern void ttm_eu_backoff_reservation(struct list_head *list);
+
+/**
+ * function ttm_eu_reserve_buffers
+ *
+ * @list: thread private list of ttm_validate_buffer structs.
+ *
+ * Tries to reserve bos pointed to by the list entries for validation.
+ * If the function returns 0, all buffers are marked as "unfenced",
+ * taken off the lru lists and are not synced for write CPU usage.
+ *
+ * If the function detects a deadlock due to multiple threads trying to
+ * reserve the same buffers in reverse order, all threads except one will
+ * back off and retry. This function may sleep while waiting for
+ * CPU write reservations to be cleared, and for other threads to
+ * unreserve their buffers.
+ *
+ * This function may return -ERESTART or -EAGAIN if the calling process
+ * receives a signal while waiting. In that case, no buffers on the list
+ * will be reserved upon return.
+ *
+ * Buffers reserved by this function should be unreserved by
+ * a call to either ttm_eu_backoff_reservation() or
+ * ttm_eu_fence_buffer_objects() when command submission is complete or
+ * has failed.
+ */
+
+extern int ttm_eu_reserve_buffers(struct list_head *list);
+
+/**
+ * function ttm_eu_fence_buffer_objects.
+ *
+ * @list: thread private list of ttm_validate_buffer structs.
+ * @sync_obj: The new sync object for the buffers.
+ *
+ * This function should be called when command submission is complete, and
+ * it will add a new sync object to bos pointed to by entries on @list.
+ * It also unreserves all buffers, putting them on lru lists.
+ *
+ */
+
+extern void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj);
+
+#endif
diff --git a/sys/dev/drm2/ttm/ttm_lock.c b/sys/dev/drm2/ttm/ttm_lock.c
new file mode 100644
index 0000000..8ec3041
--- /dev/null
+++ b/sys/dev/drm2/ttm/ttm_lock.c
@@ -0,0 +1,340 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+/*
+ * Copyright (c) 2013 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * Portions of this software were developed by Konstantin Belousov
+ * <kib@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <dev/drm2/ttm/ttm_lock.h>
+#include <dev/drm2/ttm/ttm_module.h>
+
+#define TTM_WRITE_LOCK_PENDING (1 << 0)
+#define TTM_VT_LOCK_PENDING (1 << 1)
+#define TTM_SUSPEND_LOCK_PENDING (1 << 2)
+#define TTM_VT_LOCK (1 << 3)
+#define TTM_SUSPEND_LOCK (1 << 4)
+
+void ttm_lock_init(struct ttm_lock *lock)
+{
+ mtx_init(&lock->lock, "ttmlk", NULL, MTX_DEF);
+ lock->rw = 0;
+ lock->flags = 0;
+ lock->kill_takers = false;
+ lock->signal = SIGKILL;
+}
+
+static void
+ttm_lock_send_sig(int signo)
+{
+ struct proc *p;
+
+ p = curproc; /* XXXKIB curthread ? */
+ PROC_LOCK(p);
+ kern_psignal(p, signo);
+ PROC_UNLOCK(p);
+}
+
+void ttm_read_unlock(struct ttm_lock *lock)
+{
+ mtx_lock(&lock->lock);
+ if (--lock->rw == 0)
+ wakeup(lock);
+ mtx_unlock(&lock->lock);
+}
+
+static bool __ttm_read_lock(struct ttm_lock *lock)
+{
+ bool locked = false;
+
+ if (unlikely(lock->kill_takers)) {
+ ttm_lock_send_sig(lock->signal);
+ return false;
+ }
+ if (lock->rw >= 0 && lock->flags == 0) {
+ ++lock->rw;
+ locked = true;
+ }
+ return locked;
+}
+
+int
+ttm_read_lock(struct ttm_lock *lock, bool interruptible)
+{
+ const char *wmsg;
+ int flags, ret;
+
+ ret = 0;
+ if (interruptible) {
+ flags = PCATCH;
+ wmsg = "ttmri";
+ } else {
+ flags = 0;
+ wmsg = "ttmr";
+ }
+ mtx_lock(&lock->lock);
+ while (!__ttm_read_lock(lock)) {
+ ret = msleep(lock, &lock->lock, flags, wmsg, 0);
+ if (ret != 0)
+ break;
+	}
+	mtx_unlock(&lock->lock);
+	return (-ret);
+}
+
+static bool __ttm_read_trylock(struct ttm_lock *lock, bool *locked)
+{
+ bool block = true;
+
+ *locked = false;
+
+ if (unlikely(lock->kill_takers)) {
+ ttm_lock_send_sig(lock->signal);
+ return false;
+ }
+ if (lock->rw >= 0 && lock->flags == 0) {
+ ++lock->rw;
+ block = false;
+ *locked = true;
+ } else if (lock->flags == 0) {
+ block = false;
+ }
+
+ return !block;
+}
+
+int ttm_read_trylock(struct ttm_lock *lock, bool interruptible)
+{
+ const char *wmsg;
+ int flags, ret;
+ bool locked;
+
+ ret = 0;
+ if (interruptible) {
+ flags = PCATCH;
+ wmsg = "ttmrti";
+ } else {
+ flags = 0;
+ wmsg = "ttmrt";
+ }
+ mtx_lock(&lock->lock);
+ while (!__ttm_read_trylock(lock, &locked)) {
+ ret = msleep(lock, &lock->lock, flags, wmsg, 0);
+ if (ret != 0)
+ break;
+ }
+ MPASS(!locked || ret == 0);
+ mtx_unlock(&lock->lock);
+
+ return (locked) ? 0 : -EBUSY;
+}
+
+void ttm_write_unlock(struct ttm_lock *lock)
+{
+ mtx_lock(&lock->lock);
+ lock->rw = 0;
+ wakeup(lock);
+ mtx_unlock(&lock->lock);
+}
+
+static bool __ttm_write_lock(struct ttm_lock *lock)
+{
+ bool locked = false;
+
+ if (unlikely(lock->kill_takers)) {
+ ttm_lock_send_sig(lock->signal);
+ return false;
+ }
+ if (lock->rw == 0 && ((lock->flags & ~TTM_WRITE_LOCK_PENDING) == 0)) {
+ lock->rw = -1;
+ lock->flags &= ~TTM_WRITE_LOCK_PENDING;
+ locked = true;
+ } else {
+ lock->flags |= TTM_WRITE_LOCK_PENDING;
+ }
+ return locked;
+}
+
+int
+ttm_write_lock(struct ttm_lock *lock, bool interruptible)
+{
+ const char *wmsg;
+ int flags, ret;
+
+ ret = 0;
+ if (interruptible) {
+ flags = PCATCH;
+ wmsg = "ttmwi";
+ } else {
+ flags = 0;
+ wmsg = "ttmw";
+ }
+ mtx_lock(&lock->lock);
+ /* XXXKIB: linux uses __ttm_read_lock for uninterruptible sleeps */
+ while (!__ttm_write_lock(lock)) {
+ ret = msleep(lock, &lock->lock, flags, wmsg, 0);
+ if (interruptible && ret != 0) {
+ lock->flags &= ~TTM_WRITE_LOCK_PENDING;
+ wakeup(lock);
+ break;
+ }
+ }
+ mtx_unlock(&lock->lock);
+
+ return (-ret);
+}
+
+void ttm_write_lock_downgrade(struct ttm_lock *lock)
+{
+ mtx_lock(&lock->lock);
+ lock->rw = 1;
+ wakeup(lock);
+ mtx_unlock(&lock->lock);
+}
+
+static int __ttm_vt_unlock(struct ttm_lock *lock)
+{
+ int ret = 0;
+
+ mtx_lock(&lock->lock);
+ if (unlikely(!(lock->flags & TTM_VT_LOCK)))
+ ret = -EINVAL;
+ lock->flags &= ~TTM_VT_LOCK;
+ wakeup(lock);
+ mtx_unlock(&lock->lock);
+
+ return ret;
+}
+
+static void ttm_vt_lock_remove(struct ttm_base_object **p_base)
+{
+ struct ttm_base_object *base = *p_base;
+ struct ttm_lock *lock = container_of(base, struct ttm_lock, base);
+ int ret;
+
+ *p_base = NULL;
+ ret = __ttm_vt_unlock(lock);
+ MPASS(ret == 0);
+}
+
+static bool __ttm_vt_lock(struct ttm_lock *lock)
+{
+ bool locked = false;
+
+ if (lock->rw == 0) {
+ lock->flags &= ~TTM_VT_LOCK_PENDING;
+ lock->flags |= TTM_VT_LOCK;
+ locked = true;
+ } else {
+ lock->flags |= TTM_VT_LOCK_PENDING;
+ }
+ return locked;
+}
+
+int ttm_vt_lock(struct ttm_lock *lock,
+ bool interruptible,
+ struct ttm_object_file *tfile)
+{
+ const char *wmsg;
+ int flags, ret;
+
+ ret = 0;
+ if (interruptible) {
+ flags = PCATCH;
+ wmsg = "ttmwi";
+ } else {
+ flags = 0;
+ wmsg = "ttmw";
+ }
+ mtx_lock(&lock->lock);
+ while (!__ttm_vt_lock(lock)) {
+ ret = msleep(lock, &lock->lock, flags, wmsg, 0);
+ if (interruptible && ret != 0) {
+ lock->flags &= ~TTM_VT_LOCK_PENDING;
+ wakeup(lock);
+ break;
+ }
+	}
+	mtx_unlock(&lock->lock);
+
+ /*
+ * Add a base-object, the destructor of which will
+ * make sure the lock is released if the client dies
+ * while holding it.
+ */
+
+ ret = ttm_base_object_init(tfile, &lock->base, false,
+ ttm_lock_type, &ttm_vt_lock_remove, NULL);
+ if (ret)
+ (void)__ttm_vt_unlock(lock);
+ else
+ lock->vt_holder = tfile;
+
+ return (-ret);
+}
+
+int ttm_vt_unlock(struct ttm_lock *lock)
+{
+ return ttm_ref_object_base_unref(lock->vt_holder,
+ lock->base.hash.key, TTM_REF_USAGE);
+}
+
+void ttm_suspend_unlock(struct ttm_lock *lock)
+{
+ mtx_lock(&lock->lock);
+ lock->flags &= ~TTM_SUSPEND_LOCK;
+ wakeup(lock);
+ mtx_unlock(&lock->lock);
+}
+
+static bool __ttm_suspend_lock(struct ttm_lock *lock)
+{
+ bool locked = false;
+
+ if (lock->rw == 0) {
+ lock->flags &= ~TTM_SUSPEND_LOCK_PENDING;
+ lock->flags |= TTM_SUSPEND_LOCK;
+ locked = true;
+ } else {
+ lock->flags |= TTM_SUSPEND_LOCK_PENDING;
+ }
+ return locked;
+}
+
+void ttm_suspend_lock(struct ttm_lock *lock)
+{
+ mtx_lock(&lock->lock);
+ while (!__ttm_suspend_lock(lock))
+ msleep(lock, &lock->lock, 0, "ttms", 0);
+ mtx_unlock(&lock->lock);
+}
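+
+/*
+ * Usage sketch (compiled out; "example_validate_buffers" is
+ * hypothetical): ordinary submission paths take the lock in read
+ * mode, while a master switching VTs takes it in vt mode to block
+ * all validation until the switch has completed.
+ */
+#if 0
+static int
+example_validate_buffers(struct ttm_lock *lock)
+{
+	int ret;
+
+	ret = ttm_read_lock(lock, true);
+	if (ret)
+		return (ret);	/* -ERESTARTSYS: interrupted by a signal */
+	/* ... validate buffers and submit commands ... */
+	ttm_read_unlock(lock);
+	return (0);
+}
+#endif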
diff --git a/sys/dev/drm2/ttm/ttm_lock.h b/sys/dev/drm2/ttm/ttm_lock.h
new file mode 100644
index 0000000..6d45457
--- /dev/null
+++ b/sys/dev/drm2/ttm/ttm_lock.h
@@ -0,0 +1,228 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+/* $FreeBSD$ */
+
+/** @file ttm_lock.h
+ * This file implements a simple replacement for the buffer manager use
+ * of the DRM heavyweight hardware lock.
+ * The lock is a read-write lock. Taking it in read mode and write mode
+ * is relatively fast, and intended for in-kernel use only.
+ *
+ * The vt mode is used only when there is a need to block all
+ * user-space processes from validating buffers.
+ * It's allowed to leave kernel space with the vt lock held.
+ * If a user-space process dies while having the vt-lock,
+ * it will be released during the file descriptor release. The vt lock
+ * excludes write lock and read lock.
+ *
+ * The suspend mode is used to lock out all TTM users when preparing for
+ * and executing suspend operations.
+ *
+ */
+
+#ifndef _TTM_LOCK_H_
+#define _TTM_LOCK_H_
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm.h>
+#include <dev/drm2/ttm/ttm_object.h>
+
+/**
+ * struct ttm_lock
+ *
+ * @base: ttm base object used solely to release the lock if the client
+ * holding the lock dies.
+ * @lock: Mutex protecting the remaining lock members; waiters
+ * sleep on the lock object itself.
+ * @rw: Read-write lock counter. Protected by @lock.
+ * @flags: Lock state. Protected by @lock.
+ * @kill_takers: Boolean whether to kill takers of the lock.
+ * @signal: Signal to send when kill_takers is true.
+ * @vt_holder: Object file on whose behalf the vt lock is held.
+ */
+
+struct ttm_lock {
+ struct ttm_base_object base;
+ struct mtx lock;
+ int32_t rw;
+ uint32_t flags;
+ bool kill_takers;
+ int signal;
+ struct ttm_object_file *vt_holder;
+};
+
+
+/**
+ * ttm_lock_init
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * Initializes the lock.
+ */
+extern void ttm_lock_init(struct ttm_lock *lock);
+
+/**
+ * ttm_read_unlock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Releases a read lock.
+ */
+extern void ttm_read_unlock(struct ttm_lock *lock);
+
+/**
+ * ttm_read_lock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * @interruptible: Interruptible sleeping while waiting for a lock.
+ *
+ * Takes the lock in read mode.
+ * Returns:
+ * -ERESTARTSYS If interrupted by a signal and interruptible is true.
+ */
+extern int ttm_read_lock(struct ttm_lock *lock, bool interruptible);
+
+/**
+ * ttm_read_trylock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * @interruptible: Interruptible sleeping while waiting for a lock.
+ *
+ * Tries to take the lock in read mode. If the lock is already held
+ * in write mode, the function will return -EBUSY. If the lock is held
+ * in vt or suspend mode, the function will sleep until these modes
+ * are unlocked.
+ *
+ * Returns:
+ * -EBUSY The lock was already held in write mode.
+ * -ERESTARTSYS If interrupted by a signal and interruptible is true.
+ */
+extern int ttm_read_trylock(struct ttm_lock *lock, bool interruptible);
+
+/**
+ * ttm_lock_downgrade
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Downgrades a write lock to a read lock.
+ */
+extern void ttm_lock_downgrade(struct ttm_lock *lock);
+
+/**
+ * ttm_suspend_lock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Takes the lock in suspend mode. Excludes read and write mode.
+ */
+extern void ttm_suspend_lock(struct ttm_lock *lock);
+
+/**
+ * ttm_suspend_unlock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Releases a suspend lock
+ */
+extern void ttm_suspend_unlock(struct ttm_lock *lock);
+
+/**
+ * ttm_vt_lock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * @interruptible: Interruptible sleeping while waiting for a lock.
+ * @tfile: Pointer to a struct ttm_object_file to register the lock with.
+ *
+ * Takes the lock in vt mode.
+ * Returns:
+ * -ERESTARTSYS If interrupted by a signal and interruptible is true.
+ * -ENOMEM: Out of memory when locking.
+ */
+extern int ttm_vt_lock(struct ttm_lock *lock, bool interruptible,
+ struct ttm_object_file *tfile);
+
+/**
+ * ttm_vt_unlock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Releases a vt lock.
+ * Returns:
+ * -EINVAL If the lock was not held.
+ */
+extern int ttm_vt_unlock(struct ttm_lock *lock);
+
+/**
+ * ttm_write_unlock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Releases a write lock.
+ */
+extern void ttm_write_unlock(struct ttm_lock *lock);
+
+/**
+ * ttm_write_lock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * @interruptible: Interruptible sleeping while waiting for a lock.
+ *
+ * Takes the lock in write mode.
+ * Returns:
+ * -ERESTARTSYS If interrupted by a signal and interruptible is true.
+ */
+extern int ttm_write_lock(struct ttm_lock *lock, bool interruptible);
+
+void ttm_write_lock_downgrade(struct ttm_lock *lock);
+
+/**
+ * ttm_lock_set_kill
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * @val: Boolean whether to kill processes taking the lock.
+ * @signal: Signal to send to the process taking the lock.
+ *
+ * The kill-when-taking-lock functionality is used to kill processes that keep
+ * on using the TTM functionality when its resources have been taken down, for
+ * example when the X server exits. A typical sequence would look like this:
+ * - X server takes lock in write mode.
+ * - ttm_lock_set_kill() is called with @val set to true.
+ * - As part of X server exit, TTM resources are taken down.
+ * - X server releases the lock on file release.
+ * - Another dri client wants to render, takes the lock and is killed.
+ *
+ */
+static inline void ttm_lock_set_kill(struct ttm_lock *lock, bool val,
+ int signal)
+{
+ lock->kill_takers = val;
+ if (val)
+ lock->signal = signal;
+}
+
+#endif
diff --git a/sys/dev/drm2/ttm/ttm_memory.c b/sys/dev/drm2/ttm/ttm_memory.c
new file mode 100644
index 0000000..ee74d94
--- /dev/null
+++ b/sys/dev/drm2/ttm/ttm_memory.c
@@ -0,0 +1,471 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/ttm/ttm_memory.h>
+#include <dev/drm2/ttm/ttm_module.h>
+#include <dev/drm2/ttm/ttm_page_alloc.h>
+
+#define TTM_MEMORY_ALLOC_RETRIES 4
+
+struct ttm_mem_zone {
+ u_int kobj_ref;
+ struct ttm_mem_global *glob;
+ const char *name;
+ uint64_t zone_mem;
+ uint64_t emer_mem;
+ uint64_t max_mem;
+ uint64_t swap_limit;
+ uint64_t used_mem;
+};
+
+MALLOC_DEFINE(M_TTM_ZONE, "ttm_zone", "TTM Zone");
+
+static void ttm_mem_zone_kobj_release(struct ttm_mem_zone *zone)
+{
+
+	printf("[TTM] Zone %7s: Used memory at exit: %llu kiB\n",
+ zone->name, (unsigned long long)zone->used_mem >> 10);
+ free(zone, M_TTM_ZONE);
+}
+
+#if 0
+/* XXXKIB sysctl */
+static ssize_t ttm_mem_zone_show(struct ttm_mem_zone *zone,
+ struct attribute *attr,
+ char *buffer)
+{
+ uint64_t val = 0;
+
+ mtx_lock(&zone->glob->lock);
+ if (attr == &ttm_mem_sys)
+ val = zone->zone_mem;
+ else if (attr == &ttm_mem_emer)
+ val = zone->emer_mem;
+ else if (attr == &ttm_mem_max)
+ val = zone->max_mem;
+ else if (attr == &ttm_mem_swap)
+ val = zone->swap_limit;
+ else if (attr == &ttm_mem_used)
+ val = zone->used_mem;
+ mtx_unlock(&zone->glob->lock);
+
+ return snprintf(buffer, PAGE_SIZE, "%llu\n",
+ (unsigned long long) val >> 10);
+}
+#endif
+
+static void ttm_check_swapping(struct ttm_mem_global *glob);
+
+#if 0
+/* XXXKIB sysctl */
+static ssize_t ttm_mem_zone_store(struct ttm_mem_zone *zone,
+ struct attribute *attr,
+ const char *buffer,
+ size_t size)
+{
+ int chars;
+ unsigned long val;
+ uint64_t val64;
+
+ chars = sscanf(buffer, "%lu", &val);
+ if (chars == 0)
+ return size;
+
+ val64 = val;
+ val64 <<= 10;
+
+ mtx_lock(&zone->glob->lock);
+ if (val64 > zone->zone_mem)
+ val64 = zone->zone_mem;
+ if (attr == &ttm_mem_emer) {
+ zone->emer_mem = val64;
+ if (zone->max_mem > val64)
+ zone->max_mem = val64;
+ } else if (attr == &ttm_mem_max) {
+ zone->max_mem = val64;
+ if (zone->emer_mem < val64)
+ zone->emer_mem = val64;
+ } else if (attr == &ttm_mem_swap)
+ zone->swap_limit = val64;
+ mtx_unlock(&zone->glob->lock);
+
+ ttm_check_swapping(zone->glob);
+
+ return size;
+}
+#endif
+
+static void ttm_mem_global_kobj_release(struct ttm_mem_global *glob)
+{
+
+ free(glob, M_TTM_ZONE);
+}
+
+static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob,
+ bool from_wq, uint64_t extra)
+{
+ unsigned int i;
+ struct ttm_mem_zone *zone;
+ uint64_t target;
+
+ for (i = 0; i < glob->num_zones; ++i) {
+ zone = glob->zones[i];
+
+ if (from_wq)
+ target = zone->swap_limit;
+ else if (priv_check(curthread, PRIV_VM_MLOCK) == 0)
+ target = zone->emer_mem;
+ else
+ target = zone->max_mem;
+
+ target = (extra > target) ? 0ULL : target;
+
+ if (zone->used_mem > target)
+ return true;
+ }
+ return false;
+}
+
+/**
+ * At this point we only support a single shrink callback.
+ * Extend this if needed, perhaps using a linked list of callbacks.
+ * Note that this function is reentrant:
+ * many threads may try to swap out at any given time.
+ */
+
+static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
+ uint64_t extra)
+{
+ int ret;
+ struct ttm_mem_shrink *shrink;
+
+ mtx_lock(&glob->lock);
+ if (glob->shrink == NULL)
+ goto out;
+
+ while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
+ shrink = glob->shrink;
+ mtx_unlock(&glob->lock);
+ ret = shrink->do_shrink(shrink);
+ mtx_lock(&glob->lock);
+ if (unlikely(ret != 0))
+ goto out;
+ }
+out:
+ mtx_unlock(&glob->lock);
+}
+
+
+
+static void ttm_shrink_work(void *arg, int pending __unused)
+{
+ struct ttm_mem_global *glob = arg;
+
+ ttm_shrink(glob, true, 0ULL);
+}
+
+static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
+ uint64_t mem)
+{
+ struct ttm_mem_zone *zone;
+
+ zone = malloc(sizeof(*zone), M_TTM_ZONE, M_WAITOK | M_ZERO);
+
+ zone->name = "kernel";
+ zone->zone_mem = mem;
+ zone->max_mem = mem >> 1;
+ zone->emer_mem = (mem >> 1) + (mem >> 2);
+ zone->swap_limit = zone->max_mem - (mem >> 3);
+ zone->used_mem = 0;
+ zone->glob = glob;
+ glob->zone_kernel = zone;
+ refcount_init(&zone->kobj_ref, 1);
+ glob->zones[glob->num_zones++] = zone;
+ return 0;
+}
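+
+/*
+ * For example, with mem = 1 GiB the kernel zone above gets
+ * max_mem = 512 MiB (mem >> 1), emer_mem = 768 MiB
+ * ((mem >> 1) + (mem >> 2)) and swap_limit = 384 MiB
+ * (max_mem - (mem >> 3)).
+ */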
+
+static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
+ uint64_t mem)
+{
+ struct ttm_mem_zone *zone;
+
+ zone = malloc(sizeof(*zone), M_TTM_ZONE, M_WAITOK | M_ZERO);
+
+ /**
+ * No special dma32 zone needed.
+ */
+
+ if (mem <= ((uint64_t) 1ULL << 32)) {
+ free(zone, M_TTM_ZONE);
+ return 0;
+ }
+
+ /*
+ * Limit max dma32 memory to 4GB for now
+ * until we can figure out how big this
+ * zone really is.
+ */
+
+ mem = ((uint64_t) 1ULL << 32);
+ zone->name = "dma32";
+ zone->zone_mem = mem;
+ zone->max_mem = mem >> 1;
+ zone->emer_mem = (mem >> 1) + (mem >> 2);
+ zone->swap_limit = zone->max_mem - (mem >> 3);
+ zone->used_mem = 0;
+ zone->glob = glob;
+ glob->zone_dma32 = zone;
+ refcount_init(&zone->kobj_ref, 1);
+ glob->zones[glob->num_zones++] = zone;
+ return 0;
+}
+
+int ttm_mem_global_init(struct ttm_mem_global *glob)
+{
+	uint64_t mem;
+ int ret;
+ int i;
+ struct ttm_mem_zone *zone;
+
+ mtx_init(&glob->lock, "ttmgz", NULL, MTX_DEF);
+ glob->swap_queue = taskqueue_create("ttm_swap", M_WAITOK,
+ taskqueue_thread_enqueue, &glob->swap_queue);
+ taskqueue_start_threads(&glob->swap_queue, 1, PVM, "ttm swap");
+ TASK_INIT(&glob->work, 0, ttm_shrink_work, glob);
+
+ refcount_init(&glob->kobj_ref, 1);
+
+ mem = physmem * PAGE_SIZE;
+
+ ret = ttm_mem_init_kernel_zone(glob, mem);
+ if (unlikely(ret != 0))
+ goto out_no_zone;
+ ret = ttm_mem_init_dma32_zone(glob, mem);
+ if (unlikely(ret != 0))
+ goto out_no_zone;
+ for (i = 0; i < glob->num_zones; ++i) {
+ zone = glob->zones[i];
+ printf("[TTM] Zone %7s: Available graphics memory: %llu kiB\n",
+ zone->name, (unsigned long long)zone->max_mem >> 10);
+ }
+ ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
+ ttm_dma_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
+ return 0;
+out_no_zone:
+ ttm_mem_global_release(glob);
+ return ret;
+}
+
+void ttm_mem_global_release(struct ttm_mem_global *glob)
+{
+ unsigned int i;
+ struct ttm_mem_zone *zone;
+
+ /* let the page allocator first stop the shrink work. */
+ ttm_page_alloc_fini();
+ ttm_dma_page_alloc_fini();
+
+ taskqueue_drain(glob->swap_queue, &glob->work);
+ taskqueue_free(glob->swap_queue);
+ glob->swap_queue = NULL;
+ for (i = 0; i < glob->num_zones; ++i) {
+ zone = glob->zones[i];
+ if (refcount_release(&zone->kobj_ref))
+ ttm_mem_zone_kobj_release(zone);
+ }
+ if (refcount_release(&glob->kobj_ref))
+ ttm_mem_global_kobj_release(glob);
+}
+
+static void ttm_check_swapping(struct ttm_mem_global *glob)
+{
+ bool needs_swapping = false;
+ unsigned int i;
+ struct ttm_mem_zone *zone;
+
+ mtx_lock(&glob->lock);
+ for (i = 0; i < glob->num_zones; ++i) {
+ zone = glob->zones[i];
+ if (zone->used_mem > zone->swap_limit) {
+ needs_swapping = true;
+ break;
+ }
+ }
+
+ mtx_unlock(&glob->lock);
+
+ if (unlikely(needs_swapping))
+ taskqueue_enqueue(glob->swap_queue, &glob->work);
+
+}
+
+static void ttm_mem_global_free_zone(struct ttm_mem_global *glob,
+ struct ttm_mem_zone *single_zone,
+ uint64_t amount)
+{
+ unsigned int i;
+ struct ttm_mem_zone *zone;
+
+ mtx_lock(&glob->lock);
+ for (i = 0; i < glob->num_zones; ++i) {
+ zone = glob->zones[i];
+ if (single_zone && zone != single_zone)
+ continue;
+ zone->used_mem -= amount;
+ }
+ mtx_unlock(&glob->lock);
+}
+
+void ttm_mem_global_free(struct ttm_mem_global *glob,
+ uint64_t amount)
+{
+ return ttm_mem_global_free_zone(glob, NULL, amount);
+}
+
+static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
+ struct ttm_mem_zone *single_zone,
+ uint64_t amount, bool reserve)
+{
+ uint64_t limit;
+ int ret = -ENOMEM;
+ unsigned int i;
+ struct ttm_mem_zone *zone;
+
+ mtx_lock(&glob->lock);
+ for (i = 0; i < glob->num_zones; ++i) {
+ zone = glob->zones[i];
+ if (single_zone && zone != single_zone)
+ continue;
+
+ limit = (priv_check(curthread, PRIV_VM_MLOCK) == 0) ?
+ zone->emer_mem : zone->max_mem;
+
+ if (zone->used_mem > limit)
+ goto out_unlock;
+ }
+
+ if (reserve) {
+ for (i = 0; i < glob->num_zones; ++i) {
+ zone = glob->zones[i];
+ if (single_zone && zone != single_zone)
+ continue;
+ zone->used_mem += amount;
+ }
+ }
+
+ ret = 0;
+out_unlock:
+ mtx_unlock(&glob->lock);
+ ttm_check_swapping(glob);
+
+ return ret;
+}
+
+
+static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob,
+ struct ttm_mem_zone *single_zone,
+ uint64_t memory,
+ bool no_wait, bool interruptible)
+{
+ int count = TTM_MEMORY_ALLOC_RETRIES;
+
+ while (unlikely(ttm_mem_global_reserve(glob,
+ single_zone,
+ memory, true)
+ != 0)) {
+ if (no_wait)
+ return -ENOMEM;
+ if (unlikely(count-- == 0))
+ return -ENOMEM;
+ ttm_shrink(glob, false, memory + (memory >> 2) + 16);
+ }
+
+ return 0;
+}
+
+int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
+ bool no_wait, bool interruptible)
+{
+ /**
+ * Normal allocations of kernel memory are registered in
+ * all zones.
+ */
+
+ return ttm_mem_global_alloc_zone(glob, NULL, memory, no_wait,
+ interruptible);
+}
+
+#define page_to_pfn(pp) OFF_TO_IDX(VM_PAGE_TO_PHYS(pp))
+
+int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
+ struct vm_page *page,
+ bool no_wait, bool interruptible)
+{
+
+ struct ttm_mem_zone *zone = NULL;
+
+ /**
+	 * Page allocations may be registered in a single zone
+	 * only if the page is highmem or lies outside the dma32 zone.
+ */
+
+ if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
+ zone = glob->zone_kernel;
+ return ttm_mem_global_alloc_zone(glob, zone, PAGE_SIZE, no_wait,
+ interruptible);
+}
+
+void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct vm_page *page)
+{
+ struct ttm_mem_zone *zone = NULL;
+
+ if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
+ zone = glob->zone_kernel;
+ ttm_mem_global_free_zone(glob, zone, PAGE_SIZE);
+}
+
+
+size_t ttm_round_pot(size_t size)
+{
+ if ((size & (size - 1)) == 0)
+ return size;
+ else if (size > PAGE_SIZE)
+ return PAGE_ALIGN(size);
+ else {
+ size_t tmp_size = 4;
+
+ while (tmp_size < size)
+ tmp_size <<= 1;
+
+ return tmp_size;
+ }
+ return 0;
+}
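+
+/*
+ * For instance, ttm_round_pot(3) == 4 and ttm_round_pot(4096) == 4096,
+ * while on 4 KiB pages ttm_round_pot(8193) == 12288: sizes above
+ * PAGE_SIZE round up to a page boundary, not to a power of two.
+ */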
diff --git a/sys/dev/drm2/ttm/ttm_memory.h b/sys/dev/drm2/ttm/ttm_memory.h
new file mode 100644
index 0000000..53263c6
--- /dev/null
+++ b/sys/dev/drm2/ttm/ttm_memory.h
@@ -0,0 +1,149 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/* $FreeBSD$ */
+
+#ifndef TTM_MEMORY_H
+#define TTM_MEMORY_H
+
+/**
+ * struct ttm_mem_shrink - callback to shrink TTM memory usage.
+ *
+ * @do_shrink: The callback function.
+ *
+ * Arguments to the do_shrink functions are intended to be passed using
+ * inheritance. That is, the argument class derives from struct ttm_mem_shrink,
+ * and can be accessed using container_of().
+ */
+
+struct ttm_mem_shrink {
+ int (*do_shrink) (struct ttm_mem_shrink *);
+};
+
+/**
+ * struct ttm_mem_global - Global memory accounting structure.
+ *
+ * @shrink: A single callback to shrink TTM memory usage. Extend this
+ * to a linked list to be able to handle multiple callbacks when needed.
+ * @swap_queue: A workqueue to handle shrinking in low memory situations. We
+ * need a separate workqueue since it will spend a lot of time waiting
+ * for the GPU, and this will otherwise block other workqueue tasks(?)
+ * At this point we use only a single-threaded workqueue.
+ * @work: The workqueue callback for the shrink queue.
+ * @lock: Lock to protect the @shrink - and the memory accounting members,
+ * that is, essentially the whole structure with some exceptions.
+ * @zones: Array of pointers to accounting zones.
+ * @num_zones: Number of populated entries in the @zones array.
+ * @zone_kernel: Pointer to the kernel zone.
+ * @zone_dma32: Pointer to the dma32 zone if there is one.
+ *
+ * Note that this structure is not per device. It should be global for all
+ * graphics devices.
+ */
+
+#define TTM_MEM_MAX_ZONES 2
+struct ttm_mem_zone;
+struct ttm_mem_global {
+ u_int kobj_ref;
+ struct ttm_mem_shrink *shrink;
+ struct taskqueue *swap_queue;
+ struct task work;
+ struct mtx lock;
+ struct ttm_mem_zone *zones[TTM_MEM_MAX_ZONES];
+ unsigned int num_zones;
+ struct ttm_mem_zone *zone_kernel;
+ struct ttm_mem_zone *zone_dma32;
+};
+
+/**
+ * ttm_mem_init_shrink - initialize a struct ttm_mem_shrink object
+ *
+ * @shrink: The object to initialize.
+ * @func: The callback function.
+ */
+
+static inline void ttm_mem_init_shrink(struct ttm_mem_shrink *shrink,
+ int (*func) (struct ttm_mem_shrink *))
+{
+ shrink->do_shrink = func;
+}
+
+/**
+ * ttm_mem_register_shrink - register a struct ttm_mem_shrink object.
+ *
+ * @glob: The struct ttm_mem_global object to register with.
+ * @shrink: An initialized struct ttm_mem_shrink object to register.
+ *
+ * Returns:
+ * -EBUSY: There's already a callback registered. (May change).
+ */
+
+static inline int ttm_mem_register_shrink(struct ttm_mem_global *glob,
+ struct ttm_mem_shrink *shrink)
+{
+ mtx_lock(&glob->lock);
+ if (glob->shrink != NULL) {
+ mtx_unlock(&glob->lock);
+ return -EBUSY;
+ }
+ glob->shrink = shrink;
+ mtx_unlock(&glob->lock);
+ return 0;
+}
+
+/**
+ * ttm_mem_unregister_shrink - unregister a struct ttm_mem_shrink object.
+ *
+ * @glob: The struct ttm_mem_global object to unregister from.
+ * @shrink: A previously registered struct ttm_mem_shrink object.
+ *
+ */
+
+static inline void ttm_mem_unregister_shrink(struct ttm_mem_global *glob,
+ struct ttm_mem_shrink *shrink)
+{
+ mtx_lock(&glob->lock);
+ MPASS(glob->shrink == shrink);
+ glob->shrink = NULL;
+ mtx_unlock(&glob->lock);
+}
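+
+/*
+ * Illustrative sketch of the container_of() inheritance described for
+ * struct ttm_mem_shrink above; all "mydrv" names are hypothetical.
+ */
+#if 0
+struct mydrv_shrink {
+	struct ttm_mem_shrink shrink;
+	struct mydrv_softc *sc;
+};
+
+static int
+mydrv_do_shrink(struct ttm_mem_shrink *shrink)
+{
+	struct mydrv_shrink *ms;
+
+	ms = container_of(shrink, struct mydrv_shrink, shrink);
+	/* ... release GPU memory accounted to ms->sc ... */
+	return (0);
+}
+
+/* At attach time (inside a function): */
+	ttm_mem_init_shrink(&ms->shrink, mydrv_do_shrink);
+	error = ttm_mem_register_shrink(glob, &ms->shrink);
+/* At detach time: */
+	ttm_mem_unregister_shrink(glob, &ms->shrink);
+#endif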
+
+struct vm_page;
+
+extern int ttm_mem_global_init(struct ttm_mem_global *glob);
+extern void ttm_mem_global_release(struct ttm_mem_global *glob);
+extern int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
+ bool no_wait, bool interruptible);
+extern void ttm_mem_global_free(struct ttm_mem_global *glob,
+ uint64_t amount);
+extern int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
+ struct vm_page *page,
+ bool no_wait, bool interruptible);
+extern void ttm_mem_global_free_page(struct ttm_mem_global *glob,
+ struct vm_page *page);
+extern size_t ttm_round_pot(size_t size);
+#endif
diff --git a/sys/dev/drm2/ttm/ttm_module.h b/sys/dev/drm2/ttm/ttm_module.h
new file mode 100644
index 0000000..b1ce2bc
--- /dev/null
+++ b/sys/dev/drm2/ttm/ttm_module.h
@@ -0,0 +1,37 @@
+/**************************************************************************
+ *
+ * Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+/* $FreeBSD$ */
+
+#ifndef _TTM_MODULE_H_
+#define _TTM_MODULE_H_
+
+#define TTM_PFX "[TTM] "
+
+#endif /* _TTM_MODULE_H_ */
diff --git a/sys/dev/drm2/ttm/ttm_object.c b/sys/dev/drm2/ttm/ttm_object.c
new file mode 100644
index 0000000..fd48044
--- /dev/null
+++ b/sys/dev/drm2/ttm/ttm_object.c
@@ -0,0 +1,455 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+/** @file ttm_object.c
+ *
+ * Base- and reference object implementation for the various
+ * ttm objects. Implements reference counting, minimal security checks
+ * and release on file close.
+ */
+
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/**
+ * struct ttm_object_file
+ *
+ * @tdev: Pointer to the ttm_object_device.
+ *
+ * @lock: Lock that protects the ref_list list and the
+ * ref_hash hash tables.
+ *
+ * @ref_list: List of ttm_ref_objects to be destroyed at
+ * file release.
+ *
+ * @ref_hash: Hash tables of ref objects, one per ttm_ref_type,
+ * for fast lookup of ref objects given a base object.
+ */
+
+#define pr_fmt(fmt) "[TTM] " fmt
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm.h>
+#include <sys/rwlock.h>
+#include <dev/drm2/ttm/ttm_object.h>
+#include <dev/drm2/ttm/ttm_module.h>
+
+struct ttm_object_file {
+ struct ttm_object_device *tdev;
+ struct rwlock lock;
+ struct list_head ref_list;
+ struct drm_open_hash ref_hash[TTM_REF_NUM];
+ u_int refcount;
+};
+
+/**
+ * struct ttm_object_device
+ *
+ * @object_lock: lock that protects the object_hash hash table.
+ *
+ * @object_hash: hash table for fast lookup of object global names.
+ *
+ * @object_count: Per device object count.
+ *
+ * This is the per-device data structure needed for ttm object management.
+ */
+
+struct ttm_object_device {
+ struct rwlock object_lock;
+ struct drm_open_hash object_hash;
+ atomic_t object_count;
+ struct ttm_mem_global *mem_glob;
+};
+
+/**
+ * struct ttm_ref_object
+ *
+ * @hash: Hash entry for the per-file object reference hash.
+ *
+ * @head: List entry for the per-file list of ref-objects.
+ *
+ * @kref: Ref count.
+ *
+ * @obj: Base object this ref object is referencing.
+ *
+ * @ref_type: Type of ref object.
+ *
+ * This is similar to an idr object, but it also has a hash table entry
+ * that allows lookup with a pointer to the referenced object as a key. In
+ * that way, one can easily detect whether a base object is referenced by
+ * a particular ttm_object_file. It also carries a ref count to avoid creating
+ * multiple ref objects if a ttm_object_file references the same base
+ * object more than once.
+ */
+
+struct ttm_ref_object {
+ struct drm_hash_item hash;
+ struct list_head head;
+ u_int kref;
+ enum ttm_ref_type ref_type;
+ struct ttm_base_object *obj;
+ struct ttm_object_file *tfile;
+};
+
+MALLOC_DEFINE(M_TTM_OBJ_FILE, "ttm_obj_file", "TTM File Objects");
+
+static inline struct ttm_object_file *
+ttm_object_file_ref(struct ttm_object_file *tfile)
+{
+ refcount_acquire(&tfile->refcount);
+ return tfile;
+}
+
+static void ttm_object_file_destroy(struct ttm_object_file *tfile)
+{
+
+ free(tfile, M_TTM_OBJ_FILE);
+}
+
+
+static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile)
+{
+ struct ttm_object_file *tfile = *p_tfile;
+
+ *p_tfile = NULL;
+ if (refcount_release(&tfile->refcount))
+ ttm_object_file_destroy(tfile);
+}
+
+
+int ttm_base_object_init(struct ttm_object_file *tfile,
+ struct ttm_base_object *base,
+ bool shareable,
+ enum ttm_object_type object_type,
+ void (*rcount_release) (struct ttm_base_object **),
+ void (*ref_obj_release) (struct ttm_base_object *,
+ enum ttm_ref_type ref_type))
+{
+ struct ttm_object_device *tdev = tfile->tdev;
+ int ret;
+
+ base->shareable = shareable;
+ base->tfile = ttm_object_file_ref(tfile);
+ base->refcount_release = rcount_release;
+ base->ref_obj_release = ref_obj_release;
+ base->object_type = object_type;
+	refcount_init(&base->refcount, 1);
+	/*
+	 * Note: tdev->object_lock is initialized once in
+	 * ttm_object_device_init(); re-initializing it here for every
+	 * base object would corrupt a lock other threads may hold.
+	 */
+	rw_wlock(&tdev->object_lock);
+ ret = drm_ht_just_insert_please(&tdev->object_hash,
+ &base->hash,
+ (unsigned long)base, 31, 0, 0);
+ rw_wunlock(&tdev->object_lock);
+ if (unlikely(ret != 0))
+ goto out_err0;
+
+ ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
+ if (unlikely(ret != 0))
+ goto out_err1;
+
+ ttm_base_object_unref(&base);
+
+ return 0;
+out_err1:
+ rw_wlock(&tdev->object_lock);
+ (void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
+ rw_wunlock(&tdev->object_lock);
+out_err0:
+ return ret;
+}
+
+static void ttm_release_base(struct ttm_base_object *base)
+{
+ struct ttm_object_device *tdev = base->tfile->tdev;
+
+ (void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
+ rw_wunlock(&tdev->object_lock);
+ /*
+ * Note: We don't use synchronize_rcu() here because it's far
+ * too slow. It's up to the user to free the object using
+ * call_rcu() or ttm_base_object_kfree().
+ */
+
+ if (base->refcount_release) {
+ ttm_object_file_unref(&base->tfile);
+ base->refcount_release(&base);
+ }
+ rw_wlock(&tdev->object_lock);
+}
+
+void ttm_base_object_unref(struct ttm_base_object **p_base)
+{
+ struct ttm_base_object *base = *p_base;
+ struct ttm_object_device *tdev = base->tfile->tdev;
+
+ *p_base = NULL;
+
+ /*
+ * Need to take the lock here to avoid racing with
+ * users trying to look up the object.
+ */
+
+ rw_wlock(&tdev->object_lock);
+ if (refcount_release(&base->refcount))
+ ttm_release_base(base);
+ rw_wunlock(&tdev->object_lock);
+}
+
+struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
+ uint32_t key)
+{
+ struct ttm_object_device *tdev = tfile->tdev;
+ struct ttm_base_object *base;
+ struct drm_hash_item *hash;
+ int ret;
+
+ rw_rlock(&tdev->object_lock);
+ ret = drm_ht_find_item(&tdev->object_hash, key, &hash);
+
+ if (ret == 0) {
+ base = drm_hash_entry(hash, struct ttm_base_object, hash);
+ refcount_acquire(&base->refcount);
+ }
+ rw_runlock(&tdev->object_lock);
+
+ if (unlikely(ret != 0))
+ return NULL;
+
+ if (tfile != base->tfile && !base->shareable) {
+ printf("[TTM] Attempted access of non-shareable object %p\n",
+ base);
+ ttm_base_object_unref(&base);
+ return NULL;
+ }
+
+ return base;
+}
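+
+/*
+ * Illustrative caller sketch: a successful lookup returns the object
+ * with its refcount raised, so it must be paired with
+ * ttm_base_object_unref().  "handle" is a hypothetical user-supplied key.
+ */
+#if 0
+	struct ttm_base_object *base;
+
+	base = ttm_base_object_lookup(tfile, handle);
+	if (base == NULL)
+		return (-EINVAL);	/* unknown key or not shareable */
+	/* ... use the object ... */
+	ttm_base_object_unref(&base);	/* also NULLs the pointer */
+#endif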
+
+MALLOC_DEFINE(M_TTM_OBJ_REF, "ttm_obj_ref", "TTM Ref Objects");
+
+int ttm_ref_object_add(struct ttm_object_file *tfile,
+ struct ttm_base_object *base,
+ enum ttm_ref_type ref_type, bool *existed)
+{
+ struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
+ struct ttm_ref_object *ref;
+ struct drm_hash_item *hash;
+ struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
+ int ret = -EINVAL;
+
+ if (existed != NULL)
+ *existed = true;
+
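+	/*
+	 * -EINVAL doubles as the retry condition: if another thread
+	 * inserts an identical ref after the read-locked lookup fails
+	 * but before the write-locked insert, drm_ht_insert_item()
+	 * fails and the loop restarts, finding the existing ref.
+	 */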
+ while (ret == -EINVAL) {
+ rw_rlock(&tfile->lock);
+ ret = drm_ht_find_item(ht, base->hash.key, &hash);
+
+ if (ret == 0) {
+ ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
+ refcount_acquire(&ref->kref);
+ rw_runlock(&tfile->lock);
+ break;
+ }
+
+ rw_runlock(&tfile->lock);
+ ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
+ false, false);
+ if (unlikely(ret != 0))
+ return ret;
+ ref = malloc(sizeof(*ref), M_TTM_OBJ_REF, M_WAITOK);
+ if (unlikely(ref == NULL)) {
+ ttm_mem_global_free(mem_glob, sizeof(*ref));
+ return -ENOMEM;
+ }
+
+ ref->hash.key = base->hash.key;
+ ref->obj = base;
+ ref->tfile = tfile;
+ ref->ref_type = ref_type;
+ refcount_init(&ref->kref, 1);
+
+ rw_wlock(&tfile->lock);
+ ret = drm_ht_insert_item(ht, &ref->hash);
+
+ if (ret == 0) {
+ list_add_tail(&ref->head, &tfile->ref_list);
+ refcount_acquire(&base->refcount);
+ rw_wunlock(&tfile->lock);
+ if (existed != NULL)
+ *existed = false;
+ break;
+ }
+
+ rw_wunlock(&tfile->lock);
+ MPASS(ret == -EINVAL);
+
+ ttm_mem_global_free(mem_glob, sizeof(*ref));
+ free(ref, M_TTM_OBJ_REF);
+ }
+
+ return ret;
+}
+
+static void ttm_ref_object_release(struct ttm_ref_object *ref)
+{
+ struct ttm_base_object *base = ref->obj;
+ struct ttm_object_file *tfile = ref->tfile;
+ struct drm_open_hash *ht;
+ struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
+
+ ht = &tfile->ref_hash[ref->ref_type];
+ (void)drm_ht_remove_item(ht, &ref->hash);
+ list_del(&ref->head);
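+	/*
+	 * The file lock is dropped across the unref and free below:
+	 * ttm_base_object_unref() takes the device object_lock and may
+	 * call back into the driver's release hooks.
+	 */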
+ rw_wunlock(&tfile->lock);
+
+ if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
+ base->ref_obj_release(base, ref->ref_type);
+
+ ttm_base_object_unref(&ref->obj);
+ ttm_mem_global_free(mem_glob, sizeof(*ref));
+ free(ref, M_TTM_OBJ_REF);
+ rw_wlock(&tfile->lock);
+}
+
+int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
+ unsigned long key, enum ttm_ref_type ref_type)
+{
+ struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
+ struct ttm_ref_object *ref;
+ struct drm_hash_item *hash;
+ int ret;
+
+ rw_wlock(&tfile->lock);
+ ret = drm_ht_find_item(ht, key, &hash);
+ if (unlikely(ret != 0)) {
+ rw_wunlock(&tfile->lock);
+ return -EINVAL;
+ }
+ ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
+ if (refcount_release(&ref->kref))
+ ttm_ref_object_release(ref);
+ rw_wunlock(&tfile->lock);
+ return 0;
+}
+
+void ttm_object_file_release(struct ttm_object_file **p_tfile)
+{
+ struct ttm_ref_object *ref;
+ struct list_head *list;
+ unsigned int i;
+ struct ttm_object_file *tfile = *p_tfile;
+
+ *p_tfile = NULL;
+ rw_wlock(&tfile->lock);
+
+ /*
+ * Since we release the lock within the loop, we have to
+ * restart it from the beginning each time.
+ */
+
+ while (!list_empty(&tfile->ref_list)) {
+ list = tfile->ref_list.next;
+ ref = list_entry(list, struct ttm_ref_object, head);
+ ttm_ref_object_release(ref);
+ }
+
+ for (i = 0; i < TTM_REF_NUM; ++i)
+ drm_ht_remove(&tfile->ref_hash[i]);
+
+ rw_wunlock(&tfile->lock);
+ ttm_object_file_unref(&tfile);
+}
+
+struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
+ unsigned int hash_order)
+{
+ struct ttm_object_file *tfile;
+ unsigned int i;
+ unsigned int j = 0;
+ int ret;
+
+ tfile = malloc(sizeof(*tfile), M_TTM_OBJ_FILE, M_WAITOK);
+ rw_init(&tfile->lock, "ttmfo");
+ tfile->tdev = tdev;
+ refcount_init(&tfile->refcount, 1);
+ INIT_LIST_HEAD(&tfile->ref_list);
+
+ for (i = 0; i < TTM_REF_NUM; ++i) {
+ ret = drm_ht_create(&tfile->ref_hash[i], hash_order);
+ if (ret) {
+ j = i;
+ goto out_err;
+ }
+ }
+
+ return tfile;
+out_err:
+ for (i = 0; i < j; ++i)
+ drm_ht_remove(&tfile->ref_hash[i]);
+
+ free(tfile, M_TTM_OBJ_FILE);
+
+ return NULL;
+}
+
+MALLOC_DEFINE(M_TTM_OBJ_DEV, "ttm_obj_dev", "TTM Device Objects");
+
+struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global
+ *mem_glob,
+ unsigned int hash_order)
+{
+ struct ttm_object_device *tdev;
+ int ret;
+
+ tdev = malloc(sizeof(*tdev), M_TTM_OBJ_DEV, M_WAITOK);
+ tdev->mem_glob = mem_glob;
+ rw_init(&tdev->object_lock, "ttmdo");
+ atomic_set(&tdev->object_count, 0);
+ ret = drm_ht_create(&tdev->object_hash, hash_order);
+
+ if (ret == 0)
+ return tdev;
+
+ free(tdev, M_TTM_OBJ_DEV);
+ return NULL;
+}
+
+void ttm_object_device_release(struct ttm_object_device **p_tdev)
+{
+ struct ttm_object_device *tdev = *p_tdev;
+
+ *p_tdev = NULL;
+
+ rw_wlock(&tdev->object_lock);
+ drm_ht_remove(&tdev->object_hash);
+ rw_wunlock(&tdev->object_lock);
+
+ free(tdev, M_TTM_OBJ_DEV);
+}
diff --git a/sys/dev/drm2/ttm/ttm_object.h b/sys/dev/drm2/ttm/ttm_object.h
new file mode 100644
index 0000000..8a286f1
--- /dev/null
+++ b/sys/dev/drm2/ttm/ttm_object.h
@@ -0,0 +1,271 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+/* $FreeBSD$ */
+/** @file ttm_object.h
+ *
+ * Base- and reference object implementation for the various
+ * ttm objects. Implements reference counting, minimal security checks
+ * and release on file close.
+ */
+
+#ifndef _TTM_OBJECT_H_
+#define _TTM_OBJECT_H_
+
+#include <dev/drm2/drm_hashtab.h>
+#include <dev/drm2/ttm/ttm_memory.h>
+
+/**
+ * enum ttm_ref_type
+ *
+ * Describes what type of reference a ref object holds.
+ *
+ * TTM_REF_USAGE is a simple refcount on a base object.
+ *
+ * TTM_REF_SYNCCPU_READ is a SYNCCPU_READ reference on a
+ * buffer object.
+ *
+ * TTM_REF_SYNCCPU_WRITE is a SYNCCPU_WRITE reference on a
+ * buffer object.
+ *
+ */
+
+enum ttm_ref_type {
+ TTM_REF_USAGE,
+ TTM_REF_SYNCCPU_READ,
+ TTM_REF_SYNCCPU_WRITE,
+ TTM_REF_NUM
+};
+
+/**
+ * enum ttm_object_type
+ *
+ * One entry per ttm object type.
+ * Device-specific types should use the
+ * ttm_driver_type<n> types.
+ */
+
+enum ttm_object_type {
+ ttm_fence_type,
+ ttm_buffer_type,
+ ttm_lock_type,
+ ttm_driver_type0 = 256,
+ ttm_driver_type1,
+ ttm_driver_type2,
+ ttm_driver_type3,
+ ttm_driver_type4,
+ ttm_driver_type5
+};
+
+struct ttm_object_file;
+struct ttm_object_device;
+
+/**
+ * struct ttm_base_object
+ *
+ * @hash: hash entry for the per-device object hash.
+ * @type: derived type this object is base class for.
+ * @shareable: Other ttm_object_files can access this object.
+ *
+ * @tfile: Pointer to ttm_object_file of the creator.
+ * NULL if the object was not created by a user request.
+ * (kernel object).
+ *
+ * @refcount: Number of references to this object, not
+ * including the hash entry. A reference to a base object can
+ * only be held by a ref object.
+ *
+ * @refcount_release: A function to be called when there are
+ * no more references to this object. This function should
+ * destroy the object (or make sure destruction eventually happens),
+ * and when it is called, the object has
+ * already been taken out of the per-device hash. The parameter
+ * "base" should be set to NULL by the function.
+ *
+ * @ref_obj_release: A function to be called when a reference object
+ * with another ttm_ref_type than TTM_REF_USAGE is deleted.
+ * This function may, for example, release a lock held by a user-space
+ * process.
+ *
+ * This struct is intended to be used as a base struct for objects that
+ * are visible to user-space. It provides a global name, race-safe
+ * access and refcounting, minimal access control and hooks for unref actions.
+ */
+
+struct ttm_base_object {
+ /* struct rcu_head rhead;XXXKIB */
+ struct drm_hash_item hash;
+ enum ttm_object_type object_type;
+ bool shareable;
+ struct ttm_object_file *tfile;
+ u_int refcount;
+ void (*refcount_release) (struct ttm_base_object **base);
+ void (*ref_obj_release) (struct ttm_base_object *base,
+ enum ttm_ref_type ref_type);
+};
+
+/**
+ * ttm_base_object_init
+ *
+ * @tfile: Pointer to a struct ttm_object_file.
+ * @base: The struct ttm_base_object to initialize.
+ * @shareable: This object is shareable with other applications.
+ * (different @tfile pointers.)
+ * @type: The object type.
+ * @refcount_release: See the struct ttm_base_object description.
+ * @ref_obj_release: See the struct ttm_base_object description.
+ *
+ * Initializes a struct ttm_base_object.
+ */
+
+extern int ttm_base_object_init(struct ttm_object_file *tfile,
+ struct ttm_base_object *base,
+ bool shareable,
+ enum ttm_object_type type,
+ void (*refcount_release) (struct ttm_base_object
+ **),
+ void (*ref_obj_release) (struct ttm_base_object
+ *,
+ enum ttm_ref_type
+ ref_type));
+
+/**
+ * ttm_base_object_lookup
+ *
+ * @tfile: Pointer to a struct ttm_object_file.
+ * @key: Hash key
+ *
+ * Looks up a struct ttm_base_object with the key @key.
+ * Also verifies that the object is visible to the application, by
+ * comparing the @tfile argument and checking the object shareable flag.
+ */
+
+extern struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file
+ *tfile, uint32_t key);
+
+/**
+ * ttm_base_object_unref
+ *
+ * @p_base: Pointer to a pointer referencing a struct ttm_base_object.
+ *
+ * Decrements the base object refcount and clears the pointer pointed to by
+ * p_base.
+ */
+
+extern void ttm_base_object_unref(struct ttm_base_object **p_base);
+
+/**
+ * ttm_ref_object_add.
+ *
+ * @tfile: A struct ttm_object_file representing the application owning the
+ * ref_object.
+ * @base: The base object to reference.
+ * @ref_type: The type of reference.
+ * @existed: Upon completion, indicates that an identical reference object
+ * already existed, and the refcount was upped on that object instead.
+ *
+ * Adding a ref object to a base object is basically like referencing the
+ * base object, but a user-space application holds the reference. When the
+ * file corresponding to @tfile is closed, all its reference objects are
+ * deleted. A reference object can have different types depending on what
+ * it's intended for. It can be a refcount that prevents object destruction;
+ * when user-space takes a lock, it can add a ref object to that lock to
+ * make sure the lock is released if the application dies. A ref object
+ * will hold a single reference on a base object.
+ */
+extern int ttm_ref_object_add(struct ttm_object_file *tfile,
+ struct ttm_base_object *base,
+ enum ttm_ref_type ref_type, bool *existed);
+/**
+ * ttm_ref_object_base_unref
+ *
+ * @key: Key representing the base object.
+ * @ref_type: Ref type of the ref object to be dereferenced.
+ *
+ * Unreference a ref object with type @ref_type
+ * on the base object identified by @key. If there are no duplicate
+ * references, the ref object will be destroyed and the base object
+ * will be unreferenced.
+ */
+extern int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
+ unsigned long key,
+ enum ttm_ref_type ref_type);
+
+/**
+ * ttm_object_file_init - initialize a struct ttm_object_file
+ *
+ * @tdev: A struct ttm_object device this file is initialized on.
+ * @hash_order: Order of the hash table used to hold the reference objects.
+ *
+ * This is typically called by the file_ops::open function.
+ */
+
+extern struct ttm_object_file *ttm_object_file_init(struct ttm_object_device
+ *tdev,
+ unsigned int hash_order);
+
+/**
+ * ttm_object_file_release - release data held by a ttm_object_file
+ *
+ * @p_tfile: Pointer to pointer to the ttm_object_file object to release.
+ * *p_tfile will be set to NULL by this function.
+ *
+ * Releases all data associated with a ttm_object_file.
+ * Typically called from file_ops::release. The caller must
+ * ensure that there are no concurrent users of tfile.
+ */
+
+extern void ttm_object_file_release(struct ttm_object_file **p_tfile);
+
+/**
+ * ttm_object_device_init - initialize a struct ttm_object_device
+ *
+ * @hash_order: Order of hash table used to hash the base objects.
+ *
+ * This function is typically called on device initialization to prepare
+ * data structures needed for ttm base and ref objects.
+ */
+
+extern struct ttm_object_device *ttm_object_device_init
+ (struct ttm_mem_global *mem_glob, unsigned int hash_order);
+
+/**
+ * ttm_object_device_release - release data held by a ttm_object_device
+ *
+ * @p_tdev: Pointer to pointer to the ttm_object_device object to release.
+ * *p_tdev will be set to NULL by this function.
+ *
+ * Releases all data associated with a ttm_object_device.
+ * Typically called from driver::unload before the destruction of the
+ * device private data structure.
+ */
+
+extern void ttm_object_device_release(struct ttm_object_device **p_tdev);
+
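+/*
+ * Illustrative sketch of the typical life cycle of the objects declared
+ * above; "obj" and "mydrv_base_release" are hypothetical driver names.
+ */
+#if 0
+	/* Driver load: */
+	tdev = ttm_object_device_init(mem_glob, 12);
+
+	/* File open: */
+	tfile = ttm_object_file_init(tdev, 10);
+
+	/* Object creation publishes base and adds a TTM_REF_USAGE ref: */
+	ret = ttm_base_object_init(tfile, &obj->base, true,
+	    ttm_driver_type0, &mydrv_base_release, NULL);
+
+	/* File close, then driver unload: */
+	ttm_object_file_release(&tfile);
+	ttm_object_device_release(&tdev);
+#endif
+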
+#endif
diff --git a/sys/dev/drm2/ttm/ttm_page_alloc.c b/sys/dev/drm2/ttm/ttm_page_alloc.c
new file mode 100644
index 0000000..9a30a46
--- /dev/null
+++ b/sys/dev/drm2/ttm/ttm_page_alloc.c
@@ -0,0 +1,900 @@
+/*
+ * Copyright (c) Red Hat Inc.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie <airlied@redhat.com>
+ * Jerome Glisse <jglisse@redhat.com>
+ * Pauli Nieminen <suokkos@gmail.com>
+ */
+/*
+ * Copyright (c) 2013 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * Portions of this software were developed by Konstantin Belousov
+ * <kib@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
+ */
+
+/* simple list based uncached page pool
+ * - Pool collects recently freed pages for reuse
+ * - Use the page's pageq entry to keep a free list
+ * - doesn't track currently in use pages
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/ttm/ttm_bo_driver.h>
+#include <dev/drm2/ttm/ttm_page_alloc.h>
+
+#ifdef TTM_HAS_AGP
+#include <asm/agp.h>
+#endif
+
+#define VM_ALLOC_DMA32 VM_ALLOC_RESERVED1
+
+#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(vm_page_t))
+#define SMALL_ALLOCATION 16
+#define FREE_ALL_PAGES (~0U)
+/* times are in msecs */
+#define PAGE_FREE_INTERVAL 1000
+
+/**
+ * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
+ *
+ * @lock: Protects the shared pool from concurrent access. Must be used with
+ * irqsave/irqrestore variants because the pool allocator may be called from
+ * delayed work.
+ * @fill_lock: Prevent concurrent calls to fill.
+ * @list: Pool of free uc/wc pages for fast reuse.
+ * @gfp_flags: Flags to pass for alloc_page.
+ * @npages: Number of pages in pool.
+ */
+struct ttm_page_pool {
+ struct mtx lock;
+ bool fill_lock;
+ bool dma32;
+ struct pglist list;
+ int ttm_page_alloc_flags;
+ unsigned npages;
+ char *name;
+ unsigned long nfrees;
+ unsigned long nrefills;
+};
+
+/**
+ * Limits for the pool. They are handled without locks because the only place
+ * where they may change is the sysctl store. They won't have an immediate
+ * effect anyway, so forcing serialization to access them is pointless.
+ */
+
+struct ttm_pool_opts {
+ unsigned alloc_size;
+ unsigned max_size;
+ unsigned small;
+};
+
+#define NUM_POOLS 4
+
+/**
+ * struct ttm_pool_manager - Holds memory pools for fast allocation
+ *
+ * The manager is a read-only object for the pool code, so it does not need
+ * locking.
+ *
+ * @kobj_ref: Reference count used to tear the manager down.
+ * @lowmem_handler: vm_lowmem event handler that shrinks the pools when the
+ * system runs low on memory.
+ * @options: Limits for the pools.
+ *
+ * @pools: All pool objects in use.
+ **/
+struct ttm_pool_manager {
+ unsigned int kobj_ref;
+ eventhandler_tag lowmem_handler;
+ struct ttm_pool_opts options;
+
+ union {
+ struct ttm_page_pool u_pools[NUM_POOLS];
+ struct _utag {
+ struct ttm_page_pool u_wc_pool;
+ struct ttm_page_pool u_uc_pool;
+ struct ttm_page_pool u_wc_pool_dma32;
+ struct ttm_page_pool u_uc_pool_dma32;
+ } _ut;
+ } _u;
+};
+
+#define pools _u.u_pools
+#define wc_pool _u._ut.u_wc_pool
+#define uc_pool _u._ut.u_uc_pool
+#define wc_pool_dma32 _u._ut.u_wc_pool_dma32
+#define uc_pool_dma32 _u._ut.u_uc_pool_dma32
+
+MALLOC_DEFINE(M_TTM_POOLMGR, "ttm_poolmgr", "TTM Pool Manager");
+
+static void
+ttm_vm_page_free(vm_page_t m)
+{
+
+ KASSERT(m->object == NULL, ("ttm page %p is owned", m));
+ KASSERT(m->wire_count == 1, ("ttm lost wire %p", m));
+ KASSERT((m->flags & PG_FICTITIOUS) != 0, ("ttm lost fictitious %p", m));
+ KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("ttm got unmanaged %p", m));
+ m->flags &= ~PG_FICTITIOUS;
+ m->oflags |= VPO_UNMANAGED;
+ vm_page_unwire(m, 0);
+ vm_page_free(m);
+}
+
+static vm_memattr_t
+ttm_caching_state_to_vm(enum ttm_caching_state cstate)
+{
+
+ switch (cstate) {
+ case tt_uncached:
+ return (VM_MEMATTR_UNCACHEABLE);
+ case tt_wc:
+ return (VM_MEMATTR_WRITE_COMBINING);
+ case tt_cached:
+ return (VM_MEMATTR_WRITE_BACK);
+ }
+ panic("caching state %d\n", cstate);
+}
+
+static void ttm_pool_kobj_release(struct ttm_pool_manager *m)
+{
+
+ free(m, M_TTM_POOLMGR);
+}
+
+#if 0
+/* XXXKIB sysctl */
+static ssize_t ttm_pool_store(struct ttm_pool_manager *m,
+ struct attribute *attr, const char *buffer, size_t size)
+{
+ int chars;
+ unsigned val;
+ chars = sscanf(buffer, "%u", &val);
+ if (chars == 0)
+ return size;
+
+ /* Convert kb to number of pages */
+ val = val / (PAGE_SIZE >> 10);
+
+ if (attr == &ttm_page_pool_max)
+ m->options.max_size = val;
+ else if (attr == &ttm_page_pool_small)
+ m->options.small = val;
+ else if (attr == &ttm_page_pool_alloc_size) {
+ if (val > NUM_PAGES_TO_ALLOC*8) {
+ pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
+ NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
+ NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+ return size;
+ } else if (val > NUM_PAGES_TO_ALLOC) {
+ pr_warn("Setting allocation size to larger than %lu is not recommended\n",
+ NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+ }
+ m->options.alloc_size = val;
+ }
+
+ return size;
+}
+
+static ssize_t ttm_pool_show(struct ttm_pool_manager *m,
+ struct attribute *attr, char *buffer)
+{
+ unsigned val = 0;
+
+ if (attr == &ttm_page_pool_max)
+ val = m->options.max_size;
+ else if (attr == &ttm_page_pool_small)
+ val = m->options.small;
+ else if (attr == &ttm_page_pool_alloc_size)
+ val = m->options.alloc_size;
+
+ val = val * (PAGE_SIZE >> 10);
+
+ return snprintf(buffer, PAGE_SIZE, "%u\n", val);
+}
+#endif
+
+static struct ttm_pool_manager *_manager;
+
+static int set_pages_array_wb(vm_page_t *pages, int addrinarray)
+{
+ vm_page_t m;
+ int i;
+
+ for (i = 0; i < addrinarray; i++) {
+ m = pages[i];
+#ifdef TTM_HAS_AGP
+ unmap_page_from_agp(m);
+#endif
+ pmap_page_set_memattr(m, VM_MEMATTR_WRITE_BACK);
+ }
+ return 0;
+}
+
+static int set_pages_array_wc(vm_page_t *pages, int addrinarray)
+{
+ vm_page_t m;
+ int i;
+
+ for (i = 0; i < addrinarray; i++) {
+ m = pages[i];
+#ifdef TTM_HAS_AGP
+ map_page_into_agp(pages[i]);
+#endif
+ pmap_page_set_memattr(m, VM_MEMATTR_WRITE_COMBINING);
+ }
+ return 0;
+}
+
+static int set_pages_array_uc(vm_page_t *pages, int addrinarray)
+{
+ vm_page_t m;
+ int i;
+
+ for (i = 0; i < addrinarray; i++) {
+ m = pages[i];
+#ifdef TTM_HAS_AGP
+ map_page_into_agp(pages[i]);
+#endif
+ pmap_page_set_memattr(m, VM_MEMATTR_UNCACHEABLE);
+ }
+ return 0;
+}
+
+/**
+ * Select the right pool for the requested caching state and ttm flags. */
+static struct ttm_page_pool *ttm_get_pool(int flags,
+ enum ttm_caching_state cstate)
+{
+ int pool_index;
+
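+	/*
+	 * Index mapping: wc -> pools[0], uc -> pools[1],
+	 * wc+dma32 -> pools[2], uc+dma32 -> pools[3]; cached pages
+	 * bypass the pools entirely (NULL).
+	 */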
+ if (cstate == tt_cached)
+ return NULL;
+
+ if (cstate == tt_wc)
+ pool_index = 0x0;
+ else
+ pool_index = 0x1;
+
+ if (flags & TTM_PAGE_FLAG_DMA32)
+ pool_index |= 0x2;
+
+ return &_manager->pools[pool_index];
+}
+
+/* set memory back to wb and free the pages. */
+static void ttm_pages_put(vm_page_t *pages, unsigned npages)
+{
+ unsigned i;
+
+ /* Our VM handles vm memattr automatically on the page free. */
+ if (set_pages_array_wb(pages, npages))
+ printf("[TTM] Failed to set %d pages to wb!\n", npages);
+ for (i = 0; i < npages; ++i)
+ ttm_vm_page_free(pages[i]);
+}
+
+static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
+ unsigned freed_pages)
+{
+ pool->npages -= freed_pages;
+ pool->nfrees += freed_pages;
+}
+
+/**
+ * Free pages from pool.
+ *
+ * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
+ * pages in one go.
+ *
+ * @pool: The pool to free pages from.
+ * @nr_free: The number of pages to free, or FREE_ALL_PAGES to drain the pool.
+ *
+ * Returns the number of pages that remain to be freed.
+ **/
+static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
+{
+	vm_page_t p, p1;
+	vm_page_t *pages_to_free;
+	unsigned i;
+	unsigned freed_pages = 0,
+		 npages_to_free = nr_free;
+
+ if (NUM_PAGES_TO_ALLOC < nr_free)
+ npages_to_free = NUM_PAGES_TO_ALLOC;
+
+ pages_to_free = malloc(npages_to_free * sizeof(vm_page_t),
+ M_TEMP, M_WAITOK | M_ZERO);
+
+restart:
+ mtx_lock(&pool->lock);
+
+ TAILQ_FOREACH_REVERSE_SAFE(p, &pool->list, pglist, pageq, p1) {
+ if (freed_pages >= npages_to_free)
+ break;
+
+ pages_to_free[freed_pages++] = p;
+ /* We can only remove NUM_PAGES_TO_ALLOC at a time. */
+ if (freed_pages >= NUM_PAGES_TO_ALLOC) {
+			/* remove the collected pages from the pool */
+			for (i = 0; i < freed_pages; i++)
+				TAILQ_REMOVE(&pool->list,
+				    pages_to_free[i], pageq);
+
+ ttm_pool_update_free_locked(pool, freed_pages);
+ /**
+ * Because changing page caching is costly
+ * we unlock the pool to prevent stalling.
+ */
+ mtx_unlock(&pool->lock);
+
+ ttm_pages_put(pages_to_free, freed_pages);
+ if (likely(nr_free != FREE_ALL_PAGES))
+ nr_free -= freed_pages;
+
+ if (NUM_PAGES_TO_ALLOC >= nr_free)
+ npages_to_free = nr_free;
+ else
+ npages_to_free = NUM_PAGES_TO_ALLOC;
+
+ freed_pages = 0;
+
+ /* free all so restart the processing */
+ if (nr_free)
+ goto restart;
+
+			/* We are not allowed to fall through or break here,
+			 * because the code after the loop expects the pool
+			 * lock to be held, while at this point we have
+			 * dropped it.
+			 */
+ goto out;
+
+ }
+ }
+
+	/* remove the remaining collected pages from the pool */
+	if (freed_pages) {
+		for (i = 0; i < freed_pages; i++)
+			TAILQ_REMOVE(&pool->list, pages_to_free[i], pageq);
+
+ ttm_pool_update_free_locked(pool, freed_pages);
+ nr_free -= freed_pages;
+ }
+
+ mtx_unlock(&pool->lock);
+
+ if (freed_pages)
+ ttm_pages_put(pages_to_free, freed_pages);
+out:
+ free(pages_to_free, M_TEMP);
+ return nr_free;
+}
+
+/* Get a good estimate of how many pages are free in the pools */
+static int ttm_pool_get_num_unused_pages(void)
+{
+ unsigned i;
+ int total = 0;
+ for (i = 0; i < NUM_POOLS; ++i)
+ total += _manager->pools[i].npages;
+
+ return total;
+}
+
+/**
+ * Callback for the VM to request that the pools reduce the number of pages held.
+ */
+static int ttm_pool_mm_shrink(void *arg)
+{
+ static unsigned int start_pool = 0;
+ unsigned i;
+ unsigned pool_offset = atomic_fetchadd_int(&start_pool, 1);
+ struct ttm_page_pool *pool;
+ int shrink_pages = 100; /* XXXKIB */
+
+ pool_offset = pool_offset % NUM_POOLS;
+ /* select start pool in round robin fashion */
+ for (i = 0; i < NUM_POOLS; ++i) {
+ unsigned nr_free = shrink_pages;
+ if (shrink_pages == 0)
+ break;
+ pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
+ shrink_pages = ttm_page_pool_free(pool, nr_free);
+ }
+ /* return estimated number of unused pages in pool */
+ return ttm_pool_get_num_unused_pages();
+}
+
+static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
+{
+
+ manager->lowmem_handler = EVENTHANDLER_REGISTER(vm_lowmem,
+ ttm_pool_mm_shrink, manager, EVENTHANDLER_PRI_ANY);
+}
+
+static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
+{
+
+ EVENTHANDLER_DEREGISTER(vm_lowmem, manager->lowmem_handler);
+}
+
+static int ttm_set_pages_caching(vm_page_t *pages,
+ enum ttm_caching_state cstate, unsigned cpages)
+{
+ int r = 0;
+ /* Set page caching */
+ switch (cstate) {
+ case tt_uncached:
+ r = set_pages_array_uc(pages, cpages);
+ if (r)
+ printf("[TTM] Failed to set %d pages to uc!\n", cpages);
+ break;
+ case tt_wc:
+ r = set_pages_array_wc(pages, cpages);
+ if (r)
+ printf("[TTM] Failed to set %d pages to wc!\n", cpages);
+ break;
+ default:
+ break;
+ }
+ return r;
+}
+
+/**
+ * Free the pages that failed to change their caching state: they are
+ * removed from the @pages list and released back to the system.
+ */
+static void ttm_handle_caching_state_failure(struct pglist *pages,
+ int ttm_flags, enum ttm_caching_state cstate,
+ vm_page_t *failed_pages, unsigned cpages)
+{
+ unsigned i;
+ /* Failed pages have to be freed */
+ for (i = 0; i < cpages; ++i) {
+ TAILQ_REMOVE(pages, failed_pages[i], pageq);
+ ttm_vm_page_free(failed_pages[i]);
+ }
+}
+
+/**
+ * Allocate new pages with correct caching.
+ *
+ * This function is reentrant if the caller updates count depending on the
+ * number of pages returned in the pages array.
+ */
+static int ttm_alloc_new_pages(struct pglist *pages, int ttm_alloc_flags,
+ int ttm_flags, enum ttm_caching_state cstate, unsigned count)
+{
+ vm_page_t *caching_array;
+ vm_page_t p;
+ int r = 0;
+ unsigned i, cpages, aflags;
+ unsigned max_cpages = min(count,
+ (unsigned)(PAGE_SIZE/sizeof(vm_page_t)));
+
+ aflags = VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
+ ((ttm_alloc_flags & TTM_PAGE_FLAG_ZERO_ALLOC) != 0 ?
+ VM_ALLOC_ZERO : 0);
+
+ /* allocate array for page caching change */
+ caching_array = malloc(max_cpages * sizeof(vm_page_t), M_TEMP,
+ M_WAITOK | M_ZERO);
+
+ for (i = 0, cpages = 0; i < count; ++i) {
+ p = vm_page_alloc_contig(NULL, 0, aflags, 1, 0,
+ (ttm_alloc_flags & TTM_PAGE_FLAG_DMA32) ? 0xffffffff :
+ VM_MAX_ADDRESS, PAGE_SIZE, 0,
+ ttm_caching_state_to_vm(cstate));
+ if (!p) {
+ printf("[TTM] Unable to get page %u\n", i);
+
+ /* store already allocated pages in the pool after
+ * setting the caching state */
+ if (cpages) {
+ r = ttm_set_pages_caching(caching_array,
+ cstate, cpages);
+ if (r)
+ ttm_handle_caching_state_failure(pages,
+ ttm_flags, cstate,
+ caching_array, cpages);
+ }
+ r = -ENOMEM;
+ goto out;
+ }
+ p->oflags &= ~VPO_UNMANAGED;
+ p->flags |= PG_FICTITIOUS;
+
+#ifdef CONFIG_HIGHMEM /* KIB: nop */
+		/* gfp flags of a highmem page should never be dma32, so
+		 * we should be fine in that case
+		 */
+ if (!PageHighMem(p))
+#endif
+ {
+ caching_array[cpages++] = p;
+ if (cpages == max_cpages) {
+
+ r = ttm_set_pages_caching(caching_array,
+ cstate, cpages);
+ if (r) {
+ ttm_handle_caching_state_failure(pages,
+ ttm_flags, cstate,
+ caching_array, cpages);
+ goto out;
+ }
+ cpages = 0;
+ }
+ }
+
+ TAILQ_INSERT_HEAD(pages, p, pageq);
+ }
+
+ if (cpages) {
+ r = ttm_set_pages_caching(caching_array, cstate, cpages);
+ if (r)
+ ttm_handle_caching_state_failure(pages,
+ ttm_flags, cstate,
+ caching_array, cpages);
+ }
+out:
+ free(caching_array, M_TEMP);
+
+ return r;
+}
+
+/**
+ * Fill the given pool if there aren't enough pages and the requested number of
+ * pages is small.
+ */
+static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
+ int ttm_flags, enum ttm_caching_state cstate, unsigned count)
+{
+ vm_page_t p;
+ int r;
+ unsigned cpages = 0;
+ /**
+ * Only allow one pool fill operation at a time.
+	 * If the pool doesn't have enough pages for the allocation, new
+	 * pages are allocated from outside of the pool.
+ */
+ if (pool->fill_lock)
+ return;
+
+ pool->fill_lock = true;
+
+	/* If the allocation request is small and there are not enough
+	 * pages in the pool, we fill the pool up first. */
+ if (count < _manager->options.small
+ && count > pool->npages) {
+ struct pglist new_pages;
+ unsigned alloc_size = _manager->options.alloc_size;
+
+ /**
+ * Can't change page caching if in irqsave context. We have to
+ * drop the pool->lock.
+ */
+ mtx_unlock(&pool->lock);
+
+ TAILQ_INIT(&new_pages);
+ r = ttm_alloc_new_pages(&new_pages, pool->ttm_page_alloc_flags,
+ ttm_flags, cstate, alloc_size);
+ mtx_lock(&pool->lock);
+
+ if (!r) {
+ TAILQ_CONCAT(&pool->list, &new_pages, pageq);
+ ++pool->nrefills;
+ pool->npages += alloc_size;
+ } else {
+ printf("[TTM] Failed to fill pool (%p)\n", pool);
+ /* If we have any pages left put them to the pool. */
+			TAILQ_FOREACH(p, &new_pages, pageq) {
+ ++cpages;
+ }
+ TAILQ_CONCAT(&pool->list, &new_pages, pageq);
+ pool->npages += cpages;
+ }
+
+ }
+ pool->fill_lock = false;
+}
+
+/**
+ * Cut 'count' number of pages from the pool and put them on the return list.
+ *
+ * @return count of pages still required to fulfill the request.
+ */
+static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
+ struct pglist *pages,
+ int ttm_flags,
+ enum ttm_caching_state cstate,
+ unsigned count)
+{
+ vm_page_t p;
+ unsigned i;
+
+ mtx_lock(&pool->lock);
+ ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count);
+
+ if (count >= pool->npages) {
+ /* take all pages from the pool */
+ TAILQ_CONCAT(pages, &pool->list, pageq);
+ count -= pool->npages;
+ pool->npages = 0;
+ goto out;
+ }
+ for (i = 0; i < count; i++) {
+ p = TAILQ_FIRST(&pool->list);
+ TAILQ_REMOVE(&pool->list, p, pageq);
+ TAILQ_INSERT_TAIL(pages, p, pageq);
+ }
+ pool->npages -= count;
+ count = 0;
+out:
+ mtx_unlock(&pool->lock);
+ return count;
+}
+
+/* Put all pages in pages list to correct pool to wait for reuse */
+static void ttm_put_pages(vm_page_t *pages, unsigned npages, int flags,
+ enum ttm_caching_state cstate)
+{
+ struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
+ unsigned i;
+
+ if (pool == NULL) {
+ /* No pool for this memory type so free the pages */
+ for (i = 0; i < npages; i++) {
+ if (pages[i]) {
+ ttm_vm_page_free(pages[i]);
+ pages[i] = NULL;
+ }
+ }
+ return;
+ }
+
+ mtx_lock(&pool->lock);
+ for (i = 0; i < npages; i++) {
+ if (pages[i]) {
+ TAILQ_INSERT_TAIL(&pool->list, pages[i], pageq);
+ pages[i] = NULL;
+ pool->npages++;
+ }
+ }
+ /* Check that we don't go over the pool limit */
+ npages = 0;
+ if (pool->npages > _manager->options.max_size) {
+ npages = pool->npages - _manager->options.max_size;
+ /* free at least NUM_PAGES_TO_ALLOC number of pages
+ * to reduce calls to set_memory_wb */
+ if (npages < NUM_PAGES_TO_ALLOC)
+ npages = NUM_PAGES_TO_ALLOC;
+ }
+ mtx_unlock(&pool->lock);
+ if (npages)
+ ttm_page_pool_free(pool, npages);
+}
+
+/*
+ * On success the pages array will hold npages correctly
+ * cached pages.
+ */
+static int ttm_get_pages(vm_page_t *pages, unsigned npages, int flags,
+ enum ttm_caching_state cstate)
+{
+ struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
+ struct pglist plist;
+ vm_page_t p = NULL;
+ int gfp_flags, aflags;
+ unsigned count;
+ int r;
+
+ aflags = VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
+ ((flags & TTM_PAGE_FLAG_ZERO_ALLOC) != 0 ? VM_ALLOC_ZERO : 0);
+
+ /* No pool for cached pages */
+ if (pool == NULL) {
+ for (r = 0; r < npages; ++r) {
+ p = vm_page_alloc_contig(NULL, 0, aflags, 1, 0,
+ (flags & TTM_PAGE_FLAG_DMA32) ? 0xffffffff :
+ VM_MAX_ADDRESS, PAGE_SIZE,
+ 0, ttm_caching_state_to_vm(cstate));
+ if (!p) {
+ printf("[TTM] Unable to allocate page\n");
+ return -ENOMEM;
+ }
+ p->oflags &= ~VPO_UNMANAGED;
+ p->flags |= PG_FICTITIOUS;
+ pages[r] = p;
+ }
+ return 0;
+ }
+
+ /* combine zero flag to pool flags */
+ gfp_flags = flags | pool->ttm_page_alloc_flags;
+
+ /* First we take pages from the pool */
+ TAILQ_INIT(&plist);
+ npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
+ count = 0;
+ TAILQ_FOREACH(p, &plist, pageq) {
+ pages[count++] = p;
+ }
+
+ /* clear the pages coming from the pool if requested */
+ if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
+ TAILQ_FOREACH(p, &plist, pageq) {
+ pmap_zero_page(p);
+ }
+ }
+
+	/* If the pool didn't have enough pages, allocate new ones. */
+ if (npages > 0) {
+ /* ttm_alloc_new_pages doesn't reference pool so we can run
+ * multiple requests in parallel.
+ **/
+ TAILQ_INIT(&plist);
+ r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate,
+ npages);
+ TAILQ_FOREACH(p, &plist, pageq) {
+ pages[count++] = p;
+ }
+ if (r) {
+			/* If there are any pages in the list, put them back
+			 * to the pool. */
+ printf("[TTM] Failed to allocate extra pages for large request\n");
+ ttm_put_pages(pages, count, flags, cstate);
+ return r;
+ }
+ }
+
+ return 0;
+}
+
+static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
+ char *name)
+{
+ mtx_init(&pool->lock, "ttmpool", NULL, MTX_DEF);
+ pool->fill_lock = false;
+ TAILQ_INIT(&pool->list);
+ pool->npages = pool->nfrees = 0;
+ pool->ttm_page_alloc_flags = flags;
+ pool->name = name;
+}
+
+int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
+{
+
+ if (_manager != NULL)
+ printf("[TTM] manager != NULL\n");
+ printf("[TTM] Initializing pool allocator\n");
+
+ _manager = malloc(sizeof(*_manager), M_TTM_POOLMGR, M_WAITOK | M_ZERO);
+
+ ttm_page_pool_init_locked(&_manager->wc_pool, 0, "wc");
+ ttm_page_pool_init_locked(&_manager->uc_pool, 0, "uc");
+ ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
+ TTM_PAGE_FLAG_DMA32, "wc dma");
+ ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
+ TTM_PAGE_FLAG_DMA32, "uc dma");
+
+ _manager->options.max_size = max_pages;
+ _manager->options.small = SMALL_ALLOCATION;
+ _manager->options.alloc_size = NUM_PAGES_TO_ALLOC;
+
+ refcount_init(&_manager->kobj_ref, 1);
+ ttm_pool_mm_shrink_init(_manager);
+
+ return 0;
+}
+
+void ttm_page_alloc_fini(void)
+{
+ int i;
+
+ printf("[TTM] Finalizing pool allocator\n");
+ ttm_pool_mm_shrink_fini(_manager);
+
+ for (i = 0; i < NUM_POOLS; ++i)
+ ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);
+
+ if (refcount_release(&_manager->kobj_ref))
+ ttm_pool_kobj_release(_manager);
+ _manager = NULL;
+}
+
+int ttm_pool_populate(struct ttm_tt *ttm)
+{
+ struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
+ unsigned i;
+ int ret;
+
+ if (ttm->state != tt_unpopulated)
+ return 0;
+
+ for (i = 0; i < ttm->num_pages; ++i) {
+ ret = ttm_get_pages(&ttm->pages[i], 1,
+ ttm->page_flags,
+ ttm->caching_state);
+ if (ret != 0) {
+ ttm_pool_unpopulate(ttm);
+ return -ENOMEM;
+ }
+
+ ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
+ false, false);
+ if (unlikely(ret != 0)) {
+ ttm_pool_unpopulate(ttm);
+ return -ENOMEM;
+ }
+ }
+
+ if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
+ ret = ttm_tt_swapin(ttm);
+ if (unlikely(ret != 0)) {
+ ttm_pool_unpopulate(ttm);
+ return ret;
+ }
+ }
+
+ ttm->state = tt_unbound;
+ return 0;
+}
+
+void ttm_pool_unpopulate(struct ttm_tt *ttm)
+{
+ unsigned i;
+
+ for (i = 0; i < ttm->num_pages; ++i) {
+ if (ttm->pages[i]) {
+ ttm_mem_global_free_page(ttm->glob->mem_glob,
+ ttm->pages[i]);
+ ttm_put_pages(&ttm->pages[i], 1,
+ ttm->page_flags,
+ ttm->caching_state);
+ }
+ }
+ ttm->state = tt_unpopulated;
+}
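+
+/*
+ * Illustrative sketch: a TTM backend is expected to pair these calls so
+ * that the per-page accounting done through ttm_mem_global_alloc_page()
+ * and ttm_mem_global_free_page() stays balanced.
+ */
+#if 0
+	ret = ttm_pool_populate(ttm);	/* tt_unpopulated -> tt_unbound */
+	if (ret != 0)
+		return (ret);
+	/* ... bind the pages to the GPU ... */
+	ttm_pool_unpopulate(ttm);	/* back to tt_unpopulated */
+#endif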
+
+#if 0
+/* XXXKIB sysctl */
+int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
+{
+ struct ttm_page_pool *p;
+ unsigned i;
+ char *h[] = {"pool", "refills", "pages freed", "size"};
+ if (!_manager) {
+ seq_printf(m, "No pool allocator running.\n");
+ return 0;
+ }
+ seq_printf(m, "%6s %12s %13s %8s\n",
+ h[0], h[1], h[2], h[3]);
+ for (i = 0; i < NUM_POOLS; ++i) {
+ p = &_manager->pools[i];
+
+ seq_printf(m, "%6s %12ld %13ld %8d\n",
+ p->name, p->nrefills,
+ p->nfrees, p->npages);
+ }
+ return 0;
+}
+#endif
diff --git a/sys/dev/drm2/ttm/ttm_page_alloc.h b/sys/dev/drm2/ttm/ttm_page_alloc.h
new file mode 100644
index 0000000..0824e2d
--- /dev/null
+++ b/sys/dev/drm2/ttm/ttm_page_alloc.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) Red Hat Inc.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie <airlied@redhat.com>
+ * Jerome Glisse <jglisse@redhat.com>
+ */
+/* $FreeBSD$ */
+#ifndef TTM_PAGE_ALLOC
+#define TTM_PAGE_ALLOC
+
+#include <dev/drm2/ttm/ttm_bo_driver.h>
+#include <dev/drm2/ttm/ttm_memory.h>
+
+/**
+ * Initialize pool allocator.
+ */
+int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);
+/**
+ * Free pool allocator.
+ */
+void ttm_page_alloc_fini(void);
+
+/**
+ * ttm_pool_populate:
+ *
+ * @ttm: The struct ttm_tt to contain the backing pages.
+ *
+ * Add backing pages to all of @ttm
+ */
+extern int ttm_pool_populate(struct ttm_tt *ttm);
+
+/**
+ * ttm_pool_unpopulate:
+ *
+ * @ttm: The struct ttm_tt which to free backing pages.
+ *
+ * Free all pages of @ttm
+ */
+extern void ttm_pool_unpopulate(struct ttm_tt *ttm);
+
+/**
+ * Output the state of pools to debugfs file
+ */
+/* XXXKIB
+extern int ttm_page_alloc_debugfs(struct seq_file *m, void *data);
+*/
+
+#ifdef CONFIG_SWIOTLB
+/**
+ * Initialize pool allocator.
+ */
+int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);
+
+/**
+ * Free pool allocator.
+ */
+void ttm_dma_page_alloc_fini(void);
+
+/**
+ * Output the state of pools to debugfs file
+ */
+extern int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
+
+extern int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
+extern void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
+
+#else
+static inline int ttm_dma_page_alloc_init(struct ttm_mem_global *glob,
+ unsigned max_pages)
+{
+ return -ENODEV;
+}
+
+static inline void ttm_dma_page_alloc_fini(void) { return; }
+
+/* XXXKIB
+static inline int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
+{
+ return 0;
+}
+*/
+#endif
+
+#endif
diff --git a/sys/dev/drm2/ttm/ttm_page_alloc_dma.c b/sys/dev/drm2/ttm/ttm_page_alloc_dma.c
new file mode 100644
index 0000000..a5c6fed
--- /dev/null
+++ b/sys/dev/drm2/ttm/ttm_page_alloc_dma.c
@@ -0,0 +1,1134 @@
+/*
+ * Copyright 2011 (c) Oracle Corp.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+ */
+
+/*
+ * A simple DMA pool loosely based on dmapool.c. It has certain advantages
+ * over the DMA pools:
+ * - Pool collects recently freed pages for reuse (and hooks up to
+ * the shrinker).
+ * - Tracks currently in use pages
+ * - Tracks whether the page is UC, WB or cached (and reverts to WB
+ * when freed).
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#define pr_fmt(fmt) "[TTM] " fmt
+
+#include <linux/dma-mapping.h>
+#include <linux/list.h>
+#include <linux/seq_file.h> /* for seq_printf */
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/highmem.h>
+#include <linux/mm_types.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/atomic.h>
+#include <linux/device.h>
+#include <linux/kthread.h>
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_page_alloc.h>
+#ifdef TTM_HAS_AGP
+#include <asm/agp.h>
+#endif
+
+#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
+#define SMALL_ALLOCATION 4
+#define FREE_ALL_PAGES (~0U)
+/* times are in msecs */
+#define IS_UNDEFINED (0)
+#define IS_WC (1<<1)
+#define IS_UC (1<<2)
+#define IS_CACHED (1<<3)
+#define IS_DMA32 (1<<4)
+
+enum pool_type {
+ POOL_IS_UNDEFINED,
+ POOL_IS_WC = IS_WC,
+ POOL_IS_UC = IS_UC,
+ POOL_IS_CACHED = IS_CACHED,
+ POOL_IS_WC_DMA32 = IS_WC | IS_DMA32,
+ POOL_IS_UC_DMA32 = IS_UC | IS_DMA32,
+ POOL_IS_CACHED_DMA32 = IS_CACHED | IS_DMA32,
+};
+/*
+ * The pool structure. There are usually six pools:
+ * - generic (not restricted to DMA32):
+ * - write combined, uncached, cached.
+ *  - dma32 (up to 2^32 - so up to 4GB):
+ *      - write combined, uncached, cached.
+ * for each 'struct device'. The 'cached' is for pages that are actively used.
+ * The other ones can be shrunk by the shrinker API if necessary.
+ * @pools: The 'struct device->dma_pools' link.
+ * @type: Type of the pool
+ * @lock: Protects the inuse_list and free_list from concurrent access. Must be
+ * used with irqsave/irqrestore variants because the pool allocator may be called
+ * from delayed work.
+ * @inuse_list: Pool of pages that are in use. The order is very important:
+ * it is the order in which the TTM pages were put back.
+ * @free_list: Pool of pages that are free to be used. No order requirements.
+ * @dev: The device that is associated with these pools.
+ * @size: Size used during DMA allocation.
+ * @npages_free: Count of available pages for re-use.
+ * @npages_in_use: Count of pages that are in use.
+ * @nfrees: Stats when pool is shrinking.
+ * @nrefills: Stats when the pool is grown.
+ * @gfp_flags: Flags to pass for alloc_page.
+ * @name: Name of the pool.
+ * @dev_name: Name derived from dev - similar to how dev_info works.
+ * Used during shutdown as the dev_info during release is unavailable.
+ */
+struct dma_pool {
+ struct list_head pools; /* The 'struct device->dma_pools link */
+ enum pool_type type;
+ spinlock_t lock;
+ struct list_head inuse_list;
+ struct list_head free_list;
+ struct device *dev;
+ unsigned size;
+ unsigned npages_free;
+ unsigned npages_in_use;
+ unsigned long nfrees; /* Stats when shrunk. */
+ unsigned long nrefills; /* Stats when grown. */
+ gfp_t gfp_flags;
+ char name[13]; /* "cached dma32" */
+ char dev_name[64]; /* Constructed from dev */
+};
+
+/*
+ * The accounting structure that keeps track of an allocated page along with
+ * its DMA address.
+ * @page_list: The link into the pool's 'free_list' or 'inuse_list'.
+ * @vaddr: The virtual address of the page.
+ * @p: The 'struct page' backing the allocation.
+ * @dma: The bus address of the page. If the page is not allocated
+ * via the DMA API, it will be -1.
+ */
+struct dma_page {
+ struct list_head page_list;
+ void *vaddr;
+ struct page *p;
+ dma_addr_t dma;
+};
+
+/*
+ * Limits for the pool. They are handled without locks because the only place
+ * where they may change is the sysfs store path. They won't take immediate
+ * effect anyway, so forcing serialized access to them is pointless.
+ */
+
+struct ttm_pool_opts {
+ unsigned alloc_size;
+ unsigned max_size;
+ unsigned small;
+};
+
+/*
+ * Contains the list of all of the 'struct device' and their corresponding
+ * DMA pools. Guarded by '_manager->lock'.
+ * @pools: The link to 'struct ttm_pool_manager->pools'
+ * @dev: The 'struct device' associated with the 'pool'
+ * @pool: The 'struct dma_pool' associated with the 'dev'
+ */
+struct device_pools {
+ struct list_head pools;
+ struct device *dev;
+ struct dma_pool *pool;
+};
+
+/*
+ * struct ttm_pool_manager - Holds memory pools for fast allocation
+ *
+ * @lock: Lock used when adding/removing from pools
+ * @pools: List of 'struct device' and 'struct dma_pool' tuples.
+ * @options: Limits for the pool.
+ * @npools: Total number of pools in existence.
+ * @mm_shrink: The structure used by register_shrinker/unregister_shrinker.
+ */
+struct ttm_pool_manager {
+ struct mutex lock;
+ struct list_head pools;
+ struct ttm_pool_opts options;
+ unsigned npools;
+ struct shrinker mm_shrink;
+ struct kobject kobj;
+};
+
+static struct ttm_pool_manager *_manager;
+
+static struct attribute ttm_page_pool_max = {
+ .name = "pool_max_size",
+ .mode = S_IRUGO | S_IWUSR
+};
+static struct attribute ttm_page_pool_small = {
+ .name = "pool_small_allocation",
+ .mode = S_IRUGO | S_IWUSR
+};
+static struct attribute ttm_page_pool_alloc_size = {
+ .name = "pool_allocation_size",
+ .mode = S_IRUGO | S_IWUSR
+};
+
+static struct attribute *ttm_pool_attrs[] = {
+ &ttm_page_pool_max,
+ &ttm_page_pool_small,
+ &ttm_page_pool_alloc_size,
+ NULL
+};
+
+static void ttm_pool_kobj_release(struct kobject *kobj)
+{
+ struct ttm_pool_manager *m =
+ container_of(kobj, struct ttm_pool_manager, kobj);
+ kfree(m);
+}
+
+static ssize_t ttm_pool_store(struct kobject *kobj, struct attribute *attr,
+ const char *buffer, size_t size)
+{
+ struct ttm_pool_manager *m =
+ container_of(kobj, struct ttm_pool_manager, kobj);
+ int chars;
+ unsigned val;
+ chars = sscanf(buffer, "%u", &val);
+ if (chars == 0)
+ return size;
+
+ /* Convert kb to number of pages */
+ val = val / (PAGE_SIZE >> 10);
+
+ if (attr == &ttm_page_pool_max)
+ m->options.max_size = val;
+ else if (attr == &ttm_page_pool_small)
+ m->options.small = val;
+ else if (attr == &ttm_page_pool_alloc_size) {
+ if (val > NUM_PAGES_TO_ALLOC*8) {
+ pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
+ NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
+ NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+ return size;
+ } else if (val > NUM_PAGES_TO_ALLOC) {
+ pr_warn("Setting allocation size to larger than %lu is not recommended\n",
+ NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+ }
+ m->options.alloc_size = val;
+ }
+
+ return size;
+}
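+
+/*
+ * A worked example of the KiB-to-pages conversion above (the sysfs path is
+ * illustrative; the kobject is added as "dma_pool" under the memory glob):
+ * with 4 KiB pages, PAGE_SIZE >> 10 == 4, so
+ *
+ *	echo 16384 > /sys/.../dma_pool/pool_max_size
+ *
+ * stores 16384 / 4 == 4096 pages in options.max_size.
+ */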
+
+static ssize_t ttm_pool_show(struct kobject *kobj, struct attribute *attr,
+ char *buffer)
+{
+ struct ttm_pool_manager *m =
+ container_of(kobj, struct ttm_pool_manager, kobj);
+ unsigned val = 0;
+
+ if (attr == &ttm_page_pool_max)
+ val = m->options.max_size;
+ else if (attr == &ttm_page_pool_small)
+ val = m->options.small;
+ else if (attr == &ttm_page_pool_alloc_size)
+ val = m->options.alloc_size;
+
+ val = val * (PAGE_SIZE >> 10);
+
+ return snprintf(buffer, PAGE_SIZE, "%u\n", val);
+}
+
+static const struct sysfs_ops ttm_pool_sysfs_ops = {
+ .show = &ttm_pool_show,
+ .store = &ttm_pool_store,
+};
+
+static struct kobj_type ttm_pool_kobj_type = {
+ .release = &ttm_pool_kobj_release,
+ .sysfs_ops = &ttm_pool_sysfs_ops,
+ .default_attrs = ttm_pool_attrs,
+};
+
+#ifndef CONFIG_X86
+static int set_pages_array_wb(struct page **pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+ int i;
+
+ for (i = 0; i < addrinarray; i++)
+ unmap_page_from_agp(pages[i]);
+#endif
+ return 0;
+}
+
+static int set_pages_array_wc(struct page **pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+ int i;
+
+ for (i = 0; i < addrinarray; i++)
+ map_page_into_agp(pages[i]);
+#endif
+ return 0;
+}
+
+static int set_pages_array_uc(struct page **pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+ int i;
+
+ for (i = 0; i < addrinarray; i++)
+ map_page_into_agp(pages[i]);
+#endif
+ return 0;
+}
+#endif /* for !CONFIG_X86 */
+
+static int ttm_set_pages_caching(struct dma_pool *pool,
+ struct page **pages, unsigned cpages)
+{
+ int r = 0;
+ /* Set page caching */
+ if (pool->type & IS_UC) {
+ r = set_pages_array_uc(pages, cpages);
+ if (r)
+ pr_err("%s: Failed to set %d pages to uc!\n",
+ pool->dev_name, cpages);
+ }
+ if (pool->type & IS_WC) {
+ r = set_pages_array_wc(pages, cpages);
+ if (r)
+ pr_err("%s: Failed to set %d pages to wc!\n",
+ pool->dev_name, cpages);
+ }
+ return r;
+}
+
+static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page)
+{
+ dma_addr_t dma = d_page->dma;
+ dma_free_coherent(pool->dev, pool->size, d_page->vaddr, dma);
+
+ kfree(d_page);
+ d_page = NULL;
+}
+
+static struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool)
+{
+ struct dma_page *d_page;
+
+ d_page = kmalloc(sizeof(struct dma_page), GFP_KERNEL);
+ if (!d_page)
+ return NULL;
+
+ d_page->vaddr = dma_alloc_coherent(pool->dev, pool->size,
+ &d_page->dma,
+ pool->gfp_flags);
+ if (d_page->vaddr)
+ d_page->p = virt_to_page(d_page->vaddr);
+ else {
+ kfree(d_page);
+ d_page = NULL;
+ }
+ return d_page;
+}
+
+static enum pool_type ttm_to_type(int flags, enum ttm_caching_state cstate)
+{
+ enum pool_type type = IS_UNDEFINED;
+
+ if (flags & TTM_PAGE_FLAG_DMA32)
+ type |= IS_DMA32;
+ if (cstate == tt_cached)
+ type |= IS_CACHED;
+ else if (cstate == tt_uncached)
+ type |= IS_UC;
+ else
+ type |= IS_WC;
+
+ return type;
+}
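+
+/*
+ * For instance, a TTM with TTM_PAGE_FLAG_DMA32 set and caching state
+ * tt_uncached maps to (IS_UC | IS_DMA32) == POOL_IS_UC_DMA32, while a
+ * plain tt_cached TTM maps to POOL_IS_CACHED.
+ */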
+
+static void ttm_pool_update_free_locked(struct dma_pool *pool,
+ unsigned freed_pages)
+{
+ pool->npages_free -= freed_pages;
+ pool->nfrees += freed_pages;
+}
+
+/* set memory back to wb and free the pages. */
+static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
+ struct page *pages[], unsigned npages)
+{
+ struct dma_page *d_page, *tmp;
+
+ /* Don't set WB on WB page pool. */
+ if (npages && !(pool->type & IS_CACHED) &&
+ set_pages_array_wb(pages, npages))
+ pr_err("%s: Failed to set %d pages to wb!\n",
+ pool->dev_name, npages);
+
+ list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
+ list_del(&d_page->page_list);
+ __ttm_dma_free_page(pool, d_page);
+ }
+}
+
+static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
+{
+ /* Don't set WB on WB page pool. */
+ if (!(pool->type & IS_CACHED) && set_pages_array_wb(&d_page->p, 1))
+ pr_err("%s: Failed to set %d pages to wb!\n",
+ pool->dev_name, 1);
+
+ list_del(&d_page->page_list);
+ __ttm_dma_free_page(pool, d_page);
+}
+
+/*
+ * Free pages from the pool.
+ *
+ * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
+ * pages in one go.
+ *
+ * @pool: The pool to free pages from.
+ * @nr_free: Number of pages to free; FREE_ALL_PAGES frees every page in
+ * the pool.
+ **/
+static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free)
+{
+ unsigned long irq_flags;
+ struct dma_page *dma_p, *tmp;
+ struct page **pages_to_free;
+ struct list_head d_pages;
+ unsigned freed_pages = 0,
+ npages_to_free = nr_free;
+
+ if (NUM_PAGES_TO_ALLOC < nr_free)
+ npages_to_free = NUM_PAGES_TO_ALLOC;
+#if 0
+ if (nr_free > 1) {
+ pr_debug("%s: (%s:%d) Attempting to free %d (%d) pages\n",
+ pool->dev_name, pool->name, current->pid,
+ npages_to_free, nr_free);
+ }
+#endif
+ pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
+ GFP_KERNEL);
+
+ if (!pages_to_free) {
+ pr_err("%s: Failed to allocate memory for pool free operation\n",
+ pool->dev_name);
+ return 0;
+ }
+ INIT_LIST_HEAD(&d_pages);
+restart:
+ spin_lock_irqsave(&pool->lock, irq_flags);
+
+ /* We pick the oldest ones off the list */
+ list_for_each_entry_safe_reverse(dma_p, tmp, &pool->free_list,
+ page_list) {
+ if (freed_pages >= npages_to_free)
+ break;
+
+ /* Move the dma_page from one list to another. */
+ list_move(&dma_p->page_list, &d_pages);
+
+ pages_to_free[freed_pages++] = dma_p->p;
+ /* We can only remove NUM_PAGES_TO_ALLOC at a time. */
+ if (freed_pages >= NUM_PAGES_TO_ALLOC) {
+ ttm_pool_update_free_locked(pool, freed_pages);
+ /**
+ * Because changing page caching is costly
+ * we unlock the pool to prevent stalling.
+ */
+ spin_unlock_irqrestore(&pool->lock, irq_flags);
+
+ ttm_dma_pages_put(pool, &d_pages, pages_to_free,
+ freed_pages);
+
+ INIT_LIST_HEAD(&d_pages);
+
+ if (likely(nr_free != FREE_ALL_PAGES))
+ nr_free -= freed_pages;
+
+ if (NUM_PAGES_TO_ALLOC >= nr_free)
+ npages_to_free = nr_free;
+ else
+ npages_to_free = NUM_PAGES_TO_ALLOC;
+
+ freed_pages = 0;
+
+ /* if there are still pages to free, restart the processing */
+ if (nr_free)
+ goto restart;
+
+ /* Not allowed to fall through or break because
+ * following context is inside spinlock while we are
+ * outside here.
+ */
+ goto out;
+
+ }
+ }
+
+ /* remove range of pages from the pool */
+ if (freed_pages) {
+ ttm_pool_update_free_locked(pool, freed_pages);
+ nr_free -= freed_pages;
+ }
+
+ spin_unlock_irqrestore(&pool->lock, irq_flags);
+
+ if (freed_pages)
+ ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages);
+out:
+ kfree(pages_to_free);
+ return nr_free;
+}
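+
+/*
+ * A worked example of the batching above (with NUM_PAGES_TO_ALLOC == 512):
+ * a request to free 1200 pages runs as two full 512-page batches, each
+ * dropping the spinlock around the costly caching-state change, followed
+ * by a final batch of 176 pages freed after the loop exits.
+ */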
+
+static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
+{
+ struct device_pools *p;
+ struct dma_pool *pool;
+
+ if (!dev)
+ return;
+
+ mutex_lock(&_manager->lock);
+ list_for_each_entry_reverse(p, &_manager->pools, pools) {
+ if (p->dev != dev)
+ continue;
+ pool = p->pool;
+ if (pool->type != type)
+ continue;
+
+ list_del(&p->pools);
+ kfree(p);
+ _manager->npools--;
+ break;
+ }
+ list_for_each_entry_reverse(pool, &dev->dma_pools, pools) {
+ if (pool->type != type)
+ continue;
+ /* Takes a spinlock. */
+ ttm_dma_page_pool_free(pool, FREE_ALL_PAGES);
+ WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
+ /* This code path is called after _all_ references to the
+ * struct device have been dropped - so nobody should be
+ * touching it. In case somebody is trying to _add_ we are
+ * guarded by the mutex. */
+ list_del(&pool->pools);
+ kfree(pool);
+ break;
+ }
+ mutex_unlock(&_manager->lock);
+}
+
+/*
+ * This destructor runs when the 'struct device' is freed, although the pool
+ * might have already been freed earlier.
+ */
+static void ttm_dma_pool_release(struct device *dev, void *res)
+{
+ struct dma_pool *pool = *(struct dma_pool **)res;
+
+ if (pool)
+ ttm_dma_free_pool(dev, pool->type);
+}
+
+static int ttm_dma_pool_match(struct device *dev, void *res, void *match_data)
+{
+ return *(struct dma_pool **)res == match_data;
+}
+
+static struct dma_pool *ttm_dma_pool_init(struct device *dev, gfp_t flags,
+ enum pool_type type)
+{
+ char *n[] = {"wc", "uc", "cached", " dma32", "unknown",};
+ enum pool_type t[] = {IS_WC, IS_UC, IS_CACHED, IS_DMA32, IS_UNDEFINED};
+ struct device_pools *sec_pool = NULL;
+ struct dma_pool *pool = NULL, **ptr;
+ unsigned i;
+ int ret = -ENODEV;
+ char *p;
+
+ if (!dev)
+ return NULL;
+
+ ptr = devres_alloc(ttm_dma_pool_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return NULL;
+
+ ret = -ENOMEM;
+
+ pool = kmalloc_node(sizeof(struct dma_pool), GFP_KERNEL,
+ dev_to_node(dev));
+ if (!pool)
+ goto err_mem;
+
+ sec_pool = kmalloc_node(sizeof(struct device_pools), GFP_KERNEL,
+ dev_to_node(dev));
+ if (!sec_pool)
+ goto err_mem;
+
+ INIT_LIST_HEAD(&sec_pool->pools);
+ sec_pool->dev = dev;
+ sec_pool->pool = pool;
+
+ INIT_LIST_HEAD(&pool->free_list);
+ INIT_LIST_HEAD(&pool->inuse_list);
+ INIT_LIST_HEAD(&pool->pools);
+ spin_lock_init(&pool->lock);
+ pool->dev = dev;
+ pool->npages_free = pool->npages_in_use = 0;
+ pool->nfrees = 0;
+ pool->gfp_flags = flags;
+ pool->size = PAGE_SIZE;
+ pool->type = type;
+ pool->nrefills = 0;
+ p = pool->name;
+ for (i = 0; i < 5; i++) {
+ if (type & t[i]) {
+ p += snprintf(p, sizeof(pool->name) - (p - pool->name),
+ "%s", n[i]);
+ }
+ }
+ *p = 0;
+ /* We copy the name for pr_* calls because by the time dma_pool_destroy
+ * is called, the kobj->name has already been deallocated. */
+ snprintf(pool->dev_name, sizeof(pool->dev_name), "%s %s",
+ dev_driver_string(dev), dev_name(dev));
+ mutex_lock(&_manager->lock);
+ /* You can get the dma_pool from either the global: */
+ list_add(&sec_pool->pools, &_manager->pools);
+ _manager->npools++;
+ /* or from 'struct device': */
+ list_add(&pool->pools, &dev->dma_pools);
+ mutex_unlock(&_manager->lock);
+
+ *ptr = pool;
+ devres_add(dev, ptr);
+
+ return pool;
+err_mem:
+ devres_free(ptr);
+ kfree(sec_pool);
+ kfree(pool);
+ return ERR_PTR(ret);
+}
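+
+/*
+ * The name assembly above walks the n[]/t[] tables: for
+ * type == (IS_CACHED | IS_DMA32) the matching entries are "cached" and
+ * " dma32" (the leading space in " dma32" doubles as the separator),
+ * yielding "cached dma32" - exactly the 12 characters plus NUL that the
+ * 13-byte name field holds.
+ */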
+
+static struct dma_pool *ttm_dma_find_pool(struct device *dev,
+ enum pool_type type)
+{
+ struct dma_pool *pool, *tmp, *found = NULL;
+
+ if (type == IS_UNDEFINED)
+ return found;
+
+ /* NB: We iterate on the 'struct device' which has no spinlock, but
+ * it does have a kref which we have taken. The kref is taken during
+ * graphic driver loading - in the drm_pci_init it calls either
+ * pci_dev_get or pci_register_driver which both end up taking a kref
+ * on 'struct device'.
+ *
+ * On teardown, the graphic drivers end up quiescing the TTM (put_pages)
+ * and calling the dev_res destructors: ttm_dma_pool_release. The nice
+ * thing is that at that point in time there are no pages associated with
+ * the driver, so this function will not be called.
+ */
+ list_for_each_entry_safe(pool, tmp, &dev->dma_pools, pools) {
+ if (pool->type != type)
+ continue;
+ found = pool;
+ break;
+ }
+ return found;
+}
+
+/*
+ * Free the pages that failed to change caching state. Pages whose caching
+ * state was changed successfully are left on the list so they can be put
+ * back in the pool.
+ */
+static void ttm_dma_handle_caching_state_failure(struct dma_pool *pool,
+ struct list_head *d_pages,
+ struct page **failed_pages,
+ unsigned cpages)
+{
+ struct dma_page *d_page, *tmp;
+ struct page *p;
+ unsigned i = 0;
+
+ p = failed_pages[0];
+ if (!p)
+ return;
+ /* Find the failed page. */
+ list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
+ if (d_page->p != p)
+ continue;
+ /* .. and then progress over the full list. */
+ list_del(&d_page->page_list);
+ __ttm_dma_free_page(pool, d_page);
+ if (++i < cpages)
+ p = failed_pages[i];
+ else
+ break;
+ }
+}
+
+/*
+ * Allocate 'count' pages with the pool's caching state and link them onto
+ * the 'd_pages' list.
+ * We return zero for success, and negative numbers as errors.
+ */
+static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
+ struct list_head *d_pages,
+ unsigned count)
+{
+ struct page **caching_array;
+ struct dma_page *dma_p;
+ struct page *p;
+ int r = 0;
+ unsigned i, cpages;
+ unsigned max_cpages = min(count,
+ (unsigned)(PAGE_SIZE/sizeof(struct page *)));
+
+ /* allocate array for page caching change */
+ caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
+
+ if (!caching_array) {
+ pr_err("%s: Unable to allocate table for new pages\n",
+ pool->dev_name);
+ return -ENOMEM;
+ }
+
+ if (count > 1) {
+ pr_debug("%s: (%s:%d) Getting %d pages\n",
+ pool->dev_name, pool->name, current->pid, count);
+ }
+
+ for (i = 0, cpages = 0; i < count; ++i) {
+ dma_p = __ttm_dma_alloc_page(pool);
+ if (!dma_p) {
+ pr_err("%s: Unable to get page %u\n",
+ pool->dev_name, i);
+
+ /* store already allocated pages in the pool after
+ * setting the caching state */
+ if (cpages) {
+ r = ttm_set_pages_caching(pool, caching_array,
+ cpages);
+ if (r)
+ ttm_dma_handle_caching_state_failure(
+ pool, d_pages, caching_array,
+ cpages);
+ }
+ r = -ENOMEM;
+ goto out;
+ }
+ p = dma_p->p;
+#ifdef CONFIG_HIGHMEM
+ /* gfp flags of a highmem page should never include dma32, so we
+ * should be fine in that case
+ */
+ if (!PageHighMem(p))
+#endif
+ {
+ caching_array[cpages++] = p;
+ if (cpages == max_cpages) {
+ /* Note: Cannot hold the spinlock */
+ r = ttm_set_pages_caching(pool, caching_array,
+ cpages);
+ if (r) {
+ ttm_dma_handle_caching_state_failure(
+ pool, d_pages, caching_array,
+ cpages);
+ goto out;
+ }
+ cpages = 0;
+ }
+ }
+ list_add(&dma_p->page_list, d_pages);
+ }
+
+ if (cpages) {
+ r = ttm_set_pages_caching(pool, caching_array, cpages);
+ if (r)
+ ttm_dma_handle_caching_state_failure(pool, d_pages,
+ caching_array, cpages);
+ }
+out:
+ kfree(caching_array);
+ return r;
+}
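+
+/*
+ * The caching_array batching above trades a little memory for far fewer
+ * expensive attribute changes: allocating 1024 uncached pages with
+ * max_cpages == 512 issues two set_pages_array_uc() calls instead of
+ * 1024 per-page transitions (assuming no highmem pages and no failures).
+ */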
+
+/*
+ * @return the number of pages available in the pool's free list (zero if
+ * none could be made available).
+ */
+static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
+ unsigned long *irq_flags)
+{
+ unsigned count = _manager->options.small;
+ int r = pool->npages_free;
+
+ if (count > pool->npages_free) {
+ struct list_head d_pages;
+
+ INIT_LIST_HEAD(&d_pages);
+
+ spin_unlock_irqrestore(&pool->lock, *irq_flags);
+
+ /* Returns zero on success, or a negative
+ * error code on failure. */
+ r = ttm_dma_pool_alloc_new_pages(pool, &d_pages, count);
+
+ spin_lock_irqsave(&pool->lock, *irq_flags);
+ if (!r) {
+ /* Add the fresh pages to the end. */
+ list_splice(&d_pages, &pool->free_list);
+ ++pool->nrefills;
+ pool->npages_free += count;
+ r = count;
+ } else {
+ struct dma_page *d_page;
+ unsigned cpages = 0;
+
+ pr_err("%s: Failed to fill %s pool (r:%d)!\n",
+ pool->dev_name, pool->name, r);
+
+ list_for_each_entry(d_page, &d_pages, page_list) {
+ cpages++;
+ }
+ list_splice_tail(&d_pages, &pool->free_list);
+ pool->npages_free += cpages;
+ r = cpages;
+ }
+ }
+ return r;
+}
+
+/*
+ * @return zero on success, or -ENOMEM if no page could be obtained.
+ * The populate list is actually a stack (not that it matters, as TTM
+ * allocates one page at a time).
+ */
+static int ttm_dma_pool_get_pages(struct dma_pool *pool,
+ struct ttm_dma_tt *ttm_dma,
+ unsigned index)
+{
+ struct dma_page *d_page;
+ struct ttm_tt *ttm = &ttm_dma->ttm;
+ unsigned long irq_flags;
+ int count, r = -ENOMEM;
+
+ spin_lock_irqsave(&pool->lock, irq_flags);
+ count = ttm_dma_page_pool_fill_locked(pool, &irq_flags);
+ if (count) {
+ d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
+ ttm->pages[index] = d_page->p;
+ ttm_dma->dma_address[index] = d_page->dma;
+ list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
+ r = 0;
+ pool->npages_in_use += 1;
+ pool->npages_free -= 1;
+ }
+ spin_unlock_irqrestore(&pool->lock, irq_flags);
+ return r;
+}
+
+/*
+ * On success the pages list will hold the requested number of correctly
+ * cached pages. On failure the negative error value (-ENOMEM, etc) is
+ * returned.
+ */
+int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
+{
+ struct ttm_tt *ttm = &ttm_dma->ttm;
+ struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
+ struct dma_pool *pool;
+ enum pool_type type;
+ unsigned i;
+ gfp_t gfp_flags;
+ int ret;
+
+ if (ttm->state != tt_unpopulated)
+ return 0;
+
+ type = ttm_to_type(ttm->page_flags, ttm->caching_state);
+ if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
+ gfp_flags = GFP_USER | GFP_DMA32;
+ else
+ gfp_flags = GFP_HIGHUSER;
+ if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
+ gfp_flags |= __GFP_ZERO;
+
+ pool = ttm_dma_find_pool(dev, type);
+ if (!pool) {
+ pool = ttm_dma_pool_init(dev, gfp_flags, type);
+ if (IS_ERR_OR_NULL(pool)) {
+ return -ENOMEM;
+ }
+ }
+
+ INIT_LIST_HEAD(&ttm_dma->pages_list);
+ for (i = 0; i < ttm->num_pages; ++i) {
+ ret = ttm_dma_pool_get_pages(pool, ttm_dma, i);
+ if (ret != 0) {
+ ttm_dma_unpopulate(ttm_dma, dev);
+ return -ENOMEM;
+ }
+
+ ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
+ false, false);
+ if (unlikely(ret != 0)) {
+ ttm_dma_unpopulate(ttm_dma, dev);
+ return -ENOMEM;
+ }
+ }
+
+ if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
+ ret = ttm_tt_swapin(ttm);
+ if (unlikely(ret != 0)) {
+ ttm_dma_unpopulate(ttm_dma, dev);
+ return ret;
+ }
+ }
+
+ ttm->state = tt_unbound;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ttm_dma_populate);
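+
+/*
+ * A minimal sketch of how a driver's ttm_tt_populate backend might call
+ * this (the foo_* names are hypothetical; each real driver supplies its
+ * own 'struct device' handle):
+ *
+ *	static int foo_ttm_populate(struct ttm_tt *ttm)
+ *	{
+ *		struct ttm_dma_tt *dma_tt =
+ *		    container_of(ttm, struct ttm_dma_tt, ttm);
+ *
+ *		return ttm_dma_populate(dma_tt, foo_dev);
+ *	}
+ */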
+
+/* Get a good estimate of how many pages are free in the pools */
+static int ttm_dma_pool_get_num_unused_pages(void)
+{
+ struct device_pools *p;
+ unsigned total = 0;
+
+ mutex_lock(&_manager->lock);
+ list_for_each_entry(p, &_manager->pools, pools)
+ total += p->pool->npages_free;
+ mutex_unlock(&_manager->lock);
+ return total;
+}
+
+/* Return all pages on the pages list to the correct pool to await reuse */
+void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
+{
+ struct ttm_tt *ttm = &ttm_dma->ttm;
+ struct dma_pool *pool;
+ struct dma_page *d_page, *next;
+ enum pool_type type;
+ bool is_cached = false;
+ unsigned count = 0, i, npages = 0;
+ unsigned long irq_flags;
+
+ type = ttm_to_type(ttm->page_flags, ttm->caching_state);
+ pool = ttm_dma_find_pool(dev, type);
+ if (!pool)
+ return;
+
+ is_cached = (ttm_dma_find_pool(pool->dev,
+ ttm_to_type(ttm->page_flags, tt_cached)) == pool);
+
+ /* make sure the pages array matches the list and count the pages */
+ list_for_each_entry(d_page, &ttm_dma->pages_list, page_list) {
+ ttm->pages[count] = d_page->p;
+ count++;
+ }
+
+ spin_lock_irqsave(&pool->lock, irq_flags);
+ pool->npages_in_use -= count;
+ if (is_cached) {
+ pool->nfrees += count;
+ } else {
+ pool->npages_free += count;
+ list_splice(&ttm_dma->pages_list, &pool->free_list);
+ npages = count;
+ if (pool->npages_free > _manager->options.max_size) {
+ npages = pool->npages_free - _manager->options.max_size;
+ /* free at least NUM_PAGES_TO_ALLOC pages
+ * to reduce calls to set_memory_wb */
+ if (npages < NUM_PAGES_TO_ALLOC)
+ npages = NUM_PAGES_TO_ALLOC;
+ }
+ }
+ spin_unlock_irqrestore(&pool->lock, irq_flags);
+
+ if (is_cached) {
+ list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list, page_list) {
+ ttm_mem_global_free_page(ttm->glob->mem_glob,
+ d_page->p);
+ ttm_dma_page_put(pool, d_page);
+ }
+ } else {
+ for (i = 0; i < count; i++) {
+ ttm_mem_global_free_page(ttm->glob->mem_glob,
+ ttm->pages[i]);
+ }
+ }
+
+ INIT_LIST_HEAD(&ttm_dma->pages_list);
+ for (i = 0; i < ttm->num_pages; i++) {
+ ttm->pages[i] = NULL;
+ ttm_dma->dma_address[i] = 0;
+ }
+
+ /* shrink the pool if necessary (only on !is_cached pools) */
+ if (npages)
+ ttm_dma_page_pool_free(pool, npages);
+ ttm->state = tt_unpopulated;
+}
+EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
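+
+/*
+ * Shrink arithmetic for the !is_cached path above: with
+ * npages_free == 1500 and options.max_size == 1000 the overshoot is 500
+ * pages; since that is below NUM_PAGES_TO_ALLOC (512) it is rounded up
+ * to 512 so that each write-back transition batch stays worthwhile.
+ */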
+
+/**
+ * Callback for the mm to request that the pool reduce the number of pages
+ * held.
+ */
+static int ttm_dma_pool_mm_shrink(struct shrinker *shrink,
+ struct shrink_control *sc)
+{
+ static atomic_t start_pool = ATOMIC_INIT(0);
+ unsigned idx = 0;
+ unsigned pool_offset = atomic_add_return(1, &start_pool);
+ unsigned shrink_pages = sc->nr_to_scan;
+ struct device_pools *p;
+
+ if (list_empty(&_manager->pools))
+ return 0;
+
+ mutex_lock(&_manager->lock);
+ pool_offset = pool_offset % _manager->npools;
+ list_for_each_entry(p, &_manager->pools, pools) {
+ unsigned nr_free;
+
+ if (!p->dev)
+ continue;
+ if (shrink_pages == 0)
+ break;
+ /* Do it in round-robin fashion. */
+ if (++idx < pool_offset)
+ continue;
+ nr_free = shrink_pages;
+ shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free);
+ pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
+ p->pool->dev_name, p->pool->name, current->pid,
+ nr_free, shrink_pages);
+ }
+ mutex_unlock(&_manager->lock);
+ /* return estimated number of unused pages in pool */
+ return ttm_dma_pool_get_num_unused_pages();
+}
+
+static void ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
+{
+ manager->mm_shrink.shrink = &ttm_dma_pool_mm_shrink;
+ manager->mm_shrink.seeks = 1;
+ register_shrinker(&manager->mm_shrink);
+}
+
+static void ttm_dma_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
+{
+ unregister_shrinker(&manager->mm_shrink);
+}
+
+int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
+{
+ int ret = -ENOMEM;
+
+ WARN_ON(_manager);
+
+ pr_info("Initializing DMA pool allocator\n");
+
+ _manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
+ if (!_manager)
+ goto err;
+
+ mutex_init(&_manager->lock);
+ INIT_LIST_HEAD(&_manager->pools);
+
+ _manager->options.max_size = max_pages;
+ _manager->options.small = SMALL_ALLOCATION;
+ _manager->options.alloc_size = NUM_PAGES_TO_ALLOC;
+
+ /* This takes care of auto-freeing the _manager */
+ ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
+ &glob->kobj, "dma_pool");
+ if (unlikely(ret != 0)) {
+ kobject_put(&_manager->kobj);
+ goto err;
+ }
+ ttm_dma_pool_mm_shrink_init(_manager);
+ return 0;
+err:
+ return ret;
+}
+
+void ttm_dma_page_alloc_fini(void)
+{
+ struct device_pools *p, *t;
+
+ pr_info("Finalizing DMA pool allocator\n");
+ ttm_dma_pool_mm_shrink_fini(_manager);
+
+ list_for_each_entry_safe_reverse(p, t, &_manager->pools, pools) {
+ dev_dbg(p->dev, "(%s:%d) Freeing.\n", p->pool->name,
+ current->pid);
+ WARN_ON(devres_destroy(p->dev, ttm_dma_pool_release,
+ ttm_dma_pool_match, p->pool));
+ ttm_dma_free_pool(p->dev, p->pool->type);
+ }
+ kobject_put(&_manager->kobj);
+ _manager = NULL;
+}
+
+int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
+{
+ struct device_pools *p;
+ struct dma_pool *pool = NULL;
+ char *h[] = {"pool", "refills", "pages freed", "inuse", "available",
+ "name", "virt", "busaddr"};
+
+ if (!_manager) {
+ seq_printf(m, "No pool allocator running.\n");
+ return 0;
+ }
+ seq_printf(m, "%13s %12s %13s %8s %8s %8s\n",
+ h[0], h[1], h[2], h[3], h[4], h[5]);
+ mutex_lock(&_manager->lock);
+ list_for_each_entry(p, &_manager->pools, pools) {
+ struct device *dev = p->dev;
+ if (!dev)
+ continue;
+ pool = p->pool;
+ seq_printf(m, "%13s %12ld %13ld %8d %8d %8s\n",
+ pool->name, pool->nrefills,
+ pool->nfrees, pool->npages_in_use,
+ pool->npages_free,
+ pool->dev_name);
+ }
+ mutex_unlock(&_manager->lock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ttm_dma_page_alloc_debugfs);
diff --git a/sys/dev/drm2/ttm/ttm_placement.h b/sys/dev/drm2/ttm/ttm_placement.h
new file mode 100644
index 0000000..20e1b4c
--- /dev/null
+++ b/sys/dev/drm2/ttm/ttm_placement.h
@@ -0,0 +1,93 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+/* $FreeBSD$ */
+
+#ifndef _TTM_PLACEMENT_H_
+#define _TTM_PLACEMENT_H_
+/*
+ * Memory regions for data placement.
+ */
+
+#define TTM_PL_SYSTEM 0
+#define TTM_PL_TT 1
+#define TTM_PL_VRAM 2
+#define TTM_PL_PRIV0 3
+#define TTM_PL_PRIV1 4
+#define TTM_PL_PRIV2 5
+#define TTM_PL_PRIV3 6
+#define TTM_PL_PRIV4 7
+#define TTM_PL_PRIV5 8
+#define TTM_PL_SWAPPED 15
+
+#define TTM_PL_FLAG_SYSTEM (1 << TTM_PL_SYSTEM)
+#define TTM_PL_FLAG_TT (1 << TTM_PL_TT)
+#define TTM_PL_FLAG_VRAM (1 << TTM_PL_VRAM)
+#define TTM_PL_FLAG_PRIV0 (1 << TTM_PL_PRIV0)
+#define TTM_PL_FLAG_PRIV1 (1 << TTM_PL_PRIV1)
+#define TTM_PL_FLAG_PRIV2 (1 << TTM_PL_PRIV2)
+#define TTM_PL_FLAG_PRIV3 (1 << TTM_PL_PRIV3)
+#define TTM_PL_FLAG_PRIV4 (1 << TTM_PL_PRIV4)
+#define TTM_PL_FLAG_PRIV5 (1 << TTM_PL_PRIV5)
+#define TTM_PL_FLAG_SWAPPED (1 << TTM_PL_SWAPPED)
+#define TTM_PL_MASK_MEM 0x0000FFFF
+
+/*
+ * Other flags that affect data placement.
+ * TTM_PL_FLAG_CACHED indicates cache-coherent mappings
+ * if available.
+ * TTM_PL_FLAG_SHARED means that another application may
+ * reference the buffer.
+ * TTM_PL_FLAG_NO_EVICT means that the buffer may never
+ * be evicted to make room for other buffers.
+ */
+
+#define TTM_PL_FLAG_CACHED (1 << 16)
+#define TTM_PL_FLAG_UNCACHED (1 << 17)
+#define TTM_PL_FLAG_WC (1 << 18)
+#define TTM_PL_FLAG_SHARED (1 << 20)
+#define TTM_PL_FLAG_NO_EVICT (1 << 21)
+
+#define TTM_PL_MASK_CACHING (TTM_PL_FLAG_CACHED | \
+ TTM_PL_FLAG_UNCACHED | \
+ TTM_PL_FLAG_WC)
+
+#define TTM_PL_MASK_MEMTYPE (TTM_PL_MASK_MEM | TTM_PL_MASK_CACHING)
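+
+/*
+ * For example, a buffer object that must live in VRAM with a
+ * write-combined CPU mapping and may never be evicted would be placed
+ * with (TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC | TTM_PL_FLAG_NO_EVICT).
+ */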
+
+/*
+ * Access flags to be used for CPU and GPU mappings.
+ * The idea is that the TTM synchronization mechanism will
+ * allow concurrent READ access and exclusive write access.
+ * Currently, GPU and CPU accesses are exclusive.
+ */
+
+#define TTM_ACCESS_READ (1 << 0)
+#define TTM_ACCESS_WRITE (1 << 1)
+
+#endif
diff --git a/sys/dev/drm2/ttm/ttm_tt.c b/sys/dev/drm2/ttm/ttm_tt.c
new file mode 100644
index 0000000..82547f1
--- /dev/null
+++ b/sys/dev/drm2/ttm/ttm_tt.c
@@ -0,0 +1,370 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+/*
+ * Copyright (c) 2013 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * Portions of this software were developed by Konstantin Belousov
+ * <kib@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/ttm/ttm_module.h>
+#include <dev/drm2/ttm/ttm_bo_driver.h>
+#include <dev/drm2/ttm/ttm_placement.h>
+#include <dev/drm2/ttm/ttm_page_alloc.h>
+
+MALLOC_DEFINE(M_TTM_PD, "ttm_pd", "TTM Page Directories");
+
+/**
+ * Allocates storage for pointers to the pages that back the ttm.
+ */
+static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
+{
+ ttm->pages = malloc(ttm->num_pages * sizeof(void *),
+ M_TTM_PD, M_WAITOK | M_ZERO);
+}
+
+static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
+{
+ ttm->ttm.pages = malloc(ttm->ttm.num_pages * sizeof(void *),
+ M_TTM_PD, M_WAITOK | M_ZERO);
+ ttm->dma_address = malloc(ttm->ttm.num_pages *
+ sizeof(*ttm->dma_address), M_TTM_PD, M_WAITOK);
+}
+
+#if defined(__i386__) || defined(__amd64__)
+static inline int ttm_tt_set_page_caching(vm_page_t p,
+ enum ttm_caching_state c_old,
+ enum ttm_caching_state c_new)
+{
+
+ /* XXXKIB our VM does not need this. */
+#if 0
+ if (c_old != tt_cached) {
+ /* p isn't in the default caching state, set it to
+ * writeback first to free its current memtype. */
+ pmap_page_set_memattr(p, VM_MEMATTR_WRITE_BACK);
+ }
+#endif
+
+ if (c_new == tt_wc)
+ pmap_page_set_memattr(p, VM_MEMATTR_WRITE_COMBINING);
+ else if (c_new == tt_uncached)
+ pmap_page_set_memattr(p, VM_MEMATTR_UNCACHEABLE);
+
+ return (0);
+}
+#else
+static inline int ttm_tt_set_page_caching(vm_page_t p,
+ enum ttm_caching_state c_old,
+ enum ttm_caching_state c_new)
+{
+ return 0;
+}
+#endif
+
+/*
+ * Change the caching policy for the linear kernel map
+ * for a range of pages in a ttm.
+ */
+
+static int ttm_tt_set_caching(struct ttm_tt *ttm,
+ enum ttm_caching_state c_state)
+{
+ int i, j;
+ vm_page_t cur_page;
+ int ret;
+
+ if (ttm->caching_state == c_state)
+ return 0;
+
+ if (ttm->state == tt_unpopulated) {
+ /* Change caching but don't populate */
+ ttm->caching_state = c_state;
+ return 0;
+ }
+
+ if (ttm->caching_state == tt_cached)
+ drm_clflush_pages(ttm->pages, ttm->num_pages);
+
+ for (i = 0; i < ttm->num_pages; ++i) {
+ cur_page = ttm->pages[i];
+ if (likely(cur_page != NULL)) {
+ ret = ttm_tt_set_page_caching(cur_page,
+ ttm->caching_state,
+ c_state);
+ if (unlikely(ret != 0))
+ goto out_err;
+ }
+ }
+
+ ttm->caching_state = c_state;
+
+ return 0;
+
+out_err:
+ for (j = 0; j < i; ++j) {
+ cur_page = ttm->pages[j];
+ if (cur_page != NULL) {
+ (void)ttm_tt_set_page_caching(cur_page, c_state,
+ ttm->caching_state);
+ }
+ }
+
+ return ret;
+}
+
+int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
+{
+ enum ttm_caching_state state;
+
+ if (placement & TTM_PL_FLAG_WC)
+ state = tt_wc;
+ else if (placement & TTM_PL_FLAG_UNCACHED)
+ state = tt_uncached;
+ else
+ state = tt_cached;
+
+ return ttm_tt_set_caching(ttm, state);
+}
+
+void ttm_tt_destroy(struct ttm_tt *ttm)
+{
+ if (unlikely(ttm == NULL))
+ return;
+
+ if (ttm->state == tt_bound) {
+ ttm_tt_unbind(ttm);
+ }
+
+ if (likely(ttm->pages != NULL)) {
+ ttm->bdev->driver->ttm_tt_unpopulate(ttm);
+ }
+
+ if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
+ ttm->swap_storage)
+ vm_object_deallocate(ttm->swap_storage);
+
+ ttm->swap_storage = NULL;
+ ttm->func->destroy(ttm);
+}
+
+int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
+ unsigned long size, uint32_t page_flags,
+ vm_page_t dummy_read_page)
+{
+ ttm->bdev = bdev;
+ ttm->glob = bdev->glob;
+ ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ ttm->caching_state = tt_cached;
+ ttm->page_flags = page_flags;
+ ttm->dummy_read_page = dummy_read_page;
+ ttm->state = tt_unpopulated;
+ ttm->swap_storage = NULL;
+
+ ttm_tt_alloc_page_directory(ttm);
+ if (!ttm->pages) {
+ ttm_tt_destroy(ttm);
+ printf("Failed allocating page table\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+void ttm_tt_fini(struct ttm_tt *ttm)
+{
+ free(ttm->pages, M_TTM_PD);
+ ttm->pages = NULL;
+}
+
+int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
+ unsigned long size, uint32_t page_flags,
+ vm_page_t dummy_read_page)
+{
+ struct ttm_tt *ttm = &ttm_dma->ttm;
+
+ ttm->bdev = bdev;
+ ttm->glob = bdev->glob;
+ ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ ttm->caching_state = tt_cached;
+ ttm->page_flags = page_flags;
+ ttm->dummy_read_page = dummy_read_page;
+ ttm->state = tt_unpopulated;
+ ttm->swap_storage = NULL;
+
+ INIT_LIST_HEAD(&ttm_dma->pages_list);
+ ttm_dma_tt_alloc_page_directory(ttm_dma);
+ if (!ttm->pages || !ttm_dma->dma_address) {
+ ttm_tt_destroy(ttm);
+ printf("Failed allocating page table\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
+{
+ struct ttm_tt *ttm = &ttm_dma->ttm;
+
+ free(ttm->pages, M_TTM_PD);
+ ttm->pages = NULL;
+ free(ttm_dma->dma_address, M_TTM_PD);
+ ttm_dma->dma_address = NULL;
+}
+
+void ttm_tt_unbind(struct ttm_tt *ttm)
+{
+ int ret;
+
+ if (ttm->state == tt_bound) {
+ ret = ttm->func->unbind(ttm);
+ MPASS(ret == 0);
+ ttm->state = tt_unbound;
+ }
+}
+
+int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
+{
+ int ret = 0;
+
+ if (!ttm)
+ return -EINVAL;
+
+ if (ttm->state == tt_bound)
+ return 0;
+
+ ret = ttm->bdev->driver->ttm_tt_populate(ttm);
+ if (ret)
+ return ret;
+
+ ret = ttm->func->bind(ttm, bo_mem);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ttm->state = tt_bound;
+
+ return 0;
+}
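+
+/*
+ * The resulting state machine: a ttm starts out tt_unpopulated;
+ * populating it (e.g. via ttm_dma_populate()) makes it tt_unbound; a
+ * successful func->bind() in ttm_tt_bind() makes it tt_bound; and
+ * ttm_tt_unbind() returns it to tt_unbound.
+ */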
+
+int ttm_tt_swapin(struct ttm_tt *ttm)
+{
+ vm_object_t obj;
+ vm_page_t from_page, to_page;
+ int i, ret, rv;
+
+ obj = ttm->swap_storage;
+
+ VM_OBJECT_LOCK(obj);
+ vm_object_pip_add(obj, 1);
+ for (i = 0; i < ttm->num_pages; ++i) {
+ from_page = vm_page_grab(obj, i, VM_ALLOC_RETRY);
+ if (from_page->valid != VM_PAGE_BITS_ALL) {
+ if (vm_pager_has_page(obj, i, NULL, NULL)) {
+ rv = vm_pager_get_pages(obj, &from_page, 1, 0);
+ if (rv != VM_PAGER_OK) {
+ vm_page_lock(from_page);
+ vm_page_free(from_page);
+ vm_page_unlock(from_page);
+ ret = -EIO;
+ goto err_ret;
+ }
+ } else
+ vm_page_zero_invalid(from_page, TRUE);
+ }
+ to_page = ttm->pages[i];
+ if (unlikely(to_page == NULL)) {
+ vm_page_wakeup(from_page);
+ ret = -ENOMEM;
+ goto err_ret;
+ }
+ pmap_copy_page(from_page, to_page);
+ vm_page_wakeup(from_page);
+ }
+ vm_object_pip_wakeup(obj);
+ VM_OBJECT_UNLOCK(obj);
+
+ if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
+ vm_object_deallocate(obj);
+ ttm->swap_storage = NULL;
+ ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
+ return (0);
+
+err_ret:
+ vm_object_pip_wakeup(obj);
+ VM_OBJECT_UNLOCK(obj);
+ return (ret);
+}
+
+int ttm_tt_swapout(struct ttm_tt *ttm, vm_object_t persistent_swap_storage)
+{
+ vm_object_t obj;
+ vm_page_t from_page, to_page;
+ int i;
+
+ MPASS(ttm->state == tt_unbound || ttm->state == tt_unpopulated);
+ MPASS(ttm->caching_state == tt_cached);
+
+ if (persistent_swap_storage == NULL) {
+ obj = vm_pager_allocate(OBJT_SWAP, NULL,
+ IDX_TO_OFF(ttm->num_pages), VM_PROT_DEFAULT, 0,
+ curthread->td_ucred);
+ if (obj == NULL) {
+ printf("[TTM] Failed allocating swap storage\n");
+ return (-ENOMEM);
+ }
+ } else
+ obj = persistent_swap_storage;
+
+ VM_OBJECT_LOCK(obj);
+ vm_object_pip_add(obj, 1);
+ for (i = 0; i < ttm->num_pages; ++i) {
+ from_page = ttm->pages[i];
+ if (unlikely(from_page == NULL))
+ continue;
+ to_page = vm_page_grab(obj, i, VM_ALLOC_RETRY);
+ pmap_copy_page(from_page, to_page);
+ vm_page_dirty(to_page);
+ to_page->valid = VM_PAGE_BITS_ALL;
+ vm_page_wakeup(to_page);
+ }
+ vm_object_pip_wakeup(obj);
+ VM_OBJECT_UNLOCK(obj);
+
+ ttm->bdev->driver->ttm_tt_unpopulate(ttm);
+ ttm->swap_storage = obj;
+ ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
+ if (persistent_swap_storage != NULL)
+ ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;
+ return (0);
+}
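+
+/*
+ * Swap round-trip sketch: ttm_tt_swapout() copies every resident page
+ * into a (possibly freshly allocated) OBJT_SWAP VM object, unpopulates
+ * the ttm and flags it TTM_PAGE_FLAG_SWAPPED; a later ttm_tt_swapin()
+ * pages the data back in from that object into the newly populated
+ * pages and clears the flag.
+ */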
diff --git a/sys/dev/hwpmc/hwpmc_soft.c b/sys/dev/hwpmc/hwpmc_soft.c
index 48e297e..56af775 100644
--- a/sys/dev/hwpmc/hwpmc_soft.c
+++ b/sys/dev/hwpmc/hwpmc_soft.c
@@ -45,8 +45,6 @@ __FBSDID("$FreeBSD$");
#define SOFT_CAPS (PMC_CAP_READ | PMC_CAP_WRITE | PMC_CAP_INTERRUPT | \
PMC_CAP_USER | PMC_CAP_SYSTEM)
-PMC_SOFT_DECLARE( , , clock, prof);
-
struct soft_descr {
struct pmc_descr pm_descr; /* "base class" */
};
@@ -126,9 +124,10 @@ soft_allocate_pmc(int cpu, int ri, struct pmc *pm,
if (ps == NULL)
return (EINVAL);
pmc_soft_ev_release(ps);
+ /* Module unload is protected by pmc SX lock. */
+ if (ps->ps_alloc != NULL)
+ ps->ps_alloc();
- if (ev == pmc___clock_prof.ps_ev.pm_ev_code)
- cpu_startprofclock();
return (0);
}
@@ -315,6 +314,8 @@ static int
soft_release_pmc(int cpu, int ri, struct pmc *pmc)
{
struct pmc_hw *phw;
+ enum pmc_event ev;
+ struct pmc_soft *ps;
(void) pmc;
@@ -328,8 +329,16 @@ soft_release_pmc(int cpu, int ri, struct pmc *pmc)
KASSERT(phw->phw_pmc == NULL,
("[soft,%d] PHW pmc %p non-NULL", __LINE__, phw->phw_pmc));
- if (pmc->pm_event == pmc___clock_prof.ps_ev.pm_ev_code)
- cpu_stopprofclock();
+ ev = pmc->pm_event;
+
+ /* Check if event is registered. */
+ ps = pmc_soft_ev_acquire(ev);
+ KASSERT(ps != NULL,
+ ("[soft,%d] unregistered event %d", __LINE__, ev));
+ pmc_soft_ev_release(ps);
+ /* Module unload is protected by pmc SX lock. */
+ if (ps->ps_release != NULL)
+ ps->ps_release();
return (0);
}
diff --git a/sys/dev/ixgbe/LICENSE b/sys/dev/ixgbe/LICENSE
index 0d4f1db..d446282 100644
--- a/sys/dev/ixgbe/LICENSE
+++ b/sys/dev/ixgbe/LICENSE
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2011, Intel Corporation
+ Copyright (c) 2001-2013, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/ixgbe/ixgbe.c b/sys/dev/ixgbe/ixgbe.c
index 0b77437..9977803 100644
--- a/sys/dev/ixgbe/ixgbe.c
+++ b/sys/dev/ixgbe/ixgbe.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2012, Intel Corporation
+ Copyright (c) 2001-2013, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -47,7 +47,7 @@ int ixgbe_display_debug_stats = 0;
/*********************************************************************
* Driver version
*********************************************************************/
-char ixgbe_driver_version[] = "2.5.0";
+char ixgbe_driver_version[] = "2.5.7 - HEAD";
/*********************************************************************
* PCI Device ID Table
@@ -83,7 +83,7 @@ static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
+ {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
/* required last entry */
{0, 0, 0, 0, 0}
@@ -216,7 +216,6 @@ static device_method_t ixgbe_methods[] = {
DEVMETHOD(device_attach, ixgbe_attach),
DEVMETHOD(device_detach, ixgbe_detach),
DEVMETHOD(device_shutdown, ixgbe_shutdown),
-
DEVMETHOD_END
};
@@ -596,6 +595,9 @@ ixgbe_attach(device_t dev)
"PCIE, or x4 PCIE 2 slot is required.\n");
}
+ /* Set an initial default flow control value */
+ adapter->fc = ixgbe_fc_full;
+
/* let hardware know driver is loaded */
ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
@@ -652,7 +654,7 @@ ixgbe_detach(device_t dev)
for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
if (que->tq) {
-#ifdef IXGBE_LEGACY_TX
+#ifndef IXGBE_LEGACY_TX
taskqueue_drain(que->tq, &txr->txq_task);
#endif
taskqueue_drain(que->tq, &que->que_task);
@@ -1310,7 +1312,7 @@ ixgbe_init_locked(struct adapter *adapter)
tmp = IXGBE_LOW_DV(frame);
hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
- adapter->fc = hw->fc.requested_mode = ixgbe_fc_full;
+ hw->fc.requested_mode = adapter->fc;
hw->fc.pause_time = IXGBE_FC_PAUSE;
hw->fc.send_xon = TRUE;
}
@@ -1680,7 +1682,7 @@ ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
break;
case IXGBE_LINK_SPEED_1GB_FULL:
- ifmr->ifm_active |= adapter->optics | IFM_FDX;
+ ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
break;
case IXGBE_LINK_SPEED_10GB_FULL:
ifmr->ifm_active |= adapter->optics | IFM_FDX;
@@ -1932,18 +1934,6 @@ ixgbe_set_multi(struct adapter *adapter)
bzero(mta, sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
MAX_NUM_MULTICAST_ADDRESSES);
- fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
- fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
- if (ifp->if_flags & IFF_PROMISC)
- fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
- else if (ifp->if_flags & IFF_ALLMULTI) {
- fctrl |= IXGBE_FCTRL_MPE;
- fctrl &= ~IXGBE_FCTRL_UPE;
- } else
- fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
-
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
-
#if __FreeBSD_version < 800000
IF_ADDR_LOCK(ifp);
#else
@@ -1952,6 +1942,8 @@ ixgbe_set_multi(struct adapter *adapter)
TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
if (ifma->ifma_addr->sa_family != AF_LINK)
continue;
+ if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
+ break;
bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
&mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
IXGBE_ETH_LENGTH_OF_ADDRESS);
@@ -1963,9 +1955,24 @@ ixgbe_set_multi(struct adapter *adapter)
if_maddr_runlock(ifp);
#endif
- update_ptr = mta;
- ixgbe_update_mc_addr_list(&adapter->hw,
- update_ptr, mcnt, ixgbe_mc_array_itr, TRUE);
+ fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
+ fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
+ if (ifp->if_flags & IFF_PROMISC)
+ fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
+ else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
+ ifp->if_flags & IFF_ALLMULTI) {
+ fctrl |= IXGBE_FCTRL_MPE;
+ fctrl &= ~IXGBE_FCTRL_UPE;
+ } else
+ fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
+
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
+
+ if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
+ update_ptr = mta;
+ ixgbe_update_mc_addr_list(&adapter->hw,
+ update_ptr, mcnt, ixgbe_mc_array_itr, TRUE);
+ }
return;
}
@@ -2172,7 +2179,7 @@ ixgbe_setup_optics(struct adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
int layer;
-
+
layer = ixgbe_get_supported_physical_layer(hw);
if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
@@ -2651,7 +2658,7 @@ ixgbe_config_link(struct adapter *adapter)
taskqueue_enqueue(adapter->tq, &adapter->mod_task);
} else {
if (hw->mac.ops.check_link)
- err = ixgbe_check_link(hw, &autoneg,
+ err = ixgbe_check_link(hw, &adapter->link_speed,
&adapter->link_up, FALSE);
if (err)
goto out;
@@ -2662,8 +2669,8 @@ ixgbe_config_link(struct adapter *adapter)
if (err)
goto out;
if (hw->mac.ops.setup_link)
- err = hw->mac.ops.setup_link(hw, autoneg,
- negotiate, adapter->link_up);
+ err = hw->mac.ops.setup_link(hw,
+ autoneg, adapter->link_up);
}
out:
return;
@@ -3713,6 +3720,8 @@ ixgbe_refresh_mbufs(struct rx_ring *rxr, int limit)
M_PKTHDR, rxr->mbuf_sz);
if (mp == NULL)
goto update;
+ if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
+ m_adj(mp, ETHER_ALIGN);
} else
mp = rxbuf->buf;
@@ -4408,7 +4417,6 @@ ixgbe_rxeof(struct ix_queue *que)
/* Make sure bad packets are discarded */
if (((staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) ||
(rxr->discard)) {
- ifp->if_ierrors++;
rxr->rx_discarded++;
if (eop)
rxr->discard = FALSE;
@@ -4734,14 +4742,25 @@ ixgbe_enable_intr(struct adapter *adapter)
/* Enable Fan Failure detection */
if (hw->device_id == IXGBE_DEV_ID_82598AT)
mask |= IXGBE_EIMS_GPI_SDP1;
- else {
- mask |= IXGBE_EIMS_ECC;
- mask |= IXGBE_EIMS_GPI_SDP0;
- mask |= IXGBE_EIMS_GPI_SDP1;
- mask |= IXGBE_EIMS_GPI_SDP2;
+
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82599EB:
+ mask |= IXGBE_EIMS_ECC;
+ mask |= IXGBE_EIMS_GPI_SDP0;
+ mask |= IXGBE_EIMS_GPI_SDP1;
+ mask |= IXGBE_EIMS_GPI_SDP2;
+#ifdef IXGBE_FDIR
+ mask |= IXGBE_EIMS_FLOW_DIR;
+#endif
+ break;
+ case ixgbe_mac_X540:
+ mask |= IXGBE_EIMS_ECC;
#ifdef IXGBE_FDIR
- mask |= IXGBE_EIMS_FLOW_DIR;
+ mask |= IXGBE_EIMS_FLOW_DIR;
#endif
+ /* falls through */
+ default:
+ break;
}
IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
@@ -4969,7 +4988,7 @@ ixgbe_handle_msf(void *context, int pending)
if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
if (hw->mac.ops.setup_link)
- hw->mac.ops.setup_link(hw, autoneg, negotiate, TRUE);
+ hw->mac.ops.setup_link(hw, autoneg, TRUE);
return;
}
@@ -5013,6 +5032,11 @@ ixgbe_update_stats_counters(struct adapter *adapter)
adapter->stats.errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
adapter->stats.mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
+ /*
+ ** Note: these are for the 8 possible traffic classes, which in the
+ ** current implementation are unused; therefore only class 0 should
+ ** read real data.
+ */
for (int i = 0; i < 8; i++) {
u32 mp;
mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
@@ -5022,13 +5046,20 @@ ixgbe_update_stats_counters(struct adapter *adapter)
adapter->stats.mpc[i] += mp;
/* Running comprehensive total for stats display */
total_missed_rx += adapter->stats.mpc[i];
- if (hw->mac.type == ixgbe_mac_82598EB)
+ if (hw->mac.type == ixgbe_mac_82598EB) {
adapter->stats.rnbc[i] +=
IXGBE_READ_REG(hw, IXGBE_RNBC(i));
+ adapter->stats.qbtc[i] +=
+ IXGBE_READ_REG(hw, IXGBE_QBTC(i));
+ adapter->stats.qbrc[i] +=
+ IXGBE_READ_REG(hw, IXGBE_QBRC(i));
+ adapter->stats.pxonrxc[i] +=
+ IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
+ } else
+ adapter->stats.pxonrxc[i] +=
+ IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
adapter->stats.pxontxc[i] +=
IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
- adapter->stats.pxonrxc[i] +=
- IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
adapter->stats.pxofftxc[i] +=
IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
adapter->stats.pxoffrxc[i] +=
@@ -5039,12 +5070,6 @@ ixgbe_update_stats_counters(struct adapter *adapter)
for (int i = 0; i < 16; i++) {
adapter->stats.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
adapter->stats.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
- adapter->stats.qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
- adapter->stats.qbrc[i] +=
- ((u64)IXGBE_READ_REG(hw, IXGBE_QBRC(i)) << 32);
- adapter->stats.qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
- adapter->stats.qbtc[i] +=
- ((u64)IXGBE_READ_REG(hw, IXGBE_QBTC(i)) << 32);
adapter->stats.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
}
adapter->stats.mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
@@ -5141,8 +5166,8 @@ ixgbe_update_stats_counters(struct adapter *adapter)
ifp->if_collisions = 0;
/* Rx Errors */
- ifp->if_ierrors = total_missed_rx + adapter->stats.crcerrs +
- adapter->stats.rlec;
+ ifp->if_iqdrops = total_missed_rx;
+ ifp->if_ierrors = adapter->stats.crcerrs + adapter->stats.rlec;
}
/** ixgbe_sysctl_tdh_handler - Handler function
@@ -5528,10 +5553,13 @@ ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS)
ixgbe_disable_rx_drop(adapter);
break;
case ixgbe_fc_none:
- default:
adapter->hw.fc.requested_mode = ixgbe_fc_none;
if (adapter->num_queues > 1)
ixgbe_enable_rx_drop(adapter);
+ break;
+ default:
+ adapter->fc = last;
+ return (EINVAL);
}
/* Don't autoneg if forcing a value */
adapter->hw.fc.disable_fc_autoneg = TRUE;
@@ -5560,7 +5588,7 @@ ixgbe_set_advertise(SYSCTL_HANDLER_ARGS)
last = adapter->advertise;
error = sysctl_handle_int(oidp, &adapter->advertise, 0, req);
- if ((error) || (adapter->advertise == -1))
+ if ((error) || (req->newptr == NULL))
return (error);
if (adapter->advertise == last) /* no change */
@@ -5568,11 +5596,11 @@ ixgbe_set_advertise(SYSCTL_HANDLER_ARGS)
if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
(hw->phy.multispeed_fiber)))
- return (error);
+ return (EINVAL);
if ((adapter->advertise == 2) && (hw->mac.type != ixgbe_mac_X540)) {
device_printf(dev, "Set Advertise: 100Mb on X540 only\n");
- return (error);
+ return (EINVAL);
}
if (adapter->advertise == 1)
@@ -5582,11 +5610,13 @@ ixgbe_set_advertise(SYSCTL_HANDLER_ARGS)
else if (adapter->advertise == 3)
speed = IXGBE_LINK_SPEED_1GB_FULL |
IXGBE_LINK_SPEED_10GB_FULL;
- else /* bogus value */
- return (error);
+ else { /* bogus value */
+ adapter->advertise = last;
+ return (EINVAL);
+ }
hw->mac.autotry_restart = TRUE;
- hw->mac.ops.setup_link(hw, speed, TRUE, TRUE);
+ hw->mac.ops.setup_link(hw, speed, TRUE);
return (error);
}
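+/*
+ * Illustrative usage (the sysctl name is assumed from the driver's
+ * dev.ix.<unit> tree): with the validation above,
+ *
+ *	sysctl dev.ix.0.advertise_speed=3
+ *
+ * requests 1Gb + 10Gb advertisement, while a bogus value now restores
+ * the previous setting and returns EINVAL instead of being silently
+ * accepted.
+ */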
diff --git a/sys/dev/ixgbe/ixgbe_82598.c b/sys/dev/ixgbe/ixgbe_82598.c
index 9d3164c..de3185a 100644
--- a/sys/dev/ixgbe/ixgbe_82598.c
+++ b/sys/dev/ixgbe/ixgbe_82598.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2012, Intel Corporation
+ Copyright (c) 2001-2013, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -49,18 +49,17 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
bool link_up_wait_to_complete);
static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
- bool autoneg,
bool autoneg_wait_to_complete);
static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
- bool autoneg,
bool autoneg_wait_to_complete);
static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw);
static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
u32 headroom, int strategy);
-
+static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *sff8472_data);
/**
* ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
* @hw: pointer to the HW structure
@@ -155,6 +154,7 @@ s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
/* SFP+ Module */
phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598;
+ phy->ops.read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_82598;
/* Link */
mac->ops.check_link = &ixgbe_check_mac_link_82598;
@@ -712,15 +712,15 @@ out:
* ixgbe_setup_mac_link_82598 - Set MAC link speed
* @hw: pointer to hardware structure
* @speed: new link speed
- * @autoneg: TRUE if autonegotiation enabled
* @autoneg_wait_to_complete: TRUE when waiting for completion is needed
*
* Set the link speed in the AUTOC register and restarts link.
**/
static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
- ixgbe_link_speed speed, bool autoneg,
+ ixgbe_link_speed speed,
bool autoneg_wait_to_complete)
{
+ bool autoneg = FALSE;
s32 status = IXGBE_SUCCESS;
ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
@@ -766,14 +766,12 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
* ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field
* @hw: pointer to hardware structure
* @speed: new link speed
- * @autoneg: TRUE if autonegotiation enabled
* @autoneg_wait_to_complete: TRUE if waiting is needed to complete
*
* Sets the link speed in the AUTOC register in the MAC and restarts link.
**/
static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
- bool autoneg,
bool autoneg_wait_to_complete)
{
s32 status;
@@ -781,7 +779,7 @@ static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
DEBUGFUNC("ixgbe_setup_copper_link_82598");
/* Setup the PHY according to input speed */
- status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
+ status = hw->phy.ops.setup_link_speed(hw, speed,
autoneg_wait_to_complete);
/* Set up MAC */
ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
@@ -1102,15 +1100,16 @@ s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
}
/**
- * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
+ * ixgbe_read_i2c_phy_82598 - Reads 8 bit word over I2C interface.
* @hw: pointer to hardware structure
- * @byte_offset: EEPROM byte offset to read
+ * @dev_addr: address to read from
+ * @byte_offset: byte offset to read from dev_addr
* @eeprom_data: value read
*
* Performs 8 byte read operation to SFP module's EEPROM over I2C interface.
**/
-s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
- u8 *eeprom_data)
+static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
+ u8 byte_offset, u8 *eeprom_data)
{
s32 status = IXGBE_SUCCESS;
u16 sfp_addr = 0;
@@ -1118,7 +1117,7 @@ s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
u16 sfp_stat = 0;
u32 i;
- DEBUGFUNC("ixgbe_read_i2c_eeprom_82598");
+ DEBUGFUNC("ixgbe_read_i2c_phy_82598");
if (hw->phy.type == ixgbe_phy_nl) {
/*
@@ -1126,7 +1125,7 @@ s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
* 0xC30D. These registers are used to talk to the SFP+
* module's EEPROM through the SDA/SCL (I2C) interface.
*/
- sfp_addr = (IXGBE_I2C_EEPROM_DEV_ADDR << 8) + byte_offset;
+ sfp_addr = (dev_addr << 8) + byte_offset;
sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
hw->phy.ops.write_reg(hw,
IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
@@ -1158,7 +1157,6 @@ s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
*eeprom_data = (u8)(sfp_data >> 8);
} else {
status = IXGBE_ERR_PHY;
- goto out;
}
out:
@@ -1166,6 +1164,36 @@ out:
}
/**
+ * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
+ * @hw: pointer to hardware structure
+ * @byte_offset: EEPROM byte offset to read
+ * @eeprom_data: value read
+ *
+ * Performs 8 byte read operation to SFP module's EEPROM over I2C interface.
+ **/
+s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *eeprom_data)
+{
+ return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR,
+ byte_offset, eeprom_data);
+}
+
+/**
+ * ixgbe_read_i2c_sff8472_82598 - Reads 8 bit word over I2C interface.
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset at address 0xA2
+ * @eeprom_data: value read
+ *
+ * Performs 8 bit read operation to SFP module's SFF-8472 data over I2C
+ **/
+static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *sff8472_data)
+{
+ return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR2,
+ byte_offset, sff8472_data);
+}
+
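Both wrappers funnel into ixgbe_read_i2c_phy_82598 and differ only in the I2C device address: 0xA0 for the EEPROM page, 0xA2 for the SFF-8472 diagnostic page. A minimal sketch of a caller inside this file; the offset 96 (temperature MSB in SFF-8472) and the surrounding context are illustrative assumptions, not part of this diff:

	/* Sketch only: read one SFF-8472 diagnostic byte on an 82598. */
	u8 temp_msb = 0;
	s32 ret;

	ret = ixgbe_read_i2c_sff8472_82598(hw, 96 /* assumed offset */, &temp_msb);
	if (ret != IXGBE_SUCCESS)
		DEBUGOUT("SFF-8472 diagnostic read failed\n");
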
+/**
* ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
* @hw: pointer to hardware structure
*
diff --git a/sys/dev/ixgbe/ixgbe_82599.c b/sys/dev/ixgbe/ixgbe_82599.c
index 083f7e1..592440b 100644
--- a/sys/dev/ixgbe/ixgbe_82599.c
+++ b/sys/dev/ixgbe/ixgbe_82599.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2012, Intel Corporation
+ Copyright (c) 2001-2013, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -40,7 +40,6 @@
static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
- bool autoneg,
bool autoneg_wait_to_complete);
static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
@@ -48,14 +47,37 @@ static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data);
+static bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
+{
+ u32 fwsm, manc, factps;
+
+ fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
+ if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT)
+ return FALSE;
+
+ manc = IXGBE_READ_REG(hw, IXGBE_MANC);
+ if (!(manc & IXGBE_MANC_RCV_TCO_EN))
+ return FALSE;
+
+ factps = IXGBE_READ_REG(hw, IXGBE_FACTPS);
+ if (factps & IXGBE_FACTPS_MNGCG)
+ return FALSE;
+
+ return TRUE;
+}
+
void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
DEBUGFUNC("ixgbe_init_mac_link_ops_82599");
- /* enable the laser control functions for SFP+ fiber */
- if (mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) {
+ /*
+	 * Enable the laser control functions for SFP+ fiber
+	 * when manageability (MNG) is not enabled
+ */
+ if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
+ !(ixgbe_mng_enabled(hw))) {
mac->ops.disable_tx_laser =
&ixgbe_disable_tx_laser_multispeed_fiber;
mac->ops.enable_tx_laser =
@@ -135,9 +157,8 @@ init_phy_ops_out:
s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
{
s32 ret_val = IXGBE_SUCCESS;
- u32 reg_anlp1 = 0;
- u32 i = 0;
u16 list_offset, data_offset, data_value;
+ bool got_lock = FALSE;
DEBUGFUNC("ixgbe_setup_sfp_modules_82599");
@@ -171,28 +192,39 @@ s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
/* Delay obtaining semaphore again to allow FW access */
msec_delay(hw->eeprom.semaphore_delay);
- /* Now restart DSP by setting Restart_AN and clearing LMS */
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw,
- IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) |
- IXGBE_AUTOC_AN_RESTART));
-
- /* Wait for AN to leave state 0 */
- for (i = 0; i < 10; i++) {
- msec_delay(4);
- reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1);
- if (reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)
- break;
+	/* Need SW/FW semaphore around AUTOC writes if LESM is on;
+	 * likewise reset_pipeline requires the lock as it also writes
+ * AUTOC.
+ */
+ if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+ ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ if (ret_val != IXGBE_SUCCESS) {
+ ret_val = IXGBE_ERR_SWFW_SYNC;
+ goto setup_sfp_out;
+ }
+
+ got_lock = TRUE;
+ }
+
+ /* Restart DSP and set SFI mode */
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((hw->mac.orig_autoc) |
+ IXGBE_AUTOC_LMS_10G_SERIAL));
+ hw->mac.cached_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ ret_val = ixgbe_reset_pipeline_82599(hw);
+
+ if (got_lock) {
+ hw->mac.ops.release_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ got_lock = FALSE;
}
- if (!(reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)) {
+
+ if (ret_val) {
DEBUGOUT("sfp module setup not complete\n");
ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
goto setup_sfp_out;
}
- /* Restart DSP by setting Restart_AN and return to SFI mode */
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw,
- IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL |
- IXGBE_AUTOC_AN_RESTART));
}
setup_sfp_out:
@@ -216,7 +248,7 @@ s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_init_ops_82599");
- ret_val = ixgbe_init_phy_ops_generic(hw);
+ ixgbe_init_phy_ops_generic(hw);
ret_val = ixgbe_init_ops_generic(hw);
/* PHY */
@@ -289,13 +321,13 @@ s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
* ixgbe_get_link_capabilities_82599 - Determines link capabilities
* @hw: pointer to hardware structure
* @speed: pointer to link speed
- * @negotiation: TRUE when autoneg or autotry is enabled
+ * @autoneg: TRUE when autoneg or autotry is enabled
*
* Determines the link capabilities by reading the AUTOC register.
**/
s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
- bool *negotiation)
+ bool *autoneg)
{
s32 status = IXGBE_SUCCESS;
u32 autoc = 0;
@@ -309,7 +341,7 @@ s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
*speed = IXGBE_LINK_SPEED_1GB_FULL;
- *negotiation = TRUE;
+ *autoneg = TRUE;
goto out;
}
@@ -326,22 +358,22 @@ s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
switch (autoc & IXGBE_AUTOC_LMS_MASK) {
case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
*speed = IXGBE_LINK_SPEED_1GB_FULL;
- *negotiation = FALSE;
+ *autoneg = FALSE;
break;
case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
*speed = IXGBE_LINK_SPEED_10GB_FULL;
- *negotiation = FALSE;
+ *autoneg = FALSE;
break;
case IXGBE_AUTOC_LMS_1G_AN:
*speed = IXGBE_LINK_SPEED_1GB_FULL;
- *negotiation = TRUE;
+ *autoneg = TRUE;
break;
case IXGBE_AUTOC_LMS_10G_SERIAL:
*speed = IXGBE_LINK_SPEED_10GB_FULL;
- *negotiation = FALSE;
+ *autoneg = FALSE;
break;
case IXGBE_AUTOC_LMS_KX4_KX_KR:
@@ -353,7 +385,7 @@ s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
*speed |= IXGBE_LINK_SPEED_10GB_FULL;
if (autoc & IXGBE_AUTOC_KX_SUPP)
*speed |= IXGBE_LINK_SPEED_1GB_FULL;
- *negotiation = TRUE;
+ *autoneg = TRUE;
break;
case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
@@ -364,12 +396,12 @@ s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
*speed |= IXGBE_LINK_SPEED_10GB_FULL;
if (autoc & IXGBE_AUTOC_KX_SUPP)
*speed |= IXGBE_LINK_SPEED_1GB_FULL;
- *negotiation = TRUE;
+ *autoneg = TRUE;
break;
case IXGBE_AUTOC_LMS_SGMII_1G_100M:
*speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL;
- *negotiation = FALSE;
+ *autoneg = FALSE;
break;
default:
@@ -381,7 +413,7 @@ s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
if (hw->phy.multispeed_fiber) {
*speed |= IXGBE_LINK_SPEED_10GB_FULL |
IXGBE_LINK_SPEED_1GB_FULL;
- *negotiation = TRUE;
+ *autoneg = TRUE;
}
out:
@@ -424,6 +456,7 @@ enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_82599_SFP_FCOE:
case IXGBE_DEV_ID_82599_SFP_EM:
case IXGBE_DEV_ID_82599_SFP_SF2:
+ case IXGBE_DEV_ID_82599_SFP_SF_QP:
case IXGBE_DEV_ID_82599EN_SFP:
media_type = ixgbe_media_type_fiber;
break;
@@ -433,6 +466,10 @@ enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_82599_T3_LOM:
media_type = ixgbe_media_type_copper;
break;
+ case IXGBE_DEV_ID_82599_BYPASS:
+ media_type = ixgbe_media_type_fiber_fixed;
+ hw->phy.multispeed_fiber = TRUE;
+ break;
default:
media_type = ixgbe_media_type_unknown;
break;
@@ -456,17 +493,32 @@ s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
u32 links_reg;
u32 i;
s32 status = IXGBE_SUCCESS;
+ bool got_lock = FALSE;
DEBUGFUNC("ixgbe_start_mac_link_82599");
+ /* reset_pipeline requires us to hold this lock as it writes to
+ * AUTOC.
+ */
+ if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+ status = hw->mac.ops.acquire_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ if (status != IXGBE_SUCCESS)
+ goto out;
+
+ got_lock = TRUE;
+ }
+
/* Restart link */
- autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
- autoc_reg |= IXGBE_AUTOC_AN_RESTART;
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+ ixgbe_reset_pipeline_82599(hw);
+
+ if (got_lock)
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
/* Only poll for autoneg to complete if specified to do so */
if (autoneg_wait_to_complete) {
+ autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
IXGBE_AUTOC_LMS_KX4_KX_KR ||
(autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
@@ -490,6 +542,7 @@ s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
 /* Add delay to filter out noise during initial link setup */
msec_delay(50);
+out:
return status;
}
@@ -555,16 +608,84 @@ void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
}
/**
+ * ixgbe_set_fiber_fixed_speed - Set module link speed for fixed fiber
+ * @hw: pointer to hardware structure
+ * @speed: link speed to set
+ *
+ * We set the module speed differently for fixed fiber.  For other
+ * multi-speed devices there is no error value to return, so if we
+ * detect an error we just log it and exit.
+ */
+static void ixgbe_set_fiber_fixed_speed(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed)
+{
+ s32 status;
+ u8 rs, eeprom_data;
+
+ switch (speed) {
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ /* one bit mask same as setting on */
+ rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
+ break;
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
+ break;
+ default:
+ DEBUGOUT("Invalid fixed module speed\n");
+ return;
+ }
+
+ /* Set RS0 */
+ status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
+ IXGBE_I2C_EEPROM_DEV_ADDR2,
+ &eeprom_data);
+ if (status) {
+ DEBUGOUT("Failed to read Rx Rate Select RS0\n");
+ goto out;
+ }
+
+	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
+
+ status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
+ IXGBE_I2C_EEPROM_DEV_ADDR2,
+ eeprom_data);
+ if (status) {
+ DEBUGOUT("Failed to write Rx Rate Select RS0\n");
+ goto out;
+ }
+
+ /* Set RS1 */
+ status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
+ IXGBE_I2C_EEPROM_DEV_ADDR2,
+ &eeprom_data);
+ if (status) {
+ DEBUGOUT("Failed to read Rx Rate Select RS1\n");
+ goto out;
+ }
+
+	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
+
+ status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
+ IXGBE_I2C_EEPROM_DEV_ADDR2,
+ eeprom_data);
+ if (status) {
+ DEBUGOUT("Failed to write Rx Rate Select RS1\n");
+ goto out;
+ }
+out:
+ return;
+}
+
+/**
* ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
* @hw: pointer to hardware structure
* @speed: new link speed
- * @autoneg: TRUE if autonegotiation enabled
* @autoneg_wait_to_complete: TRUE when waiting for completion is needed
*
 * Sets the link speed in the AUTOC register and restarts the link.
**/
s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
- ixgbe_link_speed speed, bool autoneg,
+ ixgbe_link_speed speed,
bool autoneg_wait_to_complete)
{
s32 status = IXGBE_SUCCESS;
@@ -573,13 +694,12 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
u32 speedcnt = 0;
u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
u32 i = 0;
- bool link_up = FALSE;
- bool negotiation;
+ bool autoneg, link_up = FALSE;
DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");
/* Mask off requested but non-supported speeds */
- status = ixgbe_get_link_capabilities(hw, &link_speed, &negotiation);
+ status = ixgbe_get_link_capabilities(hw, &link_speed, &autoneg);
if (status != IXGBE_SUCCESS)
return status;
@@ -602,16 +722,20 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
goto out;
/* Set the module link speed */
- esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
- IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
- IXGBE_WRITE_FLUSH(hw);
+ if (hw->phy.media_type == ixgbe_media_type_fiber_fixed) {
+ ixgbe_set_fiber_fixed_speed(hw,
+ IXGBE_LINK_SPEED_10GB_FULL);
+ } else {
+ esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+ IXGBE_WRITE_FLUSH(hw);
+ }
/* Allow module to change analog characteristics (1G->10G) */
msec_delay(40);
status = ixgbe_setup_mac_link_82599(hw,
IXGBE_LINK_SPEED_10GB_FULL,
- autoneg,
autoneg_wait_to_complete);
if (status != IXGBE_SUCCESS)
return status;
@@ -653,17 +777,21 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
goto out;
/* Set the module link speed */
- esdp_reg &= ~IXGBE_ESDP_SDP5;
- esdp_reg |= IXGBE_ESDP_SDP5_DIR;
- IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
- IXGBE_WRITE_FLUSH(hw);
+ if (hw->phy.media_type == ixgbe_media_type_fiber_fixed) {
+ ixgbe_set_fiber_fixed_speed(hw,
+ IXGBE_LINK_SPEED_1GB_FULL);
+ } else {
+ esdp_reg &= ~IXGBE_ESDP_SDP5;
+ esdp_reg |= IXGBE_ESDP_SDP5_DIR;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+ IXGBE_WRITE_FLUSH(hw);
+ }
/* Allow module to change analog characteristics (10G->1G) */
msec_delay(40);
status = ixgbe_setup_mac_link_82599(hw,
IXGBE_LINK_SPEED_1GB_FULL,
- autoneg,
autoneg_wait_to_complete);
if (status != IXGBE_SUCCESS)
return status;
@@ -690,7 +818,7 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
*/
if (speedcnt > 1)
status = ixgbe_setup_mac_link_multispeed_fiber(hw,
- highest_link_speed, autoneg, autoneg_wait_to_complete);
+ highest_link_speed, autoneg_wait_to_complete);
out:
/* Set autoneg_advertised value based on input link speed */
@@ -709,13 +837,12 @@ out:
* ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
* @hw: pointer to hardware structure
* @speed: new link speed
- * @autoneg: TRUE if autonegotiation enabled
* @autoneg_wait_to_complete: TRUE when waiting for completion is needed
*
* Implements the Intel SmartSpeed algorithm.
**/
s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
- ixgbe_link_speed speed, bool autoneg,
+ ixgbe_link_speed speed,
bool autoneg_wait_to_complete)
{
s32 status = IXGBE_SUCCESS;
@@ -748,7 +875,7 @@ s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
/* First, try to get link with full advertisement */
hw->phy.smart_speed_active = FALSE;
for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
- status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
+ status = ixgbe_setup_mac_link_82599(hw, speed,
autoneg_wait_to_complete);
if (status != IXGBE_SUCCESS)
goto out;
@@ -783,7 +910,7 @@ s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
/* Turn SmartSpeed on to disable KR support */
hw->phy.smart_speed_active = TRUE;
- status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
+ status = ixgbe_setup_mac_link_82599(hw, speed,
autoneg_wait_to_complete);
if (status != IXGBE_SUCCESS)
goto out;
@@ -808,7 +935,7 @@ s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
/* We didn't get link. Turn SmartSpeed back off. */
hw->phy.smart_speed_active = FALSE;
- status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
+ status = ixgbe_setup_mac_link_82599(hw, speed,
autoneg_wait_to_complete);
out:
@@ -822,32 +949,30 @@ out:
* ixgbe_setup_mac_link_82599 - Set MAC link speed
* @hw: pointer to hardware structure
* @speed: new link speed
- * @autoneg: TRUE if autonegotiation enabled
* @autoneg_wait_to_complete: TRUE when waiting for completion is needed
*
 * Sets the link speed in the AUTOC register and restarts the link.
**/
s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
- ixgbe_link_speed speed, bool autoneg,
+ ixgbe_link_speed speed,
bool autoneg_wait_to_complete)
{
+ bool autoneg = FALSE;
s32 status = IXGBE_SUCCESS;
- u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ u32 autoc, pma_pmd_1g, link_mode, start_autoc;
u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
- u32 start_autoc = autoc;
u32 orig_autoc = 0;
- u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
- u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
u32 links_reg;
u32 i;
ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
+ bool got_lock = FALSE;
DEBUGFUNC("ixgbe_setup_mac_link_82599");
/* Check to see if speed passed in is supported. */
status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
- if (status != IXGBE_SUCCESS)
+ if (status)
goto out;
speed &= link_capabilities;
@@ -859,9 +984,14 @@ s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
/* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
if (hw->mac.orig_link_settings_stored)
- orig_autoc = hw->mac.orig_autoc;
+ autoc = hw->mac.orig_autoc;
else
- orig_autoc = autoc;
+ autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+
+ orig_autoc = autoc;
+ start_autoc = hw->mac.cached_autoc;
+ link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
+ pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
@@ -900,9 +1030,31 @@ s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
}
if (autoc != start_autoc) {
+ /* Need SW/FW semaphore around AUTOC writes if LESM is on,
+ * likewise reset_pipeline requires us to hold this lock as
+ * it also writes to AUTOC.
+ */
+ if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+ status = hw->mac.ops.acquire_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ if (status != IXGBE_SUCCESS) {
+ status = IXGBE_ERR_SWFW_SYNC;
+ goto out;
+ }
+
+ got_lock = TRUE;
+ }
+
/* Restart link */
- autoc |= IXGBE_AUTOC_AN_RESTART;
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
+ hw->mac.cached_autoc = autoc;
+ ixgbe_reset_pipeline_82599(hw);
+
+ if (got_lock) {
+ hw->mac.ops.release_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ got_lock = FALSE;
+ }
/* Only poll for autoneg to complete if specified to do so */
if (autoneg_wait_to_complete) {
@@ -937,14 +1089,12 @@ out:
* ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
* @hw: pointer to hardware structure
* @speed: new link speed
- * @autoneg: TRUE if autonegotiation enabled
* @autoneg_wait_to_complete: TRUE if waiting is needed to complete
*
* Restarts link on PHY and MAC based on settings passed in.
**/
static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
- bool autoneg,
bool autoneg_wait_to_complete)
{
s32 status;
@@ -952,7 +1102,7 @@ static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
DEBUGFUNC("ixgbe_setup_copper_link_82599");
/* Setup the PHY according to input speed */
- status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
+ status = hw->phy.ops.setup_link_speed(hw, speed,
autoneg_wait_to_complete);
/* Set up MAC */
ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
@@ -1056,14 +1206,45 @@ mac_reset_top:
*/
autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
+
+ /* Enable link if disabled in NVM */
+ if (autoc2 & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
+ autoc2 &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
+ IXGBE_WRITE_FLUSH(hw);
+ }
+
if (hw->mac.orig_link_settings_stored == FALSE) {
hw->mac.orig_autoc = autoc;
hw->mac.orig_autoc2 = autoc2;
+ hw->mac.cached_autoc = autoc;
hw->mac.orig_link_settings_stored = TRUE;
} else {
- if (autoc != hw->mac.orig_autoc)
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc |
- IXGBE_AUTOC_AN_RESTART));
+ if (autoc != hw->mac.orig_autoc) {
+ /* Need SW/FW semaphore around AUTOC writes if LESM is
+ * on, likewise reset_pipeline requires us to hold
+ * this lock as it also writes to AUTOC.
+ */
+ bool got_lock = FALSE;
+ if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+ status = hw->mac.ops.acquire_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ if (status != IXGBE_SUCCESS) {
+ status = IXGBE_ERR_SWFW_SYNC;
+ goto reset_hw_out;
+ }
+
+ got_lock = TRUE;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
+ hw->mac.cached_autoc = hw->mac.orig_autoc;
+ ixgbe_reset_pipeline_82599(hw);
+
+ if (got_lock)
+ hw->mac.ops.release_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ }
if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
(hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
@@ -1168,7 +1349,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
IXGBE_FDIRCTRL_INIT_DONE)
break;
- usec_delay(10);
+ msec_delay(1);
}
if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
DEBUGOUT("Flow Director Signature poll time exceeded!\n");
@@ -2094,7 +2275,7 @@ s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
* Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
* if the FW version is not supported.
**/
-static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
+s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
{
s32 status = IXGBE_ERR_EEPROM_VERSION;
u16 fw_offset, fw_ptp_cfg_offset;
@@ -2243,4 +2424,55 @@ static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
return ret_val;
}
+/**
+ * ixgbe_reset_pipeline_82599 - perform pipeline reset
+ *
+ * @hw: pointer to hardware structure
+ *
+ * Resets the pipeline by asserting Restart_AN together with an LMS change to
+ * ensure a full pipeline reset
+ **/
+s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
+{
+ s32 ret_val;
+ u32 anlp1_reg = 0;
+ u32 i, autoc_reg, autoc2_reg;
+
+ /* Enable link if disabled in NVM */
+ autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
+ if (autoc2_reg & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
+ autoc2_reg &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
+ IXGBE_WRITE_FLUSH(hw);
+ }
+
+ autoc_reg = hw->mac.cached_autoc;
+ autoc_reg |= IXGBE_AUTOC_AN_RESTART;
+ /* Write AUTOC register with toggled LMS[2] bit and Restart_AN */
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg ^ IXGBE_AUTOC_LMS_1G_AN);
+ /* Wait for AN to leave state 0 */
+ for (i = 0; i < 10; i++) {
+ msec_delay(4);
+ anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
+ if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)
+ break;
+ }
+
+ if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) {
+ DEBUGOUT("auto negotiation not completed\n");
+ ret_val = IXGBE_ERR_RESET_FAILED;
+ goto reset_pipeline_out;
+ }
+
+ ret_val = IXGBE_SUCCESS;
+
+reset_pipeline_out:
+ /* Write AUTOC register with original LMS field and Restart_AN */
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return ret_val;
+}
+
+
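Every AUTOC write in this change now follows the same guard sequence: take the SW/FW semaphore when LESM firmware is active, write AUTOC, reset the pipeline, then release. A minimal sketch of that recurring pattern folded into one helper; the name ixgbe_write_autoc_locked is hypothetical and not part of this diff:

	/* Sketch of the lock-write-reset-release pattern used above. */
	static s32
	ixgbe_write_autoc_locked(struct ixgbe_hw *hw, u32 autoc)
	{
		s32 status = IXGBE_SUCCESS;
		bool got_lock = FALSE;

		if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
			status = hw->mac.ops.acquire_swfw_sync(hw,
			    IXGBE_GSSR_MAC_CSR_SM);
			if (status != IXGBE_SUCCESS)
				return IXGBE_ERR_SWFW_SYNC;
			got_lock = TRUE;
		}

		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
		hw->mac.cached_autoc = autoc;
		status = ixgbe_reset_pipeline_82599(hw);

		if (got_lock)
			hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);

		return status;
	}
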
diff --git a/sys/dev/ixgbe/ixgbe_82599.h b/sys/dev/ixgbe/ixgbe_82599.h
index dca39b7..a77d153 100644
--- a/sys/dev/ixgbe/ixgbe_82599.h
+++ b/sys/dev/ixgbe/ixgbe_82599.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2012, Intel Corporation
+ Copyright (c) 2001-2013, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -42,15 +42,15 @@ void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
- ixgbe_link_speed speed, bool autoneg,
+ ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
- ixgbe_link_speed speed, bool autoneg,
+ ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
bool autoneg_wait_to_complete);
s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, ixgbe_link_speed speed,
- bool autoneg, bool autoneg_wait_to_complete);
+ bool autoneg_wait_to_complete);
s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw);
void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw);
s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw);
@@ -61,5 +61,4 @@ s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw);
s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw);
u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw);
s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval);
-bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
#endif /* _IXGBE_82599_H_ */
diff --git a/sys/dev/ixgbe/ixgbe_api.c b/sys/dev/ixgbe/ixgbe_api.c
index faee9d3..61876fb 100644
--- a/sys/dev/ixgbe/ixgbe_api.c
+++ b/sys/dev/ixgbe/ixgbe_api.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2012, Intel Corporation
+ Copyright (c) 2001-2013, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -93,53 +93,53 @@ s32 ixgbe_set_mac_type(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_set_mac_type\n");
- if (hw->vendor_id == IXGBE_INTEL_VENDOR_ID) {
- switch (hw->device_id) {
- case IXGBE_DEV_ID_82598:
- case IXGBE_DEV_ID_82598_BX:
- case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
- case IXGBE_DEV_ID_82598AF_DUAL_PORT:
- case IXGBE_DEV_ID_82598AT:
- case IXGBE_DEV_ID_82598AT2:
- case IXGBE_DEV_ID_82598EB_CX4:
- case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
- case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
- case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
- case IXGBE_DEV_ID_82598EB_XF_LR:
- case IXGBE_DEV_ID_82598EB_SFP_LOM:
- hw->mac.type = ixgbe_mac_82598EB;
- break;
- case IXGBE_DEV_ID_82599_KX4:
- case IXGBE_DEV_ID_82599_KX4_MEZZ:
- case IXGBE_DEV_ID_82599_XAUI_LOM:
- case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
- case IXGBE_DEV_ID_82599_KR:
- case IXGBE_DEV_ID_82599_SFP:
- case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
- case IXGBE_DEV_ID_82599_SFP_FCOE:
- case IXGBE_DEV_ID_82599_SFP_EM:
- case IXGBE_DEV_ID_82599_SFP_SF2:
- case IXGBE_DEV_ID_82599EN_SFP:
- case IXGBE_DEV_ID_82599_CX4:
- case IXGBE_DEV_ID_82599_T3_LOM:
- hw->mac.type = ixgbe_mac_82599EB;
- break;
- case IXGBE_DEV_ID_82599_VF:
- hw->mac.type = ixgbe_mac_82599_vf;
- break;
- case IXGBE_DEV_ID_X540_VF:
- hw->mac.type = ixgbe_mac_X540_vf;
- break;
- case IXGBE_DEV_ID_X540T:
- case IXGBE_DEV_ID_X540T1:
- hw->mac.type = ixgbe_mac_X540;
- break;
- default:
- ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
- break;
- }
- } else {
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_82598:
+ case IXGBE_DEV_ID_82598_BX:
+ case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
+ case IXGBE_DEV_ID_82598AF_DUAL_PORT:
+ case IXGBE_DEV_ID_82598AT:
+ case IXGBE_DEV_ID_82598AT2:
+ case IXGBE_DEV_ID_82598EB_CX4:
+ case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
+ case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
+ case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
+ case IXGBE_DEV_ID_82598EB_XF_LR:
+ case IXGBE_DEV_ID_82598EB_SFP_LOM:
+ hw->mac.type = ixgbe_mac_82598EB;
+ break;
+ case IXGBE_DEV_ID_82599_KX4:
+ case IXGBE_DEV_ID_82599_KX4_MEZZ:
+ case IXGBE_DEV_ID_82599_XAUI_LOM:
+ case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
+ case IXGBE_DEV_ID_82599_KR:
+ case IXGBE_DEV_ID_82599_SFP:
+ case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
+ case IXGBE_DEV_ID_82599_SFP_FCOE:
+ case IXGBE_DEV_ID_82599_SFP_EM:
+ case IXGBE_DEV_ID_82599_SFP_SF2:
+ case IXGBE_DEV_ID_82599_SFP_SF_QP:
+ case IXGBE_DEV_ID_82599EN_SFP:
+ case IXGBE_DEV_ID_82599_CX4:
+ case IXGBE_DEV_ID_82599_BYPASS:
+ case IXGBE_DEV_ID_82599_T3_LOM:
+ hw->mac.type = ixgbe_mac_82599EB;
+ break;
+ case IXGBE_DEV_ID_82599_VF:
+ case IXGBE_DEV_ID_82599_VF_HV:
+ hw->mac.type = ixgbe_mac_82599_vf;
+ break;
+ case IXGBE_DEV_ID_X540_VF:
+ case IXGBE_DEV_ID_X540_VF_HV:
+ hw->mac.type = ixgbe_mac_X540_vf;
+ break;
+ case IXGBE_DEV_ID_X540T:
+ case IXGBE_DEV_ID_X540_BYPASS:
+ hw->mac.type = ixgbe_mac_X540;
+ break;
+ default:
ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
+ break;
}
DEBUGOUT2("ixgbe_set_mac_type found mac: %d, returns: %d\n",
@@ -507,16 +507,14 @@ s32 ixgbe_check_phy_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
* ixgbe_setup_phy_link_speed - Set auto advertise
* @hw: pointer to hardware structure
* @speed: new link speed
- * @autoneg: TRUE if autonegotiation enabled
*
* Sets the auto advertised capabilities
**/
s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed,
- bool autoneg,
bool autoneg_wait_to_complete)
{
return ixgbe_call_func(hw, hw->phy.ops.setup_link_speed, (hw, speed,
- autoneg, autoneg_wait_to_complete),
+ autoneg_wait_to_complete),
IXGBE_NOT_IMPLEMENTED);
}
@@ -576,17 +574,15 @@ void ixgbe_flap_tx_laser(struct ixgbe_hw *hw)
* ixgbe_setup_link - Set link speed
* @hw: pointer to hardware structure
* @speed: new link speed
- * @autoneg: TRUE if autonegotiation enabled
*
* Configures link settings. Restarts the link.
* Performs autonegotiation if needed.
**/
s32 ixgbe_setup_link(struct ixgbe_hw *hw, ixgbe_link_speed speed,
- bool autoneg,
bool autoneg_wait_to_complete)
{
return ixgbe_call_func(hw, hw->mac.ops.setup_link, (hw, speed,
- autoneg, autoneg_wait_to_complete),
+ autoneg_wait_to_complete),
IXGBE_NOT_IMPLEMENTED);
}
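With the autoneg argument gone, callers of the public API now pass only the advertised speed and whether to wait for negotiation to finish. A minimal sketch, assuming an initialized struct ixgbe_hw *hw:

	/* Sketch: advertise 10G full duplex and wait for autoneg to settle. */
	s32 ret = ixgbe_setup_link(hw, IXGBE_LINK_SPEED_10GB_FULL, TRUE);
	if (ret == IXGBE_NOT_IMPLEMENTED)
		DEBUGOUT("setup_link not supported on this MAC\n");
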
diff --git a/sys/dev/ixgbe/ixgbe_api.h b/sys/dev/ixgbe/ixgbe_api.h
index 3a9e0cf..507792f 100644
--- a/sys/dev/ixgbe/ixgbe_api.h
+++ b/sys/dev/ixgbe/ixgbe_api.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2012, Intel Corporation
+ Copyright (c) 2001-2013, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -72,13 +72,12 @@ s32 ixgbe_check_phy_link(struct ixgbe_hw *hw,
bool *link_up);
s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
- bool autoneg,
bool autoneg_wait_to_complete);
void ixgbe_disable_tx_laser(struct ixgbe_hw *hw);
void ixgbe_enable_tx_laser(struct ixgbe_hw *hw);
void ixgbe_flap_tx_laser(struct ixgbe_hw *hw);
s32 ixgbe_setup_link(struct ixgbe_hw *hw, ixgbe_link_speed speed,
- bool autoneg, bool autoneg_wait_to_complete);
+ bool autoneg_wait_to_complete);
s32 ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
bool *link_up, bool link_up_wait_to_complete);
s32 ixgbe_get_link_capabilities(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
@@ -159,6 +158,7 @@ void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
union ixgbe_atr_input *mask);
u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
union ixgbe_atr_hash_dword common);
+bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
s32 ixgbe_read_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
u8 *data);
s32 ixgbe_write_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
diff --git a/sys/dev/ixgbe/ixgbe_common.c b/sys/dev/ixgbe/ixgbe_common.c
index 51806d6..be8736f 100644
--- a/sys/dev/ixgbe/ixgbe_common.c
+++ b/sys/dev/ixgbe/ixgbe_common.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2012, Intel Corporation
+ Copyright (c) 2001-2013, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -147,16 +147,14 @@ s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
* function check the device id to see if the associated phy supports
* autoneg flow control.
**/
-static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
+s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
{
DEBUGFUNC("ixgbe_device_supports_autoneg_fc");
switch (hw->device_id) {
- case IXGBE_DEV_ID_X540T:
- case IXGBE_DEV_ID_X540T1:
- return IXGBE_SUCCESS;
case IXGBE_DEV_ID_82599_T3_LOM:
+ case IXGBE_DEV_ID_X540T:
return IXGBE_SUCCESS;
default:
return IXGBE_ERR_FC_NOT_SUPPORTED;
@@ -174,6 +172,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
s32 ret_val = IXGBE_SUCCESS;
u32 reg = 0, reg_bp = 0;
u16 reg_cu = 0;
+ bool got_lock = FALSE;
DEBUGFUNC("ixgbe_setup_fc");
@@ -200,6 +199,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
* we link at 10G, the 1G advertisement is harmless and vice versa.
*/
switch (hw->phy.media_type) {
+ case ixgbe_media_type_fiber_fixed:
case ixgbe_media_type_fiber:
case ixgbe_media_type_backplane:
reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
@@ -297,7 +297,28 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
*/
if (hw->phy.media_type == ixgbe_media_type_backplane) {
reg_bp |= IXGBE_AUTOC_AN_RESTART;
+ /* Need the SW/FW semaphore around AUTOC writes if 82599 and
+		 * LESM is on; likewise reset_pipeline requires the lock as
+ * it also writes AUTOC.
+ */
+ if ((hw->mac.type == ixgbe_mac_82599EB) &&
+ ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+ ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ if (ret_val != IXGBE_SUCCESS) {
+ ret_val = IXGBE_ERR_SWFW_SYNC;
+ goto out;
+ }
+ got_lock = TRUE;
+ }
+
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
+ if (hw->mac.type == ixgbe_mac_82599EB)
+ ixgbe_reset_pipeline_82599(hw);
+
+ if (got_lock)
+ hw->mac.ops.release_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
} else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
(ixgbe_device_supports_autoneg_fc(hw) == IXGBE_SUCCESS)) {
hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
@@ -680,6 +701,195 @@ s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num)
}
/**
+ * ixgbe_read_pba_raw
+ * @hw: pointer to the HW structure
+ * @eeprom_buf: optional pointer to EEPROM image
+ * @eeprom_buf_size: size of EEPROM image in words
+ * @max_pba_block_size: PBA block size limit
+ * @pba: pointer to output PBA structure
+ *
+ * Reads PBA from EEPROM image when eeprom_buf is not NULL.
+ * Reads PBA from physical EEPROM device when eeprom_buf is NULL.
+ *
+ **/
+s32 ixgbe_read_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
+ u32 eeprom_buf_size, u16 max_pba_block_size,
+ struct ixgbe_pba *pba)
+{
+ s32 ret_val;
+ u16 pba_block_size;
+
+ if (pba == NULL)
+ return IXGBE_ERR_PARAM;
+
+ if (eeprom_buf == NULL) {
+ ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
+ &pba->word[0]);
+ if (ret_val)
+ return ret_val;
+ } else {
+ if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
+ pba->word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
+ pba->word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
+ } else {
+ return IXGBE_ERR_PARAM;
+ }
+ }
+
+ if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
+ if (pba->pba_block == NULL)
+ return IXGBE_ERR_PARAM;
+
+ ret_val = ixgbe_get_pba_block_size(hw, eeprom_buf,
+ eeprom_buf_size,
+ &pba_block_size);
+ if (ret_val)
+ return ret_val;
+
+ if (pba_block_size > max_pba_block_size)
+ return IXGBE_ERR_PARAM;
+
+ if (eeprom_buf == NULL) {
+ ret_val = hw->eeprom.ops.read_buffer(hw, pba->word[1],
+ pba_block_size,
+ pba->pba_block);
+ if (ret_val)
+ return ret_val;
+ } else {
+ if (eeprom_buf_size > (u32)(pba->word[1] +
+ pba->pba_block[0])) {
+ memcpy(pba->pba_block,
+ &eeprom_buf[pba->word[1]],
+ pba_block_size * sizeof(u16));
+ } else {
+ return IXGBE_ERR_PARAM;
+ }
+ }
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_write_pba_raw
+ * @hw: pointer to the HW structure
+ * @eeprom_buf: optional pointer to EEPROM image
+ * @eeprom_buf_size: size of EEPROM image in words
+ * @pba: pointer to PBA structure
+ *
+ * Writes PBA to EEPROM image when eeprom_buf is not NULL.
+ * Writes PBA to physical EEPROM device when eeprom_buf is NULL.
+ *
+ **/
+s32 ixgbe_write_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
+ u32 eeprom_buf_size, struct ixgbe_pba *pba)
+{
+ s32 ret_val;
+
+ if (pba == NULL)
+ return IXGBE_ERR_PARAM;
+
+ if (eeprom_buf == NULL) {
+ ret_val = hw->eeprom.ops.write_buffer(hw, IXGBE_PBANUM0_PTR, 2,
+ &pba->word[0]);
+ if (ret_val)
+ return ret_val;
+ } else {
+ if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
+ eeprom_buf[IXGBE_PBANUM0_PTR] = pba->word[0];
+ eeprom_buf[IXGBE_PBANUM1_PTR] = pba->word[1];
+ } else {
+ return IXGBE_ERR_PARAM;
+ }
+ }
+
+ if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
+ if (pba->pba_block == NULL)
+ return IXGBE_ERR_PARAM;
+
+ if (eeprom_buf == NULL) {
+ ret_val = hw->eeprom.ops.write_buffer(hw, pba->word[1],
+ pba->pba_block[0],
+ pba->pba_block);
+ if (ret_val)
+ return ret_val;
+ } else {
+ if (eeprom_buf_size > (u32)(pba->word[1] +
+ pba->pba_block[0])) {
+ memcpy(&eeprom_buf[pba->word[1]],
+ pba->pba_block,
+ pba->pba_block[0] * sizeof(u16));
+ } else {
+ return IXGBE_ERR_PARAM;
+ }
+ }
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_get_pba_block_size
+ * @hw: pointer to the HW structure
+ * @eeprom_buf: optional pointer to EEPROM image
+ * @eeprom_buf_size: size of EEPROM image in words
+ * @pba_block_size: pointer to output variable for the block size in words
+ *
+ * Returns the size of the PBA block in words.  The function operates on the
+ * EEPROM image if the eeprom_buf pointer is not NULL; otherwise it accesses
+ * the physical EEPROM device.
+ *
+ **/
+s32 ixgbe_get_pba_block_size(struct ixgbe_hw *hw, u16 *eeprom_buf,
+ u32 eeprom_buf_size, u16 *pba_block_size)
+{
+ s32 ret_val;
+ u16 pba_word[2];
+ u16 length;
+
+ DEBUGFUNC("ixgbe_get_pba_block_size");
+
+ if (eeprom_buf == NULL) {
+ ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
+ &pba_word[0]);
+ if (ret_val)
+ return ret_val;
+ } else {
+ if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
+ pba_word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
+ pba_word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
+ } else {
+ return IXGBE_ERR_PARAM;
+ }
+ }
+
+ if (pba_word[0] == IXGBE_PBANUM_PTR_GUARD) {
+ if (eeprom_buf == NULL) {
+ ret_val = hw->eeprom.ops.read(hw, pba_word[1] + 0,
+ &length);
+ if (ret_val)
+ return ret_val;
+ } else {
+ if (eeprom_buf_size > pba_word[1])
+ length = eeprom_buf[pba_word[1] + 0];
+ else
+ return IXGBE_ERR_PARAM;
+ }
+
+ if (length == 0xFFFF || length == 0)
+ return IXGBE_ERR_PBA_SECTION;
+ } else {
+ /* PBA number in legacy format, there is no PBA Block. */
+ length = 0;
+ }
+
+ if (pba_block_size != NULL)
+ *pba_block_size = length;
+
+ return IXGBE_SUCCESS;
+}
+
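The three PBA helpers compose: query the block size first, then hand ixgbe_read_pba_raw a large-enough buffer. A usage sketch against the physical EEPROM (eeprom_buf == NULL); the 32-word cap and the surrounding context are assumptions:

	/* Sketch: read the PBA block from the physical EEPROM device. */
	u16 block[32];			/* assumed upper bound */
	struct ixgbe_pba pba;
	u16 block_size = 0;
	s32 ret;

	pba.pba_block = block;
	ret = ixgbe_get_pba_block_size(hw, NULL, 0, &block_size);
	if (ret == IXGBE_SUCCESS && block_size <= 32)
		ret = ixgbe_read_pba_raw(hw, NULL, 0, 32, &pba);
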
+/**
* ixgbe_get_mac_addr_generic - Generic get MAC address
* @hw: pointer to hardware structure
* @mac_addr: Adapter MAC address
@@ -1268,7 +1478,7 @@ s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
}
for (i = 0; i < words; i++) {
- eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) +
+ eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
IXGBE_EEPROM_RW_REG_START;
IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
@@ -2719,6 +2929,7 @@ void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
switch (hw->phy.media_type) {
/* Autoneg flow control on fiber adapters */
+ case ixgbe_media_type_fiber_fixed:
case ixgbe_media_type_fiber:
if (speed == IXGBE_LINK_SPEED_1GB_FULL)
ret_val = ixgbe_fc_autoneg_fiber(hw);
@@ -2965,6 +3176,7 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
bool link_up = 0;
u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+ s32 ret_val = IXGBE_SUCCESS;
DEBUGFUNC("ixgbe_blink_led_start_generic");
@@ -2975,10 +3187,29 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
if (!link_up) {
+ /* Need the SW/FW semaphore around AUTOC writes if 82599 and
+ * LESM is on.
+ */
+ bool got_lock = FALSE;
+ if ((hw->mac.type == ixgbe_mac_82599EB) &&
+ ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+ ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ if (ret_val != IXGBE_SUCCESS) {
+ ret_val = IXGBE_ERR_SWFW_SYNC;
+ goto out;
+ }
+ got_lock = TRUE;
+ }
+
autoc_reg |= IXGBE_AUTOC_AN_RESTART;
autoc_reg |= IXGBE_AUTOC_FLU;
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
IXGBE_WRITE_FLUSH(hw);
+
+ if (got_lock)
+ hw->mac.ops.release_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
msec_delay(10);
}
@@ -2987,7 +3218,8 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
IXGBE_WRITE_FLUSH(hw);
- return IXGBE_SUCCESS;
+out:
+ return ret_val;
}
/**
@@ -2999,21 +3231,43 @@ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
{
u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+ s32 ret_val = IXGBE_SUCCESS;
+ bool got_lock = FALSE;
DEBUGFUNC("ixgbe_blink_led_stop_generic");
+ /* Need the SW/FW semaphore around AUTOC writes if 82599 and
+ * LESM is on.
+ */
+ if ((hw->mac.type == ixgbe_mac_82599EB) &&
+ ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+ ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ if (ret_val != IXGBE_SUCCESS) {
+ ret_val = IXGBE_ERR_SWFW_SYNC;
+ goto out;
+ }
+ got_lock = TRUE;
+ }
autoc_reg &= ~IXGBE_AUTOC_FLU;
autoc_reg |= IXGBE_AUTOC_AN_RESTART;
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+ if (hw->mac.type == ixgbe_mac_82599EB)
+ ixgbe_reset_pipeline_82599(hw);
+
+ if (got_lock)
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
+
led_reg &= ~IXGBE_LED_MODE_MASK(index);
led_reg &= ~IXGBE_LED_BLINK(index);
led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
IXGBE_WRITE_FLUSH(hw);
- return IXGBE_SUCCESS;
+out:
+ return ret_val;
}
/**
@@ -3882,7 +4136,7 @@ void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw)
* Calculates the checksum for some buffer on a specified length. The
* checksum calculated is returned.
**/
-static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
+u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
{
u32 i;
u8 sum = 0;
@@ -3908,8 +4162,8 @@ static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
* Communicates with the manageability block. On success return IXGBE_SUCCESS
* else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
**/
-static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
- u32 length)
+s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
+ u32 length)
{
u32 hicr, i, bi;
u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
diff --git a/sys/dev/ixgbe/ixgbe_common.h b/sys/dev/ixgbe/ixgbe_common.h
index 3bbc537..cd9a13d 100644
--- a/sys/dev/ixgbe/ixgbe_common.h
+++ b/sys/dev/ixgbe/ixgbe_common.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2012, Intel Corporation
+ Copyright (c) 2001-2013, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -41,9 +41,14 @@
IXGBE_WRITE_REG(hw, reg, (u32) value); \
IXGBE_WRITE_REG(hw, reg + 4, (u32) (value >> 32)); \
} while (0)
+#if !defined(NO_READ_PBA_RAW) || !defined(NO_WRITE_PBA_RAW)
+struct ixgbe_pba {
+ u16 word[2];
+ u16 *pba_block;
+};
+#endif
u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw);
-
s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw);
s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw);
s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
@@ -52,6 +57,13 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw);
s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num);
s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
u32 pba_num_size);
+s32 ixgbe_read_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
+ u32 eeprom_buf_size, u16 max_pba_block_size,
+ struct ixgbe_pba *pba);
+s32 ixgbe_write_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
+ u32 eeprom_buf_size, struct ixgbe_pba *pba);
+s32 ixgbe_get_pba_block_size(struct ixgbe_hw *hw, u16 *eeprom_buf,
+ u32 eeprom_buf_size, u16 *pba_block_size);
s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr);
s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw);
void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw);
@@ -96,6 +108,7 @@ s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw);
s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw);
s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw);
+s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
void ixgbe_fc_autoneg(struct ixgbe_hw *hw);
s32 ixgbe_validate_mac_addr(u8 *mac_addr);
@@ -137,5 +150,11 @@ void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw);
s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
u8 build, u8 ver);
+u8 ixgbe_calculate_checksum(u8 *buffer, u32 length);
+s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
+ u32 length);
void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
+
+extern s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
+
#endif /* IXGBE_COMMON */
diff --git a/sys/dev/ixgbe/ixgbe_mbx.h b/sys/dev/ixgbe/ixgbe_mbx.h
index 8ad18cb..adcba9e 100644
--- a/sys/dev/ixgbe/ixgbe_mbx.h
+++ b/sys/dev/ixgbe/ixgbe_mbx.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2012, Intel Corporation
+ Copyright (c) 2001-2013, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -84,8 +84,20 @@
#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
+
+/* mailbox API, version 1.0 VF requests */
#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
#define IXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */
+#define IXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */
+
+/* mailbox API, version 1.1 VF requests */
+#define IXGBE_VF_GET_QUEUES 0x09 /* get queue configuration */
+
+/* GET_QUEUES return data indices within the mailbox */
+#define IXGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */
+#define IXGBE_VF_RX_QUEUES 2 /* number of Rx queues supported */
+#define IXGBE_VF_TRANS_VLAN 3 /* Indication of port vlan */
+#define IXGBE_VF_DEF_QUEUE 4 /* Default queue offset */
/* length of permanent address message returned from PF */
#define IXGBE_VF_PERMADDR_MSG_LEN 4
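
A version-1.1 VF decodes the PF's GET_QUEUES reply by indexing the mailbox buffer with the constants above. A sketch of the decode step only; fetching the reply out of the mailbox is elided and the five-word buffer size is an assumption:

	/* Sketch: reply buffer, assumed already copied out of the VF mailbox. */
	u32 msg[5] = { 0 };
	u32 num_tx, num_rx, def_q;
	bool has_port_vlan;

	num_tx        = msg[IXGBE_VF_TX_QUEUES];
	num_rx        = msg[IXGBE_VF_RX_QUEUES];
	has_port_vlan = (msg[IXGBE_VF_TRANS_VLAN] != 0);
	def_q         = msg[IXGBE_VF_DEF_QUEUE];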
diff --git a/sys/dev/ixgbe/ixgbe_osdep.h b/sys/dev/ixgbe/ixgbe_osdep.h
index a07a625..afd5ab0 100644
--- a/sys/dev/ixgbe/ixgbe_osdep.h
+++ b/sys/dev/ixgbe/ixgbe_osdep.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2012, Intel Corporation
+ Copyright (c) 2001-2013, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -91,6 +91,9 @@
#define CMD_MEM_WRT_INVALIDATE 0x0010 /* BIT_4 */
#define PCI_COMMAND_REGISTER PCIR_COMMAND
+/* Shared code dropped this define. */
+#define IXGBE_INTEL_VENDOR_ID 0x8086
+
/* Bunch of defines for shared code bogosity */
#define UNREFERENCED_PARAMETER(_p)
#define UNREFERENCED_1PARAMETER(_p)
diff --git a/sys/dev/ixgbe/ixgbe_phy.c b/sys/dev/ixgbe/ixgbe_phy.c
index 3f262f9..b22797f 100644
--- a/sys/dev/ixgbe/ixgbe_phy.c
+++ b/sys/dev/ixgbe/ixgbe_phy.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2012, Intel Corporation
+ Copyright (c) 2001-2013, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -47,6 +47,8 @@ static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data);
static bool ixgbe_get_i2c_data(u32 *i2cctl);
+static s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *sff8472_data);
/**
* ixgbe_init_phy_ops_generic - Inits PHY function ptrs
@@ -71,6 +73,7 @@ s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw)
phy->ops.get_firmware_version = ixgbe_get_phy_firmware_version_generic;
phy->ops.read_i2c_byte = &ixgbe_read_i2c_byte_generic;
phy->ops.write_i2c_byte = &ixgbe_write_i2c_byte_generic;
+ phy->ops.read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_generic;
phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic;
phy->ops.write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic;
phy->ops.i2c_bus_clear = &ixgbe_i2c_bus_clear;
@@ -563,14 +566,12 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
* ixgbe_setup_phy_link_speed_generic - Sets the auto advertised capabilities
* @hw: pointer to hardware structure
* @speed: new link speed
- * @autoneg: TRUE if autonegotiation enabled
**/
s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
- bool autoneg,
bool autoneg_wait_to_complete)
{
- UNREFERENCED_2PARAMETER(autoneg, autoneg_wait_to_complete);
+ UNREFERENCED_1PARAMETER(autoneg_wait_to_complete);
DEBUGFUNC("ixgbe_setup_phy_link_speed_generic");
@@ -969,9 +970,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
IXGBE_SFF_IDENTIFIER,
&identifier);
- if (status == IXGBE_ERR_SWFW_SYNC ||
- status == IXGBE_ERR_I2C ||
- status == IXGBE_ERR_SFP_NOT_PRESENT)
+ if (status != IXGBE_SUCCESS)
goto err_read_i2c_eeprom;
/* LAN ID is needed for sfp_type determination */
@@ -985,26 +984,20 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
IXGBE_SFF_1GBE_COMP_CODES,
&comp_codes_1g);
- if (status == IXGBE_ERR_SWFW_SYNC ||
- status == IXGBE_ERR_I2C ||
- status == IXGBE_ERR_SFP_NOT_PRESENT)
+ if (status != IXGBE_SUCCESS)
goto err_read_i2c_eeprom;
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_10GBE_COMP_CODES,
&comp_codes_10g);
- if (status == IXGBE_ERR_SWFW_SYNC ||
- status == IXGBE_ERR_I2C ||
- status == IXGBE_ERR_SFP_NOT_PRESENT)
+ if (status != IXGBE_SUCCESS)
goto err_read_i2c_eeprom;
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_CABLE_TECHNOLOGY,
&cable_tech);
- if (status == IXGBE_ERR_SWFW_SYNC ||
- status == IXGBE_ERR_I2C ||
- status == IXGBE_ERR_SFP_NOT_PRESENT)
+ if (status != IXGBE_SUCCESS)
goto err_read_i2c_eeprom;
/* ID Module
@@ -1102,27 +1095,21 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
IXGBE_SFF_VENDOR_OUI_BYTE0,
&oui_bytes[0]);
- if (status == IXGBE_ERR_SWFW_SYNC ||
- status == IXGBE_ERR_I2C ||
- status == IXGBE_ERR_SFP_NOT_PRESENT)
+ if (status != IXGBE_SUCCESS)
goto err_read_i2c_eeprom;
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_VENDOR_OUI_BYTE1,
&oui_bytes[1]);
- if (status == IXGBE_ERR_SWFW_SYNC ||
- status == IXGBE_ERR_I2C ||
- status == IXGBE_ERR_SFP_NOT_PRESENT)
+ if (status != IXGBE_SUCCESS)
goto err_read_i2c_eeprom;
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_VENDOR_OUI_BYTE2,
&oui_bytes[2]);
- if (status == IXGBE_ERR_SWFW_SYNC ||
- status == IXGBE_ERR_I2C ||
- status == IXGBE_ERR_SFP_NOT_PRESENT)
+ if (status != IXGBE_SUCCESS)
goto err_read_i2c_eeprom;
vendor_oui =
@@ -1333,6 +1320,22 @@ s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
}
/**
+ * ixgbe_read_i2c_sff8472_generic - Reads 8 bit word over I2C interface
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset at address 0xA2
+ * @eeprom_data: value read
+ *
+ * Performs byte read operation to SFP module's SFF-8472 data over I2C
+ **/
+static s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *sff8472_data)
+{
+ return hw->phy.ops.read_i2c_byte(hw, byte_offset,
+ IXGBE_I2C_EEPROM_DEV_ADDR2,
+ sff8472_data);
+}
+
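The new phy op makes the 0xA2 diagnostic page reachable without callers knowing the device address. A sketch probing SFF-8472 compliance; the constants come from the ixgbe_phy.h hunk below, and the surrounding context is assumed:

	/* Sketch: check whether the SFP implements SFF-8472 diagnostics. */
	u8 comp = IXGBE_SFF_SFF_8472_UNSUP;

	if (hw->phy.ops.read_i2c_sff8472(hw, IXGBE_SFF_SFF_8472_COMP,
	    &comp) == IXGBE_SUCCESS && comp != IXGBE_SFF_SFF_8472_UNSUP)
		DEBUGOUT("SFF-8472 diagnostics present\n");
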
+/**
* ixgbe_write_i2c_eeprom_generic - Writes 8 bit EEPROM word over I2C interface
* @hw: pointer to hardware structure
* @byte_offset: EEPROM byte offset to write
@@ -1425,9 +1428,9 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
break;
fail:
+ ixgbe_i2c_bus_clear(hw);
hw->mac.ops.release_swfw_sync(hw, swfw_mask);
msec_delay(100);
- ixgbe_i2c_bus_clear(hw);
retry++;
if (retry < max_retry)
DEBUGOUT("I2C byte read error - Retrying.\n");
diff --git a/sys/dev/ixgbe/ixgbe_phy.h b/sys/dev/ixgbe/ixgbe_phy.h
index 017ea88..5b2e28b 100644
--- a/sys/dev/ixgbe/ixgbe_phy.h
+++ b/sys/dev/ixgbe/ixgbe_phy.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2012, Intel Corporation
+ Copyright (c) 2001-2013, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -36,7 +36,9 @@
#define _IXGBE_PHY_H_
#include "ixgbe_type.h"
-#define IXGBE_I2C_EEPROM_DEV_ADDR 0xA0
+#define IXGBE_I2C_EEPROM_DEV_ADDR 0xA0
+#define IXGBE_I2C_EEPROM_DEV_ADDR2 0xA2
+#define IXGBE_I2C_EEPROM_BANK_LEN 0xFF
/* EEPROM byte offsets */
#define IXGBE_SFF_IDENTIFIER 0x0
@@ -48,6 +50,10 @@
#define IXGBE_SFF_10GBE_COMP_CODES 0x3
#define IXGBE_SFF_CABLE_TECHNOLOGY 0x8
#define IXGBE_SFF_CABLE_SPEC_COMP 0x3C
+#define IXGBE_SFF_SFF_8472_SWAP 0x5C
+#define IXGBE_SFF_SFF_8472_COMP 0x5E
+#define IXGBE_SFF_SFF_8472_OSCB 0x6E
+#define IXGBE_SFF_SFF_8472_ESCB 0x76
/* Bitmasks */
#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4
@@ -58,6 +64,9 @@
#define IXGBE_SFF_1GBASET_CAPABLE 0x8
#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
#define IXGBE_SFF_10GBASELR_CAPABLE 0x20
+#define IXGBE_SFF_SOFT_RS_SELECT_MASK 0x8
+#define IXGBE_SFF_SOFT_RS_SELECT_10G 0x8
+#define IXGBE_SFF_SOFT_RS_SELECT_1G 0x0
#define IXGBE_I2C_EEPROM_READ_MASK 0x100
#define IXGBE_I2C_EEPROM_STATUS_MASK 0x3
#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0
@@ -95,6 +104,14 @@
#define IXGBE_TN_LASI_STATUS_REG 0x9005
#define IXGBE_TN_LASI_STATUS_TEMP_ALARM 0x0008
+/* SFP+ SFF-8472 Compliance */
+#define IXGBE_SFF_SFF_8472_UNSUP 0x00
+#define IXGBE_SFF_SFF_8472_REV_9_3 0x01
+#define IXGBE_SFF_SFF_8472_REV_9_5 0x02
+#define IXGBE_SFF_SFF_8472_REV_10_2 0x03
+#define IXGBE_SFF_SFF_8472_REV_10_4 0x04
+#define IXGBE_SFF_SFF_8472_REV_11_0 0x05
+
s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw);
bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr);
enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
@@ -108,7 +125,6 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw);
s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
- bool autoneg,
bool autoneg_wait_to_complete);
s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
diff --git a/sys/dev/ixgbe/ixgbe_type.h b/sys/dev/ixgbe/ixgbe_type.h
index 52944fd..344097d 100644
--- a/sys/dev/ixgbe/ixgbe_type.h
+++ b/sys/dev/ixgbe/ixgbe_type.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2012, Intel Corporation
+ Copyright (c) 2001-2013, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -38,9 +38,6 @@
#include "ixgbe_osdep.h"
-/* Vendor ID */
-#define IXGBE_INTEL_VENDOR_ID 0x8086
-
/* Device IDs */
#define IXGBE_DEV_ID_82598 0x10B6
#define IXGBE_DEV_ID_82598_BX 0x1508
@@ -62,18 +59,24 @@
#define IXGBE_DEV_ID_82599_CX4 0x10F9
#define IXGBE_DEV_ID_82599_SFP 0x10FB
#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9
+#define IXGBE_SUBDEV_ID_82599_RNDC 0x1F72
#define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0
+#define IXGBE_SUBDEV_ID_82599_ECNA_DP 0x0470
#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152A
#define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529
#define IXGBE_DEV_ID_82599_SFP_EM 0x1507
#define IXGBE_DEV_ID_82599_SFP_SF2 0x154D
+#define IXGBE_DEV_ID_82599_SFP_SF_QP 0x154A
#define IXGBE_DEV_ID_82599EN_SFP 0x1557
#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC
#define IXGBE_DEV_ID_82599_T3_LOM 0x151C
#define IXGBE_DEV_ID_82599_VF 0x10ED
-#define IXGBE_DEV_ID_X540_VF 0x1515
+#define IXGBE_DEV_ID_82599_VF_HV 0x152E
+#define IXGBE_DEV_ID_82599_BYPASS 0x155D
#define IXGBE_DEV_ID_X540T 0x1528
-#define IXGBE_DEV_ID_X540T1 0x1560
+#define IXGBE_DEV_ID_X540_VF 0x1515
+#define IXGBE_DEV_ID_X540_VF_HV 0x1530
+#define IXGBE_DEV_ID_X540_BYPASS 0x155C
/* General Registers */
#define IXGBE_CTRL 0x00000
@@ -280,6 +283,7 @@
#define IXGBE_RETA(_i) (0x05C00 + ((_i) * 4)) /* 32 of these (0-31) */
#define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* 10 of these (0-9) */
+
/* Flow Director registers */
#define IXGBE_FDIRCTRL 0x0EE00
#define IXGBE_FDIRHKEY 0x0EE68
@@ -360,11 +364,16 @@
#define IXGBE_WUPL 0x05900
#define IXGBE_WUPM 0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */
+
#define IXGBE_FHFT(_n) (0x09000 + (_n * 0x100)) /* Flex host filter table */
/* Ext Flexible Host Filter Table */
#define IXGBE_FHFT_EXT(_n) (0x09800 + (_n * 0x100))
+/* Four Flexible Filters are supported */
#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX 4
+
+/* Six Flexible Filters are supported */
+#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX_6 6
#define IXGBE_EXT_FLEXIBLE_FILTER_COUNT_MAX 2
/* Each Flexible Filter is at most 128 (0x80) bytes in length */
@@ -396,10 +405,11 @@
#define IXGBE_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */
#define IXGBE_WUFC_FLX4 0x00100000 /* Flexible Filter 4 Enable */
#define IXGBE_WUFC_FLX5 0x00200000 /* Flexible Filter 5 Enable */
-#define IXGBE_WUFC_FLX_FILTERS 0x000F0000 /* Mask for 4 flex filters */
+#define IXGBE_WUFC_FLX_FILTERS 0x000F0000 /* Mask for 4 flex filters */
/* Mask for Ext. flex filters */
#define IXGBE_WUFC_EXT_FLX_FILTERS 0x00300000
-#define IXGBE_WUFC_ALL_FILTERS 0x003F00FF /* Mask for all wakeup filters */
+#define IXGBE_WUFC_ALL_FILTERS 0x000F00FF /* Mask all 4 flex filters */
+#define IXGBE_WUFC_ALL_FILTERS_6 0x003F00FF /* Mask all 6 flex filters */
#define IXGBE_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */
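
The split into four- and six-filter masks lets wake-up code clear exactly the flexible filters a given MAC implements. A hedged sketch; tying the six-filter mask to X540-class parts is an assumption of this example, not something the diff states:

	/* Sketch: choose the flex-filter mask for this MAC (assumption). */
	u32 all_filters = (hw->mac.type == ixgbe_mac_X540) ?
	    IXGBE_WUFC_ALL_FILTERS_6 : IXGBE_WUFC_ALL_FILTERS;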
/* Wake Up Status */
@@ -420,7 +430,6 @@
#define IXGBE_WUS_FLX5 IXGBE_WUFC_FLX5
#define IXGBE_WUS_FLX_FILTERS IXGBE_WUFC_FLX_FILTERS
-/* Wake Up Packet Length */
#define IXGBE_WUPL_LENGTH_MASK 0xFFFF
/* DCB registers */
@@ -574,6 +583,7 @@
#define IXGBE_RTTBCNRTT 0x05150
#define IXGBE_RTTBCNRD 0x0498C
+
/* FCoE DMA Context Registers */
#define IXGBE_FCPTRL 0x02410 /* FC User Desc. PTR Low */
#define IXGBE_FCPTRH 0x02414 /* FC USer Desc. PTR High */
@@ -754,11 +764,14 @@
#define IXGBE_BMCIP_IPADDR_VALID 0x00000002
/* Management Bit Fields and Masks */
+#define IXGBE_MANC_RCV_TCO_EN 0x00020000 /* Rcv TCO packet enable */
#define IXGBE_MANC_EN_BMC2OS 0x10000000 /* Ena BMC2OS and OS2BMC traffic */
#define IXGBE_MANC_EN_BMC2OS_SHIFT 28
/* Firmware Semaphore Register */
#define IXGBE_FWSM_MODE_MASK 0xE
+#define IXGBE_FWSM_TS_ENABLED 0x1
+#define IXGBE_FWSM_FW_MODE_PT 0x4
/* ARC Subsystem registers */
#define IXGBE_HICR 0x15F00
@@ -1014,6 +1027,7 @@
#define IXGBE_RSCCTL_MAXDESC_4 0x04
#define IXGBE_RSCCTL_MAXDESC_8 0x08
#define IXGBE_RSCCTL_MAXDESC_16 0x0C
+#define IXGBE_RSCCTL_TS_DIS 0x02
/* RSCDBU Bit Masks */
#define IXGBE_RSCDBU_RSCSMALDIS_MASK 0x0000007F
@@ -1026,7 +1040,7 @@
#define IXGBE_RDRXCTL_DMAIDONE 0x00000008 /* DMA init cycle done */
#define IXGBE_RDRXCTL_AGGDIS 0x00010000 /* Aggregation disable */
#define IXGBE_RDRXCTL_RSCFRSTSIZE 0x003E0000 /* RSC First packet size */
-#define IXGBE_RDRXCTL_RSCLLIDIS 0x00800000 /* Disabl RSC compl on LLI */
+#define IXGBE_RDRXCTL_RSCLLIDIS 0x00800000 /* Disable RSC compl on LLI*/
#define IXGBE_RDRXCTL_RSCACKC 0x02000000 /* must set 1 when RSC ena */
#define IXGBE_RDRXCTL_FCOE_WRFIX 0x04000000 /* must set 1 when RSC ena */
@@ -1052,6 +1066,7 @@
#define IXGBE_CTRL_RST_MASK (IXGBE_CTRL_LNK_RST | IXGBE_CTRL_RST)
/* FACTPS */
+#define IXGBE_FACTPS_MNGCG 0x20000000 /* Manageability Clock Gated */
#define IXGBE_FACTPS_LFS 0x40000000 /* LAN Function Select */
/* MHADD Bit Masks */
@@ -1590,6 +1605,7 @@ enum {
#define IXGBE_ESDP_SDP7 0x00000080 /* SDP7 Data Value */
#define IXGBE_ESDP_SDP0_DIR 0x00000100 /* SDP0 IO direction */
#define IXGBE_ESDP_SDP1_DIR 0x00000200 /* SDP1 IO direction */
+#define IXGBE_ESDP_SDP2_DIR 0x00000400 /* SDP2 IO direction */
#define IXGBE_ESDP_SDP3_DIR 0x00000800 /* SDP3 IO direction */
#define IXGBE_ESDP_SDP4_DIR 0x00001000 /* SDP4 IO direction */
#define IXGBE_ESDP_SDP5_DIR 0x00002000 /* SDP5 IO direction */
@@ -1668,6 +1684,7 @@ enum {
#define IXGBE_AUTOC2_10G_KR (0x0 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
#define IXGBE_AUTOC2_10G_XFI (0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
#define IXGBE_AUTOC2_10G_SFI (0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC2_LINK_DISABLE_MASK 0x70000000
#define IXGBE_MACC_FLU 0x00000001
#define IXGBE_MACC_FSV_10G 0x00030000
@@ -1838,7 +1855,7 @@ enum {
#define IXGBE_ETH_LENGTH_OF_ADDRESS 6
#define IXGBE_EEPROM_PAGE_SIZE_MAX 128
-#define IXGBE_EEPROM_RD_BUFFER_MAX_COUNT 512 /* words rd in burst */
+#define IXGBE_EEPROM_RD_BUFFER_MAX_COUNT 256 /* words rd in burst */
#define IXGBE_EEPROM_WR_BUFFER_MAX_COUNT 256 /* words wr in burst */
#ifndef IXGBE_EEPROM_GRANT_ATTEMPTS
@@ -2524,7 +2541,6 @@ typedef u32 ixgbe_link_speed;
IXGBE_LINK_SPEED_1GB_FULL | \
IXGBE_LINK_SPEED_10GB_FULL)
-
/* Physical layer type */
typedef u32 ixgbe_physical_layer;
#define IXGBE_PHYSICAL_LAYER_UNKNOWN 0
@@ -2757,6 +2773,7 @@ enum ixgbe_sfp_type {
enum ixgbe_media_type {
ixgbe_media_type_unknown = 0,
ixgbe_media_type_fiber,
+ ixgbe_media_type_fiber_fixed,
ixgbe_media_type_copper,
ixgbe_media_type_backplane,
ixgbe_media_type_cx4,
@@ -2975,7 +2992,7 @@ struct ixgbe_mac_operations {
void (*disable_tx_laser)(struct ixgbe_hw *);
void (*enable_tx_laser)(struct ixgbe_hw *);
void (*flap_tx_laser)(struct ixgbe_hw *);
- s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool);
+ s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool);
s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
bool *);
@@ -3026,12 +3043,12 @@ struct ixgbe_phy_operations {
s32 (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *);
s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16);
s32 (*setup_link)(struct ixgbe_hw *);
- s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool,
- bool);
+ s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool);
s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *);
s32 (*get_firmware_version)(struct ixgbe_hw *, u16 *);
s32 (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *);
s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8);
+ s32 (*read_i2c_sff8472)(struct ixgbe_hw *, u8 , u8 *);
s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
void (*i2c_bus_clear)(struct ixgbe_hw *);
@@ -3069,7 +3086,9 @@ struct ixgbe_mac_info {
u32 max_tx_queues;
u32 max_rx_queues;
u32 orig_autoc;
+ u32 cached_autoc;
u8 san_mac_rar_index;
+ bool get_link_status;
u32 orig_autoc2;
u16 max_msix_vectors;
bool arc_subsystem_valid;
@@ -3142,6 +3161,7 @@ struct ixgbe_hw {
u16 subsystem_vendor_id;
u8 revision_id;
bool adapter_stopped;
+ int api_version;
bool force_full_reset;
bool allow_unsupported_sfp;
};
@@ -3185,6 +3205,7 @@ struct ixgbe_hw {
#define IXGBE_ERR_INVALID_ARGUMENT -32
#define IXGBE_ERR_HOST_INTERFACE_COMMAND -33
#define IXGBE_ERR_OUT_OF_MEM -34
+#define IXGBE_ERR_FEATURE_NOT_SUPPORTED -36
#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
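
The setup_link and setup_link_speed rows above drop the standalone autoneg argument; whether to autonegotiate is now derived from the requested speed mask. A minimal sketch of a caller updated for the new three-argument form (assumes ixgbe_type.h is in scope; the function name is illustrative, not part of this commit):

    /* Hypothetical caller of the new three-argument setup_link();
     * the old form was setup_link(hw, speed, autoneg, wait). */
    static s32
    example_setup_10g(struct ixgbe_hw *hw, bool wait)
    {
    	if (hw->mac.ops.setup_link == NULL)
    		return (IXGBE_NOT_IMPLEMENTED);
    	return (hw->mac.ops.setup_link(hw, IXGBE_LINK_SPEED_10GB_FULL, wait));
    }
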
diff --git a/sys/dev/ixgbe/ixgbe_vf.c b/sys/dev/ixgbe/ixgbe_vf.c
index 9e3dd72..14834a2 100644
--- a/sys/dev/ixgbe/ixgbe_vf.c
+++ b/sys/dev/ixgbe/ixgbe_vf.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2012, Intel Corporation
+ Copyright (c) 2001-2013, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -142,6 +142,7 @@ s32 ixgbe_reset_hw_vf(struct ixgbe_hw *hw)
/* Call adapter stop to disable tx/rx and clear interrupts */
hw->mac.ops.stop_adapter(hw);
+
DEBUGOUT("Issuing a function level reset to MAC\n");
ctrl = IXGBE_VFREAD_REG(hw, IXGBE_VFCTRL) | IXGBE_CTRL_RST;
@@ -272,6 +273,17 @@ static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
return vector;
}
+static void ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw,
+ u32 *msg, u16 size)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ u32 retmsg[IXGBE_VFMAILBOX_SIZE];
+ s32 retval = mbx->ops.write_posted(hw, msg, size, 0);
+
+ if (!retval)
+ mbx->ops.read_posted(hw, retmsg, size, 0);
+}
+
/**
* ixgbe_set_rar_vf - set device MAC address
* @hw: pointer to hardware structure
@@ -463,11 +475,10 @@ s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
*
* Set the link speed in the AUTOC register and restarts link.
**/
-s32 ixgbe_setup_mac_link_vf(struct ixgbe_hw *hw,
- ixgbe_link_speed speed, bool autoneg,
+s32 ixgbe_setup_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed speed,
bool autoneg_wait_to_complete)
{
- UNREFERENCED_4PARAMETER(hw, speed, autoneg, autoneg_wait_to_complete);
+ UNREFERENCED_3PARAMETER(hw, speed, autoneg_wait_to_complete);
return IXGBE_SUCCESS;
}
@@ -483,23 +494,26 @@ s32 ixgbe_setup_mac_link_vf(struct ixgbe_hw *hw,
s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
bool *link_up, bool autoneg_wait_to_complete)
{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ struct ixgbe_mac_info *mac = &hw->mac;
+ s32 ret_val = IXGBE_SUCCESS;
u32 links_reg;
+ u32 in_msg = 0;
UNREFERENCED_1PARAMETER(autoneg_wait_to_complete);
- if (!(hw->mbx.ops.check_for_rst(hw, 0))) {
- *link_up = FALSE;
- *speed = 0;
- return -1;
- }
+ /* If we were hit with a reset, drop the link */
+ if (!mbx->ops.check_for_rst(hw, 0) || !mbx->timeout)
+ mac->get_link_status = TRUE;
- links_reg = IXGBE_VFREAD_REG(hw, IXGBE_VFLINKS);
+ if (!mac->get_link_status)
+ goto out;
- if (links_reg & IXGBE_LINKS_UP)
- *link_up = TRUE;
- else
- *link_up = FALSE;
+ /* if link status is down, no point in checking whether the pf is up */
+ links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+ if (!(links_reg & IXGBE_LINKS_UP))
+ goto out;
- switch (links_reg & IXGBE_LINKS_SPEED_10G_82599) {
+ switch (links_reg & IXGBE_LINKS_SPEED_82599) {
case IXGBE_LINKS_SPEED_10G_82599:
*speed = IXGBE_LINK_SPEED_10GB_FULL;
break;
@@ -511,6 +525,87 @@ s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
break;
}
+ /* if the read failed it could just be a mailbox collision; best to wait
+ * until we are called again and not report an error
+ */
+ if (mbx->ops.read(hw, &in_msg, 1, 0))
+ goto out;
+
+ if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
+ /* msg is not CTS and is a NACK; we must have lost CTS status */
+ if (in_msg & IXGBE_VT_MSGTYPE_NACK)
+ ret_val = -1;
+ goto out;
+ }
+
+ /* the pf is talking, if we timed out in the past we reinit */
+ if (!mbx->timeout) {
+ ret_val = -1;
+ goto out;
+ }
+
+ /* if we passed all the tests above then the link is up and we no
+ * longer need to check for link
+ */
+ mac->get_link_status = FALSE;
+
+out:
+ *link_up = !mac->get_link_status;
+ return ret_val;
+}
+
+/**
+ * ixgbevf_rlpml_set_vf - Set the maximum receive packet length
+ * @hw: pointer to the HW structure
+ * @max_size: value to assign to max frame size
+ **/
+void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size)
+{
+ u32 msgbuf[2];
+
+ msgbuf[0] = IXGBE_VF_SET_LPE;
+ msgbuf[1] = max_size;
+ ixgbevf_write_msg_read_ack(hw, msgbuf, 2);
+}
+
+/**
+ * ixgbevf_negotiate_api_version - Negotiate supported API version
+ * @hw: pointer to the HW structure
+ * @api: integer containing requested API version
+ **/
+int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api)
+{
+ int err;
+ u32 msg[3];
+
+ /* Negotiate the mailbox API version */
+ msg[0] = IXGBE_VF_API_NEGOTIATE;
+ msg[1] = api;
+ msg[2] = 0;
+ err = hw->mbx.ops.write_posted(hw, msg, 3, 0);
+
+ if (!err)
+ err = hw->mbx.ops.read_posted(hw, msg, 3, 0);
+
+ if (!err) {
+ msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+
+ /* Store value and return 0 on success */
+ if (msg[0] == (IXGBE_VF_API_NEGOTIATE | IXGBE_VT_MSGTYPE_ACK)) {
+ hw->api_version = api;
+ return 0;
+ }
+
+ err = IXGBE_ERR_INVALID_ARGUMENT;
+ }
+
+ return err;
+}
+
+int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
+ unsigned int *default_tc)
+{
+ UNREFERENCED_3PARAMETER(hw, num_tcs, default_tc);
return IXGBE_SUCCESS;
}
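
ixgbevf_negotiate_api_version() above implements the standard VF mailbox round trip: post the request, poll for the PF reply, strip IXGBE_VT_MSGTYPE_CTS, and accept only an ACK of the same opcode before caching hw->api_version. A hedged sketch of how an attach path might walk down from the newest supported version (the version numbers are illustrative assumptions, not constants defined in this diff):

    /* Sketch: try newer mailbox API versions first, fall back on NACK.
     * The api[] values are placeholders, not constants from this commit. */
    static void
    example_negotiate_mbx_api(struct ixgbe_hw *hw)
    {
    	static const int api[] = { 3, 2, 1 };
    	unsigned int i;

    	for (i = 0; i < sizeof(api) / sizeof(api[0]); i++)
    		if (ixgbevf_negotiate_api_version(hw, api[i]) == 0)
    			break;		/* hw->api_version now set */
    }
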
diff --git a/sys/dev/ixgbe/ixgbe_vf.h b/sys/dev/ixgbe/ixgbe_vf.h
index 5c77ba4..8500768 100644
--- a/sys/dev/ixgbe/ixgbe_vf.h
+++ b/sys/dev/ixgbe/ixgbe_vf.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2012, Intel Corporation
+ Copyright (c) 2001-2013, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -39,6 +39,9 @@
#define IXGBE_VF_MAX_TX_QUEUES 8
#define IXGBE_VF_MAX_RX_QUEUES 8
+/* DCB define */
+#define IXGBE_VF_MAX_TRAFFIC_CLASS 8
+
#define IXGBE_VFCTRL 0x00000
#define IXGBE_VFSTATUS 0x00008
#define IXGBE_VFLINKS 0x00010
@@ -117,7 +120,7 @@ u32 ixgbe_get_num_of_tx_queues_vf(struct ixgbe_hw *hw);
u32 ixgbe_get_num_of_rx_queues_vf(struct ixgbe_hw *hw);
s32 ixgbe_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr);
s32 ixgbe_setup_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed speed,
- bool autoneg, bool autoneg_wait_to_complete);
+ bool autoneg_wait_to_complete);
s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
bool *link_up, bool autoneg_wait_to_complete);
s32 ixgbe_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
@@ -127,5 +130,9 @@ s32 ixgbe_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list,
u32 mc_addr_count, ixgbe_mc_addr_itr,
bool clear);
s32 ixgbe_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on);
+void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size);
+int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api);
+int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
+ unsigned int *default_tc);
#endif /* __IXGBE_VF_H__ */
diff --git a/sys/dev/ixgbe/ixgbe_x540.c b/sys/dev/ixgbe/ixgbe_x540.c
index 0635cda..9c7d5cc 100644
--- a/sys/dev/ixgbe/ixgbe_x540.c
+++ b/sys/dev/ixgbe/ixgbe_x540.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2012, Intel Corporation
+ Copyright (c) 2001-2013, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -116,6 +116,7 @@ s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw)
mac->ops.setup_rxpba = &ixgbe_set_rxpba_generic;
mac->ops.check_link = &ixgbe_check_mac_link_generic;
+
mac->mcft_size = 128;
mac->vft_size = 128;
mac->num_rar_entries = 128;
@@ -177,16 +178,14 @@ enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw)
* ixgbe_setup_mac_link_X540 - Sets the auto advertised capabilities
* @hw: pointer to hardware structure
* @speed: new link speed
- * @autoneg: TRUE if autonegotiation enabled
* @autoneg_wait_to_complete: TRUE when waiting for completion is needed
**/
s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw,
- ixgbe_link_speed speed, bool autoneg,
+ ixgbe_link_speed speed,
bool autoneg_wait_to_complete)
{
DEBUGFUNC("ixgbe_setup_mac_link_X540");
- return hw->phy.ops.setup_link_speed(hw, speed, autoneg,
- autoneg_wait_to_complete);
+ return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait_to_complete);
}
/**
@@ -973,3 +972,4 @@ s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index)
return IXGBE_SUCCESS;
}
+
diff --git a/sys/dev/ixgbe/ixgbe_x540.h b/sys/dev/ixgbe/ixgbe_x540.h
index 29cf8bb..a553874 100644
--- a/sys/dev/ixgbe/ixgbe_x540.h
+++ b/sys/dev/ixgbe/ixgbe_x540.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2012, Intel Corporation
+ Copyright (c) 2001-2013, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -41,7 +41,7 @@ s32 ixgbe_get_link_capabilities_X540(struct ixgbe_hw *hw,
ixgbe_link_speed *speed, bool *autoneg);
enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw);
s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
- bool autoneg, bool link_up_wait_to_complete);
+ bool link_up_wait_to_complete);
s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw);
s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw);
u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw);
diff --git a/sys/dev/ixgbe/ixv.c b/sys/dev/ixgbe/ixv.c
index b3a3d64..47a707d 100644
--- a/sys/dev/ixgbe/ixv.c
+++ b/sys/dev/ixgbe/ixv.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2012, Intel Corporation
+ Copyright (c) 2001-2013, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -169,7 +169,6 @@ static device_method_t ixv_methods[] = {
DEVMETHOD(device_attach, ixv_attach),
DEVMETHOD(device_detach, ixv_detach),
DEVMETHOD(device_shutdown, ixv_shutdown),
-
DEVMETHOD_END
};
@@ -1891,7 +1890,6 @@ ixv_config_link(struct adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 autoneg, err = 0;
- bool negotiate = TRUE;
if (hw->mac.ops.check_link)
err = hw->mac.ops.check_link(hw, &autoneg,
@@ -1900,8 +1898,8 @@ ixv_config_link(struct adapter *adapter)
goto out;
if (hw->mac.ops.setup_link)
- err = hw->mac.ops.setup_link(hw, autoneg,
- negotiate, adapter->link_up);
+ err = hw->mac.ops.setup_link(hw,
+ autoneg, adapter->link_up);
out:
return;
}
diff --git a/sys/dev/nvme/nvme.c b/sys/dev/nvme/nvme.c
index ed5dbdb..8e646b7 100644
--- a/sys/dev/nvme/nvme.c
+++ b/sys/dev/nvme/nvme.c
@@ -200,13 +200,11 @@ nvme_modevent(module_t mod, int type, void *arg)
void
nvme_dump_command(struct nvme_command *cmd)
{
- printf("opc:%x f:%x r1:%x cid:%x nsid:%x r2:%x r3:%x "
- "mptr:%qx prp1:%qx prp2:%qx cdw:%x %x %x %x %x %x\n",
+ printf(
+"opc:%x f:%x r1:%x cid:%x nsid:%x r2:%x r3:%x mptr:%jx prp1:%jx prp2:%jx cdw:%x %x %x %x %x %x\n",
cmd->opc, cmd->fuse, cmd->rsvd1, cmd->cid, cmd->nsid,
cmd->rsvd2, cmd->rsvd3,
- (long long unsigned int)cmd->mptr,
- (long long unsigned int)cmd->prp1,
- (long long unsigned int)cmd->prp2,
+ (uintmax_t)cmd->mptr, (uintmax_t)cmd->prp1, (uintmax_t)cmd->prp2,
cmd->cdw10, cmd->cdw11, cmd->cdw12, cmd->cdw13, cmd->cdw14,
cmd->cdw15);
}
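
The reworked printf above replaces the %qx format and (long long unsigned int) casts with %jx plus a cast to uintmax_t, the C99-portable way to print fixed-width integers whose underlying type varies by platform. A standalone illustration of the idiom:

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
    	uint64_t prp1 = 0x2000;

    	/* %jx consumes a uintmax_t, so the cast keeps the format and
    	 * argument matched regardless of how uint64_t is typedef'd. */
    	printf("prp1:%jx\n", (uintmax_t)prp1);
    	return (0);
    }
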
diff --git a/sys/dev/nvme/nvme_ctrlr.c b/sys/dev/nvme/nvme_ctrlr.c
index 1ddf9cc..7ed2f7e 100644
--- a/sys/dev/nvme/nvme_ctrlr.c
+++ b/sys/dev/nvme/nvme_ctrlr.c
@@ -131,8 +131,8 @@ nvme_ctrlr_setup_chatham(struct nvme_controller *ctrlr)
ctrlr->chatham_lbas = chatham_read_4(ctrlr, 0x8068) - 0x110;
ctrlr->chatham_size = ctrlr->chatham_lbas * 512;
- device_printf(ctrlr->dev, "Chatham size: %lld\n",
- (long long)ctrlr->chatham_size);
+ device_printf(ctrlr->dev, "Chatham size: %jd\n",
+ (intmax_t)ctrlr->chatham_size);
reg1 = reg2 = reg3 = ctrlr->chatham_size - 1;
diff --git a/sys/dev/oce/oce_hw.c b/sys/dev/oce/oce_hw.c
index e68ba7a..274c4d1 100644
--- a/sys/dev/oce/oce_hw.c
+++ b/sys/dev/oce/oce_hw.c
@@ -405,11 +405,6 @@ oce_create_nw_interface(POCE_SOFTC sc)
sc->if_cap_flags = capab_en_flags;
- /* Enable VLAN Promisc on HW */
- rc = oce_config_vlan(sc, (uint8_t) sc->if_id, NULL, 0, 1, 1);
- if (rc)
- goto error;
-
/* set default flow control */
rc = oce_set_flow_control(sc, sc->flow_control);
if (rc)
@@ -477,12 +472,9 @@ oce_hw_start(POCE_SOFTC sc)
return 1;
if (link.logical_link_status == NTWK_LOGICAL_LINK_UP) {
- sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
sc->link_status = NTWK_LOGICAL_LINK_UP;
if_link_state_change(sc->ifp, LINK_STATE_UP);
} else {
- sc->ifp->if_drv_flags &=
- ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
sc->link_status = NTWK_LOGICAL_LINK_DOWN;
if_link_state_change(sc->ifp, LINK_STATE_DOWN);
}
diff --git a/sys/dev/oce/oce_hw.h b/sys/dev/oce/oce_hw.h
index 0e79218..aac0a8a 100644
--- a/sys/dev/oce/oce_hw.h
+++ b/sys/dev/oce/oce_hw.h
@@ -38,6 +38,8 @@
/* $FreeBSD$ */
#include <sys/types.h>
#undef _BIG_ENDIAN /* TODO */
@@ -155,7 +157,10 @@
#define ASYNC_EVENT_LINK_UP 0x1
#define ASYNC_EVENT_LINK_DOWN 0x0
#define ASYNC_EVENT_GRP5 0x5
+#define ASYNC_EVENT_CODE_DEBUG 0x6
#define ASYNC_EVENT_PVID_STATE 0x3
+#define ASYNC_EVENT_DEBUG_QNQ 0x1
+#define ASYNC_EVENT_CODE_SLIPORT 0x11
#define VLAN_VID_MASK 0x0FFF
/* port link_status */
@@ -707,6 +712,17 @@ struct oce_async_event_grp5_pvid_state {
uint32_t code;
};
+/* async event indicating outer VLAN tag in QnQ */
+struct oce_async_event_qnq {
+ uint8_t valid; /* Indicates if outer VLAN is valid */
+ uint8_t rsvd0;
+ uint16_t vlan_tag;
+ uint32_t event_tag;
+ uint8_t rsvd1[4];
+ uint32_t code;
+};
+
typedef union oce_mq_ext_ctx_u {
uint32_t dw[6];
struct {
@@ -750,6 +766,44 @@ typedef union oce_mq_ext_ctx_u {
/* dw5 */
uint32_t dw8rsvd1;
} v0;
+ struct {
+ #ifdef _BIG_ENDIAN
+ /* dw0 */
+ uint32_t cq_id:16;
+ uint32_t num_pages:16;
+ /* dw1 */
+ uint32_t async_evt_bitmap;
+ /* dw2 */
+ uint32_t dw5rsvd2:12;
+ uint32_t ring_size:4;
+ uint32_t async_cq_id:16;
+ /* dw3 */
+ uint32_t valid:1;
+ uint32_t dw6rsvd1:31;
+ /* dw4 */
+ uint32_t dw7rsvd1:31;
+ uint32_t async_cq_valid:1;
+ #else
+ /* dw0 */
+ uint32_t num_pages:16;
+ uint32_t cq_id:16;
+ /* dw1 */
+ uint32_t async_evt_bitmap;
+ /* dw2 */
+ uint32_t async_cq_id:16;
+ uint32_t ring_size:4;
+ uint32_t dw5rsvd2:12;
+ /* dw3 */
+ uint32_t dw6rsvd1:31;
+ uint32_t valid:1;
+ /* dw4 */
+ uint32_t async_cq_valid:1;
+ uint32_t dw7rsvd1:31;
+ #endif
+ /* dw5 */
+ uint32_t dw8rsvd1;
+ } v1;
+
} oce_mq_ext_ctx_t;
@@ -826,6 +880,7 @@ enum COMMON_SUBSYSTEM_OPCODES {
OPCODE_COMMON_SET_BEACON_CONFIG = 69,
OPCODE_COMMON_GET_BEACON_CONFIG = 70,
OPCODE_COMMON_GET_PHYSICAL_LINK_CONFIG = 71,
+ OPCODE_COMMON_READ_TRANSRECEIVER_DATA = 73,
OPCODE_COMMON_GET_OEM_ATTRIBUTES = 76,
OPCODE_COMMON_GET_PORT_NAME = 77,
OPCODE_COMMON_GET_CONFIG_SIGNATURE = 78,
@@ -1724,6 +1779,12 @@ struct mbx_set_common_iface_rx_filter {
} params;
};
+struct be_set_eqd {
+ uint32_t eq_id;
+ uint32_t phase;
+ uint32_t dm;
+};
+
/* [41] OPCODE_COMMON_MODIFY_EQ_DELAY */
struct mbx_modify_common_eq_delay {
struct mbx_hdr hdr;
@@ -1743,6 +1804,76 @@ struct mbx_modify_common_eq_delay {
} params;
};
+/* [32] OPCODE_COMMON_GET_CNTL_ATTRIBUTES */
+
+struct mgmt_hba_attr {
+ int8_t flashrom_ver_str[32];
+ int8_t manufac_name[32];
+ uint32_t supp_modes;
+ int8_t seeprom_ver_lo;
+ int8_t seeprom_ver_hi;
+ int8_t rsvd0[2];
+ uint32_t ioctl_data_struct_ver;
+ uint32_t ep_fw_data_struct_ver;
+ uint8_t ncsi_ver_str[12];
+ uint32_t def_ext_to;
+ int8_t cntl_mod_num[32];
+ int8_t cntl_desc[64];
+ int8_t cntl_ser_num[32];
+ int8_t ip_ver_str[32];
+ int8_t fw_ver_str[32];
+ int8_t bios_ver_str[32];
+ int8_t redboot_ver_str[32];
+ int8_t drv_ver_str[32];
+ int8_t fw_on_flash_ver_str[32];
+ uint32_t funcs_supp;
+ uint16_t max_cdblen;
+ uint8_t asic_rev;
+ uint8_t gen_guid[16];
+ uint8_t hba_port_count;
+ uint16_t default_link_down_timeout;
+ uint8_t iscsi_ver_min_max;
+ uint8_t multifunc_dev;
+ uint8_t cache_valid;
+ uint8_t hba_status;
+ uint8_t max_domains_supp;
+ uint8_t phy_port;
+ uint32_t fw_post_status;
+ uint32_t hba_mtu[8];
+ uint8_t iSCSI_feat;
+ uint8_t asic_gen;
+ uint8_t future_u8[2];
+ uint32_t future_u32[3];
+};
+
+struct mgmt_cntl_attr {
+ struct mgmt_hba_attr hba_attr;
+ uint16_t pci_vendor_id;
+ uint16_t pci_device_id;
+ uint16_t pci_sub_vendor_id;
+ uint16_t pci_sub_system_id;
+ uint8_t pci_bus_num;
+ uint8_t pci_dev_num;
+ uint8_t pci_func_num;
+ uint8_t interface_type;
+ uint64_t unique_id;
+ uint8_t netfilters;
+ uint8_t rsvd0[3];
+ uint32_t future_u32[4];
+};
+
+struct mbx_common_get_cntl_attr {
+ struct mbx_hdr hdr;
+ union {
+ struct {
+ uint32_t rsvd0;
+ } req;
+ struct {
+ struct mgmt_cntl_attr cntl_attr_info;
+ } rsp;
+ } params;
+};
+
/* [59] OPCODE_ADD_COMMON_IFACE_MAC */
struct mbx_add_common_iface_mac {
struct mbx_hdr hdr;
@@ -1785,6 +1916,23 @@ struct ioctl_common_function_reset {
struct mbx_hdr hdr;
};
+/* [73] OPCODE_COMMON_READ_TRANSRECEIVER_DATA */
+struct mbx_read_common_transrecv_data {
+ struct mbx_hdr hdr;
+ union {
+ struct {
+ uint32_t page_num;
+ uint32_t port;
+ } req;
+ struct {
+ uint32_t page_num;
+ uint32_t port;
+ uint32_t page_data[32];
+ } rsp;
+ } params;
+
+};
+
/* [80] OPCODE_COMMON_FUNCTION_LINK_CONFIG */
struct mbx_common_func_link_cfg {
struct mbx_hdr hdr;
@@ -2110,7 +2258,9 @@ enum RSS_ENABLE_FLAGS {
RSS_ENABLE_IPV4 = 0x1, /* (IPV4 HASH enabled ) */
RSS_ENABLE_TCP_IPV4 = 0x2, /* (TCP IPV4 Hash enabled) */
RSS_ENABLE_IPV6 = 0x4, /* (IPV6 HASH enabled) */
- RSS_ENABLE_TCP_IPV6 = 0x8 /* (TCP IPV6 HASH */
+ RSS_ENABLE_TCP_IPV6 = 0x8, /* (TCP IPV6 HASH */
+ RSS_ENABLE_UDP_IPV4 = 0x10, /* UDP IPV4 HASH */
+ RSS_ENABLE_UDP_IPV6 = 0x20 /* UDP IPV6 HASH */
};
#define RSS_ENABLE (RSS_ENABLE_IPV4 | RSS_ENABLE_TCP_IPV4)
#define RSS_DISABLE RSS_ENABLE_NONE
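
The two UDP bits extend the RSS hash-enable set; the flags OR together into the mask that oce_config_nic_rss() (later in this diff) hands to the NIC_CONFIG_RSS mailbox command. A minimal composition, with the resulting value worked out:

    /* Sketch: full hash-enable mask including the new UDP bits. */
    uint32_t rss = RSS_ENABLE_IPV4 | RSS_ENABLE_TCP_IPV4 |
        RSS_ENABLE_IPV6 | RSS_ENABLE_TCP_IPV6 |
        RSS_ENABLE_UDP_IPV4 | RSS_ENABLE_UDP_IPV6;
    /* 0x1 | 0x2 | 0x4 | 0x8 | 0x10 | 0x20 == 0x3f */
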
diff --git a/sys/dev/oce/oce_if.c b/sys/dev/oce/oce_if.c
index 426acd9..48cdb16 100644
--- a/sys/dev/oce/oce_if.c
+++ b/sys/dev/oce/oce_if.c
@@ -36,6 +36,7 @@
* Costa Mesa, CA 92626
*/
+
/* $FreeBSD$ */
#include "opt_inet6.h"
@@ -94,7 +95,8 @@ static void setup_max_queues_want(POCE_SOFTC sc);
static void update_queues_got(POCE_SOFTC sc);
static void process_link_state(POCE_SOFTC sc,
struct oce_async_cqe_link_state *acqe);
-
+static int oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m);
+static struct mbuf *oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete);
/* IP specific */
#if defined(INET6) || defined(INET)
@@ -266,8 +268,6 @@ oce_attach(device_t dev)
rc = callout_reset(&sc->timer, 2 * hz, oce_local_timer, sc);
if (rc)
goto stats_free;
-#ifdef DEV_NETMAP
-#endif /* DEV_NETMAP */
return 0;
@@ -485,7 +485,7 @@ oce_multiq_start(struct ifnet *ifp, struct mbuf *m)
struct oce_wq *wq = NULL;
int queue_index = 0;
int status = 0;
-
+
if ((m->m_flags & M_FLOWID) != 0)
queue_index = m->m_pkthdr.flowid % sc->nwqs;
@@ -568,6 +568,7 @@ oce_intr(void *arg, int pending)
eq_arm:
oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);
+
return;
}
@@ -633,6 +634,8 @@ oce_fast_isr(void *arg)
taskqueue_enqueue_fast(ii->tq, &ii->task);
+ ii->eq->intr++;
+
return FILTER_HANDLED;
}
@@ -780,6 +783,7 @@ oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index)
struct oce_nic_frag_wqe *nicfrag;
int num_wqes;
uint32_t reg_value;
+ boolean_t complete = TRUE;
m = *mpp;
if (!m)
@@ -790,6 +794,15 @@ oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index)
goto free_ret;
}
+ if(oce_tx_asic_stall_verify(sc, m)) {
+ m = oce_insert_vlan_tag(sc, m, &complete);
+ if(!m) {
+ device_printf(sc->dev, "Insertion unsuccessful\n");
+ return 0;
+ }
+
+ }
+
if (m->m_pkthdr.csum_flags & CSUM_TSO) {
/* consolidate packet buffers for TSO/LSO segment offload */
#if defined(INET6) || defined(INET)
@@ -837,15 +850,15 @@ retry:
nichdr->u0.dw[2] = 0;
nichdr->u0.dw[3] = 0;
- nichdr->u0.s.complete = 1;
+ nichdr->u0.s.complete = complete;
nichdr->u0.s.event = 1;
nichdr->u0.s.crc = 1;
nichdr->u0.s.forward = 0;
nichdr->u0.s.ipcs = (m->m_pkthdr.csum_flags & CSUM_IP) ? 1 : 0;
nichdr->u0.s.udpcs =
- (m->m_pkthdr.csum_flags & CSUM_UDP) ? 1 : 0;
+ (m->m_pkthdr.csum_flags & CSUM_UDP) ? 1 : 0;
nichdr->u0.s.tcpcs =
- (m->m_pkthdr.csum_flags & CSUM_TCP) ? 1 : 0;
+ (m->m_pkthdr.csum_flags & CSUM_TCP) ? 1 : 0;
nichdr->u0.s.num_wqe = num_wqes;
nichdr->u0.s.total_length = m->m_pkthdr.len;
if (m->m_flags & M_VLANTAG) {
@@ -895,7 +908,7 @@ retry:
wq->tx_stats.tx_wrbs += num_wqes;
wq->tx_stats.tx_bytes += m->m_pkthdr.len;
wq->tx_stats.tx_pkts++;
-
+
bus_dmamap_sync(wq->ring->dma.tag, wq->ring->dma.map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
reg_value = (num_wqes << 16) | wq->wq_id;
@@ -1081,6 +1094,9 @@ oce_start(struct ifnet *ifp)
if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
IFF_DRV_RUNNING)
return;
+
+ if (!sc->link_status)
+ return;
do {
IF_DEQUEUE(&sc->ifp->if_snd, m);
@@ -1303,7 +1319,6 @@ oce_rx(struct oce_rq *rq, uint32_t rqe_idx, struct oce_nic_rx_cqe *cqe)
#if defined(INET6) || defined(INET)
/* Try to queue to LRO */
if (IF_LRO_ENABLED(sc) &&
- !(m->m_flags & M_VLANTAG) &&
(cqe->u0.s.ip_cksum_pass) &&
(cqe->u0.s.l4_cksum_pass) &&
(!cqe->u0.s.ip_ver) &&
@@ -1343,13 +1358,6 @@ oce_discard_rx_comp(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
int num_frags = cqe->u0.s.num_fragments;
- if (IS_XE201(sc) && cqe->u0.s.error) {
- /* Lancer A0 workaround
- * num_frags will be 1 more than actual in case of error
- */
- if (num_frags)
- num_frags -= 1;
- }
for (i = 0; i < num_frags; i++) {
if (rq->packets_out == rq->packets_in) {
device_printf(sc->dev,
@@ -1458,7 +1466,7 @@ oce_free_lro(POCE_SOFTC sc)
tcp_lro_free(lro);
}
}
-#endif /* INET6 || INET */
+#endif
int
oce_alloc_rx_bufs(struct oce_rq *rq, int count)
@@ -1471,7 +1479,7 @@ oce_alloc_rx_bufs(struct oce_rq *rq, int count)
struct oce_nic_rqe *rqe;
pd_rxulp_db_t rxdb_reg;
-
+ bzero(&rxdb_reg, sizeof(pd_rxulp_db_t));
for (i = 0; i < count; i++) {
in = rq->packets_in + 1;
if (in == OCE_RQ_PACKET_ARRAY_SIZE)
@@ -1512,14 +1520,12 @@ oce_alloc_rx_bufs(struct oce_rq *rq, int count)
}
if (added != 0) {
for (i = added / OCE_MAX_RQ_POSTS; i > 0; i--) {
- DELAY(1);
rxdb_reg.bits.num_posted = OCE_MAX_RQ_POSTS;
rxdb_reg.bits.qid = rq->rq_id;
OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
added -= OCE_MAX_RQ_POSTS;
}
if (added > 0) {
- DELAY(1);
rxdb_reg.bits.qid = rq->rq_id;
rxdb_reg.bits.num_posted = added;
OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
@@ -1554,13 +1560,8 @@ oce_rq_handler(void *arg)
} else {
rq->rx_stats.rxcp_err++;
sc->ifp->if_ierrors++;
- if (IS_XE201(sc))
- /* Lancer A0 no buffer workaround */
- oce_discard_rx_comp(rq, cqe);
- else
- /* Post L3/L4 errors to stack.*/
- oce_rx(rq, cqe->u0.s.frag_index, cqe);
-
+ /* Post L3/L4 errors to stack.*/
+ oce_rx(rq, cqe->u0.s.frag_index, cqe);
}
rq->rx_stats.rx_compl++;
cqe->u0.dw[2] = 0;
@@ -1757,18 +1758,18 @@ oce_handle_passthrough(struct ifnet *ifp, caddr_t data)
uint32_t req_size;
struct mbx_hdr req;
OCE_DMA_MEM dma_mem;
-
+ struct mbx_common_get_cntl_attr *fw_cmd;
if (copyin(priv_data, cookie, strlen(IOCTL_COOKIE)))
return EFAULT;
-
+
if (memcmp(cookie, IOCTL_COOKIE, strlen(IOCTL_COOKIE)))
return EINVAL;
-
+
ioctl_ptr = (char *)priv_data + strlen(IOCTL_COOKIE);
if (copyin(ioctl_ptr, &req, sizeof(struct mbx_hdr)))
return EFAULT;
-
+
req_size = le32toh(req.u0.req.request_length);
if (req_size > 65536)
return EINVAL;
@@ -1792,12 +1793,86 @@ oce_handle_passthrough(struct ifnet *ifp, caddr_t data)
if (copyout(OCE_DMAPTR(&dma_mem,char), ioctl_ptr, req_size))
rc = EFAULT;
+ /*
+ * firmware fills in all the attributes for this ioctl except
+ * the driver version, so fill it in here
+ */
+ if(req.u0.rsp.opcode == OPCODE_COMMON_GET_CNTL_ATTRIBUTES) {
+ fw_cmd = (struct mbx_common_get_cntl_attr *) ioctl_ptr;
+ strncpy(fw_cmd->params.rsp.cntl_attr_info.hba_attr.drv_ver_str,
+ COMPONENT_REVISION, strlen(COMPONENT_REVISION));
+ }
+
dma_free:
oce_dma_free(sc, &dma_mem);
return rc;
}
+static void
+oce_eqd_set_periodic(POCE_SOFTC sc)
+{
+ struct oce_set_eqd set_eqd[OCE_MAX_EQ];
+ struct oce_aic_obj *aic;
+ struct oce_eq *eqo;
+ uint64_t now = 0, delta;
+ int eqd, i, num = 0;
+ uint32_t ips = 0;
+ int tps;
+
+ for (i = 0 ; i < sc->neqs; i++) {
+ eqo = sc->eq[i];
+ aic = &sc->aic_obj[i];
+ /* When setting the static eq delay from the user space */
+ if (!aic->enable) {
+ eqd = aic->et_eqd;
+ goto modify_eqd;
+ }
+
+ now = ticks;
+
+ /* Overflow check */
+ if ((now < aic->ticks) || (eqo->intr < aic->intr_prev))
+ goto done;
+
+ delta = now - aic->ticks;
+ tps = delta/hz;
+
+ /* Interrupt rate based on elapsed ticks */
+ if(tps)
+ ips = (uint32_t)(eqo->intr - aic->intr_prev) / tps;
+
+ if (ips > INTR_RATE_HWM)
+ eqd = aic->cur_eqd + 20;
+ else if (ips < INTR_RATE_LWM)
+ eqd = aic->cur_eqd / 2;
+ else
+ goto done;
+
+ if (eqd < 10)
+ eqd = 0;
+
+ /* Make sure that the eq delay is in the known range */
+ eqd = min(eqd, aic->max_eqd);
+ eqd = max(eqd, aic->min_eqd);
+
+modify_eqd:
+ if (eqd != aic->cur_eqd) {
+ set_eqd[num].delay_multiplier = (eqd * 65)/100;
+ set_eqd[num].eq_id = eqo->eq_id;
+ aic->cur_eqd = eqd;
+ num++;
+ }
+done:
+ aic->intr_prev = eqo->intr;
+ aic->ticks = now;
+ }
+
+ /* Is there at least one eq that needs to be modified? */
+ if(num)
+ oce_mbox_eqd_modify_periodic(sc, set_eqd, num);
+
+}
static void
oce_local_timer(void *arg)
@@ -1813,6 +1888,10 @@ oce_local_timer(void *arg)
for (i = 0; i < sc->nwqs; i++)
oce_tx_restart(sc, sc->wq[i]);
+ /* calculate and set the eq delay for optimal interrupt rate */
+ if (IS_BE(sc))
+ oce_eqd_set_periodic(sc);
+
callout_reset(&sc->timer, hz, oce_local_timer, sc);
}
@@ -1849,17 +1928,17 @@ oce_if_deactivate(POCE_SOFTC sc)
/* Stop intrs and finish any bottom halves pending */
oce_hw_intr_disable(sc);
- /* Since taskqueue_drain takes a Giant Lock, We should not acquire
- any other lock. So unlock device lock and require after
- completing taskqueue_drain.
- */
- UNLOCK(&sc->dev_lock);
+ /* Since taskqueue_drain takes a Giant Lock, we should not acquire
+ any other lock. So unlock the device lock and reacquire it after
+ completing taskqueue_drain.
+ */
+ UNLOCK(&sc->dev_lock);
for (i = 0; i < sc->intr_count; i++) {
if (sc->intrs[i].tq != NULL) {
taskqueue_drain(sc->intrs[i].tq, &sc->intrs[i].task);
}
}
- LOCK(&sc->dev_lock);
+ LOCK(&sc->dev_lock);
/* Delete RX queue in card with flush param */
oce_stop_rx(sc);
@@ -1874,7 +1953,7 @@ oce_if_deactivate(POCE_SOFTC sc)
/* But still we need to get MCC aync events.
So enable intrs and also arm first EQ
- */
+ */
oce_hw_intr_enable(sc);
oce_arm_eq(sc, sc->eq[0]->eq_id, 0, TRUE, FALSE);
@@ -1947,6 +2026,7 @@ oce_mq_handler(void *arg)
struct oce_mq_cqe *cqe;
struct oce_async_cqe_link_state *acqe;
struct oce_async_event_grp5_pvid_state *gcqe;
+ struct oce_async_event_qnq *dbgcqe;
bus_dmamap_sync(cq->ring->dma.tag,
@@ -1973,6 +2053,14 @@ oce_mq_handler(void *arg)
sc->pvid = 0;
}
+ else if(evt_type == ASYNC_EVENT_CODE_DEBUG &&
+ optype == ASYNC_EVENT_DEBUG_QNQ) {
+ dbgcqe =
+ (struct oce_async_event_qnq *)cqe;
+ if(dbgcqe->valid)
+ sc->qnqid = dbgcqe->vlan_tag;
+ sc->qnq_debug_event = TRUE;
+ }
}
cqe->u0.dw[3] = 0;
RING_GET(cq->ring, 1);
@@ -2032,3 +2120,79 @@ update_queues_got(POCE_SOFTC sc)
}
}
+static int
+oce_check_ipv6_ext_hdr(struct mbuf *m)
+{
+ struct ether_header *eh = mtod(m, struct ether_header *);
+ caddr_t m_datatemp = m->m_data;
+
+ if (eh->ether_type == htons(ETHERTYPE_IPV6)) {
+ m->m_data += sizeof(struct ether_header);
+ struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
+
+ if((ip6->ip6_nxt != IPPROTO_TCP) &&
+ (ip6->ip6_nxt != IPPROTO_UDP)) {
+ struct ip6_ext *ip6e = NULL;
+ m->m_data += sizeof(struct ip6_hdr);
+
+ ip6e = (struct ip6_ext *) mtod(m, struct ip6_ext *);
+ if(ip6e->ip6e_len == 0xff) {
+ m->m_data = m_datatemp;
+ return TRUE;
+ }
+ }
+ m->m_data = m_datatemp;
+ }
+ return FALSE;
+}
+
+static int
+is_be3_a1(POCE_SOFTC sc)
+{
+ if((sc->flags & OCE_FLAGS_BE3) && ((sc->asic_revision & 0xFF) < 2)) {
+ return TRUE;
+ }
+ return FALSE;
+}
+
+static struct mbuf *
+oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete)
+{
+ uint16_t vlan_tag = 0;
+
+ if(!M_WRITABLE(m))
+ return NULL;
+
+ /* Embed vlan tag in the packet if it is not part of it */
+ if(m->m_flags & M_VLANTAG) {
+ vlan_tag = EVL_VLANOFTAG(m->m_pkthdr.ether_vtag);
+ m->m_flags &= ~M_VLANTAG;
+ }
+
+ /* if UMC, ignore vlan tag insertion and instead insert pvid */
+ if(sc->pvid) {
+ if(!vlan_tag)
+ vlan_tag = sc->pvid;
+ *complete = FALSE;
+ }
+
+ if(vlan_tag) {
+ m = ether_vlanencap(m, vlan_tag);
+ }
+
+ if(sc->qnqid) {
+ m = ether_vlanencap(m, sc->qnqid);
+ *complete = FALSE;
+ }
+ return m;
+}
+
+static int
+oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m)
+{
+ if(is_be3_a1(sc) && IS_QNQ_OR_UMC(sc) &&
+ oce_check_ipv6_ext_hdr(m)) {
+ return TRUE;
+ }
+ return FALSE;
+}
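
The three helpers above close a BE3 A0/A1 hardware erratum: in QnQ or UMC mode, an IPv6 packet carrying extension headers can stall the transmit ASIC, so oce_tx() re-encapsulates the VLAN (or pvid/qnqid) tag in software and clears the completion bit. A condensed sketch of the gating, with sc and m standing in for the live driver objects:

    /* Condensed sketch of the oce_tx() gating added in this commit. */
    static struct mbuf *
    example_tx_stall_gate(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete)
    {
    	*complete = TRUE;		/* default: hardware completion on */
    	if (oce_tx_asic_stall_verify(sc, m)) {
    		/* BE3 A1 + QnQ/UMC + IPv6 extension header present */
    		m = oce_insert_vlan_tag(sc, m, complete);
    		/* *complete becomes FALSE when pvid/qnqid was inserted;
    		 * oce_tx() copies it into nichdr->u0.s.complete */
    	}
    	return (m);			/* NULL if the mbuf was not writable */
    }
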
diff --git a/sys/dev/oce/oce_if.h b/sys/dev/oce/oce_if.h
index 6428f88..ee684bd 100644
--- a/sys/dev/oce/oce_if.h
+++ b/sys/dev/oce/oce_if.h
@@ -36,6 +36,7 @@
* Costa Mesa, CA 92626
*/
+
/* $FreeBSD$ */
#include <sys/param.h>
@@ -87,9 +88,7 @@
#include "oce_hw.h"
-/* OCE device driver module component revision informaiton */
-#define COMPONENT_REVISION "4.2.127.0"
-
+#define COMPONENT_REVISION "4.6.95.0"
/* OCE devices supported by this driver */
#define PCI_VENDOR_EMULEX 0x10df /* Emulex */
@@ -132,7 +131,7 @@ extern int mp_ncpus; /* system's total active cpu cores */
#define OCE_RQ_BUF_SIZE 2048
#define OCE_LSO_MAX_SIZE (64 * 1024)
#define LONG_TIMEOUT 30
-#define OCE_MAX_JUMBO_FRAME_SIZE 16360
+#define OCE_MAX_JUMBO_FRAME_SIZE 9018
#define OCE_MAX_MTU (OCE_MAX_JUMBO_FRAME_SIZE - \
ETHER_VLAN_ENCAP_LEN - \
ETHER_HDR_LEN)
@@ -481,7 +480,27 @@ struct oce_drv_stats {
} u0;
};
+#define INTR_RATE_HWM 15000
+#define INTR_RATE_LWM 10000
+
+#define OCE_MAX_EQD 128u
+#define OCE_MIN_EQD 50u
+
+struct oce_set_eqd {
+ uint32_t eq_id;
+ uint32_t phase;
+ uint32_t delay_multiplier;
+};
+struct oce_aic_obj { /* Adaptive interrupt coalescing (AIC) info */
+ boolean_t enable;
+ uint32_t min_eqd; /* in usecs */
+ uint32_t max_eqd; /* in usecs */
+ uint32_t cur_eqd; /* in usecs */
+ uint32_t et_eqd; /* configured value when aic is off */
+ uint64_t ticks;
+ uint64_t intr_prev;
+};
#define MAX_LOCK_DESC_LEN 32
struct oce_lock {
@@ -565,6 +584,7 @@ struct oce_eq {
int cq_valid;
struct eq_config eq_cfg;
int vector;
+ uint64_t intr;
};
enum cq_len {
@@ -827,6 +847,9 @@ typedef struct oce_softc {
uint32_t flow_control;
uint32_t promisc;
+
+ struct oce_aic_obj aic_obj[OCE_MAX_EQ];
+
/*Vlan Filtering related */
eventhandler_tag vlan_attach;
eventhandler_tag vlan_detach;
@@ -837,7 +860,9 @@ typedef struct oce_softc {
struct oce_drv_stats oce_stats_info;
struct callout timer;
int8_t be3_native;
- uint32_t pvid;
+ uint16_t qnq_debug_event;
+ uint16_t qnqid;
+ uint16_t pvid;
} OCE_SOFTC, *POCE_SOFTC;
@@ -996,6 +1021,9 @@ int oce_mbox_create_wq(struct oce_wq *wq);
int oce_mbox_create_eq(struct oce_eq *eq);
int oce_mbox_cq_create(struct oce_cq *cq, uint32_t ncoalesce,
uint32_t is_eventable);
+int oce_mbox_read_transrecv_data(POCE_SOFTC sc, uint32_t page_num);
+void oce_mbox_eqd_modify_periodic(POCE_SOFTC sc, struct oce_set_eqd *set_eqd,
+ int num);
void mbx_common_req_hdr_init(struct mbx_hdr *hdr,
uint8_t dom,
uint8_t port,
@@ -1076,3 +1104,12 @@ static inline uint32_t oce_highbit(uint32_t x)
return 0;
}
+#define TRANSCEIVER_DATA_NUM_ELE 64
+#define TRANSCEIVER_DATA_SIZE 256
+#define TRANSCEIVER_A0_SIZE 128
+#define TRANSCEIVER_A2_SIZE 128
+#define PAGE_NUM_A0 0xa0
+#define PAGE_NUM_A2 0xa2
+#define IS_QNQ_OR_UMC(sc) ((sc->pvid && (sc->function_mode & FNM_UMC_MODE ))\
+ || (sc->qnqid && (sc->function_mode & FNM_FLEX10_MODE)))
+
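
INTR_RATE_HWM/LWM and struct oce_aic_obj above feed oce_eqd_set_periodic() (added earlier in this diff), which runs from the one-second local timer: it derives interrupts per second from the per-EQ counter, bumps the EQ delay by 20 us above the high-water mark, halves it below the low-water mark, and clamps the result. One adaptation step worked through under assumed inputs:

    /* Sketch: one EQ-delay adaptation step, assuming one second elapsed
     * (tps == 1) and the constants defined above. */
    static int
    example_eqd_step(uint32_t ips, int eqd)
    {
    	if (ips > INTR_RATE_HWM)	/* e.g. 18000 > 15000 */
    		eqd += 20;		/* 40 -> 60 */
    	else if (ips < INTR_RATE_LWM)
    		eqd /= 2;
    	if (eqd < 10)
    		eqd = 0;
    	/* oce_eqd_set_periodic() then clamps into
    	 * [OCE_MIN_EQD, OCE_MAX_EQD] == [50, 128] and sends
    	 * delay_multiplier = eqd * 65 / 100 (39 for eqd == 60). */
    	return (eqd);
    }
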
diff --git a/sys/dev/oce/oce_mbox.c b/sys/dev/oce/oce_mbox.c
index 07107a2..2db5934 100644
--- a/sys/dev/oce/oce_mbox.c
+++ b/sys/dev/oce/oce_mbox.c
@@ -37,10 +37,12 @@
*/
+
/* $FreeBSD$ */
-#include "oce_if.h"
+#include "oce_if.h"
+extern uint32_t sfp_vpd_dump_buffer[TRANSCEIVER_DATA_NUM_ELE];
/**
* @brief Reset (firmware) common function
@@ -276,12 +278,17 @@ oce_get_fw_version(POCE_SOFTC sc)
DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
ret = oce_mbox_post(sc, &mbx, NULL);
- if (ret)
- return ret;
+ if (!ret)
+ ret = fwcmd->hdr.u0.rsp.status;
+ if (ret) {
+ device_printf(sc->dev,"%s failed - cmd status: %d\n",
+ __FUNCTION__, ret);
+ goto error;
+ }
bcopy(fwcmd->params.rsp.fw_ver_str, sc->fw_version, 32);
-
- return 0;
+error:
+ return ret;
}
@@ -428,15 +435,20 @@ oce_read_mac_addr(POCE_SOFTC sc, uint32_t if_id,
DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
ret = oce_mbox_post(sc, &mbx, NULL);
- if (ret)
- return ret;
+ if (!ret)
+ ret = fwcmd->hdr.u0.rsp.status;
+ if (ret) {
+ device_printf(sc->dev,"%s failed - cmd status: %d\n",
+ __FUNCTION__, ret);
+ goto error;
+ }
/* copy the mac addres in the output parameter */
mac->size_of_struct = fwcmd->params.rsp.mac.size_of_struct;
bcopy(&fwcmd->params.rsp.mac.mac_addr[0], &mac->mac_addr[0],
mac->size_of_struct);
-
- return 0;
+error:
+ return ret;
}
/**
@@ -466,8 +478,13 @@ oce_get_fw_config(POCE_SOFTC sc)
DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
ret = oce_mbox_post(sc, &mbx, NULL);
- if (ret)
- return ret;
+ if (!ret)
+ ret = fwcmd->hdr.u0.rsp.status;
+ if (ret) {
+ device_printf(sc->dev,"%s failed - cmd status: %d\n",
+ __FUNCTION__, ret);
+ goto error;
+ }
DW_SWAP(u32ptr(fwcmd), sizeof(struct mbx_common_query_fw_config));
@@ -485,7 +502,8 @@ oce_get_fw_config(POCE_SOFTC sc)
sc->max_rx_rings = fwcmd->params.rsp.ulp[1].lro_rqid_tot;
}
- return 0;
+error:
+ return ret;
}
@@ -540,15 +558,20 @@ oce_if_create(POCE_SOFTC sc,
DW_SWAP(u32ptr(&mbx), OCE_BMBX_RHDR_SZ);
rc = oce_mbox_post(sc, &mbx, NULL);
- if (rc)
- return rc;
+ if (!rc)
+ rc = fwcmd->hdr.u0.rsp.status;
+ if (rc) {
+ device_printf(sc->dev,"%s failed - cmd status: %d\n",
+ __FUNCTION__, rc);
+ goto error;
+ }
*if_id = LE_32(fwcmd->params.rsp.if_id);
if (mac_addr != NULL)
sc->pmac_id = LE_32(fwcmd->params.rsp.pmac_id);
-
- return 0;
+error:
+ return rc;
}
/**
@@ -581,6 +604,11 @@ oce_if_del(POCE_SOFTC sc, uint32_t if_id)
DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
rc = oce_mbox_post(sc, &mbx, NULL);
+ if (!rc)
+ rc = fwcmd->hdr.u0.rsp.status;
+ if (rc)
+ device_printf(sc->dev,"%s failed - cmd status: %d\n",
+ __FUNCTION__, rc);
return rc;
}
@@ -628,8 +656,12 @@ oce_config_vlan(POCE_SOFTC sc,
DW_SWAP(u32ptr(&mbx), (OCE_BMBX_RHDR_SZ + mbx.payload_length));
rc = oce_mbox_post(sc, &mbx, NULL);
-
- return rc;
+ if (!rc)
+ rc = fwcmd->hdr.u0.rsp.status;
+ if (rc)
+ device_printf(sc->dev,"%s failed - cmd status: %d\n",
+ __FUNCTION__, rc);
+ return 0;
}
@@ -667,7 +699,11 @@ oce_set_flow_control(POCE_SOFTC sc, uint32_t flow_control)
DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
rc = oce_mbox_post(sc, &mbx, NULL);
-
+ if (!rc)
+ rc = fwcmd->hdr.u0.rsp.status;
+ if (rc)
+ device_printf(sc->dev,"%s failed - cmd status: %d\n",
+ __FUNCTION__, rc);
return rc;
}
@@ -726,20 +762,28 @@ oce_config_nic_rss(POCE_SOFTC sc, uint32_t if_id, uint16_t enable_rss)
struct oce_mbx mbx;
struct mbx_config_nic_rss *fwcmd =
(struct mbx_config_nic_rss *)&mbx.payload;
+ int version;
bzero(&mbx, sizeof(struct oce_mbx));
+ if (IS_XE201(sc)) {
+ version = OCE_MBX_VER_V1;
+ fwcmd->params.req.enable_rss = RSS_ENABLE_UDP_IPV4 |
+ RSS_ENABLE_UDP_IPV6;
+ } else
+ version = OCE_MBX_VER_V0;
+
mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
MBX_SUBSYSTEM_NIC,
NIC_CONFIG_RSS,
MBX_TIMEOUT_SEC,
sizeof(struct mbx_config_nic_rss),
- OCE_MBX_VER_V0);
+ version);
if (enable_rss)
- fwcmd->params.req.enable_rss = (RSS_ENABLE_IPV4 |
- RSS_ENABLE_TCP_IPV4 |
- RSS_ENABLE_IPV6 |
- RSS_ENABLE_TCP_IPV6);
+ fwcmd->params.req.enable_rss |= (RSS_ENABLE_IPV4 |
+ RSS_ENABLE_TCP_IPV4 |
+ RSS_ENABLE_IPV6 |
+ RSS_ENABLE_TCP_IPV6);
fwcmd->params.req.flush = OCE_FLUSH;
fwcmd->params.req.if_id = LE_32(if_id);
@@ -753,9 +797,12 @@ oce_config_nic_rss(POCE_SOFTC sc, uint32_t if_id, uint16_t enable_rss)
DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
rc = oce_mbox_post(sc, &mbx, NULL);
-
+ if (!rc)
+ rc = fwcmd->hdr.u0.rsp.status;
+ if (rc)
+ device_printf(sc->dev,"%s failed - cmd status: %d\n",
+ __FUNCTION__, rc);
}
-
return rc;
}
@@ -834,7 +881,12 @@ oce_set_common_iface_rx_filter(POCE_SOFTC sc, POCE_DMA_MEM sgl)
DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
rc = oce_mbox_post(sc, &mbx, NULL);
- return rc;
+ if (!rc)
+ rc = fwcmd->hdr.u0.rsp.status;
+ if (rc)
+ device_printf(sc->dev,"%s failed - cmd status: %d\n",
+ __FUNCTION__, rc);
+ return 0;
}
/**
@@ -848,17 +900,19 @@ oce_get_link_status(POCE_SOFTC sc, struct link_status *link)
{
struct oce_mbx mbx;
struct mbx_query_common_link_config *fwcmd;
- int rc = 0;
+ int rc = 0, version;
bzero(&mbx, sizeof(struct oce_mbx));
+ version = IS_XE201(sc) ? OCE_MBX_VER_V1 : OCE_MBX_VER_V0;
+
fwcmd = (struct mbx_query_common_link_config *)&mbx.payload;
mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
MBX_SUBSYSTEM_COMMON,
OPCODE_COMMON_QUERY_LINK_CONFIG,
MBX_TIMEOUT_SEC,
sizeof(struct mbx_query_common_link_config),
- OCE_MBX_VER_V0);
+ version);
mbx.u0.s.embedded = 1;
mbx.payload_length = sizeof(struct mbx_query_common_link_config);
@@ -866,15 +920,18 @@ oce_get_link_status(POCE_SOFTC sc, struct link_status *link)
rc = oce_mbox_post(sc, &mbx, NULL);
+ if (!rc)
+ rc = fwcmd->hdr.u0.rsp.status;
if (rc) {
- device_printf(sc->dev, "Could not get link speed: %d\n", rc);
- } else {
- /* interpret response */
- bcopy(&fwcmd->params.rsp, link, sizeof(struct link_status));
- link->logical_link_status = LE_32(link->logical_link_status);
- link->qos_link_speed = LE_16(link->qos_link_speed);
+ device_printf(sc->dev,"%s failed - cmd status: %d\n",
+ __FUNCTION__, rc);
+ goto error;
}
-
+ /* interpret response */
+ bcopy(&fwcmd->params.rsp, link, sizeof(struct link_status));
+ link->logical_link_status = LE_32(link->logical_link_status);
+ link->qos_link_speed = LE_16(link->qos_link_speed);
+error:
return rc;
}
@@ -916,11 +973,11 @@ oce_mbox_get_nic_stats_v0(POCE_SOFTC sc, POCE_DMA_MEM pstats_dma_mem)
oce_dma_sync(pstats_dma_mem, BUS_DMASYNC_POSTWRITE);
- if (rc) {
- device_printf(sc->dev,
- "Could not get nic statistics: %d\n", rc);
- }
-
+ if (!rc)
+ rc = fwcmd->hdr.u0.rsp.status;
+ if (rc)
+ device_printf(sc->dev,"%s failed - cmd status: %d\n",
+ __FUNCTION__, rc);
return rc;
}
@@ -966,10 +1023,11 @@ oce_mbox_get_nic_stats(POCE_SOFTC sc, POCE_DMA_MEM pstats_dma_mem)
rc = oce_mbox_post(sc, &mbx, NULL);
oce_dma_sync(pstats_dma_mem, BUS_DMASYNC_POSTWRITE);
- if (rc) {
- device_printf(sc->dev,
- "Could not get nic statistics: %d\n", rc);
- }
+ if (!rc)
+ rc = fwcmd->hdr.u0.rsp.status;
+ if (rc)
+ device_printf(sc->dev,"%s failed - cmd status: %d\n",
+ __FUNCTION__, rc);
return rc;
}
@@ -1001,7 +1059,7 @@ oce_mbox_get_pport_stats(POCE_SOFTC sc, POCE_DMA_MEM pstats_dma_mem,
OCE_MBX_VER_V0);
fwcmd->params.req.reset_stats = reset_stats;
- fwcmd->params.req.port_number = sc->if_id;
+ fwcmd->params.req.port_number = sc->port_id;
mbx.u0.s.embedded = 0; /* stats too large for embedded mbx rsp */
mbx.u0.s.sge_count = 1; /* using scatter gather instead */
@@ -1017,11 +1075,11 @@ oce_mbox_get_pport_stats(POCE_SOFTC sc, POCE_DMA_MEM pstats_dma_mem,
rc = oce_mbox_post(sc, &mbx, NULL);
oce_dma_sync(pstats_dma_mem, BUS_DMASYNC_POSTWRITE);
- if (rc != 0) {
- device_printf(sc->dev,
- "Could not get physical port statistics: %d\n", rc);
- }
-
+ if (!rc)
+ rc = fwcmd->hdr.u0.rsp.status;
+ if (rc)
+ device_printf(sc->dev,"%s failed - cmd status: %d\n",
+ __FUNCTION__, rc);
return rc;
}
@@ -1070,11 +1128,11 @@ oce_mbox_get_vport_stats(POCE_SOFTC sc, POCE_DMA_MEM pstats_dma_mem,
rc = oce_mbox_post(sc, &mbx, NULL);
oce_dma_sync(pstats_dma_mem, BUS_DMASYNC_POSTWRITE);
- if (rc != 0) {
- device_printf(sc->dev,
- "Could not get physical port statistics: %d\n", rc);
- }
-
+ if (!rc)
+ rc = fwcmd->hdr.u0.rsp.status;
+ if (rc)
+ device_printf(sc->dev,"%s failed - cmd status: %d\n",
+ __FUNCTION__, rc);
return rc;
}
@@ -1115,7 +1173,11 @@ oce_update_multicast(POCE_SOFTC sc, POCE_DMA_MEM pdma_mem)
DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
rc = oce_mbox_post(sc, &mbx, NULL);
-
+ if (!rc)
+ rc = req->hdr.u0.rsp.status;
+ if (rc)
+ device_printf(sc->dev,"%s failed - cmd status: %d\n",
+ __FUNCTION__, rc);
return rc;
}
@@ -1176,11 +1238,15 @@ oce_mbox_macaddr_add(POCE_SOFTC sc, uint8_t *mac_addr,
mbx.payload_length = sizeof(struct mbx_add_common_iface_mac);
DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
rc = oce_mbox_post(sc, &mbx, NULL);
- if (rc)
- return rc;
-
+ if (!rc)
+ rc = fwcmd->hdr.u0.rsp.status;
+ if (rc) {
+ device_printf(sc->dev,"%s failed - cmd status: %d\n",
+ __FUNCTION__, rc);
+ goto error;
+ }
*pmac_id = fwcmd->params.rsp.pmac_id;
-
+error:
return rc;
}
@@ -1210,6 +1276,11 @@ oce_mbox_macaddr_del(POCE_SOFTC sc, uint32_t if_id, uint32_t pmac_id)
DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
rc = oce_mbox_post(sc, &mbx, NULL);
+ if (!rc)
+ rc = fwcmd->hdr.u0.rsp.status;
+ if (rc)
+ device_printf(sc->dev,"%s failed - cmd status: %d\n",
+ __FUNCTION__, rc);
return rc;
}
@@ -1242,12 +1313,17 @@ oce_mbox_check_native_mode(POCE_SOFTC sc)
DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
rc = oce_mbox_post(sc, &mbx, NULL);
- //if (rc != 0) This can fail in legacy mode. So skip
- // FN_LEAVE(rc);
-
+ if (!rc)
+ rc = fwcmd->hdr.u0.rsp.status;
+ if (rc) {
+ device_printf(sc->dev,"%s failed - cmd status: %d\n",
+ __FUNCTION__, rc);
+ goto error;
+ }
sc->be3_native = fwcmd->params.rsp.capability_flags
& CAP_BE3_NATIVE_ERX_API;
+error:
return 0;
}
@@ -1282,6 +1358,11 @@ oce_mbox_cmd_set_loopback(POCE_SOFTC sc, uint8_t port_num,
DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
rc = oce_mbox_post(sc, &mbx, NULL);
+ if (!rc)
+ rc = fwcmd->hdr.u0.rsp.status;
+ if (rc)
+ device_printf(sc->dev,"%s failed - cmd status: %d\n",
+ __FUNCTION__, rc);
return rc;
@@ -1320,10 +1401,13 @@ oce_mbox_cmd_test_loopback(POCE_SOFTC sc, uint32_t port_num,
DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
rc = oce_mbox_post(sc, &mbx, NULL);
+ if (!rc)
+ rc = fwcmd->hdr.u0.rsp.status;
if (rc)
- return rc;
+ device_printf(sc->dev,"%s failed - cmd status: %d\n",
+ __FUNCTION__, rc);
- return(fwcmd->params.rsp.status);
+ return rc;
}
int
@@ -1362,11 +1446,11 @@ oce_mbox_write_flashrom(POCE_SOFTC sc, uint32_t optype,uint32_t opcode,
/* post the command */
rc = oce_mbox_post(sc, &mbx, NULL);
- if (rc) {
- device_printf(sc->dev, "Write FlashROM mbox post failed\n");
- } else {
- rc = fwcmd->hdr.u0.rsp.status;
- }
+ if (!rc)
+ rc = fwcmd->hdr.u0.rsp.status;
+ if (rc)
+ device_printf(sc->dev,"%s failed - cmd status: %d\n",
+ __FUNCTION__, rc);
return rc;
@@ -1408,12 +1492,15 @@ oce_mbox_get_flashrom_crc(POCE_SOFTC sc, uint8_t *flash_crc,
/* post the command */
rc = oce_mbox_post(sc, &mbx, NULL);
+ if (!rc)
+ rc = fwcmd->hdr.u0.rsp.status;
if (rc) {
- device_printf(sc->dev, "Read FlashROM CRC mbox post failed\n");
- } else {
- bcopy(fwcmd->data_buffer, flash_crc, 4);
- rc = fwcmd->hdr.u0.rsp.status;
+ device_printf(sc->dev,"%s failed - cmd status: %d\n",
+ __FUNCTION__, rc);
+ goto error;
}
+ bcopy(fwcmd->data_buffer, flash_crc, 4);
+error:
return rc;
}
@@ -1440,20 +1527,22 @@ oce_mbox_get_phy_info(POCE_SOFTC sc, struct oce_phy_info *phy_info)
/* now post the command */
rc = oce_mbox_post(sc, &mbx, NULL);
+ if (!rc)
+ rc = fwcmd->hdr.u0.rsp.status;
if (rc) {
- device_printf(sc->dev, "Read PHY info mbox post failed\n");
- } else {
- rc = fwcmd->hdr.u0.rsp.status;
- phy_info->phy_type = fwcmd->params.rsp.phy_info.phy_type;
- phy_info->interface_type =
- fwcmd->params.rsp.phy_info.interface_type;
- phy_info->auto_speeds_supported =
- fwcmd->params.rsp.phy_info.auto_speeds_supported;
- phy_info->fixed_speeds_supported =
- fwcmd->params.rsp.phy_info.fixed_speeds_supported;
- phy_info->misc_params =fwcmd->params.rsp.phy_info.misc_params;
-
+ device_printf(sc->dev,"%s failed - cmd status: %d\n",
+ __FUNCTION__, rc);
+ goto error;
}
+ phy_info->phy_type = fwcmd->params.rsp.phy_info.phy_type;
+ phy_info->interface_type =
+ fwcmd->params.rsp.phy_info.interface_type;
+ phy_info->auto_speeds_supported =
+ fwcmd->params.rsp.phy_info.auto_speeds_supported;
+ phy_info->fixed_speeds_supported =
+ fwcmd->params.rsp.phy_info.fixed_speeds_supported;
+ phy_info->misc_params =fwcmd->params.rsp.phy_info.misc_params;
+error:
return rc;
}
@@ -1499,14 +1588,16 @@ oce_mbox_lancer_write_flashrom(POCE_SOFTC sc, uint32_t data_size,
/* post the command */
rc = oce_mbox_post(sc, &mbx, NULL);
+ if (!rc)
+ rc = fwcmd->params.rsp.status;
if (rc) {
- device_printf(sc->dev,
- "Write Lancer FlashROM mbox post failed\n");
- } else {
- *written_data = fwcmd->params.rsp.actual_write_length;
- *additional_status = fwcmd->params.rsp.additional_status;
- rc = fwcmd->params.rsp.status;
+ device_printf(sc->dev,"%s failed - cmd status: %d\n",
+ __FUNCTION__, rc);
+ goto error;
}
+ *written_data = fwcmd->params.rsp.actual_write_length;
+ *additional_status = fwcmd->params.rsp.additional_status;
+error:
return rc;
}
@@ -1553,15 +1644,16 @@ oce_mbox_create_rq(struct oce_rq *rq)
mbx.payload_length = sizeof(struct mbx_create_nic_rq);
rc = oce_mbox_post(sc, &mbx, NULL);
- if (rc)
+ if (!rc)
+ rc = fwcmd->hdr.u0.rsp.status;
+ if (rc) {
+ device_printf(sc->dev,"%s failed - cmd status: %d\n",
+ __FUNCTION__, rc);
goto error;
-
+ }
rq->rq_id = fwcmd->params.rsp.rq_id;
rq->rss_cpuid = fwcmd->params.rsp.rss_cpuid;
-
- return 0;
error:
- device_printf(sc->dev, "Mbox Create RQ failed\n");
return rc;
}
@@ -1603,14 +1695,15 @@ oce_mbox_create_wq(struct oce_wq *wq)
mbx.payload_length = sizeof(struct mbx_create_nic_wq);
rc = oce_mbox_post(sc, &mbx, NULL);
- if (rc)
+ if (!rc)
+ rc = fwcmd->hdr.u0.rsp.status;
+ if (rc) {
+ device_printf(sc->dev,"%s failed - cmd status: %d\n",
+ __FUNCTION__, rc);
goto error;
-
+ }
wq->wq_id = LE_16(fwcmd->params.rsp.wq_id);
-
- return 0;
error:
- device_printf(sc->dev, "Mbox Create WQ failed\n");
return rc;
}
@@ -1649,14 +1742,15 @@ oce_mbox_create_eq(struct oce_eq *eq)
mbx.payload_length = sizeof(struct mbx_create_common_eq);
rc = oce_mbox_post(sc, &mbx, NULL);
- if (rc)
+ if (!rc)
+ rc = fwcmd->hdr.u0.rsp.status;
+ if (rc) {
+ device_printf(sc->dev,"%s failed - cmd status: %d\n",
+ __FUNCTION__, rc);
goto error;
-
+ }
eq->eq_id = LE_16(fwcmd->params.rsp.eq_id);
-
- return 0;
error:
- device_printf(sc->dev, "Mbox Create EQ failed\n");
return rc;
}
@@ -1726,14 +1820,125 @@ oce_mbox_cq_create(struct oce_cq *cq, uint32_t ncoalesce, uint32_t is_eventable)
mbx.payload_length = sizeof(struct mbx_create_common_cq);
rc = oce_mbox_post(sc, &mbx, NULL);
- if (rc)
+ if (!rc)
+ rc = fwcmd->hdr.u0.rsp.status;
+ if (rc) {
+ device_printf(sc->dev,"%s failed - cmd status: %d\n",
+ __FUNCTION__, rc);
goto error;
-
+ }
cq->cq_id = LE_16(fwcmd->params.rsp.cq_id);
+error:
+ return rc;
- return 0;
+}
+
+int
+oce_mbox_read_transrecv_data(POCE_SOFTC sc, uint32_t page_num)
+{
+ int rc = 0;
+ struct oce_mbx mbx;
+ struct mbx_read_common_transrecv_data *fwcmd;
+ struct oce_mq_sge *sgl;
+ OCE_DMA_MEM dma;
+
+ /* Allocate DMA mem*/
+ if (oce_dma_alloc(sc, sizeof(struct mbx_read_common_transrecv_data),
+ &dma, 0))
+ return ENOMEM;
+
+ fwcmd = OCE_DMAPTR(&dma, struct mbx_read_common_transrecv_data);
+ bzero(fwcmd, sizeof(struct mbx_read_common_transrecv_data));
+
+ bzero(&mbx, sizeof(struct oce_mbx));
+ mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
+ MBX_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_READ_TRANSRECEIVER_DATA,
+ MBX_TIMEOUT_SEC,
+ sizeof(struct mbx_read_common_transrecv_data),
+ OCE_MBX_VER_V0);
+
+ /* fill rest of mbx */
+ mbx.u0.s.embedded = 0;
+ mbx.payload_length = sizeof(struct mbx_read_common_transrecv_data);
+ mbx.u0.s.sge_count = 1;
+ sgl = &mbx.payload.u0.u1.sgl[0];
+ sgl->pa_hi = htole32(upper_32_bits(dma.paddr));
+ sgl->pa_lo = htole32((dma.paddr) & 0xFFFFFFFF);
+ sgl->length = htole32(mbx.payload_length);
+ DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
+
+ fwcmd->params.req.port = LE_32(sc->port_id);
+ fwcmd->params.req.page_num = LE_32(page_num);
+
+ /* command post */
+ rc = oce_mbox_post(sc, &mbx, NULL);
+ if (!rc)
+ rc = fwcmd->hdr.u0.rsp.status;
+ if (rc) {
+ device_printf(sc->dev,"%s failed - cmd status: %d\n",
+ __FUNCTION__, rc);
+ goto error;
+ }
+ if(fwcmd->params.rsp.page_num == PAGE_NUM_A0)
+ {
+ bcopy((char *)fwcmd->params.rsp.page_data,
+ (char *)&sfp_vpd_dump_buffer[0],
+ TRANSCEIVER_A0_SIZE);
+ }
+
+ if(fwcmd->params.rsp.page_num == PAGE_NUM_A2)
+ {
+ bcopy((char *)fwcmd->params.rsp.page_data,
+ (char *)&sfp_vpd_dump_buffer[32],
+ TRANSCEIVER_A2_SIZE);
+ }
error:
- device_printf(sc->dev, "Mbox Create CQ failed\n");
return rc;
+}
+void
+oce_mbox_eqd_modify_periodic(POCE_SOFTC sc, struct oce_set_eqd *set_eqd,
+ int num)
+{
+ struct oce_mbx mbx;
+ struct mbx_modify_common_eq_delay *fwcmd;
+ int rc = 0;
+ int i = 0;
+
+ bzero(&mbx, sizeof(struct oce_mbx));
+
+ /* Initialize MODIFY_EQ_DELAY ioctl header */
+ fwcmd = (struct mbx_modify_common_eq_delay *)&mbx.payload;
+ mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
+ MBX_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_MODIFY_EQ_DELAY,
+ MBX_TIMEOUT_SEC,
+ sizeof(struct mbx_modify_common_eq_delay),
+ OCE_MBX_VER_V0);
+ /* fill rest of mbx */
+ mbx.u0.s.embedded = 1;
+ mbx.payload_length = sizeof(struct mbx_modify_common_eq_delay);
+ DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
+
+ fwcmd->params.req.num_eq = num;
+ for (i = 0; i < num; i++) {
+ fwcmd->params.req.delay[i].eq_id =
+ htole32(set_eqd[i].eq_id);
+ fwcmd->params.req.delay[i].phase = 0;
+ fwcmd->params.req.delay[i].dm =
+ htole32(set_eqd[i].delay_multiplier);
+ }
+
+
+ rc = oce_mbox_post(sc, &mbx, NULL);
+
+ if (!rc)
+ rc = fwcmd->hdr.u0.rsp.status;
+ if (rc)
+ device_printf(sc->dev,"%s failed - cmd status: %d\n",
+ __FUNCTION__, rc);
}
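
Nearly every hunk in this file applies the same transformation: once oce_mbox_post() succeeds, the completion status in the response header becomes the return value, and any nonzero status is logged uniformly. The repeated idiom, factored into a hedged sketch (oce_mbx_status() is a hypothetical helper; the real commit open-codes this in each function):

    /* Hypothetical helper capturing the repeated post-and-check idiom. */
    static int
    oce_mbx_status(POCE_SOFTC sc, struct oce_mbx *mbx, struct mbx_hdr *hdr,
        const char *who)
    {
    	int rc = oce_mbox_post(sc, mbx, NULL);

    	if (!rc)
    		rc = hdr->u0.rsp.status;	/* firmware completion code */
    	if (rc)
    		device_printf(sc->dev, "%s failed - cmd status: %d\n",
    		    who, rc);
    	return (rc);
    }
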
diff --git a/sys/dev/oce/oce_queue.c b/sys/dev/oce/oce_queue.c
index 580853e..82972ee 100644
--- a/sys/dev/oce/oce_queue.c
+++ b/sys/dev/oce/oce_queue.c
@@ -36,8 +36,11 @@
* Costa Mesa, CA 92626
*/
+
+
/* $FreeBSD$ */
+
#include "oce_if.h"
/*****************************************************
@@ -89,6 +92,7 @@ oce_queue_init_all(POCE_SOFTC sc)
int rc = 0, i, vector;
struct oce_wq *wq;
struct oce_rq *rq;
+ struct oce_aic_obj *aic;
/* alloc TX/RX queues */
for_all_wq_queues(sc, wq, i) {
@@ -113,6 +117,13 @@ oce_queue_init_all(POCE_SOFTC sc)
/* create all of the event queues */
for (vector = 0; vector < sc->intr_count; vector++) {
+ /* setup aic defaults for each event queue */
+ aic = &sc->aic_obj[vector];
+ aic->max_eqd = OCE_MAX_EQD;
+ aic->min_eqd = OCE_MIN_EQD;
+ aic->et_eqd = OCE_MIN_EQD;
+ aic->enable = TRUE;
+
sc->eq[vector] = oce_eq_create(sc, EQ_LEN_1024, EQE_SIZE_4,
0, vector);
if (!sc->eq[vector])
@@ -657,8 +668,7 @@ oce_mq_create(POCE_SOFTC sc, struct oce_eq *eq, uint32_t q_len)
oce_mq_ext_ctx_t *ctx;
uint32_t num_pages;
uint32_t page_size;
- uint32_t version;
-
+ int version;
cq = oce_cq_create(sc, eq, CQ_LEN_256,
sizeof(struct oce_mq_cqe), 1, 1, 0, 0);
@@ -680,8 +690,8 @@ oce_mq_create(POCE_SOFTC sc, struct oce_eq *eq, uint32_t q_len)
bzero(&mbx, sizeof(struct oce_mbx));
+ version = IS_XE201(sc) ? OCE_MBX_VER_V1 : OCE_MBX_VER_V0;
fwcmd = (struct mbx_create_common_mq_ex *)&mbx.payload;
- version = OCE_MBX_VER_V0;
mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
MBX_SUBSYSTEM_COMMON,
OPCODE_COMMON_CREATE_MQ_EXT,
@@ -693,21 +703,41 @@ oce_mq_create(POCE_SOFTC sc, struct oce_eq *eq, uint32_t q_len)
page_size = mq->ring->num_items * mq->ring->item_size;
ctx = &fwcmd->params.req.context;
- ctx->v0.num_pages = num_pages;
- ctx->v0.cq_id = cq->cq_id;
- ctx->v0.ring_size = OCE_LOG2(q_len) + 1;
- ctx->v0.valid = 1;
- /* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */
- ctx->v0.async_evt_bitmap = 0xffffffff;
+
+ if (IS_XE201(sc)) {
+ ctx->v1.num_pages = num_pages;
+ ctx->v1.ring_size = OCE_LOG2(q_len) + 1;
+ ctx->v1.cq_id = cq->cq_id;
+ ctx->v1.valid = 1;
+ ctx->v1.async_cq_id = cq->cq_id;
+ ctx->v1.async_cq_valid = 1;
+ /* Subscribe to Link State and Group 5 Events(bits 1 & 5 set) */
+ ctx->v1.async_evt_bitmap |= LE_32(0x00000022);
+ ctx->v1.async_evt_bitmap |= LE_32(1 << ASYNC_EVENT_CODE_DEBUG);
+ ctx->v1.async_evt_bitmap |=
+ LE_32(1 << ASYNC_EVENT_CODE_SLIPORT);
+ }
+ else {
+ ctx->v0.num_pages = num_pages;
+ ctx->v0.cq_id = cq->cq_id;
+ ctx->v0.ring_size = OCE_LOG2(q_len) + 1;
+ ctx->v0.valid = 1;
+ /* Subscribe to Link State and Group5 Events(bits 1 & 5 set) */
+ ctx->v0.async_evt_bitmap = 0xffffffff;
+ }
mbx.u0.s.embedded = 1;
mbx.payload_length = sizeof(struct mbx_create_common_mq_ex);
DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
rc = oce_mbox_post(sc, &mbx, NULL);
- if (rc)
+ if (!rc)
+ rc = fwcmd->hdr.u0.rsp.status;
+ if (rc) {
+ device_printf(sc->dev,"%s failed - cmd status: %d\n",
+ __FUNCTION__, rc);
goto error;
-
+ }
mq->mq_id = LE_16(fwcmd->params.rsp.mq_id);
mq->cq = cq;
eq->cq[eq->cq_valid] = cq;
@@ -824,10 +854,11 @@ oce_destroy_q(POCE_SOFTC sc, struct oce_mbx *mbx, size_t req_size,
DW_SWAP(u32ptr(mbx), mbx->payload_length + OCE_BMBX_RHDR_SZ);
rc = oce_mbox_post(sc, mbx, NULL);
-
- if (rc != 0)
- device_printf(sc->dev, "Failed to del q\n");
-
+ if (!rc)
+ rc = hdr->u0.rsp.status;
+ if (rc)
+ device_printf(sc->dev,"%s failed - cmd status: %d\n",
+ __FUNCTION__, rc);
return rc;
}
diff --git a/sys/dev/oce/oce_sysctl.c b/sys/dev/oce/oce_sysctl.c
index 93117f6..d8a88de 100644
--- a/sys/dev/oce/oce_sysctl.c
+++ b/sys/dev/oce/oce_sysctl.c
@@ -38,6 +38,9 @@
/* $FreeBSD$ */
+
+
#include "oce_if.h"
static void copy_stats_to_sc_xe201(POCE_SOFTC sc);
@@ -49,6 +52,7 @@ static int oce_sys_fwupgrade(SYSCTL_HANDLER_ARGS);
static int oce_be3_flashdata(POCE_SOFTC sc, const struct firmware
*fw, int num_imgs);
static int oce_lancer_fwupgrade(POCE_SOFTC sc, const struct firmware *fw);
+static int oce_sysctl_sfp_vpd_dump(SYSCTL_HANDLER_ARGS);
static boolean_t oce_phy_flashing_required(POCE_SOFTC sc);
static boolean_t oce_img_flashing_required(POCE_SOFTC sc, const char *p,
int img_optype, uint32_t img_offset,
@@ -61,7 +65,7 @@ static void oce_add_stats_sysctls_xe201(POCE_SOFTC sc,
struct sysctl_oid *stats_node);
extern char component_revision[32];
-
+uint32_t sfp_vpd_dump_buffer[TRANSCEIVER_DATA_NUM_ELE];
void
oce_add_sysctls(POCE_SOFTC sc)
@@ -93,7 +97,8 @@ oce_add_sysctls(POCE_SOFTC sc)
sizeof(oce_max_rsp_handled),
"Maximum receive frames handled per interupt");
- if (sc->function_mode & FNM_FLEX10_MODE)
+ if ((sc->function_mode & FNM_FLEX10_MODE) ||
+ (sc->function_mode & FNM_UMC_MODE))
SYSCTL_ADD_UINT(ctx, child,
OID_AUTO, "speed",
CTLFLAG_RD,
@@ -121,6 +126,19 @@ oce_add_sysctls(POCE_SOFTC sc)
CTLTYPE_STRING | CTLFLAG_RW, (void *)sc, 0,
oce_sys_fwupgrade, "A", "Firmware ufi file");
+ /*
+ * Dumps transceiver data; usage:
+ * "sysctl dev.oce.0.sfp_vpd_dump=0"
+ * "sysctl -x dev.oce.0.sfp_vpd_dump_buffer" for hex dump
+ * "sysctl -b dev.oce.0.sfp_vpd_dump_buffer > sfp.bin" for binary dump
+ */
+ SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "sfp_vpd_dump",
+ CTLTYPE_INT | CTLFLAG_RW, (void *)sc, 0, oce_sysctl_sfp_vpd_dump,
+ "I", "Initiate a sfp_vpd_dump operation");
+ SYSCTL_ADD_OPAQUE(ctx, child, OID_AUTO, "sfp_vpd_dump_buffer",
+ CTLFLAG_RD, sfp_vpd_dump_buffer,
+ TRANSCEIVER_DATA_SIZE, "IU", "Access sfp_vpd_dump buffer");
+
stats_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats",
CTLFLAG_RD, NULL, "Ethernet Statistics");
@@ -133,7 +151,6 @@ oce_add_sysctls(POCE_SOFTC sc)
}
-
static uint32_t
oce_loopback_test(struct oce_softc *sc, uint8_t loopback_type)
{
@@ -147,7 +164,6 @@ oce_loopback_test(struct oce_softc *sc, uint8_t loopback_type)
return status;
}
-
static int
oce_sysctl_loopback(SYSCTL_HANDLER_ARGS)
{
@@ -1303,3 +1319,31 @@ oce_refresh_nic_stats(POCE_SOFTC sc)
return rc;
}
+
+static int
+oce_sysctl_sfp_vpd_dump(SYSCTL_HANDLER_ARGS)
+{
+ int result = 0, error;
+ int rc = 0;
+ POCE_SOFTC sc = (POCE_SOFTC) arg1;
+
+ /* sysctl default handler */
+ error = sysctl_handle_int(oidp, &result, 0, req);
+ if (error || !req->newptr)
+ return (error);
+
+ if(result == -1) {
+ return EINVAL;
+ }
+ bzero((char *)sfp_vpd_dump_buffer, TRANSCEIVER_DATA_SIZE);
+
+ rc = oce_mbox_read_transrecv_data(sc, PAGE_NUM_A0);
+ if(rc)
+ return rc;
+
+ rc = oce_mbox_read_transrecv_data(sc, PAGE_NUM_A2);
+ if(rc)
+ return rc;
+
+ return rc;
+}
diff --git a/sys/dev/oce/oce_util.c b/sys/dev/oce/oce_util.c
index 7b227ac..b71c02d 100644
--- a/sys/dev/oce/oce_util.c
+++ b/sys/dev/oce/oce_util.c
@@ -36,8 +36,10 @@
* Costa Mesa, CA 92626
*/
+
/* $FreeBSD$ */
+
#include "oce_if.h"
static void oce_dma_map_ring(void *arg,
@@ -71,7 +73,8 @@ oce_dma_alloc(POCE_SOFTC sc, bus_size_t size, POCE_DMA_MEM dma, int flags)
if (rc == 0) {
rc = bus_dmamem_alloc(dma->tag,
&dma->ptr,
- BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
+ BUS_DMA_NOWAIT | BUS_DMA_COHERENT |
+ BUS_DMA_ZERO,
&dma->map);
}
diff --git a/sys/dev/random/randomdev_soft.c b/sys/dev/random/randomdev_soft.c
index 420a82a..004066eb 100644
--- a/sys/dev/random/randomdev_soft.c
+++ b/sys/dev/random/randomdev_soft.c
@@ -242,10 +242,10 @@ random_kthread(void *arg __unused)
local_count = 0;
/* Process until told to stop */
+ mtx_lock_spin(&harvest_mtx);
for (; random_kthread_control >= 0;) {
/* Cycle through all the entropy sources */
- mtx_lock_spin(&harvest_mtx);
for (source = RANDOM_START; source < ENTROPYSOURCE; source++) {
/*
* Drain entropy source records into a thread-local
@@ -270,7 +270,6 @@ random_kthread(void *arg __unused)
emptyfifo.count += local_count;
local_count = 0;
}
- mtx_unlock_spin(&harvest_mtx);
KASSERT(local_count == 0, ("random_kthread: local_count %d",
local_count));
@@ -283,9 +282,11 @@ random_kthread(void *arg __unused)
random_kthread_control = 0;
/* Work done, so don't belabour the issue */
- pause("-", hz / 10);
+ msleep_spin_sbt(&random_kthread_control, &harvest_mtx,
+ "-", SBT_1S / 10, 0, C_PREL(1));
}
+ mtx_unlock_spin(&harvest_mtx);
random_set_wakeup_exit(&random_kthread_control);
/* NOTREACHED */
diff --git a/sys/dev/sound/pci/hda/hdaa_patches.c b/sys/dev/sound/pci/hda/hdaa_patches.c
index 41d51d7..ef3124a 100644
--- a/sys/dev/sound/pci/hda/hdaa_patches.c
+++ b/sys/dev/sound/pci/hda/hdaa_patches.c
@@ -333,6 +333,23 @@ hdac_pin_patch(struct hdaa_widget *w)
patch = "as=1 seq=15";
break;
}
+ } else if (id == HDA_CODEC_CX20590 &&
+ (subid == LENOVO_X1_SUBVENDOR ||
+ subid == LENOVO_X220_SUBVENDOR ||
+ subid == LENOVO_T420_SUBVENDOR ||
+ subid == LENOVO_T520_SUBVENDOR)) {
+ switch (nid) {
+ case 25:
+ patch = "as=1 seq=15";
+ break;
+ }
+ } else if (id == HDA_CODEC_ALC269 &&
+ subid == LENOVO_X1CRBN_SUBVENDOR) {
+ switch (nid) {
+ case 21:
+ patch = "as=1 seq=15";
+ break;
+ }
}
if (patch != NULL)
diff --git a/sys/dev/sound/pci/hda/hdac.h b/sys/dev/sound/pci/hda/hdac.h
index 9f9d687..e82b6ae 100644
--- a/sys/dev/sound/pci/hda/hdac.h
+++ b/sys/dev/sound/pci/hda/hdac.h
@@ -220,7 +220,12 @@
#define LENOVO_3KN200_SUBVENDOR HDA_MODEL_CONSTRUCT(LENOVO, 0x384e)
#define LENOVO_B450_SUBVENDOR HDA_MODEL_CONSTRUCT(LENOVO, 0x3a0d)
#define LENOVO_TCA55_SUBVENDOR HDA_MODEL_CONSTRUCT(LENOVO, 0x1015)
+#define LENOVO_X1_SUBVENDOR HDA_MODEL_CONSTRUCT(LENOVO, 0x21e8)
+#define LENOVO_X1CRBN_SUBVENDOR HDA_MODEL_CONSTRUCT(LENOVO, 0x21f9)
+#define LENOVO_X220_SUBVENDOR HDA_MODEL_CONSTRUCT(LENOVO, 0x21da)
#define LENOVO_X300_SUBVENDOR HDA_MODEL_CONSTRUCT(LENOVO, 0x20ac)
+#define LENOVO_T420_SUBVENDOR HDA_MODEL_CONSTRUCT(LENOVO, 0x21ce)
+#define LENOVO_T520_SUBVENDOR HDA_MODEL_CONSTRUCT(LENOVO, 0x21cf)
#define LENOVO_ALL_SUBVENDOR HDA_MODEL_CONSTRUCT(LENOVO, 0xffff)
/* Samsung */
diff --git a/sys/dev/sound/pci/hda/hdacc.c b/sys/dev/sound/pci/hda/hdacc.c
index 55bf4af..c9282c9 100644
--- a/sys/dev/sound/pci/hda/hdacc.c
+++ b/sys/dev/sound/pci/hda/hdacc.c
@@ -460,8 +460,12 @@ hdacc_attach(device_t dev)
static int
hdacc_detach(device_t dev)
{
+ struct hdacc_softc *codec = device_get_softc(dev);
+ int error;
- return (device_delete_children(dev));
+ error = device_delete_children(dev);
+ free(codec->fgs, M_HDACC);
+ return (error);
}
static int
diff --git a/sys/dev/syscons/syscons.c b/sys/dev/syscons/syscons.c
index 8094ea3..b863f90 100644
--- a/sys/dev/syscons/syscons.c
+++ b/sys/dev/syscons/syscons.c
@@ -506,6 +506,8 @@ sc_attach_unit(int unit, int flags)
sc = sc_get_softc(unit, flags & SC_KERNEL_CONSOLE);
sc->config = flags;
+ callout_init(&sc->ctimeout, 0);
+ callout_init(&sc->cblink, 0);
scp = sc_get_stat(sc->dev[0]);
if (sc_console == NULL) /* sc_console_unit < 0 */
sc_console = scp;
@@ -1831,13 +1833,11 @@ static void
scrn_timer(void *arg)
{
#ifndef PC98
- static int kbd_interval = 0;
+ static time_t kbd_time_stamp = 0;
#endif
- struct timeval tv;
sc_softc_t *sc;
scr_stat *scp;
- int again;
- int s;
+ int again, rate;
again = (arg != NULL);
if (arg != NULL)
@@ -1847,18 +1847,18 @@ scrn_timer(void *arg)
else
return;
+ /* find the vty to update */
+ scp = sc->cur_scp;
+
/* don't do anything when we are performing some I/O operations */
- if (suspend_in_progress || sc->font_loading_in_progress) {
- if (again)
- timeout(scrn_timer, sc, hz / 10);
- return;
- }
- s = spltty();
+ if (suspend_in_progress || sc->font_loading_in_progress)
+ goto done;
#ifndef PC98
if ((sc->kbd == NULL) && (sc->config & SC_AUTODETECT_KBD)) {
/* try to allocate a keyboard automatically */
- if (++kbd_interval >= 25) {
+ if (kbd_time_stamp != time_uptime) {
+ kbd_time_stamp = time_uptime;
sc->keyboard = sc_allocate_keyboard(sc, -1);
if (sc->keyboard >= 0) {
sc->kbd = kbd_get_keyboard(sc->keyboard);
@@ -1867,25 +1867,20 @@ scrn_timer(void *arg)
update_kbd_state(sc->cur_scp, sc->cur_scp->status,
LOCK_MASK);
}
- kbd_interval = 0;
}
}
#endif /* PC98 */
- /* find the vty to update */
- scp = sc->cur_scp;
-
/* should we stop the screen saver? */
- getmicrouptime(&tv);
if (debugger > 0 || panicstr || shutdown_in_progress)
sc_touch_scrn_saver();
if (run_scrn_saver) {
- if (tv.tv_sec > sc->scrn_time_stamp + scrn_blank_time)
+ if (time_uptime > sc->scrn_time_stamp + scrn_blank_time)
sc->flags |= SC_SCRN_IDLE;
else
sc->flags &= ~SC_SCRN_IDLE;
} else {
- sc->scrn_time_stamp = tv.tv_sec;
+ sc->scrn_time_stamp = time_uptime;
sc->flags &= ~SC_SCRN_IDLE;
if (scrn_blank_time > 0)
run_scrn_saver = TRUE;
@@ -1898,12 +1893,8 @@ scrn_timer(void *arg)
/* should we just return ? */
if (sc->blink_in_progress || sc->switch_in_progress
- || sc->write_in_progress) {
- if (again)
- timeout(scrn_timer, sc, hz / 10);
- splx(s);
- return;
- }
+ || sc->write_in_progress)
+ goto done;
/* Update the screen */
scp = sc->cur_scp; /* cur_scp may have changed... */
@@ -1917,9 +1908,19 @@ scrn_timer(void *arg)
(*current_saver)(sc, TRUE);
#endif
- if (again)
- timeout(scrn_timer, sc, hz / 25);
- splx(s);
+done:
+ if (again) {
+ /*
+ * Use reduced "refresh" rate if we are in graphics and that is not a
+ * graphical screen saver. In such case we just have nothing to do.
+ */
+ if (ISGRAPHSC(scp) && !(sc->flags & SC_SCRN_BLANKED))
+ rate = 2;
+ else
+ rate = 30;
+ callout_reset_sbt(&sc->ctimeout, SBT_1S / rate, 0,
+ scrn_timer, sc, C_PREL(1));
+ }
}
static int
@@ -3863,7 +3864,8 @@ blink_screen(void *arg)
(*scp->rndr->draw)(scp, 0, scp->xsize*scp->ysize,
scp->sc->blink_in_progress & 1);
scp->sc->blink_in_progress--;
- timeout(blink_screen, scp, hz / 10);
+ callout_reset_sbt(&scp->sc->cblink, SBT_1S / 15, 0,
+ blink_screen, scp, C_PREL(0));
}
}
diff --git a/sys/dev/syscons/syscons.h b/sys/dev/syscons/syscons.h
index 23a7bba..353b67f 100644
--- a/sys/dev/syscons/syscons.h
+++ b/sys/dev/syscons/syscons.h
@@ -269,6 +269,8 @@ typedef struct sc_softc {
#ifdef KDB
int sc_altbrk;
#endif
+ struct callout ctimeout;
+ struct callout cblink;
} sc_softc_t;
/* virtual screen */
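
The timeout(9)-to-callout(9) conversion in scrn_timer() and blink_screen()
above follows a standard pattern; here is a minimal sketch of the same
idiom, with hypothetical names (tick_fn, tick_callout) that are not part of
this commit:

    #include <sys/param.h>
    #include <sys/systm.h>
    #include <sys/callout.h>

    static struct callout tick_callout;

    static void
    tick_fn(void *arg)
    {
        /* ... periodic work ... */

        /* Re-arm ~1/30 s out; C_PREL(1) tolerates up to half the
         * interval of deviation so the kernel can batch wakeups. */
        callout_reset_sbt(&tick_callout, SBT_1S / 30, 0, tick_fn, arg,
            C_PREL(1));
    }

    static void
    tick_attach(void *arg)
    {
        callout_init(&tick_callout, 0);
        callout_reset_sbt(&tick_callout, SBT_1S / 30, 0, tick_fn, arg,
            C_PREL(1));
    }
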
diff --git a/sys/dev/virtio/block/virtio_blk.c b/sys/dev/virtio/block/virtio_blk.c
index cb628f6..21d7703 100644
--- a/sys/dev/virtio/block/virtio_blk.c
+++ b/sys/dev/virtio/block/virtio_blk.c
@@ -72,6 +72,7 @@ struct vtblk_softc {
#define VTBLK_FLAG_DETACH 0x0004
#define VTBLK_FLAG_SUSPEND 0x0008
#define VTBLK_FLAG_DUMPING 0x0010
+#define VTBLK_FLAG_BARRIER 0x0020
struct virtqueue *vtblk_vq;
struct sglist *vtblk_sglist;
@@ -81,7 +82,8 @@ struct vtblk_softc {
TAILQ_HEAD(, vtblk_request)
vtblk_req_free;
TAILQ_HEAD(, vtblk_request)
- vtblk_req_ready;
+ vtblk_req_ready;
+ struct vtblk_request *vtblk_req_ordered;
struct taskqueue *vtblk_tq;
struct task vtblk_intr_task;
@@ -278,9 +280,10 @@ vtblk_attach(device_t dev)
if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
sc->vtblk_flags |= VTBLK_FLAG_INDIRECT;
-
if (virtio_with_feature(dev, VIRTIO_BLK_F_RO))
sc->vtblk_flags |= VTBLK_FLAG_READONLY;
+ if (virtio_with_feature(dev, VIRTIO_BLK_F_BARRIER))
+ sc->vtblk_flags |= VTBLK_FLAG_BARRIER;
/* Get local copy of config. */
virtio_read_device_config(dev, 0, &blkcfg,
@@ -766,25 +769,45 @@ vtblk_bio_request(struct vtblk_softc *sc)
bp->bio_cmd);
}
- if (bp->bio_flags & BIO_ORDERED)
- req->vbr_hdr.type |= VIRTIO_BLK_T_BARRIER;
-
return (req);
}
static int
vtblk_execute_request(struct vtblk_softc *sc, struct vtblk_request *req)
{
+ struct virtqueue *vq;
struct sglist *sg;
struct bio *bp;
- int readable, writable, error;
+ int ordered, readable, writable, error;
+ vq = sc->vtblk_vq;
sg = sc->vtblk_sglist;
bp = req->vbr_bp;
+ ordered = 0;
writable = 0;
VTBLK_LOCK_ASSERT(sc);
+ /*
+ * Wait until the ordered request completes before
+ * executing subsequent requests.
+ */
+ if (sc->vtblk_req_ordered != NULL)
+ return (EBUSY);
+
+ if (bp->bio_flags & BIO_ORDERED) {
+ if ((sc->vtblk_flags & VTBLK_FLAG_BARRIER) == 0) {
+ /*
+ * This request will be executed once all
+ * the in-flight requests are completed.
+ */
+ if (!virtqueue_empty(vq))
+ return (EBUSY);
+ ordered = 1;
+ } else
+ req->vbr_hdr.type |= VIRTIO_BLK_T_BARRIER;
+ }
+
sglist_reset(sg);
sglist_append(sg, &req->vbr_hdr, sizeof(struct virtio_blk_outhdr));
@@ -802,10 +825,13 @@ vtblk_execute_request(struct vtblk_softc *sc, struct vtblk_request *req)
writable++;
sglist_append(sg, &req->vbr_ack, sizeof(uint8_t));
-
readable = sg->sg_nseg - writable;
- return (virtqueue_enqueue(sc->vtblk_vq, req, sg, readable, writable));
+ error = virtqueue_enqueue(vq, req, sg, readable, writable);
+ if (error == 0 && ordered)
+ sc->vtblk_req_ordered = req;
+
+ return (error);
}
static int
@@ -1013,6 +1039,12 @@ vtblk_finish_completed(struct vtblk_softc *sc)
while ((req = virtqueue_dequeue(sc->vtblk_vq, NULL)) != NULL) {
bp = req->vbr_bp;
+ if (sc->vtblk_req_ordered != NULL) {
+ /* This should be the only outstanding request. */
+ MPASS(sc->vtblk_req_ordered == req);
+ sc->vtblk_req_ordered = NULL;
+ }
+
error = vtblk_request_error(req);
if (error)
disk_err(bp, "hard error", -1, 1);
@@ -1039,6 +1071,7 @@ vtblk_drain_vq(struct vtblk_softc *sc, int skip_done)
vtblk_enqueue_request(sc, req);
}
+ sc->vtblk_req_ordered = NULL;
KASSERT(virtqueue_empty(vq), ("virtqueue not empty"));
}
diff --git a/sys/geom/label/g_label_ntfs.c b/sys/geom/label/g_label_ntfs.c
index f00669e..1ed4a07 100644
--- a/sys/geom/label/g_label_ntfs.c
+++ b/sys/geom/label/g_label_ntfs.c
@@ -55,7 +55,7 @@ struct ntfs_attr {
uint16_t reserved3;
uint16_t a_dataoff;
uint16_t a_indexed;
-};
+} __packed;
struct ntfs_filerec {
uint32_t fr_hdrmagic;
@@ -70,7 +70,7 @@ struct ntfs_filerec {
uint32_t fr_allocated;
uint64_t fr_mainrec;
uint16_t fr_attrnum;
-};
+} __packed;
struct ntfs_bootfile {
uint8_t reserved1[3];
@@ -89,7 +89,7 @@ struct ntfs_bootfile {
uint8_t bf_mftrecsz;
uint32_t bf_ibsz;
uint32_t bf_volsn;
-};
+} __packed;
static void
g_label_ntfs_taste(struct g_consumer *cp, char *label, size_t size)
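
The __packed annotations added above matter because these structs mirror
on-disk NTFS metadata byte-for-byte; without the attribute the compiler may
insert alignment padding. A small standalone illustration (not part of this
commit; __packed is defined here as in <sys/cdefs.h>):

    #include <stdint.h>
    #include <stdio.h>

    #ifndef __packed
    #define __packed __attribute__((packed))
    #endif

    struct on_disk {        /* 16-bit field followed by a 64-bit field */
        uint16_t a;
        uint64_t b;
    } __packed;

    struct padded {         /* same fields, natural alignment */
        uint16_t a;
        uint64_t b;
    };

    int
    main(void)
    {
        /* Typically prints "10 16": the padded layout shifts 'b' to an
         * 8-byte boundary, away from its on-disk offset. */
        printf("%zu %zu\n", sizeof(struct on_disk), sizeof(struct padded));
        return (0);
    }
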
diff --git a/sys/geom/part/g_part.c b/sys/geom/part/g_part.c
index e2ba79e..f7f6f55 100644
--- a/sys/geom/part/g_part.c
+++ b/sys/geom/part/g_part.c
@@ -80,6 +80,7 @@ struct g_part_alias_list {
{ "bios-boot", G_PART_ALIAS_BIOS_BOOT },
{ "ebr", G_PART_ALIAS_EBR },
{ "efi", G_PART_ALIAS_EFI },
+ { "fat16", G_PART_ALIAS_MS_FAT16 },
{ "fat32", G_PART_ALIAS_MS_FAT32 },
{ "freebsd", G_PART_ALIAS_FREEBSD },
{ "freebsd-boot", G_PART_ALIAS_FREEBSD_BOOT },
diff --git a/sys/geom/part/g_part.h b/sys/geom/part/g_part.h
index 39bcc9d..05a8c57 100644
--- a/sys/geom/part/g_part.h
+++ b/sys/geom/part/g_part.h
@@ -68,6 +68,7 @@ enum g_part_alias {
G_PART_ALIAS_NETBSD_SWAP, /* A NetBSD swap partition entry. */
G_PART_ALIAS_NETBSD_LFS, /* A NetBSD LFS partition entry. */
G_PART_ALIAS_EBR, /* A EBR partition entry. */
+ G_PART_ALIAS_MS_FAT16, /* A Microsoft FAT16 partition entry. */
G_PART_ALIAS_MS_FAT32, /* A Microsoft FAT32 partition entry. */
G_PART_ALIAS_BIOS_BOOT, /* A GRUB 2 boot partition entry. */
G_PART_ALIAS_VMFS, /* A VMware VMFS partition entry */
diff --git a/sys/geom/part/g_part_mbr.c b/sys/geom/part/g_part_mbr.c
index 619553c..d522d02 100644
--- a/sys/geom/part/g_part_mbr.c
+++ b/sys/geom/part/g_part_mbr.c
@@ -118,6 +118,7 @@ static struct g_part_mbr_alias {
{ DOSPTYP_386BSD, G_PART_ALIAS_FREEBSD },
{ DOSPTYP_EXT, G_PART_ALIAS_EBR },
{ DOSPTYP_NTFS, G_PART_ALIAS_MS_NTFS },
+ { DOSPTYP_FAT16, G_PART_ALIAS_MS_FAT16 },
{ DOSPTYP_FAT32, G_PART_ALIAS_MS_FAT32 },
{ DOSPTYP_EXTLBA, G_PART_ALIAS_EBR },
{ DOSPTYP_LDM, G_PART_ALIAS_MS_LDM_DATA },
diff --git a/sys/geom/raid/md_ddf.c b/sys/geom/raid/md_ddf.c
index 7d4bd1e..c18a319 100644
--- a/sys/geom/raid/md_ddf.c
+++ b/sys/geom/raid/md_ddf.c
@@ -881,7 +881,10 @@ ddf_vol_meta_update(struct ddf_vol_meta *dst, struct ddf_meta *src,
hdr = src->hdr;
vde = &src->vdr->entry[ddf_meta_find_vd(src, GUID)];
vdc = ddf_meta_find_vdc(src, GUID);
- bvd = GET8D(src, vdc->Secondary_Element_Seq);
+ if (GET8D(src, vdc->Secondary_Element_Count) == 1)
+ bvd = 0;
+ else
+ bvd = GET8D(src, vdc->Secondary_Element_Seq);
size = GET16(src, hdr->Configuration_Record_Length) * src->sectorsize;
if (dst->vdc == NULL ||
diff --git a/sys/i386/conf/GENERIC b/sys/i386/conf/GENERIC
index 3a98ded..d35a780 100644
--- a/sys/i386/conf/GENERIC
+++ b/sys/i386/conf/GENERIC
@@ -146,7 +146,10 @@ device sa # Sequential Access (tape etc)
device cd # CD
device pass # Passthrough device (direct ATA/SCSI access)
device ses # Enclosure Services (SES and SAF-TE)
-#device ctl # CAM Target Layer
+device ctl # CAM Target Layer
+options CTL_DISABLE # Disable CTL by default to save memory.
+ # Re-enable with kern.cam.ctl.disable=0 in
+ # /boot/loader.conf
# RAID controllers interfaced to the SCSI subsystem
device amr # AMI MegaRAID
diff --git a/sys/i386/conf/NOTES b/sys/i386/conf/NOTES
index 95ccd4e..f8f78cc 100644
--- a/sys/i386/conf/NOTES
+++ b/sys/i386/conf/NOTES
@@ -773,6 +773,13 @@ device glxsb # AMD Geode LX Security Block
#
# VirtIO support
+#
+# The virtio entry provides a generic bus for use by the device drivers.
+# It must be combined with an interface that communicates with the host.
+# Multiple such interfaces are defined by the VirtIO specification. FreeBSD
+# only has support for PCI. Therefore, virtio_pci must be statically
+# compiled in or loaded as a module for the device drivers to function.
+#
device virtio # Generic VirtIO bus (required)
device virtio_pci # VirtIO PCI Interface
device vtnet # VirtIO Ethernet device
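
For systems that build these as modules instead of compiling them in
statically, the equivalent /boot/loader.conf fragment would look something
like the following (module names assumed from the usual naming convention;
verify against /boot/kernel):

    virtio_load="YES"        # generic VirtIO bus
    virtio_pci_load="YES"    # PCI transport (required)
    if_vtnet_load="YES"      # VirtIO Ethernet
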
diff --git a/sys/kern/kern_clock.c b/sys/kern/kern_clock.c
index 4439ccb..2841f8c 100644
--- a/sys/kern/kern_clock.c
+++ b/sys/kern/kern_clock.c
@@ -78,7 +78,8 @@ __FBSDID("$FreeBSD$");
#include <sys/pmckern.h>
PMC_SOFT_DEFINE( , , clock, hard);
PMC_SOFT_DEFINE( , , clock, stat);
-PMC_SOFT_DEFINE( , , clock, prof);
+PMC_SOFT_DEFINE_EX( , , clock, prof, \
+ cpu_startprofclock, cpu_stopprofclock);
#endif
#ifdef DEVICE_POLLING
diff --git a/sys/kern/kern_event.c b/sys/kern/kern_event.c
index 6a920ca..c3d1b86 100644
--- a/sys/kern/kern_event.c
+++ b/sys/kern/kern_event.c
@@ -517,39 +517,28 @@ knote_fork(struct knlist *list, int pid)
* XXX: EVFILT_TIMER should perhaps live in kern_time.c beside the
* interval timer support code.
*/
-static int
-timertoticks(intptr_t data)
+static __inline sbintime_t
+timer2sbintime(intptr_t data)
{
- struct timeval tv;
- int tticks;
-
- tv.tv_sec = data / 1000;
- tv.tv_usec = (data % 1000) * 1000;
- tticks = tvtohz(&tv);
- return tticks;
+ return (SBT_1MS * data);
}
static void
filt_timerexpire(void *knx)
{
- struct knote *kn = knx;
struct callout *calloutp;
+ struct knote *kn;
+ kn = knx;
kn->kn_data++;
KNOTE_ACTIVATE(kn, 0); /* XXX - handle locking */
- /*
- * timertoticks() uses tvtohz() which always adds 1 to allow
- * for the time until the next clock interrupt being strictly
- * less than 1 clock tick. We don't want that here since we
- * want to appear to be in sync with the clock interrupt even
- * when we're delayed.
- */
if ((kn->kn_flags & EV_ONESHOT) != EV_ONESHOT) {
calloutp = (struct callout *)kn->kn_hook;
- callout_reset_curcpu(calloutp, timertoticks(kn->kn_sdata) - 1,
- filt_timerexpire, kn);
+ callout_reset_sbt_on(calloutp,
+ timer2sbintime(kn->kn_sdata), 0 /* 1ms? */,
+ filt_timerexpire, kn, PCPU_GET(cpuid), 0);
}
}
@@ -573,8 +562,9 @@ filt_timerattach(struct knote *kn)
calloutp = malloc(sizeof(*calloutp), M_KQUEUE, M_WAITOK);
callout_init(calloutp, CALLOUT_MPSAFE);
kn->kn_hook = calloutp;
- callout_reset_curcpu(calloutp, timertoticks(kn->kn_sdata),
- filt_timerexpire, kn);
+ callout_reset_sbt_on(calloutp,
+ timer2sbintime(kn->kn_sdata), 0 /* 1ms? */,
+ filt_timerexpire, kn, PCPU_GET(cpuid), 0);
return (0);
}
@@ -1319,10 +1309,9 @@ kqueue_scan(struct kqueue *kq, int maxevents, struct kevent_copyops *k_ops,
const struct timespec *tsp, struct kevent *keva, struct thread *td)
{
struct kevent *kevp;
- struct timeval atv, rtv, ttv;
struct knote *kn, *marker;
- int count, timeout, nkev, error, influx;
- int haskqglobal, touch;
+ sbintime_t asbt, rsbt;
+ int count, error, haskqglobal, influx, nkev, touch;
count = maxevents;
nkev = 0;
@@ -1332,24 +1321,28 @@ kqueue_scan(struct kqueue *kq, int maxevents, struct kevent_copyops *k_ops,
if (maxevents == 0)
goto done_nl;
+ rsbt = 0;
if (tsp != NULL) {
- TIMESPEC_TO_TIMEVAL(&atv, tsp);
- if (itimerfix(&atv)) {
+ if (tsp->tv_sec < 0 || tsp->tv_nsec < 0 ||
+ tsp->tv_nsec >= 1000000000) {
error = EINVAL;
goto done_nl;
}
- if (tsp->tv_sec == 0 && tsp->tv_nsec == 0)
- timeout = -1;
- else
- timeout = atv.tv_sec > 24 * 60 * 60 ?
- 24 * 60 * 60 * hz : tvtohz(&atv);
- getmicrouptime(&rtv);
- timevaladd(&atv, &rtv);
- } else {
- atv.tv_sec = 0;
- atv.tv_usec = 0;
- timeout = 0;
- }
+ if (timespecisset(tsp)) {
+ if (tsp->tv_sec < INT32_MAX) {
+ rsbt = tstosbt(*tsp);
+ if (TIMESEL(&asbt, rsbt))
+ asbt += tc_tick_sbt;
+ asbt += rsbt;
+ if (asbt < rsbt)
+ asbt = 0;
+ rsbt >>= tc_precexp;
+ } else
+ asbt = 0;
+ } else
+ asbt = -1;
+ } else
+ asbt = 0;
marker = knote_alloc(1);
if (marker == NULL) {
error = ENOMEM;
@@ -1357,28 +1350,16 @@ kqueue_scan(struct kqueue *kq, int maxevents, struct kevent_copyops *k_ops,
}
marker->kn_status = KN_MARKER;
KQ_LOCK(kq);
- goto start;
retry:
- if (atv.tv_sec || atv.tv_usec) {
- getmicrouptime(&rtv);
- if (timevalcmp(&rtv, &atv, >=))
- goto done;
- ttv = atv;
- timevalsub(&ttv, &rtv);
- timeout = ttv.tv_sec > 24 * 60 * 60 ?
- 24 * 60 * 60 * hz : tvtohz(&ttv);
- }
-
-start:
kevp = keva;
if (kq->kq_count == 0) {
- if (timeout < 0) {
+ if (asbt == -1) {
error = EWOULDBLOCK;
} else {
kq->kq_state |= KQ_SLEEP;
- error = msleep(kq, &kq->kq_lock, PSOCK | PCATCH,
- "kqread", timeout);
+ error = msleep_sbt(kq, &kq->kq_lock, PSOCK | PCATCH,
+ "kqread", asbt, rsbt, C_ABSOLUTE);
}
if (error == 0)
goto retry;
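
The timer2sbintime() conversion above relies on sbintime_t being 64-bit
32.32 fixed-point seconds, so unit constants reduce to shifts and divisions.
A standalone sketch of the arithmetic (types and constants redefined locally
for illustration; in the kernel they come from <sys/time.h>):

    #include <stdint.h>

    typedef int64_t sbintime_t;             /* 32.32 fixed-point seconds */

    #define SBT_1S  ((sbintime_t)1 << 32)
    #define SBT_1MS (SBT_1S / 1000)

    /* Same arithmetic as timer2sbintime(): EVFILT_TIMER data is in
     * milliseconds. */
    static inline sbintime_t
    ms_to_sbt(intptr_t ms)
    {
        return (SBT_1MS * ms);
    }

    /* ms_to_sbt(1500) is ~1.5 * 2^32, short only by the truncation
     * in SBT_1S / 1000. */
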
diff --git a/sys/kern/kern_resource.c b/sys/kern/kern_resource.c
index 590cfcb..57ee671 100644
--- a/sys/kern/kern_resource.c
+++ b/sys/kern/kern_resource.c
@@ -469,8 +469,7 @@ sys_rtprio(td, uap)
int
rtp_to_pri(struct rtprio *rtp, struct thread *td)
{
- u_char newpri;
- u_char oldpri;
+ u_char newpri, oldclass, oldpri;
switch (RTP_PRIO_BASE(rtp->type)) {
case RTP_PRIO_REALTIME:
@@ -493,11 +492,12 @@ rtp_to_pri(struct rtprio *rtp, struct thread *td)
}
thread_lock(td);
+ oldclass = td->td_pri_class;
sched_class(td, rtp->type); /* XXX fix */
oldpri = td->td_user_pri;
sched_user_prio(td, newpri);
- if (td->td_user_pri != oldpri && (td == curthread ||
- td->td_priority == oldpri || td->td_user_pri <= PRI_MAX_REALTIME))
+ if (td->td_user_pri != oldpri && (oldclass != RTP_PRIO_NORMAL ||
+ td->td_pri_class != RTP_PRIO_NORMAL))
sched_prio(td, td->td_user_pri);
if (TD_ON_UPILOCK(td) && oldpri != newpri) {
critical_enter();
@@ -645,7 +645,8 @@ lim_cb(void *arg)
}
}
if ((p->p_flag & P_WEXIT) == 0)
- callout_reset(&p->p_limco, hz, lim_cb, p);
+ callout_reset_sbt(&p->p_limco, SBT_1S, 0,
+ lim_cb, p, C_PREL(1));
}
int
@@ -697,7 +698,8 @@ kern_proc_setrlimit(struct thread *td, struct proc *p, u_int which,
case RLIMIT_CPU:
if (limp->rlim_cur != RLIM_INFINITY &&
p->p_cpulimit == RLIM_INFINITY)
- callout_reset(&p->p_limco, hz, lim_cb, p);
+ callout_reset_sbt(&p->p_limco, SBT_1S, 0,
+ lim_cb, p, C_PREL(1));
p->p_cpulimit = limp->rlim_cur;
break;
case RLIMIT_DATA:
@@ -1137,7 +1139,8 @@ lim_fork(struct proc *p1, struct proc *p2)
p2->p_limit = lim_hold(p1->p_limit);
callout_init_mtx(&p2->p_limco, &p2->p_mtx, 0);
if (p1->p_cpulimit != RLIM_INFINITY)
- callout_reset(&p2->p_limco, hz, lim_cb, p2);
+ callout_reset_sbt(&p2->p_limco, SBT_1S, 0,
+ lim_cb, p2, C_PREL(1));
}
void
diff --git a/sys/kern/kern_time.c b/sys/kern/kern_time.c
index 97c288d..4225928 100644
--- a/sys/kern/kern_time.c
+++ b/sys/kern/kern_time.c
@@ -43,6 +43,7 @@ __FBSDID("$FreeBSD$");
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
+#include <sys/sleepqueue.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
@@ -481,38 +482,45 @@ static int nanowait;
int
kern_nanosleep(struct thread *td, struct timespec *rqt, struct timespec *rmt)
{
- struct timespec ts, ts2, ts3;
- struct timeval tv;
+ struct timespec ts;
+ sbintime_t sbt, sbtt, prec, tmp;
+ time_t over;
int error;
if (rqt->tv_nsec < 0 || rqt->tv_nsec >= 1000000000)
return (EINVAL);
if (rqt->tv_sec < 0 || (rqt->tv_sec == 0 && rqt->tv_nsec == 0))
return (0);
- getnanouptime(&ts);
- timespecadd(&ts, rqt);
- TIMESPEC_TO_TIMEVAL(&tv, rqt);
- for (;;) {
- error = tsleep(&nanowait, PWAIT | PCATCH, "nanslp",
- tvtohz(&tv));
- getnanouptime(&ts2);
- if (error != EWOULDBLOCK) {
- if (error == ERESTART)
- error = EINTR;
- if (rmt != NULL) {
- timespecsub(&ts, &ts2);
- if (ts.tv_sec < 0)
- timespecclear(&ts);
- *rmt = ts;
- }
- return (error);
+ ts = *rqt;
+ if (ts.tv_sec > INT32_MAX / 2) {
+ over = ts.tv_sec - INT32_MAX / 2;
+ ts.tv_sec -= over;
+ } else
+ over = 0;
+ tmp = tstosbt(ts);
+ prec = tmp;
+ prec >>= tc_precexp;
+ if (TIMESEL(&sbt, tmp))
+ sbt += tc_tick_sbt;
+ sbt += tmp;
+ error = tsleep_sbt(&nanowait, PWAIT | PCATCH, "nanslp", sbt, prec,
+ C_ABSOLUTE);
+ if (error != EWOULDBLOCK) {
+ if (error == ERESTART)
+ error = EINTR;
+ TIMESEL(&sbtt, tmp);
+ if (rmt != NULL) {
+ ts = sbttots(sbt - sbtt);
+ ts.tv_sec += over;
+ if (ts.tv_sec < 0)
+ timespecclear(&ts);
+ *rmt = ts;
}
- if (timespeccmp(&ts2, &ts, >=))
+ if (sbtt >= sbt)
return (0);
- ts3 = ts;
- timespecsub(&ts3, &ts2);
- TIMESPEC_TO_TIMEVAL(&tv, &ts3);
+ return (error);
}
+ return (0);
}
#ifndef _SYS_SYSPROTO_H_
@@ -683,7 +691,7 @@ kern_getitimer(struct thread *td, u_int which, struct itimerval *aitv)
*aitv = p->p_realtimer;
PROC_UNLOCK(p);
if (timevalisset(&aitv->it_value)) {
- getmicrouptime(&ctv);
+ microuptime(&ctv);
if (timevalcmp(&aitv->it_value, &ctv, <))
timevalclear(&aitv->it_value);
else
@@ -728,28 +736,33 @@ kern_setitimer(struct thread *td, u_int which, struct itimerval *aitv,
{
struct proc *p = td->td_proc;
struct timeval ctv;
+ sbintime_t sbt, pr;
if (aitv == NULL)
return (kern_getitimer(td, which, oitv));
if (which > ITIMER_PROF)
return (EINVAL);
- if (itimerfix(&aitv->it_value))
+ if (itimerfix(&aitv->it_value) ||
+ aitv->it_value.tv_sec > INT32_MAX / 2)
return (EINVAL);
if (!timevalisset(&aitv->it_value))
timevalclear(&aitv->it_interval);
- else if (itimerfix(&aitv->it_interval))
+ else if (itimerfix(&aitv->it_interval) ||
+ aitv->it_interval.tv_sec > INT32_MAX / 2)
return (EINVAL);
if (which == ITIMER_REAL) {
PROC_LOCK(p);
if (timevalisset(&p->p_realtimer.it_value))
callout_stop(&p->p_itcallout);
- getmicrouptime(&ctv);
+ microuptime(&ctv);
if (timevalisset(&aitv->it_value)) {
- callout_reset(&p->p_itcallout, tvtohz(&aitv->it_value),
- realitexpire, p);
+ pr = tvtosbt(aitv->it_value) >> tc_precexp;
timevaladd(&aitv->it_value, &ctv);
+ sbt = tvtosbt(aitv->it_value);
+ callout_reset_sbt(&p->p_itcallout, sbt, pr,
+ realitexpire, p, C_ABSOLUTE);
}
*oitv = p->p_realtimer;
p->p_realtimer = *aitv;
@@ -785,7 +798,8 @@ void
realitexpire(void *arg)
{
struct proc *p;
- struct timeval ctv, ntv;
+ struct timeval ctv;
+ sbintime_t isbt;
p = (struct proc *)arg;
kern_psignal(p, SIGALRM);
@@ -795,19 +809,17 @@ realitexpire(void *arg)
wakeup(&p->p_itcallout);
return;
}
- for (;;) {
+ isbt = tvtosbt(p->p_realtimer.it_interval);
+ if (isbt >= sbt_timethreshold)
+ getmicrouptime(&ctv);
+ else
+ microuptime(&ctv);
+ do {
timevaladd(&p->p_realtimer.it_value,
&p->p_realtimer.it_interval);
- getmicrouptime(&ctv);
- if (timevalcmp(&p->p_realtimer.it_value, &ctv, >)) {
- ntv = p->p_realtimer.it_value;
- timevalsub(&ntv, &ctv);
- callout_reset(&p->p_itcallout, tvtohz(&ntv) - 1,
- realitexpire, p);
- return;
- }
- }
- /*NOTREACHED*/
+ } while (timevalcmp(&p->p_realtimer.it_value, &ctv, <=));
+ callout_reset_sbt(&p->p_itcallout, tvtosbt(p->p_realtimer.it_value),
+ isbt >> tc_precexp, realitexpire, p, C_ABSOLUTE);
}
/*
@@ -822,8 +834,9 @@ itimerfix(struct timeval *tv)
if (tv->tv_sec < 0 || tv->tv_usec < 0 || tv->tv_usec >= 1000000)
return (EINVAL);
- if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < tick)
- tv->tv_usec = tick;
+ if (tv->tv_sec == 0 && tv->tv_usec != 0 &&
+ tv->tv_usec < (u_int)tick / 16)
+ tv->tv_usec = (u_int)tick / 16;
return (0);
}
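
The ">> tc_precexp" idiom above (also used in kqueue_scan() and in
select/poll below) derives the allowed callout slop as a power-of-two
fraction of the sleep length. A standalone sketch, with the types again
redefined locally for illustration:

    #include <stdint.h>

    typedef int64_t sbintime_t;
    #define SBT_1S  ((sbintime_t)1 << 32)

    static inline sbintime_t
    sleep_precision(sbintime_t interval, int tc_precexp)
    {
        return (interval >> tc_precexp);
    }

    /* E.g. with tc_precexp == 5, sleep_precision(SBT_1S, 5) equals
     * SBT_1S / 32: about 31 ms of tolerated deviation on a one-second
     * sleep, letting the kernel coalesce nearby events. */
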
diff --git a/sys/kern/kern_timeout.c b/sys/kern/kern_timeout.c
index 0787c01..df459cd 100644
--- a/sys/kern/kern_timeout.c
+++ b/sys/kern/kern_timeout.c
@@ -126,8 +126,8 @@ struct cc_exec {
int ce_migration_cpu;
sbintime_t ce_migration_time;
#endif
- boolean_t cc_cancel;
- boolean_t cc_waiting;
+ bool cc_cancel;
+ bool cc_waiting;
};
/*
@@ -215,8 +215,8 @@ cc_cce_cleanup(struct callout_cpu *cc, int direct)
cc->cc_exec_entity[direct].cc_curr = NULL;
cc->cc_exec_entity[direct].cc_next = NULL;
- cc->cc_exec_entity[direct].cc_cancel = FALSE;
- cc->cc_exec_entity[direct].cc_waiting = FALSE;
+ cc->cc_exec_entity[direct].cc_cancel = false;
+ cc->cc_exec_entity[direct].cc_waiting = false;
#ifdef SMP
cc->cc_exec_entity[direct].ce_migration_cpu = CPUBLOCK;
cc->cc_exec_entity[direct].ce_migration_time = 0;
@@ -601,7 +601,7 @@ softclock_call_cc(struct callout *c, struct callout_cpu *cc,
sbintime_t new_time;
#endif
#if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
- sbintime_t bt1, bt2;
+ sbintime_t sbt1, sbt2;
struct timespec ts2;
static sbintime_t maxdt = 2 * SBT_1MS; /* 2 msec */
static timeout_t *lastfunc;
@@ -621,7 +621,7 @@ softclock_call_cc(struct callout *c, struct callout_cpu *cc,
else
c->c_flags &= ~CALLOUT_PENDING;
cc->cc_exec_entity[direct].cc_curr = c;
- cc->cc_exec_entity[direct].cc_cancel = FALSE;
+ cc->cc_exec_entity[direct].cc_cancel = false;
CC_UNLOCK(cc);
if (c_lock != NULL) {
class->lc_lock(c_lock, sharedlock);
@@ -634,7 +634,7 @@ softclock_call_cc(struct callout *c, struct callout_cpu *cc,
goto skip;
}
/* The callout cannot be stopped now. */
- cc->cc_exec_entity[direct].cc_cancel = TRUE;
+ cc->cc_exec_entity[direct].cc_cancel = true;
if (c_lock == &Giant.lock_object) {
#ifdef CALLOUT_PROFILING
(*gcalls)++;
@@ -655,7 +655,7 @@ softclock_call_cc(struct callout *c, struct callout_cpu *cc,
CTR3(KTR_CALLOUT, "callout %p func %p arg %p",
c, c_func, c_arg);
}
-#ifdef DIAGNOSTIC
+#if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
sbt1 = sbinuptime();
#endif
THREAD_NO_SLEEPING();
@@ -663,17 +663,17 @@ softclock_call_cc(struct callout *c, struct callout_cpu *cc,
c_func(c_arg);
SDT_PROBE(callout_execute, kernel, , callout_end, c, 0, 0, 0, 0);
THREAD_SLEEPING_OK();
-#ifdef DIAGNOSTIC
- bt2 = sbinuptime();
- bt2 -= bt1;
- if (bt2 > maxdt) {
- if (lastfunc != c_func || bt2 > maxdt * 2) {
- ts2 = sbttots(bt2);
+#if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
+ sbt2 = sbinuptime();
+ sbt2 -= sbt1;
+ if (sbt2 > maxdt) {
+ if (lastfunc != c_func || sbt2 > maxdt * 2) {
+ ts2 = sbttots(sbt2);
printf(
"Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
c_func, c_arg, (intmax_t)ts2.tv_sec, ts2.tv_nsec);
}
- maxdt = bt2;
+ maxdt = sbt2;
lastfunc = c_func;
}
#endif
@@ -700,7 +700,7 @@ skip:
*/
c->c_flags &= ~CALLOUT_DFRMIGRATION;
}
- cc->cc_exec_entity[direct].cc_waiting = FALSE;
+ cc->cc_exec_entity[direct].cc_waiting = false;
CC_UNLOCK(cc);
wakeup(&cc->cc_exec_entity[direct].cc_waiting);
CC_LOCK(cc);
@@ -954,7 +954,7 @@ callout_reset_sbt_on(struct callout *c, sbintime_t sbt, sbintime_t precision,
* can cancel the callout if it has not really started.
*/
if (c->c_lock != NULL && !cc->cc_exec_entity[direct].cc_cancel)
- cancelled = cc->cc_exec_entity[direct].cc_cancel = TRUE;
+ cancelled = cc->cc_exec_entity[direct].cc_cancel = true;
if (cc->cc_exec_entity[direct].cc_waiting) {
/*
* Someone has called callout_drain to kill this
@@ -1135,7 +1135,7 @@ again:
* will be packed up, just let softclock()
* take care of it.
*/
- cc->cc_exec_entity[direct].cc_waiting = TRUE;
+ cc->cc_exec_entity[direct].cc_waiting = true;
DROP_GIANT();
CC_UNLOCK(cc);
sleepq_add(
@@ -1161,7 +1161,7 @@ again:
* lock, the callout will be skipped in
* softclock().
*/
- cc->cc_exec_entity[direct].cc_cancel = TRUE;
+ cc->cc_exec_entity[direct].cc_cancel = true;
CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
c, c->c_func, c->c_arg);
KASSERT(!cc_cce_migrating(cc, direct),
diff --git a/sys/kern/subr_log.c b/sys/kern/subr_log.c
index 8981358..1e61274 100644
--- a/sys/kern/subr_log.c
+++ b/sys/kern/subr_log.c
@@ -117,8 +117,8 @@ logopen(struct cdev *dev, int flags, int mode, struct thread *td)
return (EBUSY);
}
log_open = 1;
- callout_reset(&logsoftc.sc_callout, hz / log_wakeups_per_second,
- logtimeout, NULL);
+ callout_reset_sbt(&logsoftc.sc_callout,
+ SBT_1S / log_wakeups_per_second, 0, logtimeout, NULL, C_PREL(1));
mtx_unlock(&msgbuf_lock);
fsetown(td->td_proc->p_pid, &logsoftc.sc_sigio); /* signal process only */
@@ -233,22 +233,21 @@ logtimeout(void *arg)
if (!log_open)
return;
- if (log_wakeups_per_second < 1) {
- printf("syslog wakeup is less than one. Adjusting to 1.\n");
- log_wakeups_per_second = 1;
- }
- if (msgbuftrigger == 0) {
- callout_schedule(&logsoftc.sc_callout,
- hz / log_wakeups_per_second);
- return;
- }
+ if (msgbuftrigger == 0)
+ goto done;
msgbuftrigger = 0;
selwakeuppri(&logsoftc.sc_selp, LOG_RDPRI);
KNOTE_LOCKED(&logsoftc.sc_selp.si_note, 0);
if ((logsoftc.sc_state & LOG_ASYNC) && logsoftc.sc_sigio != NULL)
pgsigio(&logsoftc.sc_sigio, SIGIO, 0);
cv_broadcastpri(&log_wakeup, LOG_RDPRI);
- callout_schedule(&logsoftc.sc_callout, hz / log_wakeups_per_second);
+done:
+ if (log_wakeups_per_second < 1) {
+ printf("syslog wakeup is less than one. Adjusting to 1.\n");
+ log_wakeups_per_second = 1;
+ }
+ callout_reset_sbt(&logsoftc.sc_callout,
+ SBT_1S / log_wakeups_per_second, 0, logtimeout, NULL, C_PREL(1));
}
/*ARGSUSED*/
diff --git a/sys/kern/sys_generic.c b/sys/kern/sys_generic.c
index 39f33f3..2d149b5 100644
--- a/sys/kern/sys_generic.c
+++ b/sys/kern/sys_generic.c
@@ -103,7 +103,7 @@ static int dofilewrite(struct thread *, int, struct file *, struct uio *,
off_t, int);
static void doselwakeup(struct selinfo *, int);
static void seltdinit(struct thread *);
-static int seltdwait(struct thread *, int);
+static int seltdwait(struct thread *, sbintime_t, sbintime_t);
static void seltdclear(struct thread *);
/*
@@ -950,9 +950,10 @@ kern_select(struct thread *td, int nd, fd_set *fd_in, fd_set *fd_ou,
*/
fd_mask s_selbits[howmany(2048, NFDBITS)];
fd_mask *ibits[3], *obits[3], *selbits, *sbp;
- struct timeval atv, rtv, ttv;
- int error, lf, ndu, timo;
+ struct timeval rtv;
+ sbintime_t asbt, precision, rsbt;
u_int nbufbytes, ncpbytes, ncpubytes, nfdbits;
+ int error, lf, ndu;
if (nd < 0)
return (EINVAL);
@@ -1042,35 +1043,36 @@ kern_select(struct thread *td, int nd, fd_set *fd_in, fd_set *fd_ou,
if (nbufbytes != 0)
bzero(selbits, nbufbytes / 2);
+ precision = 0;
if (tvp != NULL) {
- atv = *tvp;
- if (itimerfix(&atv)) {
+ rtv = *tvp;
+ if (rtv.tv_sec < 0 || rtv.tv_usec < 0 ||
+ rtv.tv_usec >= 1000000) {
error = EINVAL;
goto done;
}
- getmicrouptime(&rtv);
- timevaladd(&atv, &rtv);
- } else {
- atv.tv_sec = 0;
- atv.tv_usec = 0;
- }
- timo = 0;
+ if (rtv.tv_sec == 0 && rtv.tv_usec == 0)
+ asbt = 0;
+ else if (rtv.tv_sec < INT32_MAX) {
+ rsbt = tvtosbt(rtv);
+ precision = rsbt;
+ precision >>= tc_precexp;
+ if (TIMESEL(&asbt, rsbt))
+ asbt += tc_tick_sbt;
+ asbt += rsbt;
+ if (asbt < rsbt)
+ asbt = -1;
+ } else
+ asbt = -1;
+ } else
+ asbt = -1;
seltdinit(td);
/* Iterate until the timeout expires or descriptors become ready. */
for (;;) {
error = selscan(td, ibits, obits, nd);
if (error || td->td_retval[0] != 0)
break;
- if (atv.tv_sec || atv.tv_usec) {
- getmicrouptime(&rtv);
- if (timevalcmp(&rtv, &atv, >=))
- break;
- ttv = atv;
- timevalsub(&ttv, &rtv);
- timo = ttv.tv_sec > 24 * 60 * 60 ?
- 24 * 60 * 60 * hz : tvtohz(&ttv);
- }
- error = seltdwait(td, timo);
+ error = seltdwait(td, asbt, precision);
if (error)
break;
error = selrescan(td, ibits, obits);
@@ -1278,9 +1280,9 @@ sys_poll(td, uap)
{
struct pollfd *bits;
struct pollfd smallbits[32];
- struct timeval atv, rtv, ttv;
- int error, timo;
+ sbintime_t asbt, precision, rsbt;
u_int nfds;
+ int error;
size_t ni;
nfds = uap->nfds;
@@ -1294,36 +1296,31 @@ sys_poll(td, uap)
error = copyin(uap->fds, bits, ni);
if (error)
goto done;
+ precision = 0;
if (uap->timeout != INFTIM) {
- atv.tv_sec = uap->timeout / 1000;
- atv.tv_usec = (uap->timeout % 1000) * 1000;
- if (itimerfix(&atv)) {
+ if (uap->timeout < 0) {
error = EINVAL;
goto done;
}
- getmicrouptime(&rtv);
- timevaladd(&atv, &rtv);
- } else {
- atv.tv_sec = 0;
- atv.tv_usec = 0;
- }
- timo = 0;
+ if (uap->timeout == 0)
+ asbt = 0;
+ else {
+ rsbt = SBT_1MS * uap->timeout;
+ precision = rsbt;
+ precision >>= tc_precexp;
+ if (TIMESEL(&asbt, rsbt))
+ asbt += tc_tick_sbt;
+ asbt += rsbt;
+ }
+ } else
+ asbt = -1;
seltdinit(td);
/* Iterate until the timeout expires or descriptors become ready. */
for (;;) {
error = pollscan(td, bits, nfds);
if (error || td->td_retval[0] != 0)
break;
- if (atv.tv_sec || atv.tv_usec) {
- getmicrouptime(&rtv);
- if (timevalcmp(&rtv, &atv, >=))
- break;
- ttv = atv;
- timevalsub(&ttv, &rtv);
- timo = ttv.tv_sec > 24 * 60 * 60 ?
- 24 * 60 * 60 * hz : tvtohz(&ttv);
- }
- error = seltdwait(td, timo);
+ error = seltdwait(td, asbt, precision);
if (error)
break;
error = pollrescan(td);
@@ -1667,7 +1664,7 @@ out:
}
static int
-seltdwait(struct thread *td, int timo)
+seltdwait(struct thread *td, sbintime_t sbt, sbintime_t precision)
{
struct seltd *stp;
int error;
@@ -1686,8 +1683,11 @@ seltdwait(struct thread *td, int timo)
mtx_unlock(&stp->st_mtx);
return (0);
}
- if (timo > 0)
- error = cv_timedwait_sig(&stp->st_wait, &stp->st_mtx, timo);
+ if (sbt == 0)
+ error = EWOULDBLOCK;
+ else if (sbt != -1)
+ error = cv_timedwait_sig_sbt(&stp->st_wait, &stp->st_mtx,
+ sbt, precision, C_ABSOLUTE);
else
error = cv_wait_sig(&stp->st_wait, &stp->st_mtx);
mtx_unlock(&stp->st_mtx);
diff --git a/sys/modules/drm2/drm2/Makefile b/sys/modules/drm2/drm2/Makefile
index 1bc979d..215a0ed 100644
--- a/sys/modules/drm2/drm2/Makefile
+++ b/sys/modules/drm2/drm2/Makefile
@@ -1,6 +1,6 @@
# $FreeBSD$
-.PATH: ${.CURDIR}/../../../dev/drm2
+.PATH: ${.CURDIR}/../../../dev/drm2 ${.CURDIR}/../../../dev/drm2/ttm
KMOD = drm2
SRCS = \
drm_agpsupport.c \
@@ -18,6 +18,7 @@ SRCS = \
drm_fops.c \
drm_gem.c \
drm_gem_names.c \
+ drm_global.c \
drm_hashtab.c \
drm_ioctl.c \
drm_irq.c \
@@ -31,7 +32,19 @@ SRCS = \
drm_sman.c \
drm_stub.c \
drm_sysctl.c \
- drm_vm.c
+ drm_vm.c \
+ ttm_lock.c \
+ ttm_object.c \
+ ttm_tt.c \
+ ttm_bo_util.c \
+ ttm_bo.c \
+ ttm_bo_manager.c \
+ ttm_execbuf_util.c \
+ ttm_memory.c \
+ ttm_page_alloc.c \
+ ttm_bo_vm.c
+#ttm_agp_backend.c
+#ttm_page_alloc_dma.c
.if ${MACHINE_CPUARCH} == "amd64"
SRCS += drm_ioc32.c
diff --git a/sys/modules/uart/Makefile b/sys/modules/uart/Makefile
index 61bcdec..d8a4869 100644
--- a/sys/modules/uart/Makefile
+++ b/sys/modules/uart/Makefile
@@ -26,6 +26,7 @@ SRCS= uart_bus_acpi.c ${uart_bus_ebus} uart_bus_isa.c uart_bus_pccard.c \
SRCS+= bus_if.h card_if.h device_if.h isa_if.h ${ofw_bus_if} pci_if.h \
power_if.h pccarddevs.h serdev_if.h
+SRCS+= opt_platform.h
MFILES= dev/pccard/card_if.m dev/pccard/power_if.m dev/pci/pci_if.m \
dev/ofw/ofw_bus_if.m dev/uart/uart_if.m isa/isa_if.m kern/bus_if.m \
diff --git a/sys/net/route.c b/sys/net/route.c
index e69ce48..c0f6526 100644
--- a/sys/net/route.c
+++ b/sys/net/route.c
@@ -1498,7 +1498,7 @@ rtinit1(struct ifaddr *ifa, int cmd, int flags, int fibnum)
if (rnh == NULL)
/* this table doesn't exist but others might */
continue;
- RADIX_NODE_HEAD_LOCK(rnh);
+ RADIX_NODE_HEAD_RLOCK(rnh);
#ifdef RADIX_MPATH
if (rn_mpath_capable(rnh)) {
@@ -1527,7 +1527,7 @@ rtinit1(struct ifaddr *ifa, int cmd, int flags, int fibnum)
(rn->rn_flags & RNF_ROOT) ||
RNTORT(rn)->rt_ifa != ifa ||
!sa_equal((struct sockaddr *)rn->rn_key, dst));
- RADIX_NODE_HEAD_UNLOCK(rnh);
+ RADIX_NODE_HEAD_RUNLOCK(rnh);
if (error) {
/* this is only an error if bad on ALL tables */
continue;
diff --git a/sys/netinet/siftr.c b/sys/netinet/siftr.c
index b0e111c..886be06 100644
--- a/sys/netinet/siftr.c
+++ b/sys/netinet/siftr.c
@@ -1314,7 +1314,7 @@ siftr_manage_ops(uint8_t action)
* flow seen and freeing any malloc'd memory.
* The hash consists of an array of LISTs (man 3 queue).
*/
- for (i = 0; i < siftr_hashmask; i++) {
+ for (i = 0; i <= siftr_hashmask; i++) {
LIST_FOREACH_SAFE(counter, counter_hash + i, nodes,
tmp_counter) {
key = counter->key;
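
The siftr change above fixes the classic hash-mask off-by-one: the mask of a
power-of-two table is nbuckets - 1, so valid indices run 0..mask inclusive
and a "<" loop silently skips the last bucket. A sketch with hypothetical
names:

    #define NBUCKETS 128                    /* power of two */
    #define HASHMASK (NBUCKETS - 1)         /* 127 */

    struct bucket;                          /* hypothetical */
    void flush_bucket(struct bucket *);     /* hypothetical helper */

    static void
    flush_all(struct bucket *tbl)
    {
        int i;

        /* "i < HASHMASK" would never visit tbl[127]. */
        for (i = 0; i <= HASHMASK; i++)
            flush_bucket(&tbl[i]);
    }
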
diff --git a/sys/sparc64/pci/psycho.c b/sys/sparc64/pci/psycho.c
index a6e1469..8717426 100644
--- a/sys/sparc64/pci/psycho.c
+++ b/sys/sparc64/pci/psycho.c
@@ -200,7 +200,7 @@ struct psycho_icarg {
* "Sabre" is the UltraSPARC IIi onboard UPA to PCI bridge. It manages a
* single PCI bus and does not have a streaming buffer. It often has an APB
* (advanced PCI bridge) connected to it, which was designed specifically for
- * the IIi. The APB let's the IIi handle two independednt PCI buses, and
+ * the IIi. The APB lets the IIi handle two independent PCI buses, and
* appears as two "Simba"'s underneath the Sabre.
*
* "Hummingbird" is the UltraSPARC IIe onboard UPA to PCI bridge. It's
diff --git a/sys/sys/diskmbr.h b/sys/sys/diskmbr.h
index 747f70b..c92364d 100644
--- a/sys/sys/diskmbr.h
+++ b/sys/sys/diskmbr.h
@@ -45,6 +45,7 @@
#define DOSMAGIC 0xAA55
#define DOSPTYP_EXT 0x05 /* DOS extended partition */
+#define DOSPTYP_FAT16 0x06 /* FAT16 partition */
#define DOSPTYP_NTFS 0x07 /* NTFS partition */
#define DOSPTYP_FAT32 0x0b /* FAT32 partition */
#define DOSPTYP_EXTLBA 0x0f /* DOS extended partition */
diff --git a/sys/sys/param.h b/sys/sys/param.h
index 26db635..ce72863 100644
--- a/sys/sys/param.h
+++ b/sys/sys/param.h
@@ -58,7 +58,7 @@
* in the range 5 to 9.
*/
#undef __FreeBSD_version
-#define __FreeBSD_version 1000028 /* Master, propagated to newvers */
+#define __FreeBSD_version 1000029 /* Master, propagated to newvers */
/*
* __FreeBSD_kernel__ indicates that this system uses the kernel of FreeBSD,
diff --git a/sys/sys/pmckern.h b/sys/sys/pmckern.h
index c623648..e3e18a6 100644
--- a/sys/sys/pmckern.h
+++ b/sys/sys/pmckern.h
@@ -87,9 +87,9 @@ struct pmckern_soft {
* Soft PMC.
*/
-#define PMC_SOFT_DEFINE(prov, mod, func, name) \
+#define PMC_SOFT_DEFINE_EX(prov, mod, func, name, alloc, release) \
struct pmc_soft pmc_##prov##_##mod##_##func##_##name = \
- { 0, { #prov "_" #mod "_" #func "." #name, 0 } }; \
+ { 0, alloc, release, { #prov "_" #mod "_" #func "." #name, 0 } }; \
SYSINIT(pmc_##prov##_##mod##_##func##_##name##_init, SI_SUB_KDTRACE, \
SI_ORDER_SECOND + 1, pmc_soft_ev_register, \
&pmc_##prov##_##mod##_##func##_##name ); \
@@ -97,6 +97,9 @@ struct pmckern_soft {
SI_SUB_KDTRACE, SI_ORDER_SECOND + 1, pmc_soft_ev_deregister, \
&pmc_##prov##_##mod##_##func##_##name )
+#define PMC_SOFT_DEFINE(prov, mod, func, name) \
+ PMC_SOFT_DEFINE_EX(prov, mod, func, name, NULL, NULL)
+
#define PMC_SOFT_DECLARE(prov, mod, func, name) \
extern struct pmc_soft pmc_##prov##_##mod##_##func##_##name
@@ -147,6 +150,8 @@ do { \
struct pmc_soft {
int ps_running;
+ void (*ps_alloc)(void);
+ void (*ps_release)(void);
struct pmc_dyn_event_descr ps_ev;
};
diff --git a/tools/tools/cxgbetool/cxgbetool.c b/tools/tools/cxgbetool/cxgbetool.c
index 39e7376..32f6d26 100644
--- a/tools/tools/cxgbetool/cxgbetool.c
+++ b/tools/tools/cxgbetool/cxgbetool.c
@@ -31,6 +31,7 @@ __FBSDID("$FreeBSD$");
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>
+#include <ctype.h>
#include <errno.h>
#include <err.h>
#include <fcntl.h>
diff --git a/usr.bin/c89/c89.c b/usr.bin/c89/c89.c
index a1dc9b2..930153f 100644
--- a/usr.bin/c89/c89.c
+++ b/usr.bin/c89/c89.c
@@ -72,7 +72,7 @@ main(int argc, char **argv)
Argv.a = malloc((argc + 1 + N_ARGS_PREPENDED) * sizeof *Argv.a);
if (Argv.a == NULL)
err(1, "malloc");
- Argv.a[Argc++] = argv[0];
+ Argv.a[Argc++] = CC;
for (j = 0; j < N_ARGS_PREPENDED; ++j)
Argv.a[Argc++] = args_prepended[j];
while ((i = getopt(argc, argv, "cD:EgI:l:L:o:OsU:")) != -1) {
diff --git a/usr.bin/c99/c99.c b/usr.bin/c99/c99.c
index f5f273f..79cca89 100644
--- a/usr.bin/c99/c99.c
+++ b/usr.bin/c99/c99.c
@@ -70,7 +70,7 @@ main(int argc, char *argv[])
usage();
}
- addarg("cc");
+ addarg("/usr/bin/cc");
addarg("-std=iso9899:1999");
addarg("-pedantic");
for (i = 1; i < optind; i++)
diff --git a/usr.sbin/bhyve/acpi.c b/usr.sbin/bhyve/acpi.c
index cabe75e..f9504f8 100644
--- a/usr.sbin/bhyve/acpi.c
+++ b/usr.sbin/bhyve/acpi.c
@@ -687,12 +687,16 @@ basl_load(int fd, uint64_t off)
int err;
err = 0;
- gaddr = paddr_guest2host(basl_acpi_base + off, sb.st_size);
- if (gaddr != NULL) {
- if (fstat(fd, &sb) < 0 || read(fd, gaddr, sb.st_size) < 0)
- err = errno;
- } else
- err = EFAULT;
+ if (fstat(fd, &sb) < 0) {
+ err = errno;
+ } else {
+ gaddr = paddr_guest2host(basl_acpi_base + off, sb.st_size);
+ if (gaddr != NULL) {
+ if (read(fd, gaddr, sb.st_size) < 0)
+ err = errno;
+ } else
+ err = EFAULT;
+ }
return (err);
}
diff --git a/usr.sbin/bhyve/pci_virtio_block.c b/usr.sbin/bhyve/pci_virtio_block.c
index 31ff2e6..0c34666 100644
--- a/usr.sbin/bhyve/pci_virtio_block.c
+++ b/usr.sbin/bhyve/pci_virtio_block.c
@@ -164,14 +164,19 @@ pci_vtblk_iosize(struct pci_devinst *pi)
static int
hq_num_avail(struct vring_hqueue *hq)
{
- int ndesc;
+ uint16_t ndesc;
- if (*hq->hq_avail_idx >= hq->hq_cur_aidx)
- ndesc = *hq->hq_avail_idx - hq->hq_cur_aidx;
- else
- ndesc = UINT16_MAX - hq->hq_cur_aidx + *hq->hq_avail_idx + 1;
+ /*
+ * We're just computing (a-b) in GF(2^16).
+ *
+ * The only glitch here is that in standard C,
+ * uint16_t promotes to (signed) int when int has
+ * more than 16 bits (pretty much always now), so
+ * we have to force it back to unsigned.
+ */
+ ndesc = (unsigned)*hq->hq_avail_idx - (unsigned)hq->hq_cur_aidx;
- assert(ndesc >= 0 && ndesc <= hq->hq_size);
+ assert(ndesc <= hq->hq_size);
return (ndesc);
}
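
The modular index arithmetic introduced above can be checked in isolation;
this standalone demonstration (not part of the commit) shows why the
unsigned form is safe across 16-bit wraparound:

    #include <assert.h>
    #include <stdint.h>

    int
    main(void)
    {
        uint16_t avail = 3;     /* producer index, already wrapped */
        uint16_t cur = 65534;   /* consumer index, not yet wrapped */
        uint16_t ndesc;

        /* The casts defeat promotion to signed int; assigning back
         * to uint16_t reduces the difference mod 2^16. */
        ndesc = (unsigned)avail - (unsigned)cur;
        assert(ndesc == 5);     /* 3 - 65534 == 5 (mod 65536) */
        return (0);
    }
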
diff --git a/usr.sbin/bhyve/pci_virtio_net.c b/usr.sbin/bhyve/pci_virtio_net.c
index a5cf8b3..11647d6 100644
--- a/usr.sbin/bhyve/pci_virtio_net.c
+++ b/usr.sbin/bhyve/pci_virtio_net.c
@@ -172,12 +172,17 @@ hq_num_avail(struct vring_hqueue *hq)
{
int ndesc;
- if (*hq->hq_avail_idx >= hq->hq_cur_aidx)
- ndesc = *hq->hq_avail_idx - hq->hq_cur_aidx;
- else
- ndesc = UINT16_MAX - hq->hq_cur_aidx + *hq->hq_avail_idx + 1;
+ /*
+ * We're just computing (a-b) in GF(2^16).
+ *
+ * The only glitch here is that in standard C,
+ * uint16_t promotes to (signed) int when int has
+ * more than 16 bits (pretty much always now), so
+ * we have to force it back to unsigned.
+ */
+ ndesc = (unsigned)*hq->hq_avail_idx - (unsigned)hq->hq_cur_aidx;
- assert(ndesc >= 0 && ndesc <= hq->hq_size);
+ assert(ndesc <= hq->hq_size);
return (ndesc);
}
diff --git a/usr.sbin/pkg/Makefile b/usr.sbin/pkg/Makefile
index c2ca0a3..0e1c2d8 100644
--- a/usr.sbin/pkg/Makefile
+++ b/usr.sbin/pkg/Makefile
@@ -1,10 +1,10 @@
# $FreeBSD$
PROG= pkg
-SRCS= pkg.c dns_utils.c
+SRCS= pkg.c dns_utils.c config.c
NO_MAN= yes
-DPADD= ${LIBARCHIVE} ${LIBELF} ${LIBFETCH}
-LDADD= -larchive -lelf -lfetch
+DPADD= ${LIBARCHIVE} ${LIBELF} ${LIBFETCH} ${LIBBSDYML} ${LIBSUBF}
+LDADD= -larchive -lelf -lfetch -lbsdyml -lsbuf
.include <bsd.prog.mk>
diff --git a/usr.sbin/pkg/config.c b/usr.sbin/pkg/config.c
new file mode 100644
index 0000000..5924d57
--- /dev/null
+++ b/usr.sbin/pkg/config.c
@@ -0,0 +1,428 @@
+/*-
+ * Copyright (c) 2013 Baptiste Daroussin <bapt@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/sbuf.h>
+#include <sys/elf_common.h>
+#include <sys/endian.h>
+
+#include <bsdyml.h>
+#include <ctype.h>
+#include <err.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <gelf.h>
+#include <inttypes.h>
+#include <paths.h>
+#include <stdbool.h>
+#include <string.h>
+#include <unistd.h>
+
+#include "elf_tables.h"
+#include "config.h"
+
+#define roundup2(x, y) (((x)+((y)-1))&(~((y)-1))) /* if y is a power of two */
+
+struct config_entry {
+ uint8_t type;
+ const char *key;
+ const char *val;
+ char *value;
+ bool envset;
+};
+
+static struct config_entry c[] = {
+ [PACKAGESITE] = {
+ PKG_CONFIG_STRING,
+ "PACKAGESITE",
+ "http://pkg.FreeBSD.org/${ABI}/latest",
+ NULL,
+ false,
+ },
+ [ABI] = {
+ PKG_CONFIG_STRING,
+ "ABI",
+ NULL,
+ NULL,
+ false,
+ },
+ [MIRROR_TYPE] = {
+ PKG_CONFIG_STRING,
+ "MIRROR_TYPE",
+ "SRV",
+ NULL,
+ false,
+ },
+ [ASSUME_ALWAYS_YES] = {
+ PKG_CONFIG_BOOL,
+ "ASSUME_ALWAYS_YES",
+ "NO",
+ NULL,
+ false,
+ }
+};
+
+static const char *
+elf_corres_to_string(struct _elf_corres *m, int e)
+{
+ int i;
+
+ for (i = 0; m[i].string != NULL; i++)
+ if (m[i].elf_nb == e)
+ return (m[i].string);
+
+ return ("unknown");
+}
+
+static int
+pkg_get_myabi(char *dest, size_t sz)
+{
+ Elf *elf;
+ Elf_Data *data;
+ Elf_Note note;
+ Elf_Scn *scn;
+ char *src, *osname;
+ const char *abi;
+ GElf_Ehdr elfhdr;
+ GElf_Shdr shdr;
+ int fd, i, ret;
+ uint32_t version;
+
+ version = 0;
+ ret = -1;
+ scn = NULL;
+ abi = NULL;
+
+ if (elf_version(EV_CURRENT) == EV_NONE) {
+ warnx("ELF library initialization failed: %s",
+ elf_errmsg(-1));
+ return (-1);
+ }
+
+ if ((fd = open(_PATH_BSHELL, O_RDONLY)) < 0) {
+ warn("open()");
+ return (-1);
+ }
+
+ if ((elf = elf_begin(fd, ELF_C_READ, NULL)) == NULL) {
+ ret = -1;
+ warnx("elf_begin() failed: %s.", elf_errmsg(-1));
+ goto cleanup;
+ }
+
+ if (gelf_getehdr(elf, &elfhdr) == NULL) {
+ ret = -1;
+ warn("getehdr() failed: %s.", elf_errmsg(-1));
+ goto cleanup;
+ }
+ while ((scn = elf_nextscn(elf, scn)) != NULL) {
+ if (gelf_getshdr(scn, &shdr) != &shdr) {
+ ret = -1;
+ warn("getshdr() failed: %s.", elf_errmsg(-1));
+ goto cleanup;
+ }
+
+ if (shdr.sh_type == SHT_NOTE)
+ break;
+ }
+
+ if (scn == NULL) {
+ ret = -1;
+ warn("failed to get the note section");
+ goto cleanup;
+ }
+
+ data = elf_getdata(scn, NULL);
+ src = data->d_buf;
+ for (;;) {
+ memcpy(&note, src, sizeof(Elf_Note));
+ src += sizeof(Elf_Note);
+ if (note.n_type == NT_VERSION)
+ break;
+ src += note.n_namesz + note.n_descsz;
+ }
+ osname = src;
+ src += roundup2(note.n_namesz, 4);
+ if (elfhdr.e_ident[EI_DATA] == ELFDATA2MSB)
+ version = be32dec(src);
+ else
+ version = le32dec(src);
+
+ for (i = 0; osname[i] != '\0'; i++)
+ osname[i] = (char)tolower(osname[i]);
+
+ snprintf(dest, sz, "%s:%d:%s:%s",
+ osname, version / 100000,
+ elf_corres_to_string(mach_corres, (int)elfhdr.e_machine),
+ elf_corres_to_string(wordsize_corres,
+ (int)elfhdr.e_ident[EI_CLASS]));
+
+ ret = 0;
+
+ switch (elfhdr.e_machine) {
+ case EM_ARM:
+ snprintf(dest + strlen(dest), sz - strlen(dest),
+ ":%s:%s:%s", elf_corres_to_string(endian_corres,
+ (int)elfhdr.e_ident[EI_DATA]),
+ (elfhdr.e_flags & EF_ARM_NEW_ABI) > 0 ?
+ "eabi" : "oabi",
+ (elfhdr.e_flags & EF_ARM_VFP_FLOAT) > 0 ?
+ "softfp" : "vfp");
+ break;
+ case EM_MIPS:
+ /*
+ * This is taken from the binutils sources
+ * (include/elf/mips.h); the mapping is figured
+ * out from binutils (gas/config/tc-mips.c).
+ */
+ switch (elfhdr.e_flags & EF_MIPS_ABI) {
+ case E_MIPS_ABI_O32:
+ abi = "o32";
+ break;
+ case E_MIPS_ABI_N32:
+ abi = "n32";
+ break;
+ default:
+ if (elfhdr.e_ident[EI_DATA] ==
+ ELFCLASS32)
+ abi = "o32";
+ else if (elfhdr.e_ident[EI_DATA] ==
+ ELFCLASS64)
+ abi = "n64";
+ break;
+ }
+ snprintf(dest + strlen(dest), sz - strlen(dest),
+ ":%s:%s", elf_corres_to_string(endian_corres,
+ (int)elfhdr.e_ident[EI_DATA]), abi);
+ break;
+ }
+
+cleanup:
+ if (elf != NULL)
+ elf_end(elf);
+
+ close(fd);
+ return (ret);
+}
+
+static void
+subst_packagesite(const char *abi)
+{
+ struct sbuf *newval;
+ const char *variable_string;
+ const char *oldval;
+
+ if (c[PACKAGESITE].value != NULL)
+ oldval = c[PACKAGESITE].value;
+ else
+ oldval = c[PACKAGESITE].val;
+
+ if ((variable_string = strstr(oldval, "${ABI}")) == NULL)
+ return;
+
+ newval = sbuf_new_auto();
+ sbuf_bcat(newval, oldval, variable_string - oldval);
+ sbuf_cat(newval, abi);
+ sbuf_cat(newval, variable_string + strlen("${ABI}"));
+ sbuf_finish(newval);
+
+ free(c[PACKAGESITE].value);
+ c[PACKAGESITE].value = strdup(sbuf_data(newval));
+}
+
+static void
+config_parse(yaml_document_t *doc, yaml_node_t *node)
+{
+ yaml_node_pair_t *pair;
+ yaml_node_t *key, *val;
+ struct sbuf *buf = sbuf_new_auto();
+ int i;
+ size_t j;
+
+ pair = node->data.mapping.pairs.start;
+
+ while (pair < node->data.mapping.pairs.top) {
+ key = yaml_document_get_node(doc, pair->key);
+ val = yaml_document_get_node(doc, pair->value);
+
+ /*
+ * Silently ignore empty keys; they can be empty
+ * lines or user mistakes.
+ */
+ if (key->data.scalar.length <= 0) {
+ ++pair;
+ continue;
+ }
+
+ /*
+ * Silently skip on purpose, to allow the user to
+ * leave empty lines without complaining.
+ */
+ if (val->type == YAML_NO_NODE ||
+ (val->type == YAML_SCALAR_NODE &&
+ val->data.scalar.length <= 0)) {
+ ++pair;
+ continue;
+ }
+
+ sbuf_clear(buf);
+ for (j = 0; j < strlen(key->data.scalar.value); ++j)
+ sbuf_putc(buf, toupper(key->data.scalar.value[j]));
+
+ sbuf_finish(buf);
+ for (i = 0; i < CONFIG_SIZE; i++) {
+ if (strcmp(sbuf_data(buf), c[i].key) == 0)
+ break;
+ }
+
+ if (i == CONFIG_SIZE) {
+ ++pair;
+ continue;
+ }
+
+ /* env has priority over config file */
+ if (c[i].envset) {
+ ++pair;
+ continue;
+ }
+
+ c[i].value = strdup(val->data.scalar.value);
+ ++pair;
+ }
+
+ sbuf_delete(buf);
+}
+
+int
+config_init(void)
+{
+ FILE *fp;
+ yaml_parser_t parser;
+ yaml_document_t doc;
+ yaml_node_t *node;
+ const char *val;
+ int i;
+ const char *localbase;
+ char confpath[MAXPATHLEN];
+ char abi[BUFSIZ];
+
+ for (i = 0; i < CONFIG_SIZE; i++) {
+ val = getenv(c[i].key);
+ if (val != NULL) {
+ c[i].val = val;
+ c[i].envset = true;
+ }
+ }
+
+ localbase = getenv("LOCALBASE") ? getenv("LOCALBASE") : _LOCALBASE;
+ snprintf(confpath, sizeof(confpath), "%s/etc/pkg.conf", localbase);
+
+ if ((fp = fopen(confpath, "r")) == NULL) {
+ if (errno != ENOENT)
+ err(EXIT_FAILURE, "Unable to open configuration file %s", confpath);
+ /* no configuration present */
+ goto finalize;
+ }
+
+ yaml_parser_initialize(&parser);
+ yaml_parser_set_input_file(&parser, fp);
+ yaml_parser_load(&parser, &doc);
+
+ node = yaml_document_get_root_node(&doc);
+
+ if (node != NULL) {
+ if (node->type != YAML_MAPPING_NODE)
+ warnx("Invalid configuration format, ignoring the configuration file");
+ else
+ config_parse(&doc, node);
+ } else {
+ warnx("Invalid configuration format, ignoring the configuration file");
+ }
+
+ yaml_document_delete(&doc);
+ yaml_parser_delete(&parser);
+
+finalize:
+ if (c[ABI].val == NULL && c[ABI].value == NULL) {
+ if (pkg_get_myabi(abi, BUFSIZ) != 0)
+ errx(EXIT_FAILURE, "Failed to determine the system ABI");
+ c[ABI].val = abi;
+ }
+
+ subst_packagesite(c[ABI].value != NULL ? c[ABI].value : c[ABI].val);
+
+ return (0);
+}
+
+int
+config_string(pkg_config_key k, const char **val)
+{
+ if (c[k].type != PKG_CONFIG_STRING)
+ return (-1);
+
+ if (c[k].value != NULL)
+ *val = c[k].value;
+ else
+ *val = c[k].val;
+
+ return (0);
+}
+
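+/*
+ * Fetch the boolean value of key k: "true", "yes", "on" (all
+ * case-insensitive) and values starting with '1' count as true.
+ */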
+int
+config_bool(pkg_config_key k, bool *val)
+{
+ const char *value;
+
+ if (c[k].type != PKG_CONFIG_BOOL)
+ return (-1);
+
+ *val = false;
+
+ if (c[k].value != NULL)
+ value = c[k].value;
+ else
+ value = c[k].val;
+
+ if (strcasecmp(value, "true") == 0 ||
+ strcasecmp(value, "yes") == 0 ||
+ strcasecmp(value, "on") == 0 ||
+ *value == '1')
+ *val = true;
+
+ return (0);
+}
+
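+/* Free the values allocated while reading the configuration. */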
+void
+config_finish(void)
+{
+ int i;
+
+ for (i = 0; i < CONFIG_SIZE; i++)
+ free(c[i].value);
+}
diff --git a/usr.sbin/pkg/config.h b/usr.sbin/pkg/config.h
new file mode 100644
index 0000000..e592f6d
--- /dev/null
+++ b/usr.sbin/pkg/config.h
@@ -0,0 +1,52 @@
+/*-
+ * Copyright (c) 2013 Baptiste Daroussin <bapt@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _PKG_CONFIG_H
+#define _PKG_CONFIG_H
+
+#define _LOCALBASE "/usr/local"
+
+typedef enum {
+ PACKAGESITE = 0,
+ ABI,
+ MIRROR_TYPE,
+ ASSUME_ALWAYS_YES,
+ CONFIG_SIZE
+} pkg_config_key;
+
+typedef enum {
+ PKG_CONFIG_STRING = 0,
+ PKG_CONFIG_BOOL
+} pkg_config_t;
+
+int config_init(void);
+void config_finish(void);
+int config_string(pkg_config_key, const char **);
+int config_bool(pkg_config_key, bool *);
+
+#endif
diff --git a/usr.sbin/pkg/pkg.c b/usr.sbin/pkg/pkg.c
index 32591f3..ccb1b91 100644
--- a/usr.sbin/pkg/pkg.c
+++ b/usr.sbin/pkg/pkg.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2012 Baptiste Daroussin <bapt@FreeBSD.org>
+ * Copyright (c) 2012-2013 Baptiste Daroussin <bapt@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -28,175 +28,23 @@
__FBSDID("$FreeBSD$");
#include <sys/param.h>
-#include <sys/elf_common.h>
-#include <sys/endian.h>
#include <sys/wait.h>
#include <archive.h>
#include <archive_entry.h>
-#include <ctype.h>
#include <err.h>
#include <errno.h>
-#include <fcntl.h>
#include <fetch.h>
-#include <gelf.h>
#include <paths.h>
+#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
-#include "elf_tables.h"
#include "dns_utils.h"
-
-#define _LOCALBASE "/usr/local"
-#define _PKGS_URL "http://pkg.FreeBSD.org"
-
-static const char *
-elf_corres_to_string(struct _elf_corres *m, int e)
-{
- int i;
-
- for (i = 0; m[i].string != NULL; i++)
- if (m[i].elf_nb == e)
- return (m[i].string);
-
- return ("unknown");
-}
-
-static int
-pkg_get_myabi(char *dest, size_t sz)
-{
- Elf *elf;
- Elf_Data *data;
- Elf_Note note;
- Elf_Scn *scn;
- char *src, *osname;
- const char *abi;
- GElf_Ehdr elfhdr;
- GElf_Shdr shdr;
- int fd, i, ret;
- uint32_t version;
-
- version = 0;
- ret = -1;
- scn = NULL;
- abi = NULL;
-
- if (elf_version(EV_CURRENT) == EV_NONE) {
- warnx("ELF library initialization failed: %s",
- elf_errmsg(-1));
- return (-1);
- }
-
- if ((fd = open("/bin/sh", O_RDONLY)) < 0) {
- warn("open()");
- return (-1);
- }
-
- if ((elf = elf_begin(fd, ELF_C_READ, NULL)) == NULL) {
- ret = -1;
- warnx("elf_begin() failed: %s.", elf_errmsg(-1));
- goto cleanup;
- }
-
- if (gelf_getehdr(elf, &elfhdr) == NULL) {
- ret = -1;
- warn("getehdr() failed: %s.", elf_errmsg(-1));
- goto cleanup;
- }
-
- while ((scn = elf_nextscn(elf, scn)) != NULL) {
- if (gelf_getshdr(scn, &shdr) != &shdr) {
- ret = -1;
- warn("getshdr() failed: %s.", elf_errmsg(-1));
- goto cleanup;
- }
-
- if (shdr.sh_type == SHT_NOTE)
- break;
- }
-
- if (scn == NULL) {
- ret = -1;
- warn("failed to get the note section");
- goto cleanup;
- }
-
- data = elf_getdata(scn, NULL);
- src = data->d_buf;
- for (;;) {
- memcpy(&note, src, sizeof(Elf_Note));
- src += sizeof(Elf_Note);
- if (note.n_type == NT_VERSION)
- break;
- src += note.n_namesz + note.n_descsz;
- }
- osname = src;
- src += note.n_namesz;
- if (elfhdr.e_ident[EI_DATA] == ELFDATA2MSB)
- version = be32dec(src);
- else
- version = le32dec(src);
-
- for (i = 0; osname[i] != '\0'; i++)
- osname[i] = (char)tolower(osname[i]);
-
- snprintf(dest, sz, "%s:%d:%s:%s",
- osname, version / 100000,
- elf_corres_to_string(mach_corres, (int)elfhdr.e_machine),
- elf_corres_to_string(wordsize_corres,
- (int)elfhdr.e_ident[EI_CLASS]));
-
- ret = 0;
-
- switch (elfhdr.e_machine) {
- case EM_ARM:
- snprintf(dest + strlen(dest), sz - strlen(dest),
- ":%s:%s:%s", elf_corres_to_string(endian_corres,
- (int)elfhdr.e_ident[EI_DATA]),
- (elfhdr.e_flags & EF_ARM_NEW_ABI) > 0 ?
- "eabi" : "oabi",
- (elfhdr.e_flags & EF_ARM_VFP_FLOAT) > 0 ?
- "softfp" : "vfp");
- break;
- case EM_MIPS:
- /*
- * this is taken from binutils sources:
- * include/elf/mips.h
- * mapping is figured out from binutils:
- * gas/config/tc-mips.c
- */
- switch (elfhdr.e_flags & EF_MIPS_ABI) {
- case E_MIPS_ABI_O32:
- abi = "o32";
- break;
- case E_MIPS_ABI_N32:
- abi = "n32";
- break;
- default:
- if (elfhdr.e_ident[EI_DATA] ==
- ELFCLASS32)
- abi = "o32";
- else if (elfhdr.e_ident[EI_DATA] ==
- ELFCLASS64)
- abi = "n64";
- break;
- }
- snprintf(dest + strlen(dest), sz - strlen(dest),
- ":%s:%s", elf_corres_to_string(endian_corres,
- (int)elfhdr.e_ident[EI_DATA]), abi);
- break;
- }
-
-cleanup:
- if (elf != NULL)
- elf_end(elf);
-
- close(fd);
- return (ret);
-}
+#include "config.h"
static int
extract_pkg_static(int fd, char *p, int sz)
@@ -291,8 +139,8 @@ bootstrap_pkg(void)
char zone[MAXHOSTNAMELEN + 13];
char url[MAXPATHLEN];
char conf[MAXPATHLEN];
- char abi[BUFSIZ];
char tmppkg[MAXPATHLEN];
+ const char *packagesite, *mirror_type;
char buf[10240];
char pkgstatic[MAXPATHLEN];
int fd, retry, ret, max_retry;
@@ -311,17 +159,15 @@ bootstrap_pkg(void)
printf("Bootstrapping pkg please wait\n");
- if (pkg_get_myabi(abi, MAXPATHLEN) != 0) {
- warnx("failed to determine the system ABI");
+ if (config_string(PACKAGESITE, &packagesite) != 0) {
+ warnx("No PACKAGESITE defined");
return (-1);
}
-
- if (getenv("PACKAGESITE") != NULL)
- snprintf(url, MAXPATHLEN, "%s/Latest/pkg.txz", getenv("PACKAGESITE"));
- else
- snprintf(url, MAXPATHLEN, "%s/%s/latest/Latest/pkg.txz",
- getenv("PACKAGEROOT") ? getenv("PACKAGEROOT") : _PKGS_URL,
- getenv("ABI") ? getenv("ABI") : abi);
+ if (config_string(MIRROR_TYPE, &mirror_type) != 0) {
+ warnx("No MIRROR_TYPE defined");
+ return (-1);
+ }
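+ /* config_init() already expanded "${ABI}" in PACKAGESITE. */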
+ snprintf(url, MAXPATHLEN, "%s/latest/Latest/pkg.txz", packagesite);
snprintf(tmppkg, MAXPATHLEN, "%s/pkg.txz.XXXXXX",
getenv("TMPDIR") ? getenv("TMPDIR") : _PATH_TMP);
@@ -336,10 +182,10 @@ bootstrap_pkg(void)
u = fetchParseURL(url);
while (remote == NULL) {
if (retry == max_retry) {
- if (strcmp(u->scheme, "file") != 0) {
+ if (strcmp(u->scheme, "file") != 0 &&
+ strcasecmp(mirror_type, "srv") == 0) {
snprintf(zone, sizeof(zone),
"_%s._tcp.%s", u->scheme, u->host);
- printf("%s\n", zone);
mirrors = dns_getsrvinfo(zone);
current = mirrors;
}
@@ -449,6 +295,7 @@ int
main(__unused int argc, char *argv[])
{
char pkgpath[MAXPATHLEN];
+ bool yes = false;
snprintf(pkgpath, MAXPATHLEN, "%s/sbin/pkg",
getenv("LOCALBASE") ? getenv("LOCALBASE") : _LOCALBASE);
@@ -467,7 +314,9 @@ main(__unused int argc, char *argv[])
* not tty. Check the environment to see if user has answer
* tucked in there already.
*/
- if (getenv("ASSUME_ALWAYS_YES") == NULL) {
+ config_init();
+ config_bool(ASSUME_ALWAYS_YES, &yes);
+ if (!yes) {
printf("%s", confirmation_message);
if (!isatty(fileno(stdin)))
exit(EXIT_FAILURE);
@@ -477,6 +326,7 @@ main(__unused int argc, char *argv[])
}
if (bootstrap_pkg() != 0)
exit(EXIT_FAILURE);
+ config_finish();
}
execv(pkgpath, argv);
diff --git a/usr.sbin/rtadvd/rtadvd.c b/usr.sbin/rtadvd/rtadvd.c
index 4d2a527..880b283 100644
--- a/usr.sbin/rtadvd/rtadvd.c
+++ b/usr.sbin/rtadvd/rtadvd.c
@@ -1008,6 +1008,8 @@ set_short_delay(struct ifinfo *ifi)
long delay; /* must not be greater than 1000000 */
struct timeval interval, now, min_delay, tm_tmp, *rest;
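+ /* No RA timer has been set up for this interface yet; nothing to delay. */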
+ if (ifi->ifi_ra_timer == NULL)
+ return;
/*
* Compute a random delay. If the computed value
* corresponds to a time later than the time the next