Diffstat (limited to 'sys/dev/ntb/ntb_hw')
-rw-r--r--	sys/dev/ntb/ntb_hw/ntb_hw.c	1356
-rw-r--r--	sys/dev/ntb/ntb_hw/ntb_hw.h	125
-rw-r--r--	sys/dev/ntb/ntb_hw/ntb_regs.h	3
3 files changed, 555 insertions, 929 deletions
diff --git a/sys/dev/ntb/ntb_hw/ntb_hw.c b/sys/dev/ntb/ntb_hw/ntb_hw.c
index c1381f0..dac3699 100644
--- a/sys/dev/ntb/ntb_hw/ntb_hw.c
+++ b/sys/dev/ntb/ntb_hw/ntb_hw.c
@@ -1,4 +1,5 @@
/*-
+ * Copyright (c) 2016 Alexander Motin <mav@FreeBSD.org>
* Copyright (C) 2013 Intel Corporation
* Copyright (C) 2015 EMC Corporation
* All rights reserved.
@@ -25,6 +26,16 @@
* SUCH DAMAGE.
*/
+/*
+ * The Non-Transparent Bridge (NTB) is a device that allows you to connect
+ * two or more systems using PCI-e links, providing remote memory access.
+ *
+ * This module contains a driver for NTB hardware in Intel Xeon/Atom CPUs.
+ *
+ * NOTE: Much of the code in this module is shared with Linux. Any patches may
+ * be picked up and redistributed in Linux with a dual GPL/BSD license.
+ */
+
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
@@ -33,6 +44,7 @@ __FBSDID("$FreeBSD$");
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
+#include <sys/interrupt.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
@@ -50,19 +62,7 @@ __FBSDID("$FreeBSD$");
#include <dev/pci/pcivar.h>
#include "ntb_regs.h"
-#include "ntb_hw.h"
-
-/*
- * The Non-Transparent Bridge (NTB) is a device on some Intel processors that
- * allows you to connect two systems using a PCI-e link.
- *
- * This module contains the hardware abstraction layer for the NTB. It allows
- * you to send and receive interrupts, map the memory windows and send and
- * receive messages in the scratch-pad registers.
- *
- * NOTE: Much of the code in this module is shared with Linux. Any patches may
- * be picked up and redistributed in Linux with a dual GPL/BSD license.
- */
+#include "../ntb.h"
#define MAX_MSIX_INTERRUPTS MAX(XEON_DB_COUNT, ATOM_DB_COUNT)
@@ -70,8 +70,6 @@ __FBSDID("$FreeBSD$");
#define ATOM_LINK_RECOVERY_TIME 500 /* ms */
#define BAR_HIGH_MASK (~((1ull << 12) - 1))
-#define DEVICE2SOFTC(dev) ((struct ntb_softc *) device_get_softc(dev))
-
#define NTB_MSIX_VER_GUARD 0xaabbccdd
#define NTB_MSIX_RECEIVED 0xe0f0e0f0
@@ -122,8 +120,8 @@ enum {
};
/* Device features and workarounds */
-#define HAS_FEATURE(feature) \
- ((ntb->features & (feature)) != 0)
+#define HAS_FEATURE(ntb, feature) \
+ (((ntb)->features & (feature)) != 0)
struct ntb_hw_info {
uint32_t device_id;
@@ -202,6 +200,9 @@ struct ntb_msix_data {
};
struct ntb_softc {
+ /* ntb.c context. Do not move! Must go first! */
+ void *ntb_store;
+
device_t device;
enum ntb_device_type type;
uint32_t features;
@@ -220,13 +221,7 @@ struct ntb_softc {
struct callout heartbeat_timer;
struct callout lr_timer;
- void *ntb_ctx;
- const struct ntb_ctx_ops *ctx_ops;
struct ntb_vec *msix_vec;
-#define CTX_LOCK(sc) mtx_lock(&(sc)->ctx_lock)
-#define CTX_UNLOCK(sc) mtx_unlock(&(sc)->ctx_lock)
-#define CTX_ASSERT(sc,f) mtx_assert(&(sc)->ctx_lock, (f))
- struct mtx ctx_lock;
uint32_t ppd;
enum ntb_conn_type conn_type;
@@ -258,6 +253,7 @@ struct ntb_softc {
uint64_t db_valid_mask;
uint64_t db_link_mask;
uint64_t db_mask;
+	uint64_t fake_db_bell; /* NTB_SB01BASE_LOCKUP */
int last_ts; /* ticks @ last irq */
@@ -287,61 +283,74 @@ bus_space_write_8(bus_space_tag_t tag, bus_space_handle_t handle,
}
#endif
-#define ntb_bar_read(SIZE, bar, offset) \
+#define intel_ntb_bar_read(SIZE, bar, offset) \
bus_space_read_ ## SIZE (ntb->bar_info[(bar)].pci_bus_tag, \
ntb->bar_info[(bar)].pci_bus_handle, (offset))
-#define ntb_bar_write(SIZE, bar, offset, val) \
+#define intel_ntb_bar_write(SIZE, bar, offset, val) \
bus_space_write_ ## SIZE (ntb->bar_info[(bar)].pci_bus_tag, \
ntb->bar_info[(bar)].pci_bus_handle, (offset), (val))
-#define ntb_reg_read(SIZE, offset) ntb_bar_read(SIZE, NTB_CONFIG_BAR, offset)
-#define ntb_reg_write(SIZE, offset, val) \
- ntb_bar_write(SIZE, NTB_CONFIG_BAR, offset, val)
-#define ntb_mw_read(SIZE, offset) \
- ntb_bar_read(SIZE, ntb_mw_to_bar(ntb, ntb->b2b_mw_idx), offset)
-#define ntb_mw_write(SIZE, offset, val) \
- ntb_bar_write(SIZE, ntb_mw_to_bar(ntb, ntb->b2b_mw_idx), \
+#define intel_ntb_reg_read(SIZE, offset) \
+ intel_ntb_bar_read(SIZE, NTB_CONFIG_BAR, offset)
+#define intel_ntb_reg_write(SIZE, offset, val) \
+ intel_ntb_bar_write(SIZE, NTB_CONFIG_BAR, offset, val)
+#define intel_ntb_mw_read(SIZE, offset) \
+ intel_ntb_bar_read(SIZE, intel_ntb_mw_to_bar(ntb, ntb->b2b_mw_idx), \
+ offset)
+#define intel_ntb_mw_write(SIZE, offset, val) \
+ intel_ntb_bar_write(SIZE, intel_ntb_mw_to_bar(ntb, ntb->b2b_mw_idx), \
offset, val)
-static int ntb_probe(device_t device);
-static int ntb_attach(device_t device);
-static int ntb_detach(device_t device);
-static unsigned ntb_user_mw_to_idx(struct ntb_softc *, unsigned uidx);
-static inline enum ntb_bar ntb_mw_to_bar(struct ntb_softc *, unsigned mw);
+static int intel_ntb_probe(device_t device);
+static int intel_ntb_attach(device_t device);
+static int intel_ntb_detach(device_t device);
+static uint64_t intel_ntb_db_valid_mask(device_t dev);
+static void intel_ntb_spad_clear(device_t dev);
+static uint64_t intel_ntb_db_vector_mask(device_t dev, uint32_t vector);
+static bool intel_ntb_link_is_up(device_t dev, enum ntb_speed *speed,
+ enum ntb_width *width);
+static int intel_ntb_link_enable(device_t dev, enum ntb_speed speed,
+ enum ntb_width width);
+static int intel_ntb_link_disable(device_t dev);
+static int intel_ntb_spad_read(device_t dev, unsigned int idx, uint32_t *val);
+static int intel_ntb_peer_spad_write(device_t dev, unsigned int idx, uint32_t val);
+
+static unsigned intel_ntb_user_mw_to_idx(struct ntb_softc *, unsigned uidx);
+static inline enum ntb_bar intel_ntb_mw_to_bar(struct ntb_softc *, unsigned mw);
static inline bool bar_is_64bit(struct ntb_softc *, enum ntb_bar);
static inline void bar_get_xlat_params(struct ntb_softc *, enum ntb_bar,
uint32_t *base, uint32_t *xlat, uint32_t *lmt);
-static int ntb_map_pci_bars(struct ntb_softc *ntb);
-static int ntb_mw_set_wc_internal(struct ntb_softc *, unsigned idx,
+static int intel_ntb_map_pci_bars(struct ntb_softc *ntb);
+static int intel_ntb_mw_set_wc_internal(struct ntb_softc *, unsigned idx,
vm_memattr_t);
static void print_map_success(struct ntb_softc *, struct ntb_pci_bar_info *,
const char *);
static int map_mmr_bar(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar);
static int map_memory_window_bar(struct ntb_softc *ntb,
struct ntb_pci_bar_info *bar);
-static void ntb_unmap_pci_bar(struct ntb_softc *ntb);
-static int ntb_remap_msix(device_t, uint32_t desired, uint32_t avail);
-static int ntb_init_isr(struct ntb_softc *ntb);
-static int ntb_setup_legacy_interrupt(struct ntb_softc *ntb);
-static int ntb_setup_msix(struct ntb_softc *ntb, uint32_t num_vectors);
-static void ntb_teardown_interrupts(struct ntb_softc *ntb);
-static inline uint64_t ntb_vec_mask(struct ntb_softc *, uint64_t db_vector);
-static void ntb_interrupt(struct ntb_softc *, uint32_t vec);
+static void intel_ntb_unmap_pci_bar(struct ntb_softc *ntb);
+static int intel_ntb_remap_msix(device_t, uint32_t desired, uint32_t avail);
+static int intel_ntb_init_isr(struct ntb_softc *ntb);
+static int intel_ntb_setup_legacy_interrupt(struct ntb_softc *ntb);
+static int intel_ntb_setup_msix(struct ntb_softc *ntb, uint32_t num_vectors);
+static void intel_ntb_teardown_interrupts(struct ntb_softc *ntb);
+static inline uint64_t intel_ntb_vec_mask(struct ntb_softc *, uint64_t db_vector);
+static void intel_ntb_interrupt(struct ntb_softc *, uint32_t vec);
static void ndev_vec_isr(void *arg);
static void ndev_irq_isr(void *arg);
static inline uint64_t db_ioread(struct ntb_softc *, uint64_t regoff);
static inline void db_iowrite(struct ntb_softc *, uint64_t regoff, uint64_t);
static inline void db_iowrite_raw(struct ntb_softc *, uint64_t regoff, uint64_t);
-static int ntb_create_msix_vec(struct ntb_softc *ntb, uint32_t num_vectors);
-static void ntb_free_msix_vec(struct ntb_softc *ntb);
-static void ntb_get_msix_info(struct ntb_softc *ntb);
-static void ntb_exchange_msix(void *);
-static struct ntb_hw_info *ntb_get_device_info(uint32_t device_id);
-static void ntb_detect_max_mw(struct ntb_softc *ntb);
-static int ntb_detect_xeon(struct ntb_softc *ntb);
-static int ntb_detect_atom(struct ntb_softc *ntb);
-static int ntb_xeon_init_dev(struct ntb_softc *ntb);
-static int ntb_atom_init_dev(struct ntb_softc *ntb);
-static void ntb_teardown_xeon(struct ntb_softc *ntb);
+static int intel_ntb_create_msix_vec(struct ntb_softc *ntb, uint32_t num_vectors);
+static void intel_ntb_free_msix_vec(struct ntb_softc *ntb);
+static void intel_ntb_get_msix_info(struct ntb_softc *ntb);
+static void intel_ntb_exchange_msix(void *);
+static struct ntb_hw_info *intel_ntb_get_device_info(uint32_t device_id);
+static void intel_ntb_detect_max_mw(struct ntb_softc *ntb);
+static int intel_ntb_detect_xeon(struct ntb_softc *ntb);
+static int intel_ntb_detect_atom(struct ntb_softc *ntb);
+static int intel_ntb_xeon_init_dev(struct ntb_softc *ntb);
+static int intel_ntb_atom_init_dev(struct ntb_softc *ntb);
+static void intel_ntb_teardown_xeon(struct ntb_softc *ntb);
static void configure_atom_secondary_side_bars(struct ntb_softc *ntb);
static void xeon_reset_sbar_size(struct ntb_softc *, enum ntb_bar idx,
enum ntb_bar regbar);
@@ -351,18 +360,16 @@ static void xeon_set_pbar_xlat(struct ntb_softc *, uint64_t base_addr,
enum ntb_bar idx);
static int xeon_setup_b2b_mw(struct ntb_softc *,
const struct ntb_b2b_addr *addr, const struct ntb_b2b_addr *peer_addr);
-static int xeon_setup_msix_bar(struct ntb_softc *);
static inline bool link_is_up(struct ntb_softc *ntb);
static inline bool _xeon_link_is_up(struct ntb_softc *ntb);
static inline bool atom_link_is_err(struct ntb_softc *ntb);
-static inline enum ntb_speed ntb_link_sta_speed(struct ntb_softc *);
-static inline enum ntb_width ntb_link_sta_width(struct ntb_softc *);
+static inline enum ntb_speed intel_ntb_link_sta_speed(struct ntb_softc *);
+static inline enum ntb_width intel_ntb_link_sta_width(struct ntb_softc *);
static void atom_link_hb(void *arg);
-static void ntb_db_event(struct ntb_softc *ntb, uint32_t vec);
static void recover_atom_link(void *arg);
-static bool ntb_poll_link(struct ntb_softc *ntb);
+static bool intel_ntb_poll_link(struct ntb_softc *ntb);
static void save_bar_parameters(struct ntb_pci_bar_info *bar);
-static void ntb_sysctl_init(struct ntb_softc *);
+static void intel_ntb_sysctl_init(struct ntb_softc *);
static int sysctl_handle_features(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_link_admin(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_link_status_human(SYSCTL_HANDLER_ARGS);
@@ -372,7 +379,7 @@ static int sysctl_handle_register(SYSCTL_HANDLER_ARGS);
static unsigned g_ntb_hw_debug_level;
SYSCTL_UINT(_hw_ntb, OID_AUTO, debug_level, CTLFLAG_RWTUN,
&g_ntb_hw_debug_level, 0, "ntb_hw log level -- higher is more verbose");
-#define ntb_printf(lvl, ...) do { \
+#define intel_ntb_printf(lvl, ...) do { \
if ((lvl) <= g_ntb_hw_debug_level) { \
device_printf(ntb->device, __VA_ARGS__); \
} \
@@ -395,7 +402,7 @@ SYSCTL_UINT(_hw_ntb, OID_AUTO, default_mw_pat, CTLFLAG_RDTUN,
"UC-: " __XSTRING(_NTB_PAT_UCM));
static inline vm_memattr_t
-ntb_pat_flags(void)
+intel_ntb_pat_flags(void)
{
switch (g_ntb_mw_pat) {
@@ -421,7 +428,7 @@ ntb_pat_flags(void)
* anywhere better yet.
*/
static inline const char *
-ntb_vm_memattr_to_str(vm_memattr_t pat)
+intel_ntb_vm_memattr_to_str(vm_memattr_t pat)
{
switch (pat) {
@@ -442,7 +449,7 @@ ntb_vm_memattr_to_str(vm_memattr_t pat)
}
}
-static int g_ntb_msix_idx = 0;
+static int g_ntb_msix_idx = 1;
SYSCTL_INT(_hw_ntb, OID_AUTO, msix_mw_idx, CTLFLAG_RDTUN, &g_ntb_msix_idx,
0, "Use this memory window to access the peer MSIX message complex on "
"certain Xeon-based NTB systems, as a workaround for a hardware errata. "
@@ -457,6 +464,18 @@ SYSCTL_INT(_hw_ntb, OID_AUTO, b2b_mw_idx, CTLFLAG_RDTUN, &g_ntb_mw_idx,
"available memory window. Both sides of the NTB MUST set the same "
"value here! (Applies on Xeon platforms with SDOORBELL_LOCKUP errata.)");
+/* Hardware owns the low 16 bits of features. */
+#define NTB_BAR_SIZE_4K (1 << 0)
+#define NTB_SDOORBELL_LOCKUP (1 << 1)
+#define NTB_SB01BASE_LOCKUP (1 << 2)
+#define NTB_B2BDOORBELL_BIT14 (1 << 3)
+/* Software/configuration owns the top 16 bits. */
+#define NTB_SPLIT_BAR (1ull << 16)
+
+#define NTB_FEATURES_STR \
+ "\20\21SPLIT_BAR4\04B2B_DOORBELL_BIT14\03SB01BASE_LOCKUP" \
+ "\02SDOORBELL_LOCKUP\01BAR_SIZE_4K"
+
static struct ntb_hw_info pci_ids[] = {
/* XXX: PS/SS IDs left out until they are supported. */
{ 0x0C4E8086, "BWD Atom Processor S1200 Non-Transparent Bridge B2B",
@@ -597,35 +616,15 @@ SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, dsd_bar5_addr32, CTLFLAG_RDTUN,
*/
MALLOC_DEFINE(M_NTB, "ntb_hw", "ntb_hw driver memory allocations");
-static device_method_t ntb_pci_methods[] = {
- /* Device interface */
- DEVMETHOD(device_probe, ntb_probe),
- DEVMETHOD(device_attach, ntb_attach),
- DEVMETHOD(device_detach, ntb_detach),
- DEVMETHOD_END
-};
-
-static driver_t ntb_pci_driver = {
- "ntb_hw",
- ntb_pci_methods,
- sizeof(struct ntb_softc),
-};
-
-static devclass_t ntb_devclass;
-DRIVER_MODULE(ntb_hw, pci, ntb_pci_driver, ntb_devclass, NULL, NULL);
-MODULE_VERSION(ntb_hw, 1);
-
-SYSCTL_NODE(_hw, OID_AUTO, ntb, CTLFLAG_RW, 0, "NTB sysctls");
-
/*
* OS <-> Driver linkage functions
*/
static int
-ntb_probe(device_t device)
+intel_ntb_probe(device_t device)
{
struct ntb_hw_info *p;
- p = ntb_get_device_info(pci_get_devid(device));
+ p = intel_ntb_get_device_info(pci_get_devid(device));
if (p == NULL)
return (ENXIO);
@@ -634,14 +633,14 @@ ntb_probe(device_t device)
}
static int
-ntb_attach(device_t device)
+intel_ntb_attach(device_t device)
{
struct ntb_softc *ntb;
struct ntb_hw_info *p;
int error;
- ntb = DEVICE2SOFTC(device);
- p = ntb_get_device_info(pci_get_devid(device));
+ ntb = device_get_softc(device);
+ p = intel_ntb_get_device_info(pci_get_devid(device));
ntb->device = device;
ntb->type = p->type;
@@ -654,47 +653,52 @@ ntb_attach(device_t device)
callout_init(&ntb->lr_timer, 1);
callout_init(&ntb->peer_msix_work, 1);
mtx_init(&ntb->db_mask_lock, "ntb hw bits", NULL, MTX_SPIN);
- mtx_init(&ntb->ctx_lock, "ntb ctx", NULL, MTX_DEF);
if (ntb->type == NTB_ATOM)
- error = ntb_detect_atom(ntb);
+ error = intel_ntb_detect_atom(ntb);
else
- error = ntb_detect_xeon(ntb);
+ error = intel_ntb_detect_xeon(ntb);
if (error != 0)
goto out;
- ntb_detect_max_mw(ntb);
+ intel_ntb_detect_max_mw(ntb);
pci_enable_busmaster(ntb->device);
- error = ntb_map_pci_bars(ntb);
+ error = intel_ntb_map_pci_bars(ntb);
if (error != 0)
goto out;
if (ntb->type == NTB_ATOM)
- error = ntb_atom_init_dev(ntb);
+ error = intel_ntb_atom_init_dev(ntb);
else
- error = ntb_xeon_init_dev(ntb);
+ error = intel_ntb_xeon_init_dev(ntb);
if (error != 0)
goto out;
- ntb_spad_clear(ntb);
+ intel_ntb_spad_clear(device);
+
+ intel_ntb_poll_link(ntb);
- ntb_poll_link(ntb);
+ intel_ntb_sysctl_init(ntb);
- ntb_sysctl_init(ntb);
+ /* Attach children to this controller */
+ error = ntb_register_device(device);
out:
if (error != 0)
- ntb_detach(device);
+ intel_ntb_detach(device);
return (error);
}
static int
-ntb_detach(device_t device)
+intel_ntb_detach(device_t device)
{
struct ntb_softc *ntb;
- ntb = DEVICE2SOFTC(device);
+ ntb = device_get_softc(device);
+
+ /* Detach & delete all children */
+ ntb_unregister_device(device);
if (ntb->self_reg != NULL) {
DB_MASK_LOCK(ntb);
@@ -706,13 +710,12 @@ ntb_detach(device_t device)
callout_drain(&ntb->peer_msix_work);
pci_disable_busmaster(ntb->device);
if (ntb->type == NTB_XEON)
- ntb_teardown_xeon(ntb);
- ntb_teardown_interrupts(ntb);
+ intel_ntb_teardown_xeon(ntb);
+ intel_ntb_teardown_interrupts(ntb);
mtx_destroy(&ntb->db_mask_lock);
- mtx_destroy(&ntb->ctx_lock);
- ntb_unmap_pci_bar(ntb);
+ intel_ntb_unmap_pci_bar(ntb);
return (0);
}
@@ -721,7 +724,7 @@ ntb_detach(device_t device)
* Driver internal routines
*/
static inline enum ntb_bar
-ntb_mw_to_bar(struct ntb_softc *ntb, unsigned mw)
+intel_ntb_mw_to_bar(struct ntb_softc *ntb, unsigned mw)
{
KASSERT(mw < ntb->mw_count,
@@ -736,7 +739,7 @@ bar_is_64bit(struct ntb_softc *ntb, enum ntb_bar bar)
{
/* XXX This assertion could be stronger. */
KASSERT(bar < NTB_MAX_BARS, ("bogus bar"));
- return (bar < NTB_B2B_BAR_2 || !HAS_FEATURE(NTB_SPLIT_BAR));
+ return (bar < NTB_B2B_BAR_2 || !HAS_FEATURE(ntb, NTB_SPLIT_BAR));
}
static inline void
@@ -777,7 +780,7 @@ bar_get_xlat_params(struct ntb_softc *ntb, enum ntb_bar bar, uint32_t *base,
}
static int
-ntb_map_pci_bars(struct ntb_softc *ntb)
+intel_ntb_map_pci_bars(struct ntb_softc *ntb)
{
int rc;
@@ -802,7 +805,7 @@ ntb_map_pci_bars(struct ntb_softc *ntb)
ntb->bar_info[NTB_B2B_BAR_2].ssz_off = XEON_SBAR4SZ_OFFSET;
ntb->bar_info[NTB_B2B_BAR_2].pbarxlat_off = XEON_PBAR4XLAT_OFFSET;
- if (!HAS_FEATURE(NTB_SPLIT_BAR))
+ if (!HAS_FEATURE(ntb, NTB_SPLIT_BAR))
goto out;
ntb->bar_info[NTB_B2B_BAR_3].pci_resource_id = PCIR_BAR(5);
@@ -876,7 +879,7 @@ map_memory_window_bar(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar)
* but the PCI driver does not honor the size in this call, so we have
* to modify it after the fact.
*/
- if (HAS_FEATURE(NTB_BAR_SIZE_4K)) {
+ if (HAS_FEATURE(ntb, NTB_BAR_SIZE_4K)) {
if (bar->pci_resource_id == PCIR_BAR(2))
bar_size_bits = pci_read_config(ntb->device,
XEON_PBAR23SZ_OFFSET, 1);
@@ -903,7 +906,7 @@ map_memory_window_bar(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar)
* Optionally, mark MW BARs as anything other than UC to improve
* performance.
*/
- mapmode = ntb_pat_flags();
+ mapmode = intel_ntb_pat_flags();
if (mapmode == bar->map_mode)
return (0);
@@ -916,7 +919,7 @@ map_memory_window_bar(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar)
PCI_RID2BAR(bar->pci_resource_id), bar->vbase,
(char *)bar->vbase + bar->size - 1,
(void *)bar->pbase, (void *)(bar->pbase + bar->size - 1),
- ntb_vm_memattr_to_str(mapmode));
+ intel_ntb_vm_memattr_to_str(mapmode));
} else
device_printf(ntb->device,
"Unable to mark BAR%d v:[%p-%p] p:[%p-%p] as "
@@ -924,13 +927,13 @@ map_memory_window_bar(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar)
PCI_RID2BAR(bar->pci_resource_id), bar->vbase,
(char *)bar->vbase + bar->size - 1,
(void *)bar->pbase, (void *)(bar->pbase + bar->size - 1),
- ntb_vm_memattr_to_str(mapmode), rc);
+ intel_ntb_vm_memattr_to_str(mapmode), rc);
/* Proceed anyway */
return (0);
}
static void
-ntb_unmap_pci_bar(struct ntb_softc *ntb)
+intel_ntb_unmap_pci_bar(struct ntb_softc *ntb)
{
struct ntb_pci_bar_info *current_bar;
int i;
@@ -945,7 +948,7 @@ ntb_unmap_pci_bar(struct ntb_softc *ntb)
}
static int
-ntb_setup_msix(struct ntb_softc *ntb, uint32_t num_vectors)
+intel_ntb_setup_msix(struct ntb_softc *ntb, uint32_t num_vectors)
{
uint32_t i;
int rc;
@@ -998,7 +1001,7 @@ SYSCTL_INT(_hw_ntb, OID_AUTO, prefer_intx_to_remap, CTLFLAG_RDTUN,
* round-robin fashion.
*/
static int
-ntb_remap_msix(device_t dev, uint32_t desired, uint32_t avail)
+intel_ntb_remap_msix(device_t dev, uint32_t desired, uint32_t avail)
{
u_int *vectors;
uint32_t i;
@@ -1018,7 +1021,7 @@ ntb_remap_msix(device_t dev, uint32_t desired, uint32_t avail)
}
static int
-ntb_init_isr(struct ntb_softc *ntb)
+intel_ntb_init_isr(struct ntb_softc *ntb)
{
uint32_t desired_vectors, num_vectors;
int rc;
@@ -1044,7 +1047,7 @@ ntb_init_isr(struct ntb_softc *ntb)
num_vectors--;
if (rc == 0 && num_vectors < desired_vectors) {
- rc = ntb_remap_msix(ntb->device, desired_vectors,
+ rc = intel_ntb_remap_msix(ntb->device, desired_vectors,
num_vectors);
if (rc == 0)
num_vectors = desired_vectors;
@@ -1057,7 +1060,7 @@ ntb_init_isr(struct ntb_softc *ntb)
num_vectors = 1;
if (ntb->type == NTB_XEON && num_vectors < ntb->db_vec_count) {
- if (HAS_FEATURE(NTB_SB01BASE_LOCKUP)) {
+ if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) {
device_printf(ntb->device,
"Errata workaround does not support MSI or INTX\n");
return (EINVAL);
@@ -1065,32 +1068,30 @@ ntb_init_isr(struct ntb_softc *ntb)
ntb->db_vec_count = 1;
ntb->db_vec_shift = XEON_DB_TOTAL_SHIFT;
- rc = ntb_setup_legacy_interrupt(ntb);
+ rc = intel_ntb_setup_legacy_interrupt(ntb);
} else {
if (num_vectors - 1 != XEON_NONLINK_DB_MSIX_BITS &&
- HAS_FEATURE(NTB_SB01BASE_LOCKUP)) {
+ HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) {
device_printf(ntb->device,
"Errata workaround expects %d doorbell bits\n",
XEON_NONLINK_DB_MSIX_BITS);
return (EINVAL);
}
- ntb_create_msix_vec(ntb, num_vectors);
- rc = ntb_setup_msix(ntb, num_vectors);
- if (rc == 0 && HAS_FEATURE(NTB_SB01BASE_LOCKUP))
- ntb_get_msix_info(ntb);
+ intel_ntb_create_msix_vec(ntb, num_vectors);
+ rc = intel_ntb_setup_msix(ntb, num_vectors);
}
if (rc != 0) {
device_printf(ntb->device,
"Error allocating interrupts: %d\n", rc);
- ntb_free_msix_vec(ntb);
+ intel_ntb_free_msix_vec(ntb);
}
return (rc);
}
static int
-ntb_setup_legacy_interrupt(struct ntb_softc *ntb)
+intel_ntb_setup_legacy_interrupt(struct ntb_softc *ntb)
{
int rc;
@@ -1117,7 +1118,7 @@ ntb_setup_legacy_interrupt(struct ntb_softc *ntb)
}
static void
-ntb_teardown_interrupts(struct ntb_softc *ntb)
+intel_ntb_teardown_interrupts(struct ntb_softc *ntb)
{
struct ntb_int_info *current_int;
int i;
@@ -1133,7 +1134,7 @@ ntb_teardown_interrupts(struct ntb_softc *ntb)
rman_get_rid(current_int->res), current_int->res);
}
- ntb_free_msix_vec(ntb);
+ intel_ntb_free_msix_vec(ntb);
pci_release_msi(ntb->device);
}
@@ -1146,11 +1147,11 @@ db_ioread(struct ntb_softc *ntb, uint64_t regoff)
{
if (ntb->type == NTB_ATOM)
- return (ntb_reg_read(8, regoff));
+ return (intel_ntb_reg_read(8, regoff));
KASSERT(ntb->type == NTB_XEON, ("bad ntb type"));
- return (ntb_reg_read(2, regoff));
+ return (intel_ntb_reg_read(2, regoff));
}
static inline void
@@ -1172,89 +1173,78 @@ db_iowrite_raw(struct ntb_softc *ntb, uint64_t regoff, uint64_t val)
{
if (ntb->type == NTB_ATOM) {
- ntb_reg_write(8, regoff, val);
+ intel_ntb_reg_write(8, regoff, val);
return;
}
KASSERT(ntb->type == NTB_XEON, ("bad ntb type"));
- ntb_reg_write(2, regoff, (uint16_t)val);
+ intel_ntb_reg_write(2, regoff, (uint16_t)val);
}
-void
-ntb_db_set_mask(struct ntb_softc *ntb, uint64_t bits)
+static void
+intel_ntb_db_set_mask(device_t dev, uint64_t bits)
{
-
- if (HAS_FEATURE(NTB_SB01BASE_LOCKUP))
- return;
+ struct ntb_softc *ntb = device_get_softc(dev);
DB_MASK_LOCK(ntb);
ntb->db_mask |= bits;
- db_iowrite(ntb, ntb->self_reg->db_mask, ntb->db_mask);
+ if (!HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP))
+ db_iowrite(ntb, ntb->self_reg->db_mask, ntb->db_mask);
DB_MASK_UNLOCK(ntb);
}
-void
-ntb_db_clear_mask(struct ntb_softc *ntb, uint64_t bits)
+static void
+intel_ntb_db_clear_mask(device_t dev, uint64_t bits)
{
+ struct ntb_softc *ntb = device_get_softc(dev);
+ uint64_t ibits;
+ int i;
KASSERT((bits & ~ntb->db_valid_mask) == 0,
("%s: Invalid bits 0x%jx (valid: 0x%jx)", __func__,
(uintmax_t)(bits & ~ntb->db_valid_mask),
(uintmax_t)ntb->db_valid_mask));
- if (HAS_FEATURE(NTB_SB01BASE_LOCKUP))
- return;
-
DB_MASK_LOCK(ntb);
+ ibits = ntb->fake_db_bell & ntb->db_mask & bits;
ntb->db_mask &= ~bits;
- db_iowrite(ntb, ntb->self_reg->db_mask, ntb->db_mask);
+ if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) {
+ /* Simulate fake interrupts if unmasked DB bits are set. */
+ for (i = 0; i < XEON_NONLINK_DB_MSIX_BITS; i++) {
+ if ((ibits & intel_ntb_db_vector_mask(dev, i)) != 0)
+ swi_sched(ntb->int_info[i].tag, 0);
+ }
+ } else {
+ db_iowrite(ntb, ntb->self_reg->db_mask, ntb->db_mask);
+ }
DB_MASK_UNLOCK(ntb);
}
-uint64_t
-ntb_db_read(struct ntb_softc *ntb)
+static uint64_t
+intel_ntb_db_read(device_t dev)
{
+ struct ntb_softc *ntb = device_get_softc(dev);
- if (HAS_FEATURE(NTB_SB01BASE_LOCKUP)) {
- uint64_t res;
- unsigned i;
-
- res = 0;
- for (i = 0; i < XEON_NONLINK_DB_MSIX_BITS; i++) {
- if (ntb->msix_vec[i].masked != 0)
- res |= ntb_db_vector_mask(ntb, i);
- }
- return (res);
- }
+ if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP))
+ return (ntb->fake_db_bell);
return (db_ioread(ntb, ntb->self_reg->db_bell));
}
-void
-ntb_db_clear(struct ntb_softc *ntb, uint64_t bits)
+static void
+intel_ntb_db_clear(device_t dev, uint64_t bits)
{
+ struct ntb_softc *ntb = device_get_softc(dev);
KASSERT((bits & ~ntb->db_valid_mask) == 0,
("%s: Invalid bits 0x%jx (valid: 0x%jx)", __func__,
(uintmax_t)(bits & ~ntb->db_valid_mask),
(uintmax_t)ntb->db_valid_mask));
- if (HAS_FEATURE(NTB_SB01BASE_LOCKUP)) {
- unsigned i;
-
- for (i = 0; i < XEON_NONLINK_DB_MSIX_BITS; i++) {
- if ((bits & ntb_db_vector_mask(ntb, i)) != 0) {
- DB_MASK_LOCK(ntb);
- if (ntb->msix_vec[i].masked != 0) {
- /* XXX These need a public API. */
-#if 0
- pci_unmask_msix(ntb->device, i);
-#endif
- ntb->msix_vec[i].masked = 0;
- }
- DB_MASK_UNLOCK(ntb);
- }
- }
+ if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) {
+ DB_MASK_LOCK(ntb);
+ ntb->fake_db_bell &= ~bits;
+ DB_MASK_UNLOCK(ntb);
return;
}
@@ -1262,43 +1252,59 @@ ntb_db_clear(struct ntb_softc *ntb, uint64_t bits)
}
static inline uint64_t
-ntb_vec_mask(struct ntb_softc *ntb, uint64_t db_vector)
+intel_ntb_vec_mask(struct ntb_softc *ntb, uint64_t db_vector)
{
uint64_t shift, mask;
+ if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) {
+ /*
+	 * Remap vectors in a custom way so that at least the first
+	 * three doorbells do not generate stray events.
+	 * This breaks Linux compatibility (if one existed)
+	 * when more than one DB is used (not by if_ntb).
+ */
+ if (db_vector < XEON_NONLINK_DB_MSIX_BITS - 1)
+ return (1 << db_vector);
+ if (db_vector == XEON_NONLINK_DB_MSIX_BITS - 1)
+ return (0x7ffc);
+ }
+
shift = ntb->db_vec_shift;
mask = (1ull << shift) - 1;
return (mask << (shift * db_vector));
}
static void
-ntb_interrupt(struct ntb_softc *ntb, uint32_t vec)
+intel_ntb_interrupt(struct ntb_softc *ntb, uint32_t vec)
{
uint64_t vec_mask;
ntb->last_ts = ticks;
- vec_mask = ntb_vec_mask(ntb, vec);
+ vec_mask = intel_ntb_vec_mask(ntb, vec);
if ((vec_mask & ntb->db_link_mask) != 0) {
- if (ntb_poll_link(ntb))
- ntb_link_event(ntb);
+ if (intel_ntb_poll_link(ntb))
+ ntb_link_event(ntb->device);
}
- if (HAS_FEATURE(NTB_SB01BASE_LOCKUP) &&
+ if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP) &&
(vec_mask & ntb->db_link_mask) == 0) {
DB_MASK_LOCK(ntb);
- if (ntb->msix_vec[vec].masked == 0) {
- /* XXX These need a public API. */
-#if 0
- pci_mask_msix(ntb->device, vec);
-#endif
- ntb->msix_vec[vec].masked = 1;
- }
+
+ /* Do not report same DB events again if not cleared yet. */
+ vec_mask &= ~ntb->fake_db_bell;
+
+ /* Update our internal doorbell register. */
+ ntb->fake_db_bell |= vec_mask;
+
+ /* Do not report masked DB events. */
+ vec_mask &= ~ntb->db_mask;
+
DB_MASK_UNLOCK(ntb);
}
if ((vec_mask & ntb->db_valid_mask) != 0)
- ntb_db_event(ntb, vec);
+ ntb_db_event(ntb->device, vec);
}
static void
@@ -1306,18 +1312,18 @@ ndev_vec_isr(void *arg)
{
struct ntb_vec *nvec = arg;
- ntb_interrupt(nvec->ntb, nvec->num);
+ intel_ntb_interrupt(nvec->ntb, nvec->num);
}
static void
ndev_irq_isr(void *arg)
{
/* If we couldn't set up MSI-X, we only have the one vector. */
- ntb_interrupt(arg, 0);
+ intel_ntb_interrupt(arg, 0);
}
static int
-ntb_create_msix_vec(struct ntb_softc *ntb, uint32_t num_vectors)
+intel_ntb_create_msix_vec(struct ntb_softc *ntb, uint32_t num_vectors)
{
uint32_t i;
@@ -1332,7 +1338,7 @@ ntb_create_msix_vec(struct ntb_softc *ntb, uint32_t num_vectors)
}
static void
-ntb_free_msix_vec(struct ntb_softc *ntb)
+intel_ntb_free_msix_vec(struct ntb_softc *ntb)
{
if (ntb->msix_vec == NULL)
@@ -1343,7 +1349,7 @@ ntb_free_msix_vec(struct ntb_softc *ntb)
}
static void
-ntb_get_msix_info(struct ntb_softc *ntb)
+intel_ntb_get_msix_info(struct ntb_softc *ntb)
{
struct pci_devinfo *dinfo;
struct pcicfg_msix *msix;
@@ -1352,8 +1358,6 @@ ntb_get_msix_info(struct ntb_softc *ntb)
dinfo = device_get_ivars(ntb->device);
msix = &dinfo->cfg.msix;
- laddr = data = 0;
-
CTASSERT(XEON_NONLINK_DB_MSIX_BITS == nitems(ntb->msix_data));
for (i = 0; i < XEON_NONLINK_DB_MSIX_BITS; i++) {
@@ -1361,7 +1365,7 @@ ntb_get_msix_info(struct ntb_softc *ntb)
laddr = bus_read_4(msix->msix_table_res, offset +
PCI_MSIX_ENTRY_LOWER_ADDR);
- ntb_printf(2, "local MSIX addr(%u): 0x%x\n", i, laddr);
+ intel_ntb_printf(2, "local MSIX addr(%u): 0x%x\n", i, laddr);
KASSERT((laddr & MSI_INTEL_ADDR_BASE) == MSI_INTEL_ADDR_BASE,
("local MSIX addr 0x%x not in MSI base 0x%x", laddr,
@@ -1370,14 +1374,14 @@ ntb_get_msix_info(struct ntb_softc *ntb)
data = bus_read_4(msix->msix_table_res, offset +
PCI_MSIX_ENTRY_DATA);
- ntb_printf(2, "local MSIX data(%u): 0x%x\n", i, data);
+ intel_ntb_printf(2, "local MSIX data(%u): 0x%x\n", i, data);
ntb->msix_data[i].nmd_data = data;
}
}
static struct ntb_hw_info *
-ntb_get_device_info(uint32_t device_id)
+intel_ntb_get_device_info(uint32_t device_id)
{
struct ntb_hw_info *ep = pci_ids;
@@ -1390,15 +1394,15 @@ ntb_get_device_info(uint32_t device_id)
}
static void
-ntb_teardown_xeon(struct ntb_softc *ntb)
+intel_ntb_teardown_xeon(struct ntb_softc *ntb)
{
if (ntb->reg != NULL)
- ntb_link_disable(ntb);
+ intel_ntb_link_disable(ntb->device);
}
static void
-ntb_detect_max_mw(struct ntb_softc *ntb)
+intel_ntb_detect_max_mw(struct ntb_softc *ntb)
{
if (ntb->type == NTB_ATOM) {
@@ -1406,14 +1410,14 @@ ntb_detect_max_mw(struct ntb_softc *ntb)
return;
}
- if (HAS_FEATURE(NTB_SPLIT_BAR))
+ if (HAS_FEATURE(ntb, NTB_SPLIT_BAR))
ntb->mw_count = XEON_HSX_SPLIT_MW_COUNT;
else
ntb->mw_count = XEON_SNB_MW_COUNT;
}
static int
-ntb_detect_xeon(struct ntb_softc *ntb)
+intel_ntb_detect_xeon(struct ntb_softc *ntb)
{
uint8_t ppd, conn_type;
@@ -1428,11 +1432,21 @@ ntb_detect_xeon(struct ntb_softc *ntb)
if ((ppd & XEON_PPD_SPLIT_BAR) != 0)
ntb->features |= NTB_SPLIT_BAR;
+ if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP) &&
+ !HAS_FEATURE(ntb, NTB_SPLIT_BAR)) {
+ device_printf(ntb->device,
+ "Can not apply SB01BASE_LOCKUP workaround "
+ "with split BARs disabled!\n");
+ device_printf(ntb->device,
+ "Expect system hangs under heavy NTB traffic!\n");
+ ntb->features &= ~NTB_SB01BASE_LOCKUP;
+ }
+
/*
* SDOORBELL errata workaround gets in the way of SB01BASE_LOCKUP
* errata workaround; only do one at a time.
*/
- if (HAS_FEATURE(NTB_SB01BASE_LOCKUP))
+ if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP))
ntb->features &= ~NTB_SDOORBELL_LOCKUP;
conn_type = ppd & XEON_PPD_CONN_TYPE;
@@ -1451,7 +1465,7 @@ ntb_detect_xeon(struct ntb_softc *ntb)
}
static int
-ntb_detect_atom(struct ntb_softc *ntb)
+intel_ntb_detect_atom(struct ntb_softc *ntb)
{
uint32_t ppd, conn_type;
@@ -1476,7 +1490,7 @@ ntb_detect_atom(struct ntb_softc *ntb)
}
static int
-ntb_xeon_init_dev(struct ntb_softc *ntb)
+intel_ntb_xeon_init_dev(struct ntb_softc *ntb)
{
int rc;
@@ -1497,15 +1511,16 @@ ntb_xeon_init_dev(struct ntb_softc *ntb)
ntb->peer_reg = &xeon_b2b_reg;
ntb->xlat_reg = &xeon_sec_xlat;
- if (HAS_FEATURE(NTB_SB01BASE_LOCKUP)) {
+ if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) {
+ ntb->fake_db_bell = 0;
ntb->msix_mw_idx = (ntb->mw_count + g_ntb_msix_idx) %
ntb->mw_count;
- ntb_printf(2, "Setting up MSIX mw idx %d means %u\n",
+ intel_ntb_printf(2, "Setting up MSIX mw idx %d means %u\n",
g_ntb_msix_idx, ntb->msix_mw_idx);
- rc = ntb_mw_set_wc_internal(ntb, ntb->msix_mw_idx,
+ rc = intel_ntb_mw_set_wc_internal(ntb, ntb->msix_mw_idx,
VM_MEMATTR_UNCACHEABLE);
KASSERT(rc == 0, ("shouldn't fail"));
- } else if (HAS_FEATURE(NTB_SDOORBELL_LOCKUP)) {
+ } else if (HAS_FEATURE(ntb, NTB_SDOORBELL_LOCKUP)) {
/*
* There is a Xeon hardware errata related to writes to SDOORBELL or
* B2BDOORBELL in conjunction with inbound access to NTB MMIO space,
@@ -1515,12 +1530,12 @@ ntb_xeon_init_dev(struct ntb_softc *ntb)
*/
ntb->b2b_mw_idx = (ntb->mw_count + g_ntb_mw_idx) %
ntb->mw_count;
- ntb_printf(2, "Setting up b2b mw idx %d means %u\n",
+ intel_ntb_printf(2, "Setting up b2b mw idx %d means %u\n",
g_ntb_mw_idx, ntb->b2b_mw_idx);
- rc = ntb_mw_set_wc_internal(ntb, ntb->b2b_mw_idx,
+ rc = intel_ntb_mw_set_wc_internal(ntb, ntb->b2b_mw_idx,
VM_MEMATTR_UNCACHEABLE);
KASSERT(rc == 0, ("shouldn't fail"));
- } else if (HAS_FEATURE(NTB_B2BDOORBELL_BIT14))
+ } else if (HAS_FEATURE(ntb, NTB_B2BDOORBELL_BIT14))
/*
* HW Errata on bit 14 of b2bdoorbell register. Writes will not be
* mirrored to the remote system. Shrink the number of bits by one,
@@ -1543,7 +1558,7 @@ ntb_xeon_init_dev(struct ntb_softc *ntb)
return (rc);
/* Enable Bus Master and Memory Space on the secondary side */
- ntb_reg_write(2, XEON_SPCICMD_OFFSET,
+ intel_ntb_reg_write(2, XEON_SPCICMD_OFFSET,
PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN);
/*
@@ -1554,16 +1569,12 @@ ntb_xeon_init_dev(struct ntb_softc *ntb)
db_iowrite(ntb, ntb->self_reg->db_mask, ntb->db_mask);
DB_MASK_UNLOCK(ntb);
- rc = xeon_setup_msix_bar(ntb);
- if (rc != 0)
- return (rc);
-
- rc = ntb_init_isr(ntb);
+ rc = intel_ntb_init_isr(ntb);
return (rc);
}
static int
-ntb_atom_init_dev(struct ntb_softc *ntb)
+intel_ntb_atom_init_dev(struct ntb_softc *ntb)
{
int error;
@@ -1590,15 +1601,15 @@ ntb_atom_init_dev(struct ntb_softc *ntb)
configure_atom_secondary_side_bars(ntb);
/* Enable Bus Master and Memory Space on the secondary side */
- ntb_reg_write(2, ATOM_SPCICMD_OFFSET,
+ intel_ntb_reg_write(2, ATOM_SPCICMD_OFFSET,
PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN);
- error = ntb_init_isr(ntb);
+ error = intel_ntb_init_isr(ntb);
if (error != 0)
return (error);
/* Initiate PCI-E link training */
- ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
+ intel_ntb_link_enable(ntb->device, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
callout_reset(&ntb->heartbeat_timer, 0, atom_link_hb, ntb);
@@ -1611,19 +1622,19 @@ configure_atom_secondary_side_bars(struct ntb_softc *ntb)
{
if (ntb->dev_type == NTB_DEV_USD) {
- ntb_reg_write(8, ATOM_PBAR2XLAT_OFFSET,
+ intel_ntb_reg_write(8, ATOM_PBAR2XLAT_OFFSET,
XEON_B2B_BAR2_ADDR64);
- ntb_reg_write(8, ATOM_PBAR4XLAT_OFFSET,
+ intel_ntb_reg_write(8, ATOM_PBAR4XLAT_OFFSET,
XEON_B2B_BAR4_ADDR64);
- ntb_reg_write(8, ATOM_MBAR23_OFFSET, XEON_B2B_BAR2_ADDR64);
- ntb_reg_write(8, ATOM_MBAR45_OFFSET, XEON_B2B_BAR4_ADDR64);
+ intel_ntb_reg_write(8, ATOM_MBAR23_OFFSET, XEON_B2B_BAR2_ADDR64);
+ intel_ntb_reg_write(8, ATOM_MBAR45_OFFSET, XEON_B2B_BAR4_ADDR64);
} else {
- ntb_reg_write(8, ATOM_PBAR2XLAT_OFFSET,
+ intel_ntb_reg_write(8, ATOM_PBAR2XLAT_OFFSET,
XEON_B2B_BAR2_ADDR64);
- ntb_reg_write(8, ATOM_PBAR4XLAT_OFFSET,
+ intel_ntb_reg_write(8, ATOM_PBAR4XLAT_OFFSET,
XEON_B2B_BAR4_ADDR64);
- ntb_reg_write(8, ATOM_MBAR23_OFFSET, XEON_B2B_BAR2_ADDR64);
- ntb_reg_write(8, ATOM_MBAR45_OFFSET, XEON_B2B_BAR4_ADDR64);
+ intel_ntb_reg_write(8, ATOM_MBAR23_OFFSET, XEON_B2B_BAR2_ADDR64);
+ intel_ntb_reg_write(8, ATOM_MBAR45_OFFSET, XEON_B2B_BAR4_ADDR64);
}
}
@@ -1649,7 +1660,7 @@ xeon_reset_sbar_size(struct ntb_softc *ntb, enum ntb_bar idx,
struct ntb_pci_bar_info *bar;
uint8_t bar_sz;
- if (!HAS_FEATURE(NTB_SPLIT_BAR) && idx >= NTB_B2B_BAR_3)
+ if (!HAS_FEATURE(ntb, NTB_SPLIT_BAR) && idx >= NTB_B2B_BAR_3)
return;
bar = &ntb->bar_info[idx];
@@ -1673,28 +1684,28 @@ xeon_set_sbar_base_and_limit(struct ntb_softc *ntb, uint64_t bar_addr,
uint32_t base_reg, lmt_reg;
bar_get_xlat_params(ntb, idx, &base_reg, NULL, &lmt_reg);
- if (idx == regbar)
- bar_addr += ntb->b2b_off;
+ if (idx == regbar) {
+ if (ntb->b2b_off)
+ bar_addr += ntb->b2b_off;
+ else
+ bar_addr = 0;
+ }
- /*
- * Set limit registers first to avoid an errata where setting the base
- * registers locks the limit registers.
- */
if (!bar_is_64bit(ntb, idx)) {
- ntb_reg_write(4, lmt_reg, bar_addr);
- reg_val = ntb_reg_read(4, lmt_reg);
+ intel_ntb_reg_write(4, base_reg, bar_addr);
+ reg_val = intel_ntb_reg_read(4, base_reg);
(void)reg_val;
- ntb_reg_write(4, base_reg, bar_addr);
- reg_val = ntb_reg_read(4, base_reg);
+ intel_ntb_reg_write(4, lmt_reg, bar_addr);
+ reg_val = intel_ntb_reg_read(4, lmt_reg);
(void)reg_val;
} else {
- ntb_reg_write(8, lmt_reg, bar_addr);
- reg_val = ntb_reg_read(8, lmt_reg);
+ intel_ntb_reg_write(8, base_reg, bar_addr);
+ reg_val = intel_ntb_reg_read(8, base_reg);
(void)reg_val;
- ntb_reg_write(8, base_reg, bar_addr);
- reg_val = ntb_reg_read(8, base_reg);
+ intel_ntb_reg_write(8, lmt_reg, bar_addr);
+ reg_val = intel_ntb_reg_read(8, lmt_reg);
(void)reg_val;
}
}
@@ -1705,30 +1716,17 @@ xeon_set_pbar_xlat(struct ntb_softc *ntb, uint64_t base_addr, enum ntb_bar idx)
struct ntb_pci_bar_info *bar;
bar = &ntb->bar_info[idx];
- if (HAS_FEATURE(NTB_SPLIT_BAR) && idx >= NTB_B2B_BAR_2) {
- ntb_reg_write(4, bar->pbarxlat_off, base_addr);
- base_addr = ntb_reg_read(4, bar->pbarxlat_off);
+ if (HAS_FEATURE(ntb, NTB_SPLIT_BAR) && idx >= NTB_B2B_BAR_2) {
+ intel_ntb_reg_write(4, bar->pbarxlat_off, base_addr);
+ base_addr = intel_ntb_reg_read(4, bar->pbarxlat_off);
} else {
- ntb_reg_write(8, bar->pbarxlat_off, base_addr);
- base_addr = ntb_reg_read(8, bar->pbarxlat_off);
+ intel_ntb_reg_write(8, bar->pbarxlat_off, base_addr);
+ base_addr = intel_ntb_reg_read(8, bar->pbarxlat_off);
}
(void)base_addr;
}
static int
-xeon_setup_msix_bar(struct ntb_softc *ntb)
-{
- enum ntb_bar bar_num;
-
- if (!HAS_FEATURE(NTB_SB01BASE_LOCKUP))
- return (0);
-
- bar_num = ntb_mw_to_bar(ntb, ntb->msix_mw_idx);
- ntb->peer_lapic_bar = &ntb->bar_info[bar_num];
- return (0);
-}
-
-static int
xeon_setup_b2b_mw(struct ntb_softc *ntb, const struct ntb_b2b_addr *addr,
const struct ntb_b2b_addr *peer_addr)
{
@@ -1742,7 +1740,7 @@ xeon_setup_b2b_mw(struct ntb_softc *ntb, const struct ntb_b2b_addr *addr,
b2b_bar_num = NTB_CONFIG_BAR;
ntb->b2b_off = 0;
} else {
- b2b_bar_num = ntb_mw_to_bar(ntb, ntb->b2b_mw_idx);
+ b2b_bar_num = intel_ntb_mw_to_bar(ntb, ntb->b2b_mw_idx);
KASSERT(b2b_bar_num > 0 && b2b_bar_num < NTB_MAX_BARS,
("invalid b2b mw bar"));
@@ -1773,7 +1771,7 @@ xeon_setup_b2b_mw(struct ntb_softc *ntb, const struct ntb_b2b_addr *addr,
bar_addr = addr->bar0_addr;
else if (b2b_bar_num == NTB_B2B_BAR_1)
bar_addr = addr->bar2_addr64;
- else if (b2b_bar_num == NTB_B2B_BAR_2 && !HAS_FEATURE(NTB_SPLIT_BAR))
+ else if (b2b_bar_num == NTB_B2B_BAR_2 && !HAS_FEATURE(ntb, NTB_SPLIT_BAR))
bar_addr = addr->bar4_addr64;
else if (b2b_bar_num == NTB_B2B_BAR_2)
bar_addr = addr->bar4_addr32;
@@ -1782,7 +1780,7 @@ xeon_setup_b2b_mw(struct ntb_softc *ntb, const struct ntb_b2b_addr *addr,
else
KASSERT(false, ("invalid bar"));
- ntb_reg_write(8, XEON_SBAR0BASE_OFFSET, bar_addr);
+ intel_ntb_reg_write(8, XEON_SBAR0BASE_OFFSET, bar_addr);
/*
* Other SBARs are normally hit by the PBAR xlat, except for the b2b
@@ -1793,7 +1791,7 @@ xeon_setup_b2b_mw(struct ntb_softc *ntb, const struct ntb_b2b_addr *addr,
*/
xeon_set_sbar_base_and_limit(ntb, addr->bar2_addr64, NTB_B2B_BAR_1,
b2b_bar_num);
- if (HAS_FEATURE(NTB_SPLIT_BAR)) {
+ if (HAS_FEATURE(ntb, NTB_SPLIT_BAR)) {
xeon_set_sbar_base_and_limit(ntb, addr->bar4_addr32,
NTB_B2B_BAR_2, b2b_bar_num);
xeon_set_sbar_base_and_limit(ntb, addr->bar5_addr32,
@@ -1803,56 +1801,41 @@ xeon_setup_b2b_mw(struct ntb_softc *ntb, const struct ntb_b2b_addr *addr,
NTB_B2B_BAR_2, b2b_bar_num);
/* Zero incoming translation addrs */
- ntb_reg_write(8, XEON_SBAR2XLAT_OFFSET, 0);
- ntb_reg_write(8, XEON_SBAR4XLAT_OFFSET, 0);
-
- if (HAS_FEATURE(NTB_SB01BASE_LOCKUP)) {
- size_t size, xlatoffset;
+ intel_ntb_reg_write(8, XEON_SBAR2XLAT_OFFSET, 0);
+ intel_ntb_reg_write(8, XEON_SBAR4XLAT_OFFSET, 0);
- switch (ntb_mw_to_bar(ntb, ntb->msix_mw_idx)) {
- case NTB_B2B_BAR_1:
- size = 8;
- xlatoffset = XEON_SBAR2XLAT_OFFSET;
- break;
- case NTB_B2B_BAR_2:
- xlatoffset = XEON_SBAR4XLAT_OFFSET;
- if (HAS_FEATURE(NTB_SPLIT_BAR))
- size = 4;
- else
- size = 8;
- break;
- case NTB_B2B_BAR_3:
- xlatoffset = XEON_SBAR5XLAT_OFFSET;
- size = 4;
- break;
- default:
- KASSERT(false, ("Bogus msix mw idx: %u",
- ntb->msix_mw_idx));
- return (EINVAL);
- }
+ if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) {
+ uint32_t xlat_reg, lmt_reg;
+ enum ntb_bar bar_num;
/*
* We point the chosen MSIX MW BAR xlat to remote LAPIC for
* workaround
*/
- if (size == 4) {
- ntb_reg_write(4, xlatoffset, MSI_INTEL_ADDR_BASE);
- ntb->msix_xlat = ntb_reg_read(4, xlatoffset);
+ bar_num = intel_ntb_mw_to_bar(ntb, ntb->msix_mw_idx);
+ bar_get_xlat_params(ntb, bar_num, NULL, &xlat_reg, &lmt_reg);
+ if (bar_is_64bit(ntb, bar_num)) {
+ intel_ntb_reg_write(8, xlat_reg, MSI_INTEL_ADDR_BASE);
+ ntb->msix_xlat = intel_ntb_reg_read(8, xlat_reg);
+ intel_ntb_reg_write(8, lmt_reg, 0);
} else {
- ntb_reg_write(8, xlatoffset, MSI_INTEL_ADDR_BASE);
- ntb->msix_xlat = ntb_reg_read(8, xlatoffset);
+ intel_ntb_reg_write(4, xlat_reg, MSI_INTEL_ADDR_BASE);
+ ntb->msix_xlat = intel_ntb_reg_read(4, xlat_reg);
+ intel_ntb_reg_write(4, lmt_reg, 0);
}
+
+ ntb->peer_lapic_bar = &ntb->bar_info[bar_num];
}
- (void)ntb_reg_read(8, XEON_SBAR2XLAT_OFFSET);
- (void)ntb_reg_read(8, XEON_SBAR4XLAT_OFFSET);
+ (void)intel_ntb_reg_read(8, XEON_SBAR2XLAT_OFFSET);
+ (void)intel_ntb_reg_read(8, XEON_SBAR4XLAT_OFFSET);
/* Zero outgoing translation limits (whole bar size windows) */
- ntb_reg_write(8, XEON_PBAR2LMT_OFFSET, 0);
- ntb_reg_write(8, XEON_PBAR4LMT_OFFSET, 0);
+ intel_ntb_reg_write(8, XEON_PBAR2LMT_OFFSET, 0);
+ intel_ntb_reg_write(8, XEON_PBAR4LMT_OFFSET, 0);
/* Set outgoing translation offsets */
xeon_set_pbar_xlat(ntb, peer_addr->bar2_addr64, NTB_B2B_BAR_1);
- if (HAS_FEATURE(NTB_SPLIT_BAR)) {
+ if (HAS_FEATURE(ntb, NTB_SPLIT_BAR)) {
xeon_set_pbar_xlat(ntb, peer_addr->bar4_addr32, NTB_B2B_BAR_2);
xeon_set_pbar_xlat(ntb, peer_addr->bar5_addr32, NTB_B2B_BAR_3);
} else
@@ -1864,7 +1847,7 @@ xeon_setup_b2b_mw(struct ntb_softc *ntb, const struct ntb_b2b_addr *addr,
bar_addr = peer_addr->bar0_addr;
else if (b2b_bar_num == NTB_B2B_BAR_1)
bar_addr = peer_addr->bar2_addr64;
- else if (b2b_bar_num == NTB_B2B_BAR_2 && !HAS_FEATURE(NTB_SPLIT_BAR))
+ else if (b2b_bar_num == NTB_B2B_BAR_2 && !HAS_FEATURE(ntb, NTB_SPLIT_BAR))
bar_addr = peer_addr->bar4_addr64;
else if (b2b_bar_num == NTB_B2B_BAR_2)
bar_addr = peer_addr->bar4_addr32;
@@ -1877,8 +1860,8 @@ xeon_setup_b2b_mw(struct ntb_softc *ntb, const struct ntb_b2b_addr *addr,
* B2B_XLAT_OFFSET is a 64-bit register but can only be written 32 bits
* at a time.
*/
- ntb_reg_write(4, XEON_B2B_XLAT_OFFSETL, bar_addr & 0xffffffff);
- ntb_reg_write(4, XEON_B2B_XLAT_OFFSETU, bar_addr >> 32);
+ intel_ntb_reg_write(4, XEON_B2B_XLAT_OFFSETL, bar_addr & 0xffffffff);
+ intel_ntb_reg_write(4, XEON_B2B_XLAT_OFFSETU, bar_addr >> 32);
return (0);
}
@@ -1897,7 +1880,7 @@ link_is_up(struct ntb_softc *ntb)
if (ntb->type == NTB_XEON)
return (_xeon_link_is_up(ntb) && (ntb->peer_msix_good ||
- !HAS_FEATURE(NTB_SB01BASE_LOCKUP)));
+ !HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)));
KASSERT(ntb->type == NTB_ATOM, ("ntb type"));
return ((ntb->ntb_ctl & ATOM_CNTL_LINK_DOWN) == 0);
@@ -1910,11 +1893,11 @@ atom_link_is_err(struct ntb_softc *ntb)
KASSERT(ntb->type == NTB_ATOM, ("ntb type"));
- status = ntb_reg_read(4, ATOM_LTSSMSTATEJMP_OFFSET);
+ status = intel_ntb_reg_read(4, ATOM_LTSSMSTATEJMP_OFFSET);
if ((status & ATOM_LTSSMSTATEJMP_FORCEDETECT) != 0)
return (true);
- status = ntb_reg_read(4, ATOM_IBSTERRRCRVSTS0_OFFSET);
+ status = intel_ntb_reg_read(4, ATOM_IBSTERRRCRVSTS0_OFFSET);
return ((status & ATOM_IBIST_ERR_OFLOW) != 0);
}
@@ -1937,8 +1920,8 @@ atom_link_hb(void *arg)
goto out;
}
- if (ntb_poll_link(ntb))
- ntb_link_event(ntb);
+ if (intel_ntb_poll_link(ntb))
+ ntb_link_event(ntb->device);
if (!link_is_up(ntb) && atom_link_is_err(ntb)) {
/* Link is down with error, proceed with recovery */
@@ -1956,166 +1939,47 @@ atom_perform_link_restart(struct ntb_softc *ntb)
uint32_t status;
/* Driver resets the NTB ModPhy lanes - magic! */
- ntb_reg_write(1, ATOM_MODPHY_PCSREG6, 0xe0);
- ntb_reg_write(1, ATOM_MODPHY_PCSREG4, 0x40);
- ntb_reg_write(1, ATOM_MODPHY_PCSREG4, 0x60);
- ntb_reg_write(1, ATOM_MODPHY_PCSREG6, 0x60);
+ intel_ntb_reg_write(1, ATOM_MODPHY_PCSREG6, 0xe0);
+ intel_ntb_reg_write(1, ATOM_MODPHY_PCSREG4, 0x40);
+ intel_ntb_reg_write(1, ATOM_MODPHY_PCSREG4, 0x60);
+ intel_ntb_reg_write(1, ATOM_MODPHY_PCSREG6, 0x60);
/* Driver waits 100ms to allow the NTB ModPhy to settle */
pause("ModPhy", hz / 10);
/* Clear AER Errors, write to clear */
- status = ntb_reg_read(4, ATOM_ERRCORSTS_OFFSET);
+ status = intel_ntb_reg_read(4, ATOM_ERRCORSTS_OFFSET);
status &= PCIM_AER_COR_REPLAY_ROLLOVER;
- ntb_reg_write(4, ATOM_ERRCORSTS_OFFSET, status);
+ intel_ntb_reg_write(4, ATOM_ERRCORSTS_OFFSET, status);
/* Clear unexpected electrical idle event in LTSSM, write to clear */
- status = ntb_reg_read(4, ATOM_LTSSMERRSTS0_OFFSET);
+ status = intel_ntb_reg_read(4, ATOM_LTSSMERRSTS0_OFFSET);
status |= ATOM_LTSSMERRSTS0_UNEXPECTEDEI;
- ntb_reg_write(4, ATOM_LTSSMERRSTS0_OFFSET, status);
+ intel_ntb_reg_write(4, ATOM_LTSSMERRSTS0_OFFSET, status);
/* Clear DeSkew Buffer error, write to clear */
- status = ntb_reg_read(4, ATOM_DESKEWSTS_OFFSET);
+ status = intel_ntb_reg_read(4, ATOM_DESKEWSTS_OFFSET);
status |= ATOM_DESKEWSTS_DBERR;
- ntb_reg_write(4, ATOM_DESKEWSTS_OFFSET, status);
+ intel_ntb_reg_write(4, ATOM_DESKEWSTS_OFFSET, status);
- status = ntb_reg_read(4, ATOM_IBSTERRRCRVSTS0_OFFSET);
+ status = intel_ntb_reg_read(4, ATOM_IBSTERRRCRVSTS0_OFFSET);
status &= ATOM_IBIST_ERR_OFLOW;
- ntb_reg_write(4, ATOM_IBSTERRRCRVSTS0_OFFSET, status);
+ intel_ntb_reg_write(4, ATOM_IBSTERRRCRVSTS0_OFFSET, status);
/* Releases the NTB state machine to allow the link to retrain */
- status = ntb_reg_read(4, ATOM_LTSSMSTATEJMP_OFFSET);
+ status = intel_ntb_reg_read(4, ATOM_LTSSMSTATEJMP_OFFSET);
status &= ~ATOM_LTSSMSTATEJMP_FORCEDETECT;
- ntb_reg_write(4, ATOM_LTSSMSTATEJMP_OFFSET, status);
-}
-
-/*
- * ntb_set_ctx() - associate a driver context with an ntb device
- * @ntb: NTB device context
- * @ctx: Driver context
- * @ctx_ops: Driver context operations
- *
- * Associate a driver context and operations with a ntb device. The context is
- * provided by the client driver, and the driver may associate a different
- * context with each ntb device.
- *
- * Return: Zero if the context is associated, otherwise an error number.
- */
-int
-ntb_set_ctx(struct ntb_softc *ntb, void *ctx, const struct ntb_ctx_ops *ops)
-{
-
- if (ctx == NULL || ops == NULL)
- return (EINVAL);
- if (ntb->ctx_ops != NULL)
- return (EINVAL);
-
- CTX_LOCK(ntb);
- if (ntb->ctx_ops != NULL) {
- CTX_UNLOCK(ntb);
- return (EINVAL);
- }
- ntb->ntb_ctx = ctx;
- ntb->ctx_ops = ops;
- CTX_UNLOCK(ntb);
-
- return (0);
-}
-
-/*
- * It is expected that this will only be used from contexts where the ctx_lock
- * is not needed to protect ntb_ctx lifetime.
- */
-void *
-ntb_get_ctx(struct ntb_softc *ntb, const struct ntb_ctx_ops **ops)
-{
-
- KASSERT(ntb->ntb_ctx != NULL && ntb->ctx_ops != NULL, ("bogus"));
- if (ops != NULL)
- *ops = ntb->ctx_ops;
- return (ntb->ntb_ctx);
-}
-
-/*
- * ntb_clear_ctx() - disassociate any driver context from an ntb device
- * @ntb: NTB device context
- *
- * Clear any association that may exist between a driver context and the ntb
- * device.
- */
-void
-ntb_clear_ctx(struct ntb_softc *ntb)
-{
-
- CTX_LOCK(ntb);
- ntb->ntb_ctx = NULL;
- ntb->ctx_ops = NULL;
- CTX_UNLOCK(ntb);
-}
-
-/*
- * ntb_link_event() - notify driver context of a change in link status
- * @ntb: NTB device context
- *
- * Notify the driver context that the link status may have changed. The driver
- * should call ntb_link_is_up() to get the current status.
- */
-void
-ntb_link_event(struct ntb_softc *ntb)
-{
-
- CTX_LOCK(ntb);
- if (ntb->ctx_ops != NULL && ntb->ctx_ops->link_event != NULL)
- ntb->ctx_ops->link_event(ntb->ntb_ctx);
- CTX_UNLOCK(ntb);
+ intel_ntb_reg_write(4, ATOM_LTSSMSTATEJMP_OFFSET, status);
}
-/*
- * ntb_db_event() - notify driver context of a doorbell event
- * @ntb: NTB device context
- * @vector: Interrupt vector number
- *
- * Notify the driver context of a doorbell event. If hardware supports
- * multiple interrupt vectors for doorbells, the vector number indicates which
- * vector received the interrupt. The vector number is relative to the first
- * vector used for doorbells, starting at zero, and must be less than
- * ntb_db_vector_count(). The driver may call ntb_db_read() to check which
- * doorbell bits need service, and ntb_db_vector_mask() to determine which of
- * those bits are associated with the vector number.
- */
-static void
-ntb_db_event(struct ntb_softc *ntb, uint32_t vec)
-{
-
- CTX_LOCK(ntb);
- if (ntb->ctx_ops != NULL && ntb->ctx_ops->db_event != NULL)
- ntb->ctx_ops->db_event(ntb->ntb_ctx, vec);
- CTX_UNLOCK(ntb);
-}
-
-/*
- * ntb_link_enable() - enable the link on the secondary side of the ntb
- * @ntb: NTB device context
- * @max_speed: The maximum link speed expressed as PCIe generation number[0]
- * @max_width: The maximum link width expressed as the number of PCIe lanes[0]
- *
- * Enable the link on the secondary side of the ntb. This can only be done
- * from the primary side of the ntb in primary or b2b topology. The ntb device
- * should train the link to its maximum speed and width, or the requested speed
- * and width, whichever is smaller, if supported.
- *
- * Return: Zero on success, otherwise an error number.
- *
- * [0]: Only NTB_SPEED_AUTO and NTB_WIDTH_AUTO are valid inputs; other speed
- * and width input will be ignored.
- */
-int
-ntb_link_enable(struct ntb_softc *ntb, enum ntb_speed s __unused,
- enum ntb_width w __unused)
+static int
+intel_ntb_link_enable(device_t dev, enum ntb_speed speed __unused,
+ enum ntb_width width __unused)
{
+ struct ntb_softc *ntb = device_get_softc(dev);
uint32_t cntl;
- ntb_printf(2, "%s\n", __func__);
+ intel_ntb_printf(2, "%s\n", __func__);
if (ntb->type == NTB_ATOM) {
pci_write_config(ntb->device, NTB_PPD_OFFSET,
@@ -2124,57 +1988,47 @@ ntb_link_enable(struct ntb_softc *ntb, enum ntb_speed s __unused,
}
if (ntb->conn_type == NTB_CONN_TRANSPARENT) {
- ntb_link_event(ntb);
+ ntb_link_event(dev);
return (0);
}
- cntl = ntb_reg_read(4, ntb->reg->ntb_ctl);
+ cntl = intel_ntb_reg_read(4, ntb->reg->ntb_ctl);
cntl &= ~(NTB_CNTL_LINK_DISABLE | NTB_CNTL_CFG_LOCK);
cntl |= NTB_CNTL_P2S_BAR23_SNOOP | NTB_CNTL_S2P_BAR23_SNOOP;
cntl |= NTB_CNTL_P2S_BAR4_SNOOP | NTB_CNTL_S2P_BAR4_SNOOP;
- if (HAS_FEATURE(NTB_SPLIT_BAR))
+ if (HAS_FEATURE(ntb, NTB_SPLIT_BAR))
cntl |= NTB_CNTL_P2S_BAR5_SNOOP | NTB_CNTL_S2P_BAR5_SNOOP;
- ntb_reg_write(4, ntb->reg->ntb_ctl, cntl);
+ intel_ntb_reg_write(4, ntb->reg->ntb_ctl, cntl);
return (0);
}
-/*
- * ntb_link_disable() - disable the link on the secondary side of the ntb
- * @ntb: NTB device context
- *
- * Disable the link on the secondary side of the ntb. This can only be done
- * from the primary side of the ntb in primary or b2b topology. The ntb device
- * should disable the link. Returning from this call must indicate that a
- * barrier has passed, though with no more writes may pass in either direction
- * across the link, except if this call returns an error number.
- *
- * Return: Zero on success, otherwise an error number.
- */
-int
-ntb_link_disable(struct ntb_softc *ntb)
+static int
+intel_ntb_link_disable(device_t dev)
{
+ struct ntb_softc *ntb = device_get_softc(dev);
uint32_t cntl;
- ntb_printf(2, "%s\n", __func__);
+ intel_ntb_printf(2, "%s\n", __func__);
if (ntb->conn_type == NTB_CONN_TRANSPARENT) {
- ntb_link_event(ntb);
+ ntb_link_event(dev);
return (0);
}
- cntl = ntb_reg_read(4, ntb->reg->ntb_ctl);
+ cntl = intel_ntb_reg_read(4, ntb->reg->ntb_ctl);
cntl &= ~(NTB_CNTL_P2S_BAR23_SNOOP | NTB_CNTL_S2P_BAR23_SNOOP);
cntl &= ~(NTB_CNTL_P2S_BAR4_SNOOP | NTB_CNTL_S2P_BAR4_SNOOP);
- if (HAS_FEATURE(NTB_SPLIT_BAR))
+ if (HAS_FEATURE(ntb, NTB_SPLIT_BAR))
cntl &= ~(NTB_CNTL_P2S_BAR5_SNOOP | NTB_CNTL_S2P_BAR5_SNOOP);
cntl |= NTB_CNTL_LINK_DISABLE | NTB_CNTL_CFG_LOCK;
- ntb_reg_write(4, ntb->reg->ntb_ctl, cntl);
+ intel_ntb_reg_write(4, ntb->reg->ntb_ctl, cntl);
return (0);
}
-bool
-ntb_link_enabled(struct ntb_softc *ntb)
+static bool
+intel_ntb_link_enabled(device_t dev)
{
+ struct ntb_softc *ntb = device_get_softc(dev);
uint32_t cntl;
if (ntb->type == NTB_ATOM) {
@@ -2185,7 +2039,7 @@ ntb_link_enabled(struct ntb_softc *ntb)
if (ntb->conn_type == NTB_CONN_TRANSPARENT)
return (true);
- cntl = ntb_reg_read(4, ntb->reg->ntb_ctl);
+ cntl = intel_ntb_reg_read(4, ntb->reg->ntb_ctl);
return ((cntl & NTB_CNTL_LINK_DISABLE) == 0);
}
@@ -2210,11 +2064,11 @@ recover_atom_link(void *arg)
if (atom_link_is_err(ntb))
goto retry;
- status32 = ntb_reg_read(4, ntb->reg->ntb_ctl);
+ status32 = intel_ntb_reg_read(4, ntb->reg->ntb_ctl);
if ((status32 & ATOM_CNTL_LINK_DOWN) != 0)
goto out;
- status32 = ntb_reg_read(4, ntb->reg->lnk_sta);
+ status32 = intel_ntb_reg_read(4, ntb->reg->lnk_sta);
width = NTB_LNK_STA_WIDTH(status32);
speed = status32 & NTB_LINK_SPEED_MASK;
@@ -2237,18 +2091,18 @@ retry:
* Polls the HW link status register(s); returns true if something has changed.
*/
static bool
-ntb_poll_link(struct ntb_softc *ntb)
+intel_ntb_poll_link(struct ntb_softc *ntb)
{
uint32_t ntb_cntl;
uint16_t reg_val;
if (ntb->type == NTB_ATOM) {
- ntb_cntl = ntb_reg_read(4, ntb->reg->ntb_ctl);
+ ntb_cntl = intel_ntb_reg_read(4, ntb->reg->ntb_ctl);
if (ntb_cntl == ntb->ntb_ctl)
return (false);
ntb->ntb_ctl = ntb_cntl;
- ntb->lnk_sta = ntb_reg_read(4, ntb->reg->lnk_sta);
+ ntb->lnk_sta = intel_ntb_reg_read(4, ntb->reg->lnk_sta);
} else {
db_iowrite_raw(ntb, ntb->self_reg->db_bell, ntb->db_link_mask);
@@ -2258,11 +2112,11 @@ ntb_poll_link(struct ntb_softc *ntb)
ntb->lnk_sta = reg_val;
- if (HAS_FEATURE(NTB_SB01BASE_LOCKUP)) {
+ if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) {
if (_xeon_link_is_up(ntb)) {
if (!ntb->peer_msix_good) {
callout_reset(&ntb->peer_msix_work, 0,
- ntb_exchange_msix, ntb);
+ intel_ntb_exchange_msix, ntb);
return (false);
}
} else {
@@ -2275,7 +2129,7 @@ ntb_poll_link(struct ntb_softc *ntb)
}
static inline enum ntb_speed
-ntb_link_sta_speed(struct ntb_softc *ntb)
+intel_ntb_link_sta_speed(struct ntb_softc *ntb)
{
if (!link_is_up(ntb))
@@ -2284,7 +2138,7 @@ ntb_link_sta_speed(struct ntb_softc *ntb)
}
static inline enum ntb_width
-ntb_link_sta_width(struct ntb_softc *ntb)
+intel_ntb_link_sta_width(struct ntb_softc *ntb)
{
if (!link_is_up(ntb))
@@ -2306,7 +2160,7 @@ SYSCTL_NODE(_hw_ntb, OID_AUTO, debug_info, CTLFLAG_RW, 0,
#define NTB_REGFLAGS_MASK (NTB_REGSZ_MASK | NTB_DB_READ | NTB_PCI_REG)
static void
-ntb_sysctl_init(struct ntb_softc *ntb)
+intel_ntb_sysctl_init(struct ntb_softc *ntb)
{
struct sysctl_oid_list *globals, *tree_par, *regpar, *statpar, *errpar;
struct sysctl_ctx_list *ctx;
@@ -2405,7 +2259,7 @@ ntb_sysctl_init(struct ntb_softc *ntb)
CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
NTB_REG_64 | ntb->xlat_reg->bar2_xlat,
sysctl_handle_register, "QU", "Incoming XLAT23 register");
- if (HAS_FEATURE(NTB_SPLIT_BAR)) {
+ if (HAS_FEATURE(ntb, NTB_SPLIT_BAR)) {
SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_xlat4",
CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
NTB_REG_32 | ntb->xlat_reg->bar4_xlat,
@@ -2425,7 +2279,7 @@ ntb_sysctl_init(struct ntb_softc *ntb)
CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
NTB_REG_64 | ntb->xlat_reg->bar2_limit,
sysctl_handle_register, "QU", "Incoming LMT23 register");
- if (HAS_FEATURE(NTB_SPLIT_BAR)) {
+ if (HAS_FEATURE(ntb, NTB_SPLIT_BAR)) {
SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_lmt4",
CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
NTB_REG_32 | ntb->xlat_reg->bar4_limit,
@@ -2516,7 +2370,7 @@ ntb_sysctl_init(struct ntb_softc *ntb)
CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
NTB_REG_64 | ntb->bar_info[NTB_B2B_BAR_1].pbarxlat_off,
sysctl_handle_register, "QU", "Outgoing XLAT23 register");
- if (HAS_FEATURE(NTB_SPLIT_BAR)) {
+ if (HAS_FEATURE(ntb, NTB_SPLIT_BAR)) {
SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_xlat4",
CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
NTB_REG_32 | ntb->bar_info[NTB_B2B_BAR_2].pbarxlat_off,
@@ -2536,7 +2390,7 @@ ntb_sysctl_init(struct ntb_softc *ntb)
CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
NTB_REG_64 | XEON_PBAR2LMT_OFFSET,
sysctl_handle_register, "QU", "Outgoing LMT23 register");
- if (HAS_FEATURE(NTB_SPLIT_BAR)) {
+ if (HAS_FEATURE(ntb, NTB_SPLIT_BAR)) {
SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_lmt4",
CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
NTB_REG_32 | XEON_PBAR4LMT_OFFSET,
@@ -2560,7 +2414,7 @@ ntb_sysctl_init(struct ntb_softc *ntb)
CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
NTB_REG_64 | ntb->xlat_reg->bar2_base,
sysctl_handle_register, "QU", "Secondary BAR23 base register");
- if (HAS_FEATURE(NTB_SPLIT_BAR)) {
+ if (HAS_FEATURE(ntb, NTB_SPLIT_BAR)) {
SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar4_base",
CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
NTB_REG_32 | ntb->xlat_reg->bar4_base,
@@ -2583,13 +2437,10 @@ ntb_sysctl_init(struct ntb_softc *ntb)
static int
sysctl_handle_features(SYSCTL_HANDLER_ARGS)
{
- struct ntb_softc *ntb;
+ struct ntb_softc *ntb = arg1;
struct sbuf sb;
int error;
- error = 0;
- ntb = arg1;
-
sbuf_new_for_sysctl(&sb, NULL, 256, req);
sbuf_printf(&sb, "%b", ntb->features, NTB_FEATURES_STR);
@@ -2604,14 +2455,11 @@ sysctl_handle_features(SYSCTL_HANDLER_ARGS)
static int
sysctl_handle_link_admin(SYSCTL_HANDLER_ARGS)
{
- struct ntb_softc *ntb;
+ struct ntb_softc *ntb = arg1;
unsigned old, new;
int error;
- error = 0;
- ntb = arg1;
-
- old = ntb_link_enabled(ntb);
+ old = intel_ntb_link_enabled(ntb->device);
error = SYSCTL_OUT(req, &old, sizeof(old));
if (error != 0 || req->newptr == NULL)
@@ -2621,31 +2469,28 @@ sysctl_handle_link_admin(SYSCTL_HANDLER_ARGS)
if (error != 0)
return (error);
- ntb_printf(0, "Admin set interface state to '%sabled'\n",
+ intel_ntb_printf(0, "Admin set interface state to '%sabled'\n",
(new != 0)? "en" : "dis");
if (new != 0)
- error = ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
+ error = intel_ntb_link_enable(ntb->device, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
else
- error = ntb_link_disable(ntb);
+ error = intel_ntb_link_disable(ntb->device);
return (error);
}
static int
sysctl_handle_link_status_human(SYSCTL_HANDLER_ARGS)
{
- struct ntb_softc *ntb;
+ struct ntb_softc *ntb = arg1;
struct sbuf sb;
enum ntb_speed speed;
enum ntb_width width;
int error;
- error = 0;
- ntb = arg1;
-
sbuf_new_for_sysctl(&sb, NULL, 32, req);
- if (ntb_link_is_up(ntb, &speed, &width))
+ if (intel_ntb_link_is_up(ntb->device, &speed, &width))
sbuf_printf(&sb, "up / PCIe Gen %u / Width x%u",
(unsigned)speed, (unsigned)width);
else
@@ -2662,14 +2507,11 @@ sysctl_handle_link_status_human(SYSCTL_HANDLER_ARGS)
static int
sysctl_handle_link_status(SYSCTL_HANDLER_ARGS)
{
- struct ntb_softc *ntb;
+ struct ntb_softc *ntb = arg1;
unsigned res;
int error;
- error = 0;
- ntb = arg1;
-
- res = ntb_link_is_up(ntb, NULL, NULL);
+ res = intel_ntb_link_is_up(ntb->device, NULL, NULL);
error = SYSCTL_OUT(req, &res, sizeof(res));
if (error || !req->newptr)
@@ -2708,28 +2550,28 @@ sysctl_handle_register(SYSCTL_HANDLER_ARGS)
if (pci)
umv = pci_read_config(ntb->device, reg, 8);
else
- umv = ntb_reg_read(8, reg);
+ umv = intel_ntb_reg_read(8, reg);
outsz = sizeof(uint64_t);
break;
case NTB_REG_32:
if (pci)
umv = pci_read_config(ntb->device, reg, 4);
else
- umv = ntb_reg_read(4, reg);
+ umv = intel_ntb_reg_read(4, reg);
outsz = sizeof(uint32_t);
break;
case NTB_REG_16:
if (pci)
umv = pci_read_config(ntb->device, reg, 2);
else
- umv = ntb_reg_read(2, reg);
+ umv = intel_ntb_reg_read(2, reg);
outsz = sizeof(uint16_t);
break;
case NTB_REG_8:
if (pci)
umv = pci_read_config(ntb->device, reg, 1);
else
- umv = ntb_reg_read(1, reg);
+ umv = intel_ntb_reg_read(1, reg);
outsz = sizeof(uint8_t);
break;
default:
@@ -2749,7 +2591,7 @@ sysctl_handle_register(SYSCTL_HANDLER_ARGS)
}
static unsigned
-ntb_user_mw_to_idx(struct ntb_softc *ntb, unsigned uidx)
+intel_ntb_user_mw_to_idx(struct ntb_softc *ntb, unsigned uidx)
{
if ((ntb->b2b_mw_idx != B2B_MW_DISABLED && ntb->b2b_off == 0 &&
@@ -2763,8 +2605,21 @@ ntb_user_mw_to_idx(struct ntb_softc *ntb, unsigned uidx)
return (uidx);
}
+#ifndef EARLY_AP_STARTUP
+static int msix_ready;
+
+static void
+intel_ntb_msix_ready(void *arg __unused)
+{
+
+ msix_ready = 1;
+}
+SYSINIT(intel_ntb_msix_ready, SI_SUB_SMP, SI_ORDER_ANY,
+ intel_ntb_msix_ready, NULL);
+#endif
+
static void
-ntb_exchange_msix(void *ctx)
+intel_ntb_exchange_msix(void *ctx)
{
struct ntb_softc *ntb;
uint32_t val;
@@ -2777,42 +2632,50 @@ ntb_exchange_msix(void *ctx)
if (ntb->peer_msix_done)
goto msix_done;
+#ifndef EARLY_AP_STARTUP
+ /* Block MSIX negotiation until SMP has started and IRQs have been reshuffled. */
+ if (!msix_ready)
+ goto reschedule;
+#endif
+
+ intel_ntb_get_msix_info(ntb);
for (i = 0; i < XEON_NONLINK_DB_MSIX_BITS; i++) {
- ntb_peer_spad_write(ntb, NTB_MSIX_DATA0 + i,
+ intel_ntb_peer_spad_write(ntb->device, NTB_MSIX_DATA0 + i,
ntb->msix_data[i].nmd_data);
- ntb_peer_spad_write(ntb, NTB_MSIX_OFS0 + i,
+ intel_ntb_peer_spad_write(ntb->device, NTB_MSIX_OFS0 + i,
ntb->msix_data[i].nmd_ofs - ntb->msix_xlat);
}
- ntb_peer_spad_write(ntb, NTB_MSIX_GUARD, NTB_MSIX_VER_GUARD);
+ intel_ntb_peer_spad_write(ntb->device, NTB_MSIX_GUARD, NTB_MSIX_VER_GUARD);
- ntb_spad_read(ntb, NTB_MSIX_GUARD, &val);
+ intel_ntb_spad_read(ntb->device, NTB_MSIX_GUARD, &val);
if (val != NTB_MSIX_VER_GUARD)
goto reschedule;
for (i = 0; i < XEON_NONLINK_DB_MSIX_BITS; i++) {
- ntb_spad_read(ntb, NTB_MSIX_DATA0 + i, &val);
- ntb_printf(2, "remote MSIX data(%u): 0x%x\n", i, val);
+ intel_ntb_spad_read(ntb->device, NTB_MSIX_DATA0 + i, &val);
+ intel_ntb_printf(2, "remote MSIX data(%u): 0x%x\n", i, val);
ntb->peer_msix_data[i].nmd_data = val;
- ntb_spad_read(ntb, NTB_MSIX_OFS0 + i, &val);
- ntb_printf(2, "remote MSIX addr(%u): 0x%x\n", i, val);
+ intel_ntb_spad_read(ntb->device, NTB_MSIX_OFS0 + i, &val);
+ intel_ntb_printf(2, "remote MSIX addr(%u): 0x%x\n", i, val);
ntb->peer_msix_data[i].nmd_ofs = val;
}
ntb->peer_msix_done = true;
msix_done:
- ntb_peer_spad_write(ntb, NTB_MSIX_DONE, NTB_MSIX_RECEIVED);
- ntb_spad_read(ntb, NTB_MSIX_DONE, &val);
+ intel_ntb_peer_spad_write(ntb->device, NTB_MSIX_DONE, NTB_MSIX_RECEIVED);
+ intel_ntb_spad_read(ntb->device, NTB_MSIX_DONE, &val);
if (val != NTB_MSIX_RECEIVED)
goto reschedule;
+ intel_ntb_spad_clear(ntb->device);
ntb->peer_msix_good = true;
/* Give peer time to see our NTB_MSIX_RECEIVED. */
goto reschedule;
msix_good:
- ntb_poll_link(ntb);
- ntb_link_event(ntb);
+ intel_ntb_poll_link(ntb);
+ ntb_link_event(ntb->device);
return;
reschedule:
@@ -2820,40 +2683,27 @@ reschedule:
if (_xeon_link_is_up(ntb)) {
callout_reset(&ntb->peer_msix_work,
hz * (ntb->peer_msix_good ? 2 : 1) / 100,
- ntb_exchange_msix, ntb);
+ intel_ntb_exchange_msix, ntb);
} else
- ntb_spad_clear(ntb);
+ intel_ntb_spad_clear(ntb->device);
}
/*
* Public API to the rest of the OS
*/
-/**
- * ntb_get_max_spads() - get the total scratch regs usable
- * @ntb: pointer to ntb_softc instance
- *
- * This function returns the max 32bit scratchpad registers usable by the
- * upper layer.
- *
- * RETURNS: total number of scratch pad registers available
- */
-uint8_t
-ntb_get_max_spads(struct ntb_softc *ntb)
+static uint8_t
+intel_ntb_spad_count(device_t dev)
{
+ struct ntb_softc *ntb = device_get_softc(dev);
return (ntb->spad_count);
}
-/*
- * ntb_mw_count() - Get the number of memory windows available for KPI
- * consumers.
- *
- * (Excludes any MW wholly reserved for register access.)
- */
-uint8_t
-ntb_mw_count(struct ntb_softc *ntb)
+static uint8_t
+intel_ntb_mw_count(device_t dev)
{
+ struct ntb_softc *ntb = device_get_softc(dev);
uint8_t res;
res = ntb->mw_count;
@@ -2864,25 +2714,15 @@ ntb_mw_count(struct ntb_softc *ntb)
return (res);
}
-/**
- * ntb_spad_write() - write to the secondary scratchpad register
- * @ntb: pointer to ntb_softc instance
- * @idx: index to the scratchpad register, 0 based
- * @val: the data value to put into the register
- *
- * This function allows writing of a 32bit value to the indexed scratchpad
- * register. The register resides on the secondary (external) side.
- *
- * RETURNS: An appropriate ERRNO error value on error, or zero for success.
- */
-int
-ntb_spad_write(struct ntb_softc *ntb, unsigned int idx, uint32_t val)
+static int
+intel_ntb_spad_write(device_t dev, unsigned int idx, uint32_t val)
{
+ struct ntb_softc *ntb = device_get_softc(dev);
if (idx >= ntb->spad_count)
return (EINVAL);
- ntb_reg_write(4, ntb->self_reg->spad + idx * 4, val);
+ intel_ntb_reg_write(4, ntb->self_reg->spad + idx * 4, val);
return (0);
}
@@ -2890,122 +2730,77 @@ ntb_spad_write(struct ntb_softc *ntb, unsigned int idx, uint32_t val)
/*
* Zeros the local scratchpad.
*/
-void
-ntb_spad_clear(struct ntb_softc *ntb)
+static void
+intel_ntb_spad_clear(device_t dev)
{
+ struct ntb_softc *ntb = device_get_softc(dev);
unsigned i;
for (i = 0; i < ntb->spad_count; i++)
- ntb_spad_write(ntb, i, 0);
+ intel_ntb_spad_write(dev, i, 0);
}
-/**
- * ntb_spad_read() - read from the primary scratchpad register
- * @ntb: pointer to ntb_softc instance
- * @idx: index to scratchpad register, 0 based
- * @val: pointer to 32bit integer for storing the register value
- *
- * This function allows reading of the 32bit scratchpad register on
- * the primary (internal) side.
- *
- * RETURNS: An appropriate ERRNO error value on error, or zero for success.
- */
-int
-ntb_spad_read(struct ntb_softc *ntb, unsigned int idx, uint32_t *val)
+static int
+intel_ntb_spad_read(device_t dev, unsigned int idx, uint32_t *val)
{
+ struct ntb_softc *ntb = device_get_softc(dev);
if (idx >= ntb->spad_count)
return (EINVAL);
- *val = ntb_reg_read(4, ntb->self_reg->spad + idx * 4);
+ *val = intel_ntb_reg_read(4, ntb->self_reg->spad + idx * 4);
return (0);
}
-/**
- * ntb_peer_spad_write() - write to the secondary scratchpad register
- * @ntb: pointer to ntb_softc instance
- * @idx: index to the scratchpad register, 0 based
- * @val: the data value to put into the register
- *
- * This function allows writing of a 32bit value to the indexed scratchpad
- * register. The register resides on the secondary (external) side.
- *
- * RETURNS: An appropriate ERRNO error value on error, or zero for success.
- */
-int
-ntb_peer_spad_write(struct ntb_softc *ntb, unsigned int idx, uint32_t val)
+static int
+intel_ntb_peer_spad_write(device_t dev, unsigned int idx, uint32_t val)
{
+ struct ntb_softc *ntb = device_get_softc(dev);
if (idx >= ntb->spad_count)
return (EINVAL);
- if (HAS_FEATURE(NTB_SDOORBELL_LOCKUP))
- ntb_mw_write(4, XEON_SPAD_OFFSET + idx * 4, val);
+ if (HAS_FEATURE(ntb, NTB_SDOORBELL_LOCKUP))
+ intel_ntb_mw_write(4, XEON_SPAD_OFFSET + idx * 4, val);
else
- ntb_reg_write(4, ntb->peer_reg->spad + idx * 4, val);
+ intel_ntb_reg_write(4, ntb->peer_reg->spad + idx * 4, val);
return (0);
}
-/**
- * ntb_peer_spad_read() - read from the primary scratchpad register
- * @ntb: pointer to ntb_softc instance
- * @idx: index to scratchpad register, 0 based
- * @val: pointer to 32bit integer for storing the register value
- *
- * This function allows reading of the 32bit scratchpad register on
- * the primary (internal) side.
- *
- * RETURNS: An appropriate ERRNO error value on error, or zero for success.
- */
-int
-ntb_peer_spad_read(struct ntb_softc *ntb, unsigned int idx, uint32_t *val)
+static int
+intel_ntb_peer_spad_read(device_t dev, unsigned int idx, uint32_t *val)
{
+ struct ntb_softc *ntb = device_get_softc(dev);
if (idx >= ntb->spad_count)
return (EINVAL);
- if (HAS_FEATURE(NTB_SDOORBELL_LOCKUP))
- *val = ntb_mw_read(4, XEON_SPAD_OFFSET + idx * 4);
+ if (HAS_FEATURE(ntb, NTB_SDOORBELL_LOCKUP))
+ *val = intel_ntb_mw_read(4, XEON_SPAD_OFFSET + idx * 4);
else
- *val = ntb_reg_read(4, ntb->peer_reg->spad + idx * 4);
+ *val = intel_ntb_reg_read(4, ntb->peer_reg->spad + idx * 4);
return (0);
}
-/*
- * ntb_mw_get_range() - get the range of a memory window
- * @ntb: NTB device context
- * @idx: Memory window number
- * @base: OUT - the base address for mapping the memory window
- * @size: OUT - the size for mapping the memory window
- * @align: OUT - the base alignment for translating the memory window
- * @align_size: OUT - the size alignment for translating the memory window
- *
- * Get the range of a memory window. NULL may be given for any output
- * parameter if the value is not needed. The base and size may be used for
- * mapping the memory window, to access the peer memory. The alignment and
- * size may be used for translating the memory window, for the peer to access
- * memory on the local system.
- *
- * Return: Zero on success, otherwise an error number.
- */
-int
-ntb_mw_get_range(struct ntb_softc *ntb, unsigned mw_idx, vm_paddr_t *base,
+static int
+intel_ntb_mw_get_range(device_t dev, unsigned mw_idx, vm_paddr_t *base,
caddr_t *vbase, size_t *size, size_t *align, size_t *align_size,
bus_addr_t *plimit)
{
+ struct ntb_softc *ntb = device_get_softc(dev);
struct ntb_pci_bar_info *bar;
bus_addr_t limit;
size_t bar_b2b_off;
enum ntb_bar bar_num;
- if (mw_idx >= ntb_mw_count(ntb))
+ if (mw_idx >= intel_ntb_mw_count(dev))
return (EINVAL);
- mw_idx = ntb_user_mw_to_idx(ntb, mw_idx);
+ mw_idx = intel_ntb_user_mw_to_idx(ntb, mw_idx);
- bar_num = ntb_mw_to_bar(ntb, mw_idx);
+ bar_num = intel_ntb_mw_to_bar(ntb, mw_idx);
bar = &ntb->bar_info[bar_num];
bar_b2b_off = 0;
if (mw_idx == ntb->b2b_mw_idx) {
@@ -3034,37 +2829,21 @@ ntb_mw_get_range(struct ntb_softc *ntb, unsigned mw_idx, vm_paddr_t *base,
return (0);
}
-/*
- * ntb_mw_set_trans() - set the translation of a memory window
- * @ntb: NTB device context
- * @idx: Memory window number
- * @addr: The dma address local memory to expose to the peer
- * @size: The size of the local memory to expose to the peer
- *
- * Set the translation of a memory window. The peer may access local memory
- * through the window starting at the address, up to the size. The address
- * must be aligned to the alignment specified by ntb_mw_get_range(). The size
- * must be aligned to the size alignment specified by ntb_mw_get_range(). The
- * address must be below the plimit specified by ntb_mw_get_range() (i.e. for
- * 32-bit BARs).
- *
- * Return: Zero on success, otherwise an error number.
- */
-int
-ntb_mw_set_trans(struct ntb_softc *ntb, unsigned idx, bus_addr_t addr,
- size_t size)
+static int
+intel_ntb_mw_set_trans(device_t dev, unsigned idx, bus_addr_t addr, size_t size)
{
+ struct ntb_softc *ntb = device_get_softc(dev);
struct ntb_pci_bar_info *bar;
uint64_t base, limit, reg_val;
size_t bar_size, mw_size;
uint32_t base_reg, xlat_reg, limit_reg;
enum ntb_bar bar_num;
- if (idx >= ntb_mw_count(ntb))
+ if (idx >= intel_ntb_mw_count(dev))
return (EINVAL);
- idx = ntb_user_mw_to_idx(ntb, idx);
+ idx = intel_ntb_user_mw_to_idx(ntb, idx);
- bar_num = ntb_mw_to_bar(ntb, idx);
+ bar_num = intel_ntb_mw_to_bar(ntb, idx);
bar = &ntb->bar_info[bar_num];
bar_size = bar->size;
@@ -3084,25 +2863,25 @@ ntb_mw_set_trans(struct ntb_softc *ntb, unsigned idx, bus_addr_t addr,
limit = 0;
if (bar_is_64bit(ntb, bar_num)) {
- base = ntb_reg_read(8, base_reg) & BAR_HIGH_MASK;
+ base = intel_ntb_reg_read(8, base_reg) & BAR_HIGH_MASK;
if (limit_reg != 0 && size != mw_size)
limit = base + size;
/* Set and verify translation address */
- ntb_reg_write(8, xlat_reg, addr);
- reg_val = ntb_reg_read(8, xlat_reg) & BAR_HIGH_MASK;
+ intel_ntb_reg_write(8, xlat_reg, addr);
+ reg_val = intel_ntb_reg_read(8, xlat_reg) & BAR_HIGH_MASK;
if (reg_val != addr) {
- ntb_reg_write(8, xlat_reg, 0);
+ intel_ntb_reg_write(8, xlat_reg, 0);
return (EIO);
}
/* Set and verify the limit */
- ntb_reg_write(8, limit_reg, limit);
- reg_val = ntb_reg_read(8, limit_reg) & BAR_HIGH_MASK;
+ intel_ntb_reg_write(8, limit_reg, limit);
+ reg_val = intel_ntb_reg_read(8, limit_reg) & BAR_HIGH_MASK;
if (reg_val != limit) {
- ntb_reg_write(8, limit_reg, base);
- ntb_reg_write(8, xlat_reg, 0);
+ intel_ntb_reg_write(8, limit_reg, base);
+ intel_ntb_reg_write(8, xlat_reg, 0);
return (EIO);
}
} else {
@@ -3113,98 +2892,72 @@ ntb_mw_set_trans(struct ntb_softc *ntb, unsigned idx, bus_addr_t addr,
if (((addr + size) & UINT32_MAX) != (addr + size))
return (ERANGE);
- base = ntb_reg_read(4, base_reg) & BAR_HIGH_MASK;
+ base = intel_ntb_reg_read(4, base_reg) & BAR_HIGH_MASK;
if (limit_reg != 0 && size != mw_size)
limit = base + size;
/* Set and verify translation address */
- ntb_reg_write(4, xlat_reg, addr);
- reg_val = ntb_reg_read(4, xlat_reg) & BAR_HIGH_MASK;
+ intel_ntb_reg_write(4, xlat_reg, addr);
+ reg_val = intel_ntb_reg_read(4, xlat_reg) & BAR_HIGH_MASK;
if (reg_val != addr) {
- ntb_reg_write(4, xlat_reg, 0);
+ intel_ntb_reg_write(4, xlat_reg, 0);
return (EIO);
}
/* Set and verify the limit */
- ntb_reg_write(4, limit_reg, limit);
- reg_val = ntb_reg_read(4, limit_reg) & BAR_HIGH_MASK;
+ intel_ntb_reg_write(4, limit_reg, limit);
+ reg_val = intel_ntb_reg_read(4, limit_reg) & BAR_HIGH_MASK;
if (reg_val != limit) {
- ntb_reg_write(4, limit_reg, base);
- ntb_reg_write(4, xlat_reg, 0);
+ intel_ntb_reg_write(4, limit_reg, base);
+ intel_ntb_reg_write(4, xlat_reg, 0);
return (EIO);
}
}
return (0);
}
-/*
- * ntb_mw_clear_trans() - clear the translation of a memory window
- * @ntb: NTB device context
- * @idx: Memory window number
- *
- * Clear the translation of a memory window. The peer may no longer access
- * local memory through the window.
- *
- * Return: Zero on success, otherwise an error number.
- */
-int
-ntb_mw_clear_trans(struct ntb_softc *ntb, unsigned mw_idx)
+static int
+intel_ntb_mw_clear_trans(device_t dev, unsigned mw_idx)
{
- return (ntb_mw_set_trans(ntb, mw_idx, 0, 0));
+ return (intel_ntb_mw_set_trans(dev, mw_idx, 0, 0));
}
-/*
- * ntb_mw_get_wc - Get the write-combine status of a memory window
- *
- * Returns: Zero on success, setting *wc; otherwise an error number (e.g. if
- * idx is an invalid memory window).
- *
- * Mode is a VM_MEMATTR_* type.
- */
-int
-ntb_mw_get_wc(struct ntb_softc *ntb, unsigned idx, vm_memattr_t *mode)
+static int
+intel_ntb_mw_get_wc(device_t dev, unsigned idx, vm_memattr_t *mode)
{
+ struct ntb_softc *ntb = device_get_softc(dev);
struct ntb_pci_bar_info *bar;
- if (idx >= ntb_mw_count(ntb))
+ if (idx >= intel_ntb_mw_count(dev))
return (EINVAL);
- idx = ntb_user_mw_to_idx(ntb, idx);
+ idx = intel_ntb_user_mw_to_idx(ntb, idx);
- bar = &ntb->bar_info[ntb_mw_to_bar(ntb, idx)];
+ bar = &ntb->bar_info[intel_ntb_mw_to_bar(ntb, idx)];
*mode = bar->map_mode;
return (0);
}
-/*
- * ntb_mw_set_wc - Set the write-combine status of a memory window
- *
- * If 'mode' matches the current status, this does nothing and succeeds. Mode
- * is a VM_MEMATTR_* type.
- *
- * Returns: Zero on success, setting the caching attribute on the virtual
- * mapping of the BAR; otherwise an error number (e.g. if idx is an invalid
- * memory window, or if changing the caching attribute fails).
- */
-int
-ntb_mw_set_wc(struct ntb_softc *ntb, unsigned idx, vm_memattr_t mode)
+static int
+intel_ntb_mw_set_wc(device_t dev, unsigned idx, vm_memattr_t mode)
{
+ struct ntb_softc *ntb = device_get_softc(dev);
- if (idx >= ntb_mw_count(ntb))
+ if (idx >= intel_ntb_mw_count(dev))
return (EINVAL);
- idx = ntb_user_mw_to_idx(ntb, idx);
- return (ntb_mw_set_wc_internal(ntb, idx, mode));
+ idx = intel_ntb_user_mw_to_idx(ntb, idx);
+ return (intel_ntb_mw_set_wc_internal(ntb, idx, mode));
}
static int
-ntb_mw_set_wc_internal(struct ntb_softc *ntb, unsigned idx, vm_memattr_t mode)
+intel_ntb_mw_set_wc_internal(struct ntb_softc *ntb, unsigned idx, vm_memattr_t mode)
{
struct ntb_pci_bar_info *bar;
int rc;
- bar = &ntb->bar_info[ntb_mw_to_bar(ntb, idx)];
+ bar = &ntb->bar_info[intel_ntb_mw_to_bar(ntb, idx)];
if (bar->map_mode == mode)
return (0);
@@ -3215,26 +2968,19 @@ ntb_mw_set_wc_internal(struct ntb_softc *ntb, unsigned idx, vm_memattr_t mode)
return (rc);
}
-/**
- * ntb_peer_db_set() - Set the doorbell on the secondary/external side
- * @ntb: pointer to ntb_softc instance
- * @bit: doorbell bits to ring
- *
- * This function allows triggering of a doorbell on the secondary/external
- * side that will initiate an interrupt on the remote host
- */
-void
-ntb_peer_db_set(struct ntb_softc *ntb, uint64_t bit)
+static void
+intel_ntb_peer_db_set(device_t dev, uint64_t bit)
{
+ struct ntb_softc *ntb = device_get_softc(dev);
- if (HAS_FEATURE(NTB_SB01BASE_LOCKUP)) {
+ if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) {
struct ntb_pci_bar_info *lapic;
unsigned i;
lapic = ntb->peer_lapic_bar;
for (i = 0; i < XEON_NONLINK_DB_MSIX_BITS; i++) {
- if ((bit & ntb_db_vector_mask(ntb, i)) != 0)
+ if ((bit & intel_ntb_db_vector_mask(dev, i)) != 0)
bus_space_write_4(lapic->pci_bus_tag,
lapic->pci_bus_handle,
ntb->peer_msix_data[i].nmd_ofs,
@@ -3243,99 +2989,76 @@ ntb_peer_db_set(struct ntb_softc *ntb, uint64_t bit)
return;
}
- if (HAS_FEATURE(NTB_SDOORBELL_LOCKUP)) {
- ntb_mw_write(2, XEON_PDOORBELL_OFFSET, bit);
+ if (HAS_FEATURE(ntb, NTB_SDOORBELL_LOCKUP)) {
+ intel_ntb_mw_write(2, XEON_PDOORBELL_OFFSET, bit);
return;
}
db_iowrite(ntb, ntb->peer_reg->db_bell, bit);
}
-/*
- * ntb_get_peer_db_addr() - Return the address of the remote doorbell register,
- * as well as the size of the register (via *sz_out).
- *
- * This function allows a caller using I/OAT DMA to chain the remote doorbell
- * ring to its memory window write.
- *
- * Note that writing the peer doorbell via a memory window will *not* generate
- * an interrupt on the remote host; that must be done separately.
- */
-bus_addr_t
-ntb_get_peer_db_addr(struct ntb_softc *ntb, vm_size_t *sz_out)
+static int
+intel_ntb_peer_db_addr(device_t dev, bus_addr_t *db_addr, vm_size_t *db_size)
{
+ struct ntb_softc *ntb = device_get_softc(dev);
struct ntb_pci_bar_info *bar;
uint64_t regoff;
- KASSERT(sz_out != NULL, ("must be non-NULL"));
+ KASSERT((db_addr != NULL && db_size != NULL), ("must be non-NULL"));
- if (!HAS_FEATURE(NTB_SDOORBELL_LOCKUP)) {
+ if (!HAS_FEATURE(ntb, NTB_SDOORBELL_LOCKUP)) {
bar = &ntb->bar_info[NTB_CONFIG_BAR];
regoff = ntb->peer_reg->db_bell;
} else {
KASSERT(ntb->b2b_mw_idx != B2B_MW_DISABLED,
("invalid b2b idx"));
- bar = &ntb->bar_info[ntb_mw_to_bar(ntb, ntb->b2b_mw_idx)];
+ bar = &ntb->bar_info[intel_ntb_mw_to_bar(ntb, ntb->b2b_mw_idx)];
regoff = XEON_PDOORBELL_OFFSET;
}
KASSERT(bar->pci_bus_tag != X86_BUS_SPACE_IO, ("uh oh"));
- *sz_out = ntb->reg->db_size;
/* HACK: Specific to current x86 bus implementation. */
- return ((uint64_t)bar->pci_bus_handle + regoff);
+ *db_addr = ((uint64_t)bar->pci_bus_handle + regoff);
+ *db_size = ntb->reg->db_size;
+ return (0);
}
-/*
- * ntb_db_valid_mask() - get a mask of doorbell bits supported by the ntb
- * @ntb: NTB device context
- *
- * Hardware may support different number or arrangement of doorbell bits.
- *
- * Return: A mask of doorbell bits supported by the ntb.
- */
-uint64_t
-ntb_db_valid_mask(struct ntb_softc *ntb)
+static uint64_t
+intel_ntb_db_valid_mask(device_t dev)
{
+ struct ntb_softc *ntb = device_get_softc(dev);
return (ntb->db_valid_mask);
}
-/*
- * ntb_db_vector_mask() - get a mask of doorbell bits serviced by a vector
- * @ntb: NTB device context
- * @vector: Doorbell vector number
- *
- * Each interrupt vector may have a different number or arrangement of bits.
- *
- * Return: A mask of doorbell bits serviced by a vector.
- */
-uint64_t
-ntb_db_vector_mask(struct ntb_softc *ntb, uint32_t vector)
+static int
+intel_ntb_db_vector_count(device_t dev)
{
+ struct ntb_softc *ntb = device_get_softc(dev);
+
+ return (ntb->db_vec_count);
+}
+
+static uint64_t
+intel_ntb_db_vector_mask(device_t dev, uint32_t vector)
+{
+ struct ntb_softc *ntb = device_get_softc(dev);
if (vector > ntb->db_vec_count)
return (0);
- return (ntb->db_valid_mask & ntb_vec_mask(ntb, vector));
+ return (ntb->db_valid_mask & intel_ntb_vec_mask(ntb, vector));
}
-/**
- * ntb_link_is_up() - get the current ntb link state
- * @ntb: NTB device context
- * @speed: OUT - The link speed expressed as PCIe generation number
- * @width: OUT - The link width expressed as the number of PCIe lanes
- *
- * RETURNS: true or false based on the hardware link state
- */
-bool
-ntb_link_is_up(struct ntb_softc *ntb, enum ntb_speed *speed,
- enum ntb_width *width)
+static bool
+intel_ntb_link_is_up(device_t dev, enum ntb_speed *speed, enum ntb_width *width)
{
+ struct ntb_softc *ntb = device_get_softc(dev);
if (speed != NULL)
- *speed = ntb_link_sta_speed(ntb);
+ *speed = intel_ntb_link_sta_speed(ntb);
if (width != NULL)
- *width = ntb_link_sta_width(ntb);
+ *width = intel_ntb_link_sta_width(ntb);
return (link_is_up(ntb));
}
@@ -3350,17 +3073,42 @@ save_bar_parameters(struct ntb_pci_bar_info *bar)
bar->vbase = rman_get_virtual(bar->pci_resource);
}
-device_t
-ntb_get_device(struct ntb_softc *ntb)
-{
-
- return (ntb->device);
-}
-
-/* Export HW-specific errata information. */
-bool
-ntb_has_feature(struct ntb_softc *ntb, uint32_t feature)
-{
+static device_method_t ntb_intel_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, intel_ntb_probe),
+ DEVMETHOD(device_attach, intel_ntb_attach),
+ DEVMETHOD(device_detach, intel_ntb_detach),
+ /* NTB interface */
+ DEVMETHOD(ntb_link_is_up, intel_ntb_link_is_up),
+ DEVMETHOD(ntb_link_enable, intel_ntb_link_enable),
+ DEVMETHOD(ntb_link_disable, intel_ntb_link_disable),
+ DEVMETHOD(ntb_link_enabled, intel_ntb_link_enabled),
+ DEVMETHOD(ntb_mw_count, intel_ntb_mw_count),
+ DEVMETHOD(ntb_mw_get_range, intel_ntb_mw_get_range),
+ DEVMETHOD(ntb_mw_set_trans, intel_ntb_mw_set_trans),
+ DEVMETHOD(ntb_mw_clear_trans, intel_ntb_mw_clear_trans),
+ DEVMETHOD(ntb_mw_get_wc, intel_ntb_mw_get_wc),
+ DEVMETHOD(ntb_mw_set_wc, intel_ntb_mw_set_wc),
+ DEVMETHOD(ntb_spad_count, intel_ntb_spad_count),
+ DEVMETHOD(ntb_spad_clear, intel_ntb_spad_clear),
+ DEVMETHOD(ntb_spad_write, intel_ntb_spad_write),
+ DEVMETHOD(ntb_spad_read, intel_ntb_spad_read),
+ DEVMETHOD(ntb_peer_spad_write, intel_ntb_peer_spad_write),
+ DEVMETHOD(ntb_peer_spad_read, intel_ntb_peer_spad_read),
+ DEVMETHOD(ntb_db_valid_mask, intel_ntb_db_valid_mask),
+ DEVMETHOD(ntb_db_vector_count, intel_ntb_db_vector_count),
+ DEVMETHOD(ntb_db_vector_mask, intel_ntb_db_vector_mask),
+ DEVMETHOD(ntb_db_clear, intel_ntb_db_clear),
+ DEVMETHOD(ntb_db_clear_mask, intel_ntb_db_clear_mask),
+ DEVMETHOD(ntb_db_read, intel_ntb_db_read),
+ DEVMETHOD(ntb_db_set_mask, intel_ntb_db_set_mask),
+ DEVMETHOD(ntb_peer_db_addr, intel_ntb_peer_db_addr),
+ DEVMETHOD(ntb_peer_db_set, intel_ntb_peer_db_set),
+ DEVMETHOD_END
+};
- return (HAS_FEATURE(feature));
-}
+static DEFINE_CLASS_0(ntb_hw, ntb_intel_driver, ntb_intel_methods,
+ sizeof(struct ntb_softc));
+DRIVER_MODULE(ntb_intel, pci, ntb_intel_driver, ntb_hw_devclass, NULL, NULL);
+MODULE_DEPEND(ntb_intel, ntb, 1, 1, 1);
+MODULE_VERSION(ntb_intel, 1);
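For reference, a minimal consumer sketch of the device_t-based KPI registered by the method table above. It assumes sys/dev/ntb/ntb.h exposes thin wrappers named after the DEVMETHOD entries (ntb_mw_count(), ntb_mw_get_range(), ntb_mw_set_trans(), ntb_peer_spad_write(), ntb_peer_db_set(), ntb_spad_read()); the helper function, its error handling, and the chosen window/scratchpad/doorbell indices are illustrative, not taken from this change.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>

#include <machine/bus.h>

#include "../ntb.h"

/*
 * Illustrative only: expose 'len' bytes of local memory at 'dma_addr'
 * through the first user memory window, tell the peer about it via a
 * scratchpad, and ring a doorbell.
 */
static int
example_ntb_consumer_setup(device_t dev, bus_addr_t dma_addr, size_t len)
{
	vm_paddr_t base;
	caddr_t vbase;
	size_t size, align, align_size;
	bus_addr_t plimit;
	uint32_t ack;
	int error;

	if (ntb_mw_count(dev) == 0)
		return (ENXIO);

	/* Query window geometry before programming the translation. */
	error = ntb_mw_get_range(dev, 0, &base, &vbase, &size, &align,
	    &align_size, &plimit);
	if (error != 0)
		return (error);
	if (len > size)
		return (EINVAL);

	/* Let the peer access local memory [dma_addr, dma_addr + len). */
	error = ntb_mw_set_trans(dev, 0, dma_addr, len);
	if (error != 0)
		return (error);

	/* Advertise the mapping size in peer scratchpad 0. */
	error = ntb_peer_spad_write(dev, 0, (uint32_t)len);
	if (error != 0)
		return (error);

	/* Ring peer doorbell bit 0 to signal the update. */
	ntb_peer_db_set(dev, 1);

	/* Read back local scratchpad 0, e.g. for a peer acknowledgement. */
	(void)ntb_spad_read(dev, 0, &ack);
	device_printf(dev, "peer ack: 0x%x\n", ack);
	return (0);
}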
diff --git a/sys/dev/ntb/ntb_hw/ntb_hw.h b/sys/dev/ntb/ntb_hw/ntb_hw.h
deleted file mode 100644
index f05acda..0000000
--- a/sys/dev/ntb/ntb_hw/ntb_hw.h
+++ /dev/null
@@ -1,125 +0,0 @@
-/*-
- * Copyright (C) 2013 Intel Corporation
- * Copyright (C) 2015 EMC Corporation
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * $FreeBSD$
- */
-
-#ifndef _NTB_HW_H_
-#define _NTB_HW_H_
-
-struct ntb_softc;
-
-#define NTB_MAX_NUM_MW 3
-
-enum ntb_speed {
- NTB_SPEED_AUTO = -1,
- NTB_SPEED_NONE = 0,
- NTB_SPEED_GEN1 = 1,
- NTB_SPEED_GEN2 = 2,
- NTB_SPEED_GEN3 = 3,
-};
-
-enum ntb_width {
- NTB_WIDTH_AUTO = -1,
- NTB_WIDTH_NONE = 0,
- NTB_WIDTH_1 = 1,
- NTB_WIDTH_2 = 2,
- NTB_WIDTH_4 = 4,
- NTB_WIDTH_8 = 8,
- NTB_WIDTH_12 = 12,
- NTB_WIDTH_16 = 16,
- NTB_WIDTH_32 = 32,
-};
-
-SYSCTL_DECL(_hw_ntb);
-
-typedef void (*ntb_db_callback)(void *data, uint32_t vector);
-typedef void (*ntb_event_callback)(void *data);
-
-struct ntb_ctx_ops {
- ntb_event_callback link_event;
- ntb_db_callback db_event;
-};
-
-device_t ntb_get_device(struct ntb_softc *);
-
-bool ntb_link_is_up(struct ntb_softc *, enum ntb_speed *, enum ntb_width *);
-void ntb_link_event(struct ntb_softc *);
-int ntb_link_enable(struct ntb_softc *, enum ntb_speed, enum ntb_width);
-int ntb_link_disable(struct ntb_softc *);
-bool ntb_link_enabled(struct ntb_softc *);
-
-int ntb_set_ctx(struct ntb_softc *, void *, const struct ntb_ctx_ops *);
-void *ntb_get_ctx(struct ntb_softc *, const struct ntb_ctx_ops **);
-void ntb_clear_ctx(struct ntb_softc *);
-
-uint8_t ntb_mw_count(struct ntb_softc *);
-int ntb_mw_get_range(struct ntb_softc *, unsigned mw_idx, vm_paddr_t *base,
- caddr_t *vbase, size_t *size, size_t *align, size_t *align_size,
- bus_addr_t *plimit);
-int ntb_mw_set_trans(struct ntb_softc *, unsigned mw_idx, bus_addr_t, size_t);
-int ntb_mw_clear_trans(struct ntb_softc *, unsigned mw_idx);
-
-int ntb_mw_get_wc(struct ntb_softc *, unsigned mw_idx, vm_memattr_t *mode);
-int ntb_mw_set_wc(struct ntb_softc *, unsigned mw_idx, vm_memattr_t mode);
-
-uint8_t ntb_get_max_spads(struct ntb_softc *ntb);
-void ntb_spad_clear(struct ntb_softc *ntb);
-int ntb_spad_write(struct ntb_softc *ntb, unsigned int idx, uint32_t val);
-int ntb_spad_read(struct ntb_softc *ntb, unsigned int idx, uint32_t *val);
-int ntb_peer_spad_write(struct ntb_softc *ntb, unsigned int idx,
- uint32_t val);
-int ntb_peer_spad_read(struct ntb_softc *ntb, unsigned int idx,
- uint32_t *val);
-
-uint64_t ntb_db_valid_mask(struct ntb_softc *);
-uint64_t ntb_db_vector_mask(struct ntb_softc *, uint32_t vector);
-bus_addr_t ntb_get_peer_db_addr(struct ntb_softc *, vm_size_t *sz_out);
-
-void ntb_db_clear(struct ntb_softc *, uint64_t bits);
-void ntb_db_clear_mask(struct ntb_softc *, uint64_t bits);
-uint64_t ntb_db_read(struct ntb_softc *);
-void ntb_db_set_mask(struct ntb_softc *, uint64_t bits);
-void ntb_peer_db_set(struct ntb_softc *, uint64_t bits);
-
-#define XEON_SPAD_COUNT 16
-#define ATOM_SPAD_COUNT 16
-
-/* Hardware owns the low 16 bits of features. */
-#define NTB_BAR_SIZE_4K (1 << 0)
-#define NTB_SDOORBELL_LOCKUP (1 << 1)
-#define NTB_SB01BASE_LOCKUP (1 << 2)
-#define NTB_B2BDOORBELL_BIT14 (1 << 3)
-/* Software/configuration owns the top 16 bits. */
-#define NTB_SPLIT_BAR (1ull << 16)
-
-#define NTB_FEATURES_STR \
- "\20\21SPLIT_BAR4\04B2B_DOORBELL_BIT14\03SB01BASE_LOCKUP" \
- "\02SDOORBELL_LOCKUP\01BAR_SIZE_4K"
-
-bool ntb_has_feature(struct ntb_softc *, uint32_t);
-
-#endif /* _NTB_HW_H_ */
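The NTB_FEATURES_STR bit-name string shown above is what the features sysctl handler earlier in this diff feeds to the kernel's "%b" conversion. A brief sketch of the decoding, assuming standard printf(9) semantics: the leading \20 selects hexadecimal output, and each \NN prefix is a 1-based bit number followed by that bit's name. The example values and output below are illustrative only.

/* "%b" sketch: value 0x6 has SDOORBELL_LOCKUP and SB01BASE_LOCKUP set. */
printf("features: %b\n",
    NTB_SDOORBELL_LOCKUP | NTB_SB01BASE_LOCKUP, NTB_FEATURES_STR);
/* Expected (illustrative): features: 6<SB01BASE_LOCKUP,SDOORBELL_LOCKUP> */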
diff --git a/sys/dev/ntb/ntb_hw/ntb_regs.h b/sys/dev/ntb/ntb_hw/ntb_regs.h
index fb445d7..a037736 100644
--- a/sys/dev/ntb/ntb_hw/ntb_regs.h
+++ b/sys/dev/ntb/ntb_hw/ntb_regs.h
@@ -1,4 +1,5 @@
/*-
+ * Copyright (c) 2016 Alexander Motin <mav@FreeBSD.org>
* Copyright (C) 2013 Intel Corporation
* Copyright (C) 2015 EMC Corporation
* All rights reserved.
@@ -76,6 +77,7 @@
#define XEON_SDBMSK_OFFSET 0x0066
#define XEON_USMEMMISS_OFFSET 0x0070
#define XEON_SPAD_OFFSET 0x0080
+#define XEON_SPAD_COUNT 16
#define XEON_SPADSEMA4_OFFSET 0x00c0
#define XEON_WCCNTRL_OFFSET 0x00e0
#define XEON_UNCERRSTS_OFFSET 0x014c
@@ -104,6 +106,7 @@
#define ATOM_NTBCNTL_OFFSET 0x0060
#define ATOM_EBDF_OFFSET 0x0064
#define ATOM_SPAD_OFFSET 0x0080
+#define ATOM_SPAD_COUNT 16
#define ATOM_SPADSEMA_OFFSET 0x00c0
#define ATOM_STKYSPAD_OFFSET 0x00c4
#define ATOM_PBAR2XLAT_OFFSET 0x8008
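The new XEON_SPAD_COUNT/ATOM_SPAD_COUNT constants pair with the existing *_SPAD_OFFSET bases: scratchpad registers sit at a 4-byte stride from the base, which is how the spad accessors in ntb_hw.c form their offsets (self_reg->spad + idx * 4). A small helper sketch, assuming the Xeon register map; it is not part of this change.

/* Illustrative: register offset of Xeon scratchpad 'idx' (0-based). */
static inline uint32_t
xeon_spad_reg(unsigned int idx)
{

	KASSERT(idx < XEON_SPAD_COUNT, ("spad index %u out of range", idx));
	return (XEON_SPAD_OFFSET + idx * 4);
}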