path: root/sys/dev/gem/if_gemvar.h
blob: 07d71e11f02e0f4f59c3b27be03e5074fd4a36ff (plain)
/*-
 * Copyright (C) 2001 Eduardo Horvath.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR  ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR  BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: NetBSD: gemvar.h,v 1.8 2002/05/15 02:36:12 matt Exp
 *
 * $FreeBSD$
 */

#ifndef	_IF_GEMVAR_H
#define	_IF_GEMVAR_H


#include <sys/queue.h>
#include <sys/callout.h>

/*
 * Misc. definitions for the Sun ``Gem'' Ethernet controller family driver.
 */

/*
 * Transmit descriptor list size.  This is arbitrary, but allocate
 * enough descriptors for 64 pending transmissions and 16 segments
 * per packet.  This limit is not actually enforced (packets with more
 * segments can be sent, depending on the busdma backend); it is,
 * however, used as an estimate for the TX window size.
 */
#define	GEM_NTXSEGS		16

#define	GEM_TXQUEUELEN		64
#define	GEM_NTXDESC		(GEM_TXQUEUELEN * GEM_NTXSEGS)
#define	GEM_MAXTXFREE		(GEM_NTXDESC - 1)
#define	GEM_NTXDESC_MASK	(GEM_NTXDESC - 1)
#define	GEM_NEXTTX(x)		((x + 1) & GEM_NTXDESC_MASK)
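
/*
 * Illustrative sketch (added for clarity, not part of the original header):
 * because GEM_NTXDESC is a power of two, GEM_NEXTTX() advances a ring
 * index with a simple mask instead of a modulo.  A producer loop such as
 *
 *	for (seg = 0; seg < nsegs; seg++, nexttx = GEM_NEXTTX(nexttx))
 *		;	(fill sc_txdescs[nexttx] from segment seg)
 *
 * therefore wraps from GEM_NTXDESC - 1 back to 0 automatically.
 */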

/*
 * Receive descriptor list size.  We have one Rx buffer per incoming
 * packet, so this logic is a little simpler.
 */
#define	GEM_NRXDESC		128
#define	GEM_NRXDESC_MASK	(GEM_NRXDESC - 1)
#define	GEM_PREVRX(x)		((x - 1) & GEM_NRXDESC_MASK)
#define	GEM_NEXTRX(x)		((x + 1) & GEM_NRXDESC_MASK)

/*
 * Number of ticks to wait before retrying an RX descriptor that is still
 * owned by the hardware (hz / 50 corresponds to 20 ms).
 */
#define	GEM_RXOWN_TICKS		(hz / 50)

/*
 * Control structures are DMA'd to the GEM chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct gem_control_data {
	/*
	 * The transmit descriptors.
	 */
	struct gem_desc gcd_txdescs[GEM_NTXDESC];

	/*
	 * The receive descriptors.
	 */
	struct gem_desc gcd_rxdescs[GEM_NRXDESC];
};

#define	GEM_CDOFF(x)		offsetof(struct gem_control_data, x)
#define	GEM_CDTXOFF(x)		GEM_CDOFF(gcd_txdescs[(x)])
#define	GEM_CDRXOFF(x)		GEM_CDOFF(gcd_rxdescs[(x)])
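
/*
 * Note (added for clarity): GEM_CDTXOFF()/GEM_CDRXOFF() yield the byte
 * offset of descriptor x within struct gem_control_data; adding that
 * offset to the bus address of the control block (sc_cddma) gives the
 * descriptor's bus address, which is what GEM_CDTXADDR()/GEM_CDRXADDR()
 * below compute.
 */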

/*
 * Software state for transmit job mbufs (may be elements of mbuf chains).
 */
struct gem_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndescs;			/* number of descriptors */
	STAILQ_ENTRY(gem_txsoft) txs_q;
};

STAILQ_HEAD(gem_txsq, gem_txsoft);

/* Argument structure for busdma callback */
struct gem_txdma {
	struct gem_softc *txd_sc;
	struct gem_txsoft	*txd_txs;
};

/*
 * Software state for receive jobs.
 */
struct gem_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
	bus_addr_t rxs_paddr;		/* physical address of the segment */
};

/*
 * Software state per device.
 */
struct gem_softc {
	struct arpcom	sc_arpcom;	/* arp common data */
	device_t	sc_miibus;
	struct mii_data	*sc_mii;	/* MII media control */
	device_t	sc_dev;		/* generic device information */
	struct callout	sc_tick_ch;	/* tick callout */
	struct callout	sc_rx_ch;	/* delayed rx callout */

	/* The following bus handles are to be provided by the bus front-end */
	bus_space_tag_t	sc_bustag;	/* bus tag */
	bus_dma_tag_t	sc_pdmatag;	/* parent bus dma tag */
	bus_dma_tag_t	sc_rdmatag;	/* RX bus dma tag */
	bus_dma_tag_t	sc_tdmatag;	/* TX bus dma tag */
	bus_dma_tag_t	sc_cdmatag;	/* control data bus dma tag */
	bus_dmamap_t	sc_dmamap;	/* bus dma handle */
	bus_space_handle_t sc_h;	/* bus space handle for all regs */

	int		sc_phys[2];	/* MII instance -> PHY map */

	int		sc_mif_config;	/* Selected MII reg setting */

	int		sc_pci;		/* XXXXX -- PCI buses are LE. */
	u_int		sc_variant;	/* which GEM are we dealing with? */
#define	GEM_UNKNOWN		0	/* don't know */
#define	GEM_SUN_GEM		1	/* Sun GEM variant */
#define	GEM_APPLE_GMAC		2	/* Apple GMAC variant */

	u_int		sc_flags;	/* flags, see GEM_* below */
#define	GEM_GIGABIT		0x0001	/* has a gigabit PHY */

	/*
	 * Ring buffer DMA stuff.
	 */
	bus_dma_segment_t sc_cdseg;	/* control data memory */
	int		sc_cdnseg;	/* number of segments */
	bus_dmamap_t	sc_cddmamap;	/* control data DMA map */
	bus_addr_t	sc_cddma;

	/*
	 * Software state for transmit and receive descriptors.
	 */
	struct gem_txsoft sc_txsoft[GEM_TXQUEUELEN];
	struct gem_rxsoft sc_rxsoft[GEM_NRXDESC];

	/*
	 * Control data structures.
	 */
	struct gem_control_data *sc_control_data;
#define	sc_txdescs	sc_control_data->gcd_txdescs
#define	sc_rxdescs	sc_control_data->gcd_rxdescs

	int		sc_txfree;		/* number of free Tx descriptors */
	int		sc_txnext;		/* next ready Tx descriptor */
	int		sc_txwin;		/* Tx descriptors since last Tx int */

	struct gem_txsq	sc_txfreeq;	/* free Tx descsofts */
	struct gem_txsq	sc_txdirtyq;	/* dirty Tx descsofts */

	int		sc_rxptr;		/* next ready RX descriptor/descsoft */
	int		sc_rxfifosize;		/* Rx FIFO size (bytes) */

	/* ========== */
	int		sc_inited;
	int		sc_debug;
	int		sc_ifflags;
};

#define	GEM_DMA_READ(sc, v)	(((sc)->sc_pci) ? le64toh(v) : be64toh(v))
#define	GEM_DMA_WRITE(sc, v)	(((sc)->sc_pci) ? htole64(v) : htobe64(v))
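
/*
 * Illustrative sketch (added for clarity, not part of the original header):
 * descriptor words are kept in bus byte order (little-endian when accessed
 * over PCI, big-endian otherwise), so the driver converts on every access,
 * e.g.
 *
 *	flags = GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags);
 *	if ((flags & GEM_RD_OWN) != 0)
 *		;	(descriptor is still owned by the hardware)
 */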

#define	GEM_CDTXADDR(sc, x)	((sc)->sc_cddma + GEM_CDTXOFF((x)))
#define	GEM_CDRXADDR(sc, x)	((sc)->sc_cddma + GEM_CDRXOFF((x)))

#define	GEM_CDSYNC(sc, ops)						\
	bus_dmamap_sync((sc)->sc_cdmatag, (sc)->sc_cddmamap, (ops))

#define	GEM_INIT_RXDESC(sc, x)						\
do {									\
	struct gem_rxsoft *__rxs = &sc->sc_rxsoft[(x)];			\
	struct gem_desc *__rxd = &sc->sc_rxdescs[(x)];			\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	__m->m_data = __m->m_ext.ext_buf;				\
	__rxd->gd_addr =						\
	    GEM_DMA_WRITE((sc), __rxs->rxs_paddr);			\
	__rxd->gd_flags =						\
	    GEM_DMA_WRITE((sc),						\
			(((__m->m_ext.ext_size)<<GEM_RD_BUFSHIFT)	\
				& GEM_RD_BUFSIZE) | GEM_RD_OWN);	\
} while (0)
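
/*
 * Illustrative sketch (added for clarity, not part of the original header):
 * GEM_INIT_RXDESC() is typically invoked once the receive mbuf has been
 * loaded into rxs_dmamap and rxs_paddr recorded; it resets the buffer
 * pointer, rewrites the descriptor and hands it back to the chip by
 * setting GEM_RD_OWN.  A subsequent GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE)
 * would flush the updated descriptor to the hardware, e.g.
 *
 *	GEM_INIT_RXDESC(sc, sc->sc_rxptr);
 *	GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);
 */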

#ifdef _KERNEL
extern devclass_t gem_devclass;

int	gem_attach(struct gem_softc *);
void	gem_detach(struct gem_softc *);
void	gem_suspend(struct gem_softc *);
void	gem_resume(struct gem_softc *);
void	gem_intr(void *);

int	gem_mediachange(struct ifnet *);
void	gem_mediastatus(struct ifnet *, struct ifmediareq *);

void	gem_reset(struct gem_softc *);

/* MII methods & callbacks */
int	gem_mii_readreg(device_t, int, int);
int	gem_mii_writereg(device_t, int, int, int);
void	gem_mii_statchg(device_t);

#endif /* _KERNEL */


#endif /* _IF_GEMVAR_H */