/* Intel Ethernet Switch Host Interface Driver
 * Copyright(c) 2013 - 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 */

#ifndef _FM10K_H_
#define _FM10K_H_

#include <linux/types.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/net_tstamp.h>
#include <linux/clocksource.h>
#include <linux/ptp_clock_kernel.h>

#include "fm10k_pf.h"
#include "fm10k_vf.h"

#define FM10K_MAX_JUMBO_FRAME_SIZE	15358	/* Maximum supported size 15K */

#define MAX_QUEUES	FM10K_MAX_QUEUES_PF

#define FM10K_MIN_RXD		 128
#define FM10K_MAX_RXD		4096
#define FM10K_DEFAULT_RXD	 256

#define FM10K_MIN_TXD		 128
#define FM10K_MAX_TXD		4096
#define FM10K_DEFAULT_TXD	 256
#define FM10K_DEFAULT_TX_WORK	 256

#define FM10K_RXBUFFER_256	  256
#define FM10K_RX_HDR_LEN	FM10K_RXBUFFER_256
#define FM10K_RXBUFFER_2048	 2048
#define FM10K_RX_BUFSZ		FM10K_RXBUFFER_2048

/* How many Rx Buffers do we bundle into one write to the hardware? */
#define FM10K_RX_BUFFER_WRITE	16	/* Must be power of 2 */

#define FM10K_MAX_STATIONS	63
struct fm10k_l2_accel {
	int size;
	u16 count;
	u16 dglort;
	struct rcu_head rcu;
	struct net_device *macvlan[0];
};

enum fm10k_ring_state_t {
	__FM10K_TX_DETECT_HANG,
	__FM10K_HANG_CHECK_ARMED,
};

#define check_for_tx_hang(ring) \
	test_bit(__FM10K_TX_DETECT_HANG, &(ring)->state)
#define set_check_for_tx_hang(ring) \
	set_bit(__FM10K_TX_DETECT_HANG, &(ring)->state)
#define clear_check_for_tx_hang(ring) \
	clear_bit(__FM10K_TX_DETECT_HANG, &(ring)->state)
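
/* Illustrative usage sketch (not part of the original header): the macros
 * above wrap atomic bit operations on ring->state, so a watchdog can arm
 * hang detection on one pass and test/clear it on a later one, e.g.:
 *
 *	set_check_for_tx_hang(tx_ring);
 *	...
 *	if (check_for_tx_hang(tx_ring))
 *		clear_check_for_tx_hang(tx_ring);
 */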

struct fm10k_tx_buffer {
	struct fm10k_tx_desc *next_to_watch;
	struct sk_buff *skb;
	unsigned int bytecount;
	u16 gso_segs;
	u16 tx_flags;
	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
};

struct fm10k_rx_buffer {
	dma_addr_t dma;
	struct page *page;
	u32 page_offset;
};

struct fm10k_queue_stats {
	u64 packets;
	u64 bytes;
};

struct fm10k_tx_queue_stats {
	u64 restart_queue;
	u64 csum_err;
	u64 tx_busy;
	u64 tx_done_old;
};

struct fm10k_rx_queue_stats {
	u64 alloc_failed;
	u64 csum_err;
	u64 errors;
};

struct fm10k_ring {
	struct fm10k_q_vector *q_vector;/* backpointer to host q_vector */
	struct net_device *netdev;	/* netdev ring belongs to */
	struct device *dev;		/* device for DMA mapping */
	struct fm10k_l2_accel __rcu *l2_accel;	/* L2 acceleration list */
	void *desc;			/* descriptor ring memory */
	union {
		struct fm10k_tx_buffer *tx_buffer;
		struct fm10k_rx_buffer *rx_buffer;
	};
	u32 __iomem *tail;
	unsigned long state;
	dma_addr_t dma;			/* phys. address of descriptor ring */
	unsigned int size;		/* length in bytes */

	u8 queue_index;			/* needed for queue management */
	u8 reg_idx;			/* holds the special value that gets
					 * the hardware register offset
					 * associated with this ring, which is
					 * different for DCB and RSS modes
					 */
	u8 qos_pc;			/* priority class of queue */
	u16 vid;			/* default vlan ID of queue */
	u16 count;			/* number of descriptors */

	u16 next_to_alloc;
	u16 next_to_use;
	u16 next_to_clean;

	struct fm10k_queue_stats stats;
	struct u64_stats_sync syncp;
	union {
		/* Tx */
		struct fm10k_tx_queue_stats tx_stats;
		/* Rx */
		struct {
			struct fm10k_rx_queue_stats rx_stats;
			struct sk_buff *skb;
		};
	};
} ____cacheline_internodealigned_in_smp;

struct fm10k_ring_container {
	struct fm10k_ring *ring;	/* pointer to linked list of rings */
	unsigned int total_bytes;	/* total bytes processed this interrupt */
	unsigned int total_packets;	/* total packets processed this interrupt */
	u16 work_limit;			/* total work allowed per interrupt */
	u16 itr;			/* interrupt throttle rate value */
	u8 count;			/* total number of rings in vector */
};

#define FM10K_ITR_MAX		0x0FFF	/* maximum value for ITR */
#define FM10K_ITR_10K		100	/* 100us */
#define FM10K_ITR_20K		50	/* 50us */
#define FM10K_ITR_ADAPTIVE	0x8000	/* adaptive interrupt moderation flag */

#define FM10K_ITR_ENABLE	(FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR)
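
/* Worked example (not part of the original header): the _10K/_20K suffixes
 * are interrupt rates; 10,000 interrupts/s is one interrupt every 100us and
 * 20,000 interrupts/s is one every 50us, matching the interval values above.
 */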

static inline struct netdev_queue *txring_txq(const struct fm10k_ring *ring)
{
	return &ring->netdev->_tx[ring->queue_index];
}

/* iterator for handling rings in ring container */
#define fm10k_for_each_ring(pos, head) \
	for (pos = &(head).ring[(head).count]; (--pos) >= (head).ring;)
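
/* Illustrative sketch (not part of the original header): the iterator walks
 * a container's rings backwards from ring[count - 1] down to ring[0], e.g.
 * to total the bytes handled by a q_vector's Tx rings:
 *
 *	struct fm10k_ring *ring;
 *	u64 bytes = 0;
 *
 *	fm10k_for_each_ring(ring, q_vector->tx)
 *		bytes += ring->stats.bytes;
 */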

#define MAX_Q_VECTORS 256
#define MIN_Q_VECTORS	1
enum fm10k_non_q_vectors {
	FM10K_MBX_VECTOR,
#define NON_Q_VECTORS_VF NON_Q_VECTORS_PF
	NON_Q_VECTORS_PF
};

#define NON_Q_VECTORS(hw)	(((hw)->mac.type == fm10k_mac_pf) ? \
						NON_Q_VECTORS_PF : \
						NON_Q_VECTORS_VF)
#define MIN_MSIX_COUNT(hw)	(MIN_Q_VECTORS + NON_Q_VECTORS(hw))
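
/* Worked example (not part of the original header): in the enum above only
 * the mailbox vector precedes NON_Q_VECTORS_PF, so NON_Q_VECTORS(hw) is 1
 * and MIN_MSIX_COUNT(hw) evaluates to MIN_Q_VECTORS + 1 = 2 for PF and VF.
 */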

struct fm10k_q_vector {
	struct fm10k_intfc *interface;
	u32 __iomem *itr;	/* pointer to ITR register for this vector */
	u16 v_idx;		/* index of q_vector within interface array */
	struct fm10k_ring_container rx, tx;

	struct napi_struct napi;
	char name[IFNAMSIZ + 9];

#ifdef CONFIG_DEBUG_FS
	struct dentry *dbg_q_vector;
#endif /* CONFIG_DEBUG_FS */
	struct rcu_head rcu;	/* to avoid race with update stats on free */

	/* for dynamic allocation of rings associated with this q_vector */
	struct fm10k_ring ring[0] ____cacheline_internodealigned_in_smp;
};

enum fm10k_ring_f_enum {
	RING_F_RSS,
	RING_F_QOS,
	RING_F_ARRAY_SIZE  /* must be last in enum set */
};

struct fm10k_ring_feature {
	u16 limit;	/* upper limit on feature indices */
	u16 indices;	/* current value of indices */
	u16 mask;	/* Mask used for feature to ring mapping */
	u16 offset;	/* offset to start of feature */
};

struct fm10k_iov_data {
	unsigned int		num_vfs;
	unsigned int		next_vf_mbx;
	struct rcu_head		rcu;
	struct fm10k_vf_info	vf_info[0];
};

#define fm10k_vxlan_port_for_each(vp, intfc) \
	list_for_each_entry(vp, &(intfc)->vxlan_port, list)
struct fm10k_vxlan_port {
	struct list_head	list;
	sa_family_t		sa_family;
	__be16			port;
};
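
/* Illustrative sketch (not part of the original header): walking the
 * interface's VXLAN port list with the helper above, e.g. to pick out the
 * first tracked UDP port:
 *
 *	struct fm10k_vxlan_port *vxlan_port;
 *	__be16 port = 0;
 *
 *	fm10k_vxlan_port_for_each(vxlan_port, interface) {
 *		port = vxlan_port->port;
 *		break;
 *	}
 */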

/* one work queue for entire driver */
extern struct workqueue_struct *fm10k_workqueue;

struct fm10k_intfc {
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
	struct net_device *netdev;
	struct fm10k_l2_accel *l2_accel; /* pointer to L2 acceleration list */
	struct pci_dev *pdev;
	unsigned long state;

	u32 flags;
#define FM10K_FLAG_RESET_REQUESTED		(u32)(1 << 0)
#define FM10K_FLAG_RSS_FIELD_IPV4_UDP		(u32)(1 << 1)
#define FM10K_FLAG_RSS_FIELD_IPV6_UDP		(u32)(1 << 2)
#define FM10K_FLAG_RX_TS_ENABLED		(u32)(1 << 3)
#define FM10K_FLAG_SWPRI_CONFIG			(u32)(1 << 4)
	int xcast_mode;

	/* Tx fast path data */
	int num_tx_queues;
	u16 tx_itr;

	/* Rx fast path data */
	int num_rx_queues;
	u16 rx_itr;

	/* TX */
	struct fm10k_ring *tx_ring[MAX_QUEUES] ____cacheline_aligned_in_smp;

	u64 restart_queue;
	u64 tx_busy;
	u64 tx_csum_errors;
	u64 alloc_failed;
	u64 rx_csum_errors;

	u64 tx_bytes_nic;
	u64 tx_packets_nic;
	u64 rx_bytes_nic;
	u64 rx_packets_nic;
	u64 rx_drops_nic;
	u64 rx_overrun_pf;
	u64 rx_overrun_vf;
	u32 tx_timeout_count;

	/* RX */
	struct fm10k_ring *rx_ring[MAX_QUEUES];

	/* Queueing vectors */
	struct fm10k_q_vector *q_vector[MAX_Q_VECTORS];
	struct msix_entry *msix_entries;
	int num_q_vectors;	/* current number of q_vectors for device */
	struct fm10k_ring_feature ring_feature[RING_F_ARRAY_SIZE];

	/* SR-IOV information management structure */
	struct fm10k_iov_data *iov_data;

	struct fm10k_hw_stats stats;
	struct fm10k_hw hw;
	u32 __iomem *uc_addr;
	u32 __iomem *sw_addr;
	u16 msg_enable;
	u16 tx_ring_count;
	u16 rx_ring_count;
	struct timer_list service_timer;
	struct work_struct service_task;
	unsigned long next_stats_update;
	unsigned long next_tx_hang_check;
	unsigned long last_reset;
	unsigned long link_down_event;
	bool host_ready;

	u32 reta[FM10K_RETA_SIZE];
	u32 rssrk[FM10K_RSSRK_SIZE];

	/* VXLAN port tracking information */
	struct list_head vxlan_port;

#ifdef CONFIG_DEBUG_FS
	struct dentry *dbg_intfc;

#endif /* CONFIG_DEBUG_FS */
	struct ptp_clock_info ptp_caps;
	struct ptp_clock *ptp_clock;

	struct sk_buff_head ts_tx_skb_queue;
	u32 tx_hwtstamp_timeouts;

	struct hwtstamp_config ts_config;
	/* The hardware clock can only be adjusted in frequency; once it is
	 * started there is no way to reset it.  We therefore maintain a
	 * separate software offset from the hardware clock to allow for
	 * offset adjustment.
	 */
	s64 ptp_adjust;
	rwlock_t systime_lock;
#ifdef CONFIG_DCB
	u8 pfc_en;
#endif
	u8 rx_pause;

	/* GLORT resources in use by PF */
	u16 glort;
	u16 glort_count;

	/* VLAN ID for updating multicast/unicast lists */
	u16 vid;
};

enum fm10k_state_t {
	__FM10K_RESETTING,
	__FM10K_DOWN,
	__FM10K_SERVICE_SCHED,
	__FM10K_SERVICE_DISABLE,
	__FM10K_MBX_LOCK,
	__FM10K_LINK_DOWN,
};

static inline void fm10k_mbx_lock(struct fm10k_intfc *interface)
{
	/* busy loop if we cannot obtain the lock as some calls
	 * such as ndo_set_rx_mode may be made in atomic context
	 */
	while (test_and_set_bit(__FM10K_MBX_LOCK, &interface->state))
		udelay(20);
}

static inline void fm10k_mbx_unlock(struct fm10k_intfc *interface)
{
	/* flush memory to make sure state is correct */
	smp_mb__before_atomic();
	clear_bit(__FM10K_MBX_LOCK, &interface->state);
}

static inline int fm10k_mbx_trylock(struct fm10k_intfc *interface)
{
	return !test_and_set_bit(__FM10K_MBX_LOCK, &interface->state);
}
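
/* Illustrative sketch (not part of the original header): callers bracket
 * mailbox traffic with the helpers above; contexts that must not spin can
 * use the trylock variant and defer the work on failure, e.g.:
 *
 *	fm10k_mbx_lock(interface);
 *	...enqueue mailbox messages...
 *	fm10k_mbx_unlock(interface);
 *
 *	if (fm10k_mbx_trylock(interface)) {
 *		...best-effort mailbox work...
 *		fm10k_mbx_unlock(interface);
 *	}
 */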

/* fm10k_test_staterr - test bits in Rx descriptor status and error fields */
static inline __le32 fm10k_test_staterr(union fm10k_rx_desc *rx_desc,
					const u32 stat_err_bits)
{
	return rx_desc->d.staterr & cpu_to_le32(stat_err_bits);
}
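
/* Illustrative sketch (not part of the original header): the helper compares
 * in little-endian form so callers avoid a per-descriptor byte swap, e.g.
 * with an end-of-packet status bit, here assumed to be the driver's
 * FM10K_RXD_STATUS_EOP define:
 *
 *	if (fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_EOP))
 *		...this descriptor ends the frame...
 */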

/* fm10k_desc_unused - calculate the number of unused descriptors */
static inline u16 fm10k_desc_unused(struct fm10k_ring *ring)
{
	s16 unused = ring->next_to_clean - ring->next_to_use - 1;

	return likely(unused < 0) ? unused + ring->count : unused;
}
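
/* Worked example (not part of the original header): with count = 256,
 * next_to_clean = 10 and next_to_use = 250, unused is 10 - 250 - 1 = -241,
 * which wraps to -241 + 256 = 15 descriptors still available to the driver.
 */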

#define FM10K_TX_DESC(R, i)	\
	(&(((struct fm10k_tx_desc *)((R)->desc))[i]))
#define FM10K_RX_DESC(R, i)	\
	 (&(((union fm10k_rx_desc *)((R)->desc))[i]))

#define FM10K_MAX_TXD_PWR	14
#define FM10K_MAX_DATA_PER_TXD	(1 << FM10K_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S)	DIV_ROUND_UP((S), FM10K_MAX_DATA_PER_TXD)
#define DESC_NEEDED	(MAX_SKB_FRAGS + 4)
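
/* Worked example (not part of the original header): FM10K_MAX_DATA_PER_TXD
 * is 1 << 14 = 16384 bytes, so a 40000 byte buffer needs
 * TXD_USE_COUNT(40000) = DIV_ROUND_UP(40000, 16384) = 3 descriptors.
 */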

enum fm10k_tx_flags {
	/* Tx offload flags */
	FM10K_TX_FLAGS_CSUM	= 0x01,
};

/* This structure is stored as little endian values as that is the native
 * format of the Rx descriptor.  The ordering of these fields is reversed
 * from the actual ftag header to allow for a single bswap to take care
 * of placing all of the values in network order
 */
union fm10k_ftag_info {
	__le64 ftag;
	struct {
		/* dglort and sglort combined into a single 32bit desc read */
		__le32 glort;
		/* upper 16 bits of vlan are reserved 0 for swpri_type_user */
		__le32 vlan;
	} d;
	struct {
		__le16 dglort;
		__le16 sglort;
		__le16 vlan;
		__le16 swpri_type_user;
	} w;
};

struct fm10k_cb {
	union {
		__le64 tstamp;
		unsigned long ts_tx_timeout;
	};
	union fm10k_ftag_info fi;
};

#define FM10K_CB(skb) ((struct fm10k_cb *)(skb)->cb)
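
/* Illustrative sketch (not part of the original header): FM10K_CB() overlays
 * struct fm10k_cb on the skb control buffer so per-packet driver state rides
 * with the skb; dglort below is a hypothetical local, e.g.:
 *
 *	FM10K_CB(skb)->fi.w.dglort = cpu_to_le16(dglort);
 *	FM10K_CB(skb)->ts_tx_timeout = jiffies + HZ;
 */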

/* main */
extern char fm10k_driver_name[];
extern const char fm10k_driver_version[];
int fm10k_init_queueing_scheme(struct fm10k_intfc *interface);
void fm10k_clear_queueing_scheme(struct fm10k_intfc *interface);
__be16 fm10k_tx_encap_offload(struct sk_buff *skb);
netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
				  struct fm10k_ring *tx_ring);
void fm10k_tx_timeout_reset(struct fm10k_intfc *interface);
bool fm10k_check_tx_hang(struct fm10k_ring *tx_ring);
void fm10k_alloc_rx_buffers(struct fm10k_ring *rx_ring, u16 cleaned_count);

/* PCI */
void fm10k_mbx_free_irq(struct fm10k_intfc *);
int fm10k_mbx_request_irq(struct fm10k_intfc *);
void fm10k_qv_free_irq(struct fm10k_intfc *interface);
int fm10k_qv_request_irq(struct fm10k_intfc *interface);
int fm10k_register_pci_driver(void);
void fm10k_unregister_pci_driver(void);
void fm10k_up(struct fm10k_intfc *interface);
void fm10k_down(struct fm10k_intfc *interface);
void fm10k_update_stats(struct fm10k_intfc *interface);
void fm10k_service_event_schedule(struct fm10k_intfc *interface);
void fm10k_update_rx_drop_en(struct fm10k_intfc *interface);
#ifdef CONFIG_NET_POLL_CONTROLLER
void fm10k_netpoll(struct net_device *netdev);
#endif

/* Netdev */
struct net_device *fm10k_alloc_netdev(void);
int fm10k_setup_rx_resources(struct fm10k_ring *);
int fm10k_setup_tx_resources(struct fm10k_ring *);
void fm10k_free_rx_resources(struct fm10k_ring *);
void fm10k_free_tx_resources(struct fm10k_ring *);
void fm10k_clean_all_rx_rings(struct fm10k_intfc *);
void fm10k_clean_all_tx_rings(struct fm10k_intfc *);
void fm10k_unmap_and_free_tx_resource(struct fm10k_ring *,
				      struct fm10k_tx_buffer *);
void fm10k_restore_rx_state(struct fm10k_intfc *);
void fm10k_reset_rx_state(struct fm10k_intfc *);
int fm10k_setup_tc(struct net_device *dev, u8 tc);
int fm10k_open(struct net_device *netdev);
int fm10k_close(struct net_device *netdev);

/* Ethtool */
void fm10k_set_ethtool_ops(struct net_device *dev);

/* IOV */
s32 fm10k_iov_event(struct fm10k_intfc *interface);
s32 fm10k_iov_mbx(struct fm10k_intfc *interface);
void fm10k_iov_suspend(struct pci_dev *pdev);
int fm10k_iov_resume(struct pci_dev *pdev);
void fm10k_iov_disable(struct pci_dev *pdev);
int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs);
s32 fm10k_iov_update_pvid(struct fm10k_intfc *interface, u16 glort, u16 pvid);
int fm10k_ndo_set_vf_mac(struct net_device *netdev, int vf_idx, u8 *mac);
int fm10k_ndo_set_vf_vlan(struct net_device *netdev,
			  int vf_idx, u16 vid, u8 qos);
int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx, int rate,
			int unused);
int fm10k_ndo_get_vf_config(struct net_device *netdev,
			    int vf_idx, struct ifla_vf_info *ivi);

/* DebugFS */
#ifdef CONFIG_DEBUG_FS
void fm10k_dbg_q_vector_init(struct fm10k_q_vector *q_vector);
void fm10k_dbg_q_vector_exit(struct fm10k_q_vector *q_vector);
void fm10k_dbg_intfc_init(struct fm10k_intfc *interface);
void fm10k_dbg_intfc_exit(struct fm10k_intfc *interface);
void fm10k_dbg_init(void);
void fm10k_dbg_exit(void);
#else
static inline void fm10k_dbg_q_vector_init(struct fm10k_q_vector *q_vector) {}
static inline void fm10k_dbg_q_vector_exit(struct fm10k_q_vector *q_vector) {}
static inline void fm10k_dbg_intfc_init(struct fm10k_intfc *interface) {}
static inline void fm10k_dbg_intfc_exit(struct fm10k_intfc *interface) {}
static inline void fm10k_dbg_init(void) {}
static inline void fm10k_dbg_exit(void) {}
#endif /* CONFIG_DEBUG_FS */

/* Time Stamping */
void fm10k_systime_to_hwtstamp(struct fm10k_intfc *interface,
			       struct skb_shared_hwtstamps *hwtstamp,
			       u64 systime);
void fm10k_ts_tx_enqueue(struct fm10k_intfc *interface, struct sk_buff *skb);
void fm10k_ts_tx_hwtstamp(struct fm10k_intfc *interface, __le16 dglort,
			  u64 systime);
void fm10k_ts_reset(struct fm10k_intfc *interface);
void fm10k_ts_init(struct fm10k_intfc *interface);
void fm10k_ts_tx_subtask(struct fm10k_intfc *interface);
void fm10k_ptp_register(struct fm10k_intfc *interface);
void fm10k_ptp_unregister(struct fm10k_intfc *interface);
int fm10k_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
int fm10k_set_ts_config(struct net_device *netdev, struct ifreq *ifr);

/* DCB */
void fm10k_dcbnl_set_ops(struct net_device *dev);
#endif /* _FM10K_H_ */