/*
 *  linux/drivers/net/ehea/ehea_qmr.h
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *       Christoph Raisch <raisch@de.ibm.com>
 *       Jan-Bernd Themann <themann@de.ibm.com>
 *       Thomas Klein <tklein@de.ibm.com>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#ifndef __EHEA_QMR_H__
#define __EHEA_QMR_H__

#include "ehea.h"
#include "ehea_hw.h"

/*
 * page size of ehea hardware queues
 */

#define EHEA_PAGESHIFT  12
#define EHEA_PAGESIZE   (1UL << EHEA_PAGESHIFT)

/* Some abbreviations used here:
 *
 * WQE  - Work Queue Entry
 * SWQE - Send Work Queue Entry
 * RWQE - Receive Work Queue Entry
 * CQE  - Completion Queue Entry
 * EQE  - Event Queue Entry
 * MR   - Memory Region
 */

/* Use of WR_ID field for EHEA */
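/*
 * The 64 bit wr_id carried in WQEs and CQEs is subdivided into the
 * fields below via the EHEA_BMASK_* helpers from ehea.h, e.g.
 *   index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
 */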
#define EHEA_WR_ID_COUNT   EHEA_BMASK_IBM(0, 19)
#define EHEA_WR_ID_TYPE    EHEA_BMASK_IBM(20, 23)
#define EHEA_SWQE2_TYPE    0x1
#define EHEA_SWQE3_TYPE    0x2
#define EHEA_RWQE2_TYPE    0x3
#define EHEA_RWQE3_TYPE    0x4
#define EHEA_WR_ID_INDEX   EHEA_BMASK_IBM(24, 47)
#define EHEA_WR_ID_REFILL  EHEA_BMASK_IBM(48, 63)

struct ehea_vsgentry {
	u64 vaddr;
	u32 l_key;
	u32 len;
};

/* maximum number of sg entries allowed in a WQE */
#define EHEA_MAX_WQE_SG_ENTRIES		252
#define SWQE2_MAX_IMM			(0xD0 - 0x30)
#define SWQE3_MAX_IMM			224
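/* SWQE2_MAX_IMM is the 0xa0 (160) bytes of immediate data that fit
 * between offsets 0x30 and 0xd0 of send WQE format 2 (see struct
 * ehea_swqe below).
 */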

/* tx control flags for swqe */
#define EHEA_SWQE_CRC                   0x8000
#define EHEA_SWQE_IP_CHECKSUM           0x4000
#define EHEA_SWQE_TCP_CHECKSUM          0x2000
#define EHEA_SWQE_TSO                   0x1000
#define EHEA_SWQE_SIGNALLED_COMPLETION  0x0800
#define EHEA_SWQE_VLAN_INSERT           0x0400
#define EHEA_SWQE_IMM_DATA_PRESENT      0x0200
#define EHEA_SWQE_DESCRIPTORS_PRESENT   0x0100
#define EHEA_SWQE_WRAP_CTL_REC          0x0080
#define EHEA_SWQE_WRAP_CTL_FORCE        0x0040
#define EHEA_SWQE_BIND                  0x0020
#define EHEA_SWQE_PURGE                 0x0010

/* sizeof(struct ehea_swqe) less the union */
#define SWQE_HEADER_SIZE		32

struct ehea_swqe {
	u64 wr_id;
	u16 tx_control;
	u16 vlan_tag;
	u8 reserved1;
	u8 ip_start;
	u8 ip_end;
	u8 immediate_data_length;
	u8 tcp_offset;
	u8 reserved2;
	u16 tcp_end;
	u8 wrap_tag;
	u8 descriptors;		/* number of valid descriptors in WQE */
	u16 reserved3;
	u16 reserved4;
	u16 mss;
	u32 reserved5;
	union {
		/*  Send WQE Format 1 */
		struct {
			struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES];
		} no_immediate_data;

		/*  Send WQE Format 2 */
		struct {
			struct ehea_vsgentry sg_entry;
			/* 0x30 */
			u8 immediate_data[SWQE2_MAX_IMM];
			/* 0xd0 */
			struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES-1];
		} immdata_desc __attribute__ ((packed));

		/*  Send WQE Format 3 */
		struct {
			u8 immediate_data[SWQE3_MAX_IMM];
		} immdata_nodesc;
	} u;
};

struct ehea_rwqe {
	u64 wr_id;		/* work request ID */
	u8 reserved1[5];
	u8 data_segments;
	u16 reserved2;
	u64 reserved3;
	u64 reserved4;
	struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES];
};

#define EHEA_CQE_VLAN_TAG_XTRACT   0x0400

#define EHEA_CQE_TYPE_RQ           0x60
#define EHEA_CQE_STAT_ERR_MASK     0x721F
#define EHEA_CQE_STAT_FAT_ERR_MASK 0x1F
#define EHEA_CQE_STAT_ERR_TCP      0x4000
#define EHEA_CQE_STAT_ERR_IP       0x2000
#define EHEA_CQE_STAT_ERR_CRC      0x1000

struct ehea_cqe {
	u64 wr_id;		/* work request ID from WQE */
	u8 type;
	u8 valid;
	u16 status;
	u16 reserved1;
	u16 num_bytes_transfered;
	u16 vlan_tag;
	u16 inet_checksum_value;
	u8 reserved2;
	u8 header_length;
	u16 reserved3;
	u16 page_offset;
	u16 wqe_count;
	u32 qp_token;
	u32 timestamp;
	u32 reserved4;
	u64 reserved5[3];
};

#define EHEA_EQE_VALID           EHEA_BMASK_IBM(0, 0)
#define EHEA_EQE_IS_CQE          EHEA_BMASK_IBM(1, 1)
#define EHEA_EQE_IDENTIFIER      EHEA_BMASK_IBM(2, 7)
#define EHEA_EQE_QP_CQ_NUMBER    EHEA_BMASK_IBM(8, 31)
#define EHEA_EQE_QP_TOKEN        EHEA_BMASK_IBM(32, 63)
#define EHEA_EQE_CQ_TOKEN        EHEA_BMASK_IBM(32, 63)
#define EHEA_EQE_KEY             EHEA_BMASK_IBM(32, 63)
#define EHEA_EQE_PORT_NUMBER     EHEA_BMASK_IBM(56, 63)
#define EHEA_EQE_EQ_NUMBER       EHEA_BMASK_IBM(48, 63)
#define EHEA_EQE_SM_ID           EHEA_BMASK_IBM(48, 63)
#define EHEA_EQE_SM_MECH_NUMBER  EHEA_BMASK_IBM(48, 55)
#define EHEA_EQE_SM_PORT_NUMBER  EHEA_BMASK_IBM(56, 63)

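/* An EQE is a single 64 bit word whose fields are extracted with the
 * EHEA_EQE_* masks above, e.g.
 *   token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
 */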
struct ehea_eqe {
	u64 entry;
};

#define ERROR_DATA_LENGTH  EHEA_BMASK_IBM(52, 63)
#define ERROR_DATA_TYPE    EHEA_BMASK_IBM(0, 7)

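/* Translate a byte offset into a hardware queue (wrapping past the end
 * of the queue) into the virtual address of the queue entry inside the
 * corresponding queue page.
 */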
static inline void *hw_qeit_calc(struct hw_queue *queue, u64 q_offset)
{
	struct ehea_page *current_page;

	if (q_offset >= queue->queue_length)
		q_offset -= queue->queue_length;
	current_page = (queue->queue_pages)[q_offset >> EHEA_PAGESHIFT];
	return &current_page->entries[q_offset & (EHEA_PAGESIZE - 1)];
}

static inline void *hw_qeit_get(struct hw_queue *queue)
{
	return hw_qeit_calc(queue, queue->current_q_offset);
}

static inline void hw_qeit_inc(struct hw_queue *queue)
{
	queue->current_q_offset += queue->qe_size;
	if (queue->current_q_offset >= queue->queue_length) {
		queue->current_q_offset = 0;
		/* toggle the valid flag */
		queue->toggle_state = (~queue->toggle_state) & 1;
	}
}

static inline void *hw_qeit_get_inc(struct hw_queue *queue)
{
	void *retvalue = hw_qeit_get(queue);
	hw_qeit_inc(queue);
	return retvalue;
}

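/* Return the current CQE and advance the queue iterator, but only if the
 * entry's valid bit matches the queue's toggle state, i.e. the hardware
 * has actually written it; the following entry is prefetched. Returns
 * NULL when no new entry is available.
 */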
static inline void *hw_qeit_get_inc_valid(struct hw_queue *queue)
{
	struct ehea_cqe *retvalue = hw_qeit_get(queue);
	u8 valid = retvalue->valid;
	void *pref;

	if ((valid >> 7) == (queue->toggle_state & 1)) {
		/* this is a good one */
		hw_qeit_inc(queue);
		pref = hw_qeit_calc(queue, queue->current_q_offset);
		prefetch(pref);
		prefetch(pref + 128);
	} else
		retvalue = NULL;
	return retvalue;
}

static inline void *hw_qeit_get_valid(struct hw_queue *queue)
{
	struct ehea_cqe *retvalue = hw_qeit_get(queue);
	void *pref;
	u8 valid;

	pref = hw_qeit_calc(queue, queue->current_q_offset);
	prefetch(pref);
	prefetch(pref + 128);
	prefetch(pref + 256);
	valid = retvalue->valid;
	if (!((valid >> 7) == (queue->toggle_state & 1)))
		retvalue = NULL;
	return retvalue;
}

static inline void *hw_qeit_reset(struct hw_queue *queue)
{
	queue->current_q_offset = 0;
	return hw_qeit_get(queue);
}

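/* Get the current EQE and advance; when the last entry of the queue has
 * been consumed, wrap around to the start and flip the toggle state.
 */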
static inline void *hw_qeit_eq_get_inc(struct hw_queue *queue)
{
	u64 last_entry_in_q = queue->queue_length - queue->qe_size;
	void *retvalue;

	retvalue = hw_qeit_get(queue);
	queue->current_q_offset += queue->qe_size;
	if (queue->current_q_offset > last_entry_in_q) {
		queue->current_q_offset = 0;
		queue->toggle_state = (~queue->toggle_state) & 1;
	}
	return retvalue;
}

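/* Return the current EQE and advance the iterator if the entry's valid
 * bit (the most significant bit of its first byte) matches the queue's
 * toggle state; otherwise return NULL.
 */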
static inline void *hw_eqit_eq_get_inc_valid(struct hw_queue *queue)
{
	void *retvalue = hw_qeit_get(queue);
	u32 qe = *(u8 *)retvalue;
	if ((qe >> 7) == (queue->toggle_state & 1))
		hw_qeit_eq_get_inc(queue);
	else
		retvalue = NULL;
	return retvalue;
}

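/* Return the next receive WQE in the selected receive queue (1, 2 or 3)
 * and advance that queue's iterator.
 */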
static inline struct ehea_rwqe *ehea_get_next_rwqe(struct ehea_qp *qp,
						   int rq_nr)
{
	struct hw_queue *queue;

	if (rq_nr == 1)
		queue = &qp->hw_rqueue1;
	else if (rq_nr == 2)
		queue = &qp->hw_rqueue2;
	else
		queue = &qp->hw_rqueue3;

	return hw_qeit_get_inc(queue);
}

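/* Return the next send WQE and report its index within the send queue;
 * the index is the current queue offset shifted right by
 * (7 + EHEA_SG_SQ), i.e. divided by the send WQE size (EHEA_SG_SQ is
 * defined in ehea.h).
 */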
static inline struct ehea_swqe *ehea_get_swqe(struct ehea_qp *my_qp,
					      int *wqe_index)
{
	struct hw_queue *queue = &my_qp->hw_squeue;
	struct ehea_swqe *wqe_p;

	*wqe_index = (queue->current_q_offset) >> (7 + EHEA_SG_SQ);
	wqe_p = hw_qeit_get_inc(&my_qp->hw_squeue);

	return wqe_p;
}

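/* Post a previously built send WQE: iosync() orders the WQE stores
 * before ehea_update_sqa() notifies the hardware that one more send
 * WQE is available.
 */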
static inline void ehea_post_swqe(struct ehea_qp *my_qp, struct ehea_swqe *swqe)
{
	iosync();
	ehea_update_sqa(my_qp, 1);
}

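/* Return the current entry of receive queue 1 if the hardware has marked
 * it valid (NULL otherwise) and report its index, computed from the
 * queue offset analogously to ehea_get_swqe() above.
 */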
static inline struct ehea_cqe *ehea_poll_rq1(struct ehea_qp *qp, int *wqe_index)
{
	struct hw_queue *queue = &qp->hw_rqueue1;

	*wqe_index = (queue->current_q_offset) >> (7 + EHEA_SG_RQ1);
	return hw_qeit_get_valid(queue);
}

static inline void ehea_inc_cq(struct ehea_cq *cq)
{
	hw_qeit_inc(&cq->hw_queue);
}

static inline void ehea_inc_rq1(struct ehea_qp *qp)
{
	hw_qeit_inc(&qp->hw_rqueue1);
}

static inline struct ehea_cqe *ehea_poll_cq(struct ehea_cq *my_cq)
{
	return hw_qeit_get_valid(&my_cq->hw_queue);
}

#define EHEA_CQ_REGISTER_ORIG 0
#define EHEA_EQ_REGISTER_ORIG 0

enum ehea_eq_type {
	EHEA_EQ = 0,		/* event queue              */
	EHEA_NEQ		/* notification event queue */
};

struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
			       enum ehea_eq_type type,
			       const u32 length, const u8 eqe_gen);

int ehea_destroy_eq(struct ehea_eq *eq);

struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq);

struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter, int cqe,
			       u64 eq_handle, u32 cq_token);

int ehea_destroy_cq(struct ehea_cq *cq);

struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter, u32 pd,
			       struct ehea_qp_init_attr *init_attr);

int ehea_destroy_qp(struct ehea_qp *qp);

int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr);

int ehea_gen_smr(struct ehea_adapter *adapter, struct ehea_mr *old_mr,
		 struct ehea_mr *shared_mr);

int ehea_rem_mr(struct ehea_mr *mr);

void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle);

#endif	/* __EHEA_QMR_H__ */