1/*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (c) 2009-2011 Spectra Logic Corporation
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions, and the following disclaimer,
12 *    without modification.
13 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
14 *    substantially similar to the "NO WARRANTY" disclaimer below
15 *    ("Disclaimer") and any redistribution must be conditioned upon
16 *    including a substantially similar Disclaimer requirement for further
17 *    binary redistribution.
18 *
19 * NO WARRANTY
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
28 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
29 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGES.
31 *
32 * Authors: Justin T. Gibbs     (Spectra Logic Corporation)
33 *          Alan Somers         (Spectra Logic Corporation)
34 *          John Suykerbuyk     (Spectra Logic Corporation)
35 */
36
37#include <sys/cdefs.h>
38__FBSDID("$FreeBSD$");
39
40/**
41 * \file netback_unit_tests.c
42 *
43 * \brief Unit tests for the Xen netback driver.
44 *
45 * Due to the driver's use of static functions, these tests cannot be compiled
46 * standalone; they must be #include'd from the driver's .c file.
47 */
48
49/** Helper macro used to snprintf to a buffer and update the buffer pointer */
50#define	SNCATF(buffer, buflen, ...) do {				\
51	size_t new_chars = snprintf(buffer, buflen, __VA_ARGS__);	\
52	buffer += new_chars;						\
53	/* be careful; snprintf's return value can be  > buflen */	\
54	buflen -= MIN(buflen, new_chars);				\
55} while (0)
56
57/* STRINGIFY and TOSTRING are used only to help turn __LINE__ into a string */
58#define	STRINGIFY(x) #x
59#define	TOSTRING(x) STRINGIFY(x)
60
61/**
62 * Writes an error message to buffer if cond is false
63 * Note the implied parameters buffer and
64 * buflen
65 */
66#define	XNB_ASSERT(cond) ({						\
67	int passed = (cond);						\
68	char *_buffer = (buffer);					\
69	size_t _buflen = (buflen);					\
70	if (! passed) {							\
71		strlcat(_buffer, __func__, _buflen);			\
72		strlcat(_buffer, ":" TOSTRING(__LINE__) 		\
73		  " Assertion Error: " #cond "\n", _buflen);		\
74	}								\
75	})
76
77/**
78 * The signature used by all testcases.  If the test writes anything
79 * to buffer, then it will be considered a failure
80 * \param buffer	Return storage for error messages
81 * \param buflen	The space available in the buffer
82 */
83typedef void testcase_t(char *buffer, size_t buflen);
84
85/**
86 * Signature used by setup functions
87 * \return nonzero on error
88 */
89typedef int setup_t(void);
90
91typedef void teardown_t(void);
92
93/** A simple test fixture comprising setup, teardown, and test */
94struct test_fixture {
95	/** Will be run before the test to allocate and initialize variables */
96	setup_t *setup;
97
98	/** Will be run if setup succeeds */
99	testcase_t *test;
100
101	/** Cleans up test data whether or not the setup succeeded */
102	teardown_t *teardown;
103};
104
105typedef struct test_fixture test_fixture_t;
106
107static int	xnb_get1pkt(struct xnb_pkt *pkt, size_t size, uint16_t flags);
108static int	xnb_unit_test_runner(test_fixture_t const tests[], int ntests,
109				     char *buffer, size_t buflen);
110
111static int __unused
112null_setup(void) { return 0; }
113
114static void __unused
115null_teardown(void) { }
116
117static setup_t setup_pvt_data;
118static teardown_t teardown_pvt_data;
119static testcase_t xnb_ring2pkt_emptyring;
120static testcase_t xnb_ring2pkt_1req;
121static testcase_t xnb_ring2pkt_2req;
122static testcase_t xnb_ring2pkt_3req;
123static testcase_t xnb_ring2pkt_extra;
124static testcase_t xnb_ring2pkt_partial;
125static testcase_t xnb_ring2pkt_wraps;
126static testcase_t xnb_txpkt2rsp_emptypkt;
127static testcase_t xnb_txpkt2rsp_1req;
128static testcase_t xnb_txpkt2rsp_extra;
129static testcase_t xnb_txpkt2rsp_long;
130static testcase_t xnb_txpkt2rsp_invalid;
131static testcase_t xnb_txpkt2rsp_error;
132static testcase_t xnb_txpkt2rsp_wraps;
133static testcase_t xnb_pkt2mbufc_empty;
134static testcase_t xnb_pkt2mbufc_short;
135static testcase_t xnb_pkt2mbufc_csum;
136static testcase_t xnb_pkt2mbufc_1cluster;
137static testcase_t xnb_pkt2mbufc_largecluster;
138static testcase_t xnb_pkt2mbufc_2cluster;
139static testcase_t xnb_txpkt2gnttab_empty;
140static testcase_t xnb_txpkt2gnttab_short;
141static testcase_t xnb_txpkt2gnttab_2req;
142static testcase_t xnb_txpkt2gnttab_2cluster;
143static testcase_t xnb_update_mbufc_short;
144static testcase_t xnb_update_mbufc_2req;
145static testcase_t xnb_update_mbufc_2cluster;
146static testcase_t xnb_mbufc2pkt_empty;
147static testcase_t xnb_mbufc2pkt_short;
148static testcase_t xnb_mbufc2pkt_1cluster;
149static testcase_t xnb_mbufc2pkt_2short;
150static testcase_t xnb_mbufc2pkt_long;
151static testcase_t xnb_mbufc2pkt_extra;
152static testcase_t xnb_mbufc2pkt_nospace;
153static testcase_t xnb_rxpkt2gnttab_empty;
154static testcase_t xnb_rxpkt2gnttab_short;
155static testcase_t xnb_rxpkt2gnttab_2req;
156static testcase_t xnb_rxpkt2rsp_empty;
157static testcase_t xnb_rxpkt2rsp_short;
158static testcase_t xnb_rxpkt2rsp_extra;
159static testcase_t xnb_rxpkt2rsp_2short;
160static testcase_t xnb_rxpkt2rsp_2slots;
161static testcase_t xnb_rxpkt2rsp_copyerror;
162static testcase_t xnb_sscanf_llu;
163static testcase_t xnb_sscanf_lld;
164static testcase_t xnb_sscanf_hhu;
165static testcase_t xnb_sscanf_hhd;
166static testcase_t xnb_sscanf_hhn;
167
168#if defined(INET) || defined(INET6)
169/* TODO: add test cases for xnb_add_mbuf_cksum for IPV6 tcp and udp */
170static testcase_t xnb_add_mbuf_cksum_arp;
171static testcase_t xnb_add_mbuf_cksum_tcp;
172static testcase_t xnb_add_mbuf_cksum_udp;
173static testcase_t xnb_add_mbuf_cksum_icmp;
174static testcase_t xnb_add_mbuf_cksum_tcp_swcksum;
175static void	xnb_fill_eh_and_ip(struct mbuf *m, uint16_t ip_len,
176				   uint16_t ip_id, uint16_t ip_p,
177				   uint16_t ip_off, uint16_t ip_sum);
178static void	xnb_fill_tcp(struct mbuf *m);
179#endif /* INET || INET6 */
180
181/** Private data used by unit tests */
182static struct {
183	gnttab_copy_table 	gnttab;
184	netif_rx_back_ring_t	rxb;
185	netif_rx_front_ring_t	rxf;
186	netif_tx_back_ring_t	txb;
187	netif_tx_front_ring_t	txf;
188	struct ifnet*		ifp;
189	netif_rx_sring_t*	rxs;
190	netif_tx_sring_t*	txs;
191} xnb_unit_pvt;
192
193static inline void safe_m_freem(struct mbuf **ppMbuf) {
194	if (*ppMbuf != NULL) {
195		m_freem(*ppMbuf);
196		*ppMbuf = NULL;
197	}
198}
199
200/**
201 * The unit test runner.  It will run every supplied test and return an
202 * output message as a string
203 * \param tests		An array of tests.  Every test will be attempted.
204 * \param ntests	The length of tests
205 * \param buffer	Return storage for the result string
206 * \param buflen	The length of buffer
207 * \return		The number of tests that failed
208 */
209static int
210xnb_unit_test_runner(test_fixture_t const tests[], int ntests, char *buffer,
211    		     size_t buflen)
212{
213	int i;
214	int n_passes;
215	int n_failures = 0;
216
217	for (i = 0; i < ntests; i++) {
218		int error = tests[i].setup();
219		if (error != 0) {
220			SNCATF(buffer, buflen,
221			    "Setup failed for test idx %d\n", i);
222			n_failures++;
223		} else {
224			size_t new_chars;
225
226			tests[i].test(buffer, buflen);
227			new_chars = strnlen(buffer, buflen);
228			buffer += new_chars;
229			buflen -= new_chars;
230
231			if (new_chars > 0) {
232				n_failures++;
233			}
234		}
235		tests[i].teardown();
236	}
237
238	n_passes = ntests - n_failures;
239	if (n_passes > 0) {
240		SNCATF(buffer, buflen, "%d Tests Passed\n", n_passes);
241	}
242	if (n_failures > 0) {
243		SNCATF(buffer, buflen, "%d Tests FAILED\n", n_failures);
244	}
245
246	return n_failures;
247}
248
249/** Number of unit tests.  Must match the length of the tests array below */
250#define	TOTAL_TESTS	(53)
251/**
252 * Max memory available for returning results.  400 chars/test should give
253 * enough space for a five line error message for every test
254 */
255#define	TOTAL_BUFLEN	(400 * TOTAL_TESTS + 2)
256
257/**
258 * Called from userspace by a sysctl.  Runs all internal unit tests, and
259 * returns the results to userspace as a string
260 * \param oidp	unused
261 * \param arg1	pointer to an xnb_softc for a specific xnb device
262 * \param arg2	unused
263 * \param req	sysctl access structure
264 * \return a string via the special SYSCTL_OUT macro.
265 */
266
267static int
268xnb_unit_test_main(SYSCTL_HANDLER_ARGS) {
269	test_fixture_t const tests[TOTAL_TESTS] = {
270		{setup_pvt_data, xnb_ring2pkt_emptyring, teardown_pvt_data},
271		{setup_pvt_data, xnb_ring2pkt_1req, teardown_pvt_data},
272		{setup_pvt_data, xnb_ring2pkt_2req, teardown_pvt_data},
273		{setup_pvt_data, xnb_ring2pkt_3req, teardown_pvt_data},
274		{setup_pvt_data, xnb_ring2pkt_extra, teardown_pvt_data},
275		{setup_pvt_data, xnb_ring2pkt_partial, teardown_pvt_data},
276		{setup_pvt_data, xnb_ring2pkt_wraps, teardown_pvt_data},
277		{setup_pvt_data, xnb_txpkt2rsp_emptypkt, teardown_pvt_data},
278		{setup_pvt_data, xnb_txpkt2rsp_1req, teardown_pvt_data},
279		{setup_pvt_data, xnb_txpkt2rsp_extra, teardown_pvt_data},
280		{setup_pvt_data, xnb_txpkt2rsp_long, teardown_pvt_data},
281		{setup_pvt_data, xnb_txpkt2rsp_invalid, teardown_pvt_data},
282		{setup_pvt_data, xnb_txpkt2rsp_error, teardown_pvt_data},
283		{setup_pvt_data, xnb_txpkt2rsp_wraps, teardown_pvt_data},
284		{setup_pvt_data, xnb_pkt2mbufc_empty, teardown_pvt_data},
285		{setup_pvt_data, xnb_pkt2mbufc_short, teardown_pvt_data},
286		{setup_pvt_data, xnb_pkt2mbufc_csum, teardown_pvt_data},
287		{setup_pvt_data, xnb_pkt2mbufc_1cluster, teardown_pvt_data},
288		{setup_pvt_data, xnb_pkt2mbufc_largecluster, teardown_pvt_data},
289		{setup_pvt_data, xnb_pkt2mbufc_2cluster, teardown_pvt_data},
290		{setup_pvt_data, xnb_txpkt2gnttab_empty, teardown_pvt_data},
291		{setup_pvt_data, xnb_txpkt2gnttab_short, teardown_pvt_data},
292		{setup_pvt_data, xnb_txpkt2gnttab_2req, teardown_pvt_data},
293		{setup_pvt_data, xnb_txpkt2gnttab_2cluster, teardown_pvt_data},
294		{setup_pvt_data, xnb_update_mbufc_short, teardown_pvt_data},
295		{setup_pvt_data, xnb_update_mbufc_2req, teardown_pvt_data},
296		{setup_pvt_data, xnb_update_mbufc_2cluster, teardown_pvt_data},
297		{setup_pvt_data, xnb_mbufc2pkt_empty, teardown_pvt_data},
298		{setup_pvt_data, xnb_mbufc2pkt_short, teardown_pvt_data},
299		{setup_pvt_data, xnb_mbufc2pkt_1cluster, teardown_pvt_data},
300		{setup_pvt_data, xnb_mbufc2pkt_2short, teardown_pvt_data},
301		{setup_pvt_data, xnb_mbufc2pkt_long, teardown_pvt_data},
302		{setup_pvt_data, xnb_mbufc2pkt_extra, teardown_pvt_data},
303		{setup_pvt_data, xnb_mbufc2pkt_nospace, teardown_pvt_data},
304		{setup_pvt_data, xnb_rxpkt2gnttab_empty, teardown_pvt_data},
305		{setup_pvt_data, xnb_rxpkt2gnttab_short, teardown_pvt_data},
306		{setup_pvt_data, xnb_rxpkt2gnttab_2req, teardown_pvt_data},
307		{setup_pvt_data, xnb_rxpkt2rsp_empty, teardown_pvt_data},
308		{setup_pvt_data, xnb_rxpkt2rsp_short, teardown_pvt_data},
309		{setup_pvt_data, xnb_rxpkt2rsp_extra, teardown_pvt_data},
310		{setup_pvt_data, xnb_rxpkt2rsp_2short, teardown_pvt_data},
311		{setup_pvt_data, xnb_rxpkt2rsp_2slots, teardown_pvt_data},
312		{setup_pvt_data, xnb_rxpkt2rsp_copyerror, teardown_pvt_data},
313#if defined(INET) || defined(INET6)
314		{null_setup, xnb_add_mbuf_cksum_arp, null_teardown},
315		{null_setup, xnb_add_mbuf_cksum_icmp, null_teardown},
316		{null_setup, xnb_add_mbuf_cksum_tcp, null_teardown},
317		{null_setup, xnb_add_mbuf_cksum_tcp_swcksum, null_teardown},
318		{null_setup, xnb_add_mbuf_cksum_udp, null_teardown},
319#endif
320		{null_setup, xnb_sscanf_hhd, null_teardown},
321		{null_setup, xnb_sscanf_hhu, null_teardown},
322		{null_setup, xnb_sscanf_lld, null_teardown},
323		{null_setup, xnb_sscanf_llu, null_teardown},
324		{null_setup, xnb_sscanf_hhn, null_teardown},
325	};
326	/**
327	 * results is static so that the data will persist after this function
328	 * returns.  The sysctl code expects us to return a constant string.
329	 * \todo: the static variable is not thread safe.  Put a mutex around
330	 * it.
331	 */
332	static char results[TOTAL_BUFLEN];
333
334	/* empty the result strings */
335	results[0] = 0;
336	xnb_unit_test_runner(tests, TOTAL_TESTS, results, TOTAL_BUFLEN);
337
338	return (SYSCTL_OUT(req, results, strnlen(results, TOTAL_BUFLEN)));
339}
340
341static int
342setup_pvt_data(void)
343{
344	int error = 0;
345
346	bzero(xnb_unit_pvt.gnttab, sizeof(xnb_unit_pvt.gnttab));
347
348	xnb_unit_pvt.txs = malloc(PAGE_SIZE, M_XENNETBACK, M_WAITOK|M_ZERO);
349	if (xnb_unit_pvt.txs != NULL) {
350		SHARED_RING_INIT(xnb_unit_pvt.txs);
351		BACK_RING_INIT(&xnb_unit_pvt.txb, xnb_unit_pvt.txs, PAGE_SIZE);
352		FRONT_RING_INIT(&xnb_unit_pvt.txf, xnb_unit_pvt.txs, PAGE_SIZE);
353	} else {
354		error = 1;
355	}
356
357	xnb_unit_pvt.ifp = if_alloc(IFT_ETHER);
358	if (xnb_unit_pvt.ifp == NULL) {
359		error = 1;
360	}
361
362	xnb_unit_pvt.rxs = malloc(PAGE_SIZE, M_XENNETBACK, M_WAITOK|M_ZERO);
363	if (xnb_unit_pvt.rxs != NULL) {
364		SHARED_RING_INIT(xnb_unit_pvt.rxs);
365		BACK_RING_INIT(&xnb_unit_pvt.rxb, xnb_unit_pvt.rxs, PAGE_SIZE);
366		FRONT_RING_INIT(&xnb_unit_pvt.rxf, xnb_unit_pvt.rxs, PAGE_SIZE);
367	} else {
368		error = 1;
369	}
370
371	return error;
372}
373
374static void
375teardown_pvt_data(void)
376{
377	if (xnb_unit_pvt.txs != NULL) {
378		free(xnb_unit_pvt.txs, M_XENNETBACK);
379	}
380	if (xnb_unit_pvt.rxs != NULL) {
381		free(xnb_unit_pvt.rxs, M_XENNETBACK);
382	}
383	if (xnb_unit_pvt.ifp != NULL) {
384		if_free(xnb_unit_pvt.ifp);
385	}
386}
387
388/**
389 * Verify that xnb_ring2pkt will not consume any requests from an empty ring
390 */
391static void
392xnb_ring2pkt_emptyring(char *buffer, size_t buflen)
393{
394	struct xnb_pkt pkt;
395	int num_consumed;
396
397	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
398	                            xnb_unit_pvt.txb.req_cons);
399	XNB_ASSERT(num_consumed == 0);
400}
401
402/**
403 * Verify that xnb_ring2pkt can convert a single request packet correctly
404 */
405static void
406xnb_ring2pkt_1req(char *buffer, size_t buflen)
407{
408	struct xnb_pkt pkt;
409	int num_consumed;
410	struct netif_tx_request *req;
411
412	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
413	    xnb_unit_pvt.txf.req_prod_pvt);
414
415	req->flags = 0;
416	req->size = 69;	/* arbitrary number for test */
417	xnb_unit_pvt.txf.req_prod_pvt++;
418
419	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
420
421	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
422	                            xnb_unit_pvt.txb.req_cons);
423	XNB_ASSERT(num_consumed == 1);
424	XNB_ASSERT(pkt.size == 69);
425	XNB_ASSERT(pkt.car_size == 69);
426	XNB_ASSERT(pkt.flags == 0);
427	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
428	XNB_ASSERT(pkt.list_len == 1);
429	XNB_ASSERT(pkt.car == 0);
430}
431
432/**
433 * Verify that xnb_ring2pkt can convert a two request packet correctly.
434 * This tests handling of the MORE_DATA flag and cdr
435 */
436static void
437xnb_ring2pkt_2req(char *buffer, size_t buflen)
438{
439	struct xnb_pkt pkt;
440	int num_consumed;
441	struct netif_tx_request *req;
442	RING_IDX start_idx = xnb_unit_pvt.txf.req_prod_pvt;
443
444	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
445	    xnb_unit_pvt.txf.req_prod_pvt);
446	req->flags = NETTXF_more_data;
447	req->size = 100;
448	xnb_unit_pvt.txf.req_prod_pvt++;
449
450	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
451	    xnb_unit_pvt.txf.req_prod_pvt);
452	req->flags = 0;
453	req->size = 40;
454	xnb_unit_pvt.txf.req_prod_pvt++;
455
456	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
457
458	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
459	                            xnb_unit_pvt.txb.req_cons);
460	XNB_ASSERT(num_consumed == 2);
461	XNB_ASSERT(pkt.size == 100);
462	XNB_ASSERT(pkt.car_size == 60);
463	XNB_ASSERT(pkt.flags == 0);
464	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
465	XNB_ASSERT(pkt.list_len == 2);
466	XNB_ASSERT(pkt.car == start_idx);
467	XNB_ASSERT(pkt.cdr == start_idx + 1);
468}
469
470/**
471 * Verify that xnb_ring2pkt can convert a three request packet correctly
472 */
473static void
474xnb_ring2pkt_3req(char *buffer, size_t buflen)
475{
476	struct xnb_pkt pkt;
477	int num_consumed;
478	struct netif_tx_request *req;
479	RING_IDX start_idx = xnb_unit_pvt.txf.req_prod_pvt;
480
481	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
482	    xnb_unit_pvt.txf.req_prod_pvt);
483	req->flags = NETTXF_more_data;
484	req->size = 200;
485	xnb_unit_pvt.txf.req_prod_pvt++;
486
487	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
488	    xnb_unit_pvt.txf.req_prod_pvt);
489	req->flags = NETTXF_more_data;
490	req->size = 40;
491	xnb_unit_pvt.txf.req_prod_pvt++;
492
493	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
494	    xnb_unit_pvt.txf.req_prod_pvt);
495	req->flags = 0;
496	req->size = 50;
497	xnb_unit_pvt.txf.req_prod_pvt++;
498
499	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
500
501	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
502	                            xnb_unit_pvt.txb.req_cons);
503	XNB_ASSERT(num_consumed == 3);
504	XNB_ASSERT(pkt.size == 200);
505	XNB_ASSERT(pkt.car_size == 110);
506	XNB_ASSERT(pkt.flags == 0);
507	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
508	XNB_ASSERT(pkt.list_len == 3);
509	XNB_ASSERT(pkt.car == start_idx);
510	XNB_ASSERT(pkt.cdr == start_idx + 1);
511	XNB_ASSERT(RING_GET_REQUEST(&xnb_unit_pvt.txb, pkt.cdr + 1) == req);
512}
513
514/**
515 * Verify that xnb_ring2pkt can read extra inf
516 */
517static void
518xnb_ring2pkt_extra(char *buffer, size_t buflen)
519{
520	struct xnb_pkt pkt;
521	int num_consumed;
522	struct netif_tx_request *req;
523	struct netif_extra_info *ext;
524	RING_IDX start_idx = xnb_unit_pvt.txf.req_prod_pvt;
525
526	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
527	    xnb_unit_pvt.txf.req_prod_pvt);
528	req->flags = NETTXF_extra_info | NETTXF_more_data;
529	req->size = 150;
530	xnb_unit_pvt.txf.req_prod_pvt++;
531
532	ext = (struct netif_extra_info*) RING_GET_REQUEST(&xnb_unit_pvt.txf,
533	    xnb_unit_pvt.txf.req_prod_pvt);
534	ext->flags = 0;
535	ext->type = XEN_NETIF_EXTRA_TYPE_GSO;
536	ext->u.gso.size = 250;
537	ext->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
538	ext->u.gso.features = 0;
539	xnb_unit_pvt.txf.req_prod_pvt++;
540
541	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
542	    xnb_unit_pvt.txf.req_prod_pvt);
543	req->flags = 0;
544	req->size = 50;
545	xnb_unit_pvt.txf.req_prod_pvt++;
546
547	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
548
549	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
550	                            xnb_unit_pvt.txb.req_cons);
551	XNB_ASSERT(num_consumed == 3);
552	XNB_ASSERT(pkt.extra.flags == 0);
553	XNB_ASSERT(pkt.extra.type == XEN_NETIF_EXTRA_TYPE_GSO);
554	XNB_ASSERT(pkt.extra.u.gso.size == 250);
555	XNB_ASSERT(pkt.extra.u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4);
556	XNB_ASSERT(pkt.size == 150);
557	XNB_ASSERT(pkt.car_size == 100);
558	XNB_ASSERT(pkt.flags == NETTXF_extra_info);
559	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
560	XNB_ASSERT(pkt.list_len == 2);
561	XNB_ASSERT(pkt.car == start_idx);
562	XNB_ASSERT(pkt.cdr == start_idx + 2);
563	XNB_ASSERT(RING_GET_REQUEST(&xnb_unit_pvt.txb, pkt.cdr) == req);
564}
565
566/**
567 * Verify that xnb_ring2pkt will consume no requests if the entire packet is
568 * not yet in the ring
569 */
570static void
571xnb_ring2pkt_partial(char *buffer, size_t buflen)
572{
573	struct xnb_pkt pkt;
574	int num_consumed;
575	struct netif_tx_request *req;
576
577	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
578	    xnb_unit_pvt.txf.req_prod_pvt);
579	req->flags = NETTXF_more_data;
580	req->size = 150;
581	xnb_unit_pvt.txf.req_prod_pvt++;
582
583	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
584
585	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
586	                            xnb_unit_pvt.txb.req_cons);
587	XNB_ASSERT(num_consumed == 0);
588	XNB_ASSERT(! xnb_pkt_is_valid(&pkt));
589}
590
591/**
592 * Verity that xnb_ring2pkt can read a packet whose requests wrap around
593 * the end of the ring
594 */
595static void
596xnb_ring2pkt_wraps(char *buffer, size_t buflen)
597{
598	struct xnb_pkt pkt;
599	int num_consumed;
600	struct netif_tx_request *req;
601	unsigned int rsize;
602
603	/*
604	 * Manually tweak the ring indices to create a ring with no responses
605	 * and the next request slot at position 2 from the end
606	 */
607	rsize = RING_SIZE(&xnb_unit_pvt.txf);
608	xnb_unit_pvt.txf.req_prod_pvt = rsize - 2;
609	xnb_unit_pvt.txf.rsp_cons = rsize - 2;
610	xnb_unit_pvt.txs->req_prod = rsize - 2;
611	xnb_unit_pvt.txs->req_event = rsize - 1;
612	xnb_unit_pvt.txs->rsp_prod = rsize - 2;
613	xnb_unit_pvt.txs->rsp_event = rsize - 1;
614	xnb_unit_pvt.txb.rsp_prod_pvt = rsize - 2;
615	xnb_unit_pvt.txb.req_cons = rsize - 2;
616
617	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
618	    xnb_unit_pvt.txf.req_prod_pvt);
619	req->flags = NETTXF_more_data;
620	req->size = 550;
621	xnb_unit_pvt.txf.req_prod_pvt++;
622
623	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
624	    xnb_unit_pvt.txf.req_prod_pvt);
625	req->flags = NETTXF_more_data;
626	req->size = 100;
627	xnb_unit_pvt.txf.req_prod_pvt++;
628
629	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
630	    xnb_unit_pvt.txf.req_prod_pvt);
631	req->flags = 0;
632	req->size = 50;
633	xnb_unit_pvt.txf.req_prod_pvt++;
634
635	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
636
637	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
638	                            xnb_unit_pvt.txb.req_cons);
639	XNB_ASSERT(num_consumed == 3);
640	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
641	XNB_ASSERT(pkt.list_len == 3);
642	XNB_ASSERT(RING_GET_REQUEST(&xnb_unit_pvt.txb, pkt.cdr + 1) == req);
643}
644
645/**
646 * xnb_txpkt2rsp should do nothing for an empty packet
647 */
648static void
649xnb_txpkt2rsp_emptypkt(char *buffer, size_t buflen)
650{
651	int num_consumed;
652	struct xnb_pkt pkt;
653	netif_tx_back_ring_t txb_backup = xnb_unit_pvt.txb;
654	netif_tx_sring_t txs_backup = *xnb_unit_pvt.txs;
655	pkt.list_len = 0;
656
657	/* must call xnb_ring2pkt just to intialize pkt */
658	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
659	                            xnb_unit_pvt.txb.req_cons);
660	xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);
661	XNB_ASSERT(
662	    memcmp(&txb_backup, &xnb_unit_pvt.txb, sizeof(txb_backup)) == 0);
663	XNB_ASSERT(
664	    memcmp(&txs_backup, xnb_unit_pvt.txs, sizeof(txs_backup)) == 0);
665}
666
667/**
668 * xnb_txpkt2rsp responding to one request
669 */
670static void
671xnb_txpkt2rsp_1req(char *buffer, size_t buflen)
672{
673	uint16_t num_consumed;
674	struct xnb_pkt pkt;
675	struct netif_tx_request *req;
676	struct netif_tx_response *rsp;
677
678	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
679	    xnb_unit_pvt.txf.req_prod_pvt);
680	req->size = 1000;
681	req->flags = 0;
682	xnb_unit_pvt.txf.req_prod_pvt++;
683
684	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
685
686	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
687	                            xnb_unit_pvt.txb.req_cons);
688	xnb_unit_pvt.txb.req_cons += num_consumed;
689
690	xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);
691	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);
692
693	XNB_ASSERT(
694	    xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);
695	XNB_ASSERT(rsp->id == req->id);
696	XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);
697};
698
699/**
700 * xnb_txpkt2rsp responding to 1 data request and 1 extra info
701 */
702static void
703xnb_txpkt2rsp_extra(char *buffer, size_t buflen)
704{
705	uint16_t num_consumed;
706	struct xnb_pkt pkt;
707	struct netif_tx_request *req;
708	netif_extra_info_t *ext;
709	struct netif_tx_response *rsp;
710
711	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
712	    xnb_unit_pvt.txf.req_prod_pvt);
713	req->size = 1000;
714	req->flags = NETTXF_extra_info;
715	req->id = 69;
716	xnb_unit_pvt.txf.req_prod_pvt++;
717
718	ext = (netif_extra_info_t*) RING_GET_REQUEST(&xnb_unit_pvt.txf,
719	    xnb_unit_pvt.txf.req_prod_pvt);
720	ext->type = XEN_NETIF_EXTRA_TYPE_GSO;
721	ext->flags = 0;
722	xnb_unit_pvt.txf.req_prod_pvt++;
723
724	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
725
726	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
727	                            xnb_unit_pvt.txb.req_cons);
728	xnb_unit_pvt.txb.req_cons += num_consumed;
729
730	xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);
731
732	XNB_ASSERT(
733	    xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);
734
735	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);
736	XNB_ASSERT(rsp->id == req->id);
737	XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);
738
739	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
740	    xnb_unit_pvt.txf.rsp_cons + 1);
741	XNB_ASSERT(rsp->status == NETIF_RSP_NULL);
742};
743
744/**
745 * xnb_pkg2rsp responding to 3 data requests and 1 extra info
746 */
747static void
748xnb_txpkt2rsp_long(char *buffer, size_t buflen)
749{
750	uint16_t num_consumed;
751	struct xnb_pkt pkt;
752	struct netif_tx_request *req;
753	netif_extra_info_t *ext;
754	struct netif_tx_response *rsp;
755
756	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
757	    xnb_unit_pvt.txf.req_prod_pvt);
758	req->size = 1000;
759	req->flags = NETTXF_extra_info | NETTXF_more_data;
760	req->id = 254;
761	xnb_unit_pvt.txf.req_prod_pvt++;
762
763	ext = (netif_extra_info_t*) RING_GET_REQUEST(&xnb_unit_pvt.txf,
764	    xnb_unit_pvt.txf.req_prod_pvt);
765	ext->type = XEN_NETIF_EXTRA_TYPE_GSO;
766	ext->flags = 0;
767	xnb_unit_pvt.txf.req_prod_pvt++;
768
769	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
770	    xnb_unit_pvt.txf.req_prod_pvt);
771	req->size = 300;
772	req->flags = NETTXF_more_data;
773	req->id = 1034;
774	xnb_unit_pvt.txf.req_prod_pvt++;
775
776	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
777	    xnb_unit_pvt.txf.req_prod_pvt);
778	req->size = 400;
779	req->flags = 0;
780	req->id = 34;
781	xnb_unit_pvt.txf.req_prod_pvt++;
782
783	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
784
785	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
786	                            xnb_unit_pvt.txb.req_cons);
787	xnb_unit_pvt.txb.req_cons += num_consumed;
788
789	xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);
790
791	XNB_ASSERT(
792	    xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);
793
794	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);
795	XNB_ASSERT(rsp->id ==
796	    RING_GET_REQUEST(&xnb_unit_pvt.txf, 0)->id);
797	XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);
798
799	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
800	    xnb_unit_pvt.txf.rsp_cons + 1);
801	XNB_ASSERT(rsp->status == NETIF_RSP_NULL);
802
803	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
804	    xnb_unit_pvt.txf.rsp_cons + 2);
805	XNB_ASSERT(rsp->id ==
806	    RING_GET_REQUEST(&xnb_unit_pvt.txf, 2)->id);
807	XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);
808
809	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
810	    xnb_unit_pvt.txf.rsp_cons + 3);
811	XNB_ASSERT(rsp->id ==
812	    RING_GET_REQUEST(&xnb_unit_pvt.txf, 3)->id);
813	XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);
814}
815
816/**
817 * xnb_txpkt2rsp responding to an invalid packet.
818 * Note: this test will result in an error message being printed to the console
819 * such as:
820 * xnb(xnb_ring2pkt:1306): Unknown extra info type 255.  Discarding packet
821 */
822static void
823xnb_txpkt2rsp_invalid(char *buffer, size_t buflen)
824{
825	uint16_t num_consumed;
826	struct xnb_pkt pkt;
827	struct netif_tx_request *req;
828	netif_extra_info_t *ext;
829	struct netif_tx_response *rsp;
830
831	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
832	    xnb_unit_pvt.txf.req_prod_pvt);
833	req->size = 1000;
834	req->flags = NETTXF_extra_info;
835	req->id = 69;
836	xnb_unit_pvt.txf.req_prod_pvt++;
837
838	ext = (netif_extra_info_t*) RING_GET_REQUEST(&xnb_unit_pvt.txf,
839	    xnb_unit_pvt.txf.req_prod_pvt);
840	ext->type = 0xFF;	/* Invalid extra type */
841	ext->flags = 0;
842	xnb_unit_pvt.txf.req_prod_pvt++;
843
844	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
845
846	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
847	                            xnb_unit_pvt.txb.req_cons);
848	xnb_unit_pvt.txb.req_cons += num_consumed;
849	XNB_ASSERT(! xnb_pkt_is_valid(&pkt));
850
851	xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);
852
853	XNB_ASSERT(
854	    xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);
855
856	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);
857	XNB_ASSERT(rsp->id == req->id);
858	XNB_ASSERT(rsp->status == NETIF_RSP_ERROR);
859
860	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
861	    xnb_unit_pvt.txf.rsp_cons + 1);
862	XNB_ASSERT(rsp->status == NETIF_RSP_NULL);
863};
864
865/**
866 * xnb_txpkt2rsp responding to one request which caused an error
867 */
868static void
869xnb_txpkt2rsp_error(char *buffer, size_t buflen)
870{
871	uint16_t num_consumed;
872	struct xnb_pkt pkt;
873	struct netif_tx_request *req;
874	struct netif_tx_response *rsp;
875
876	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
877	    xnb_unit_pvt.txf.req_prod_pvt);
878	req->size = 1000;
879	req->flags = 0;
880	xnb_unit_pvt.txf.req_prod_pvt++;
881
882	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
883
884	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
885	                            xnb_unit_pvt.txb.req_cons);
886	xnb_unit_pvt.txb.req_cons += num_consumed;
887
888	xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 1);
889	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);
890
891	XNB_ASSERT(
892	    xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);
893	XNB_ASSERT(rsp->id == req->id);
894	XNB_ASSERT(rsp->status == NETIF_RSP_ERROR);
895};
896
897/**
898 * xnb_txpkt2rsp's responses wrap around the end of the ring
899 */
900static void
901xnb_txpkt2rsp_wraps(char *buffer, size_t buflen)
902{
903	struct xnb_pkt pkt;
904	int num_consumed;
905	struct netif_tx_request *req;
906	struct netif_tx_response *rsp;
907	unsigned int rsize;
908
909	/*
910	 * Manually tweak the ring indices to create a ring with no responses
911	 * and the next request slot at position 2 from the end
912	 */
913	rsize = RING_SIZE(&xnb_unit_pvt.txf);
914	xnb_unit_pvt.txf.req_prod_pvt = rsize - 2;
915	xnb_unit_pvt.txf.rsp_cons = rsize - 2;
916	xnb_unit_pvt.txs->req_prod = rsize - 2;
917	xnb_unit_pvt.txs->req_event = rsize - 1;
918	xnb_unit_pvt.txs->rsp_prod = rsize - 2;
919	xnb_unit_pvt.txs->rsp_event = rsize - 1;
920	xnb_unit_pvt.txb.rsp_prod_pvt = rsize - 2;
921	xnb_unit_pvt.txb.req_cons = rsize - 2;
922
923	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
924	    xnb_unit_pvt.txf.req_prod_pvt);
925	req->flags = NETTXF_more_data;
926	req->size = 550;
927	req->id = 1;
928	xnb_unit_pvt.txf.req_prod_pvt++;
929
930	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
931	    xnb_unit_pvt.txf.req_prod_pvt);
932	req->flags = NETTXF_more_data;
933	req->size = 100;
934	req->id = 2;
935	xnb_unit_pvt.txf.req_prod_pvt++;
936
937	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
938	    xnb_unit_pvt.txf.req_prod_pvt);
939	req->flags = 0;
940	req->size = 50;
941	req->id = 3;
942	xnb_unit_pvt.txf.req_prod_pvt++;
943
944	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
945
946	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
947	                            xnb_unit_pvt.txb.req_cons);
948
949	xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);
950
951	XNB_ASSERT(
952	    xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);
953	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
954	    xnb_unit_pvt.txf.rsp_cons + 2);
955	XNB_ASSERT(rsp->id == req->id);
956	XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);
957}
958
959/**
960 * Helper function used to setup pkt2mbufc tests
961 * \param size     size in bytes of the single request to push to the ring
962 * \param flags		optional flags to put in the netif request
963 * \param[out] pkt the returned packet object
964 * \return number of requests consumed from the ring
965 */
966static int
967xnb_get1pkt(struct xnb_pkt *pkt, size_t size, uint16_t flags)
968{
969	struct netif_tx_request *req;
970
971	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
972	    xnb_unit_pvt.txf.req_prod_pvt);
973	req->flags = flags;
974	req->size = size;
975	xnb_unit_pvt.txf.req_prod_pvt++;
976
977	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
978
979	return xnb_ring2pkt(pkt, &xnb_unit_pvt.txb,
980	                            xnb_unit_pvt.txb.req_cons);
981}
982
983/**
984 * xnb_pkt2mbufc on an empty packet
985 */
986static void
987xnb_pkt2mbufc_empty(char *buffer, size_t buflen)
988{
989	int num_consumed;
990	struct xnb_pkt pkt;
991	struct mbuf *pMbuf;
992	pkt.list_len = 0;
993
994	/* must call xnb_ring2pkt just to intialize pkt */
995	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
996	                            xnb_unit_pvt.txb.req_cons);
997	pkt.size = 0;
998	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
999	safe_m_freem(&pMbuf);
1000}
1001
1002/**
1003 * xnb_pkt2mbufc on short packet that can fit in an mbuf internal buffer
1004 */
1005static void
1006xnb_pkt2mbufc_short(char *buffer, size_t buflen)
1007{
1008	const size_t size = MINCLSIZE - 1;
1009	struct xnb_pkt pkt;
1010	struct mbuf *pMbuf;
1011
1012	xnb_get1pkt(&pkt, size, 0);
1013
1014	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
1015	XNB_ASSERT(M_TRAILINGSPACE(pMbuf) >= size);
1016	safe_m_freem(&pMbuf);
1017}
1018
1019/**
1020 * xnb_pkt2mbufc on short packet whose checksum was validated by the netfron
1021 */
1022static void
1023xnb_pkt2mbufc_csum(char *buffer, size_t buflen)
1024{
1025	const size_t size = MINCLSIZE - 1;
1026	struct xnb_pkt pkt;
1027	struct mbuf *pMbuf;
1028
1029	xnb_get1pkt(&pkt, size, NETTXF_data_validated);
1030
1031	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
1032	XNB_ASSERT(M_TRAILINGSPACE(pMbuf) >= size);
1033	XNB_ASSERT(pMbuf->m_pkthdr.csum_flags & CSUM_IP_CHECKED);
1034	XNB_ASSERT(pMbuf->m_pkthdr.csum_flags & CSUM_IP_VALID);
1035	XNB_ASSERT(pMbuf->m_pkthdr.csum_flags & CSUM_DATA_VALID);
1036	XNB_ASSERT(pMbuf->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR);
1037	safe_m_freem(&pMbuf);
1038}
1039
1040/**
1041 * xnb_pkt2mbufc on packet that can fit in one cluster
1042 */
1043static void
1044xnb_pkt2mbufc_1cluster(char *buffer, size_t buflen)
1045{
1046	const size_t size = MINCLSIZE;
1047	struct xnb_pkt pkt;
1048	struct mbuf *pMbuf;
1049
1050	xnb_get1pkt(&pkt, size, 0);
1051
1052	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
1053	XNB_ASSERT(M_TRAILINGSPACE(pMbuf) >= size);
1054	safe_m_freem(&pMbuf);
1055}
1056
1057/**
1058 * xnb_pkt2mbufc on packet that cannot fit in one regular cluster
1059 */
1060static void
1061xnb_pkt2mbufc_largecluster(char *buffer, size_t buflen)
1062{
1063	const size_t size = MCLBYTES + 1;
1064	struct xnb_pkt pkt;
1065	struct mbuf *pMbuf;
1066
1067	xnb_get1pkt(&pkt, size, 0);
1068
1069	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
1070	XNB_ASSERT(M_TRAILINGSPACE(pMbuf) >= size);
1071	safe_m_freem(&pMbuf);
1072}
1073
1074/**
1075 * xnb_pkt2mbufc on packet that cannot fit in one clusters
1076 */
1077static void
1078xnb_pkt2mbufc_2cluster(char *buffer, size_t buflen)
1079{
1080	const size_t size = 2 * MCLBYTES + 1;
1081	size_t space = 0;
1082	struct xnb_pkt pkt;
1083	struct mbuf *pMbuf;
1084	struct mbuf *m;
1085
1086	xnb_get1pkt(&pkt, size, 0);
1087
1088	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
1089
1090	for (m = pMbuf; m != NULL; m = m->m_next) {
1091		space += M_TRAILINGSPACE(m);
1092	}
1093	XNB_ASSERT(space >= size);
1094	safe_m_freem(&pMbuf);
1095}
1096
1097/**
1098 * xnb_txpkt2gnttab on an empty packet.  Should return empty gnttab
1099 */
1100static void
1101xnb_txpkt2gnttab_empty(char *buffer, size_t buflen)
1102{
1103	int n_entries;
1104	struct xnb_pkt pkt;
1105	struct mbuf *pMbuf;
1106	pkt.list_len = 0;
1107
1108	/* must call xnb_ring2pkt just to intialize pkt */
1109	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);
1110	pkt.size = 0;
1111	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
1112	n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
1113	    &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);
1114	XNB_ASSERT(n_entries == 0);
1115	safe_m_freem(&pMbuf);
1116}
1117
1118/**
1119 * xnb_txpkt2gnttab on a short packet, that can fit in one mbuf internal buffer
1120 * and has one request
1121 */
1122static void
1123xnb_txpkt2gnttab_short(char *buffer, size_t buflen)
1124{
1125	const size_t size = MINCLSIZE - 1;
1126	int n_entries;
1127	struct xnb_pkt pkt;
1128	struct mbuf *pMbuf;
1129
1130	struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
1131	    xnb_unit_pvt.txf.req_prod_pvt);
1132	req->flags = 0;
1133	req->size = size;
1134	req->gref = 7;
1135	req->offset = 17;
1136	xnb_unit_pvt.txf.req_prod_pvt++;
1137
1138	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
1139
1140	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);
1141
1142	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
1143	n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
1144	    &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);
1145	XNB_ASSERT(n_entries == 1);
1146	XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == size);
1147	/* flags should indicate gref's for source */
1148	XNB_ASSERT(xnb_unit_pvt.gnttab[0].flags & GNTCOPY_source_gref);
1149	XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.offset == req->offset);
1150	XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.domid == DOMID_SELF);
1151	XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.offset == virt_to_offset(
1152	      mtod(pMbuf, vm_offset_t)));
1153	XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.u.gmfn ==
1154		virt_to_mfn(mtod(pMbuf, vm_offset_t)));
1155	XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.domid == DOMID_FIRST_RESERVED);
1156	safe_m_freem(&pMbuf);
1157}
1158
1159/**
1160 * xnb_txpkt2gnttab on a packet with two requests, that can fit into a single
1161 * mbuf cluster
1162 */
1163static void
1164xnb_txpkt2gnttab_2req(char *buffer, size_t buflen)
1165{
1166	int n_entries;
1167	struct xnb_pkt pkt;
1168	struct mbuf *pMbuf;
1169
1170	struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
1171	    xnb_unit_pvt.txf.req_prod_pvt);
1172	req->flags = NETTXF_more_data;
1173	req->size = 1900;
1174	req->gref = 7;
1175	req->offset = 0;
1176	xnb_unit_pvt.txf.req_prod_pvt++;
1177
1178	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
1179	    xnb_unit_pvt.txf.req_prod_pvt);
1180	req->flags = 0;
1181	req->size = 500;
1182	req->gref = 8;
1183	req->offset = 0;
1184	xnb_unit_pvt.txf.req_prod_pvt++;
1185
1186	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
1187
1188	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);
1189
1190	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
1191	n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
1192	    &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);
1193
1194	XNB_ASSERT(n_entries == 2);
1195	XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == 1400);
1196	XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.offset == virt_to_offset(
1197	      mtod(pMbuf, vm_offset_t)));
1198
1199	XNB_ASSERT(xnb_unit_pvt.gnttab[1].len == 500);
1200	XNB_ASSERT(xnb_unit_pvt.gnttab[1].dest.offset == virt_to_offset(
1201	      mtod(pMbuf, vm_offset_t) + 1400));
1202	safe_m_freem(&pMbuf);
1203}
1204
1205/**
1206 * xnb_txpkt2gnttab on a single request that spans two mbuf clusters
1207 */
1208static void
1209xnb_txpkt2gnttab_2cluster(char *buffer, size_t buflen)
1210{
1211	int n_entries;
1212	struct xnb_pkt pkt;
1213	struct mbuf *pMbuf;
1214	const uint16_t data_this_transaction = (MCLBYTES*2) + 1;
1215
1216	struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
1217	    xnb_unit_pvt.txf.req_prod_pvt);
1218	req->flags = 0;
1219	req->size = data_this_transaction;
1220	req->gref = 8;
1221	req->offset = 0;
1222	xnb_unit_pvt.txf.req_prod_pvt++;
1223
1224	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
1225	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);
1226
1227	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
1228	XNB_ASSERT(pMbuf != NULL);
1229	if (pMbuf == NULL)
1230		return;
1231
1232	n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
1233	    &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);
1234
1235	if (M_TRAILINGSPACE(pMbuf) == MCLBYTES) {
1236		/* there should be three mbufs and three gnttab entries */
1237		XNB_ASSERT(n_entries == 3);
1238		XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == MCLBYTES);
1239		XNB_ASSERT(
1240		    xnb_unit_pvt.gnttab[0].dest.offset == virt_to_offset(
1241		      mtod(pMbuf, vm_offset_t)));
1242		XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.offset == 0);
1243
1244		XNB_ASSERT(xnb_unit_pvt.gnttab[1].len == MCLBYTES);
1245		XNB_ASSERT(
1246		    xnb_unit_pvt.gnttab[1].dest.offset == virt_to_offset(
1247		      mtod(pMbuf->m_next, vm_offset_t)));
1248		XNB_ASSERT(xnb_unit_pvt.gnttab[1].source.offset == MCLBYTES);
1249
1250		XNB_ASSERT(xnb_unit_pvt.gnttab[2].len == 1);
1251		XNB_ASSERT(
1252		    xnb_unit_pvt.gnttab[2].dest.offset == virt_to_offset(
1253		      mtod(pMbuf->m_next, vm_offset_t)));
1254		XNB_ASSERT(xnb_unit_pvt.gnttab[2].source.offset == 2 *
1255			    MCLBYTES);
1256	} else if (M_TRAILINGSPACE(pMbuf) == 2 * MCLBYTES) {
1257		/* there should be two mbufs and two gnttab entries */
1258		XNB_ASSERT(n_entries == 2);
1259		XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == 2 * MCLBYTES);
1260		XNB_ASSERT(
1261		    xnb_unit_pvt.gnttab[0].dest.offset == virt_to_offset(
1262		      mtod(pMbuf, vm_offset_t)));
1263		XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.offset == 0);
1264
1265		XNB_ASSERT(xnb_unit_pvt.gnttab[1].len == 1);
1266		XNB_ASSERT(
1267		    xnb_unit_pvt.gnttab[1].dest.offset == virt_to_offset(
1268		      mtod(pMbuf->m_next, vm_offset_t)));
1269		XNB_ASSERT(
1270		    xnb_unit_pvt.gnttab[1].source.offset == 2 * MCLBYTES);
1271
1272	} else {
1273		/* should never get here */
1274		XNB_ASSERT(0);
1275	}
1276	m_freem(pMbuf);
1277}
1278
1279/**
1280 * xnb_update_mbufc on a short packet that only has one gnttab entry
1281 */
1282static void
1283xnb_update_mbufc_short(char *buffer, size_t buflen)
1284{
1285	const size_t size = MINCLSIZE - 1;
1286	int n_entries;
1287	struct xnb_pkt pkt;
1288	struct mbuf *pMbuf;
1289
1290	struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
1291	    xnb_unit_pvt.txf.req_prod_pvt);
1292	req->flags = 0;
1293	req->size = size;
1294	req->gref = 7;
1295	req->offset = 17;
1296	xnb_unit_pvt.txf.req_prod_pvt++;
1297
1298	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
1299
1300	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);
1301
1302	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
1303	n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
1304	    &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);
1305
1306	/* Update grant table's status fields as the hypervisor call would */
1307	xnb_unit_pvt.gnttab[0].status = GNTST_okay;
1308
1309	xnb_update_mbufc(pMbuf, xnb_unit_pvt.gnttab, n_entries);
1310	XNB_ASSERT(pMbuf->m_len == size);
1311	XNB_ASSERT(pMbuf->m_pkthdr.len == size);
1312	safe_m_freem(&pMbuf);
1313}
1314
1315/**
1316 * xnb_update_mbufc on a packet with two requests, that can fit into a single
1317 * mbuf cluster
1318 */
1319static void
1320xnb_update_mbufc_2req(char *buffer, size_t buflen)
1321{
1322	int n_entries;
1323	struct xnb_pkt pkt;
1324	struct mbuf *pMbuf;
1325
1326	struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
1327	    xnb_unit_pvt.txf.req_prod_pvt);
1328	req->flags = NETTXF_more_data;
1329	req->size = 1900;
1330	req->gref = 7;
1331	req->offset = 0;
1332	xnb_unit_pvt.txf.req_prod_pvt++;
1333
1334	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
1335	    xnb_unit_pvt.txf.req_prod_pvt);
1336	req->flags = 0;
1337	req->size = 500;
1338	req->gref = 8;
1339	req->offset = 0;
1340	xnb_unit_pvt.txf.req_prod_pvt++;
1341
1342	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
1343
1344	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);
1345
1346	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
1347	n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
1348	    &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);
1349
1350	/* Update grant table's status fields as the hypervisor call would */
1351	xnb_unit_pvt.gnttab[0].status = GNTST_okay;
1352	xnb_unit_pvt.gnttab[1].status = GNTST_okay;
1353
1354	xnb_update_mbufc(pMbuf, xnb_unit_pvt.gnttab, n_entries);
1355	XNB_ASSERT(n_entries == 2);
1356	XNB_ASSERT(pMbuf->m_pkthdr.len == 1900);
1357	XNB_ASSERT(pMbuf->m_len == 1900);
1358
1359	safe_m_freem(&pMbuf);
1360}
1361
1362/**
1363 * xnb_update_mbufc on a single request that spans two mbuf clusters
1364 */
1365static void
1366xnb_update_mbufc_2cluster(char *buffer, size_t buflen)
1367{
1368	int i;
1369	int n_entries;
1370	struct xnb_pkt pkt;
1371	struct mbuf *pMbuf;
1372	const uint16_t data_this_transaction = (MCLBYTES*2) + 1;
1373
1374	struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
1375	    xnb_unit_pvt.txf.req_prod_pvt);
1376	req->flags = 0;
1377	req->size = data_this_transaction;
1378	req->gref = 8;
1379	req->offset = 0;
1380	xnb_unit_pvt.txf.req_prod_pvt++;
1381
1382	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
1383	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);
1384
1385	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
1386	n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
1387	    &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);
1388
1389	/* Update grant table's status fields */
1390	for (i = 0; i < n_entries; i++) {
1391		xnb_unit_pvt.gnttab[0].status = GNTST_okay;
1392	}
1393	xnb_update_mbufc(pMbuf, xnb_unit_pvt.gnttab, n_entries);
1394
1395	if (n_entries == 3) {
1396		/* there should be three mbufs and three gnttab entries */
1397		XNB_ASSERT(pMbuf->m_pkthdr.len == data_this_transaction);
1398		XNB_ASSERT(pMbuf->m_len == MCLBYTES);
1399		XNB_ASSERT(pMbuf->m_next->m_len == MCLBYTES);
1400		XNB_ASSERT(pMbuf->m_next->m_next->m_len == 1);
1401	} else if (n_entries == 2) {
1402		/* there should be two mbufs and two gnttab entries */
1403		XNB_ASSERT(n_entries == 2);
1404		XNB_ASSERT(pMbuf->m_pkthdr.len == data_this_transaction);
1405		XNB_ASSERT(pMbuf->m_len == 2 * MCLBYTES);
1406		XNB_ASSERT(pMbuf->m_next->m_len == 1);
1407	} else {
1408		/* should never get here */
1409		XNB_ASSERT(0);
1410	}
1411	safe_m_freem(&pMbuf);
1412}
1413
1414/** xnb_mbufc2pkt on an empty mbufc */
1415static void
1416xnb_mbufc2pkt_empty(char *buffer, size_t buflen) {
1417	struct xnb_pkt pkt;
1418	int free_slots = 64;
1419	struct mbuf *mbuf;
1420
1421	mbuf = m_get(M_WAITOK, MT_DATA);
1422	/*
1423	 * note: it is illegal to set M_PKTHDR on a mbuf with no data.  Doing so
1424	 * will cause m_freem to segfault
1425	 */
1426	XNB_ASSERT(mbuf->m_len == 0);
1427
1428	xnb_mbufc2pkt(mbuf, &pkt, 0, free_slots);
1429	XNB_ASSERT(! xnb_pkt_is_valid(&pkt));
1430
1431	safe_m_freem(&mbuf);
1432}
1433
1434/** xnb_mbufc2pkt on a short mbufc */
1435static void
1436xnb_mbufc2pkt_short(char *buffer, size_t buflen) {
1437	struct xnb_pkt pkt;
1438	size_t size = 128;
1439	int free_slots = 64;
1440	RING_IDX start = 9;
1441	struct mbuf *mbuf;
1442
1443	mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
1444	mbuf->m_flags |= M_PKTHDR;
1445	mbuf->m_pkthdr.len = size;
1446	mbuf->m_len = size;
1447
1448	xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
1449	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
1450	XNB_ASSERT(pkt.size == size);
1451	XNB_ASSERT(pkt.car_size == size);
1452	XNB_ASSERT(! (pkt.flags &
1453	      (NETRXF_more_data | NETRXF_extra_info)));
1454	XNB_ASSERT(pkt.list_len == 1);
1455	XNB_ASSERT(pkt.car == start);
1456
1457	safe_m_freem(&mbuf);
1458}
1459
1460/** xnb_mbufc2pkt on a single mbuf with an mbuf cluster */
1461static void
1462xnb_mbufc2pkt_1cluster(char *buffer, size_t buflen) {
1463	struct xnb_pkt pkt;
1464	size_t size = MCLBYTES;
1465	int free_slots = 32;
1466	RING_IDX start = 12;
1467	struct mbuf *mbuf;
1468
1469	mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
1470	mbuf->m_flags |= M_PKTHDR;
1471	mbuf->m_pkthdr.len = size;
1472	mbuf->m_len = size;
1473
1474	xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
1475	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
1476	XNB_ASSERT(pkt.size == size);
1477	XNB_ASSERT(pkt.car_size == size);
1478	XNB_ASSERT(! (pkt.flags &
1479	      (NETRXF_more_data | NETRXF_extra_info)));
1480	XNB_ASSERT(pkt.list_len == 1);
1481	XNB_ASSERT(pkt.car == start);
1482
1483	safe_m_freem(&mbuf);
1484}
1485
1486/** xnb_mbufc2pkt on a two-mbuf chain with short data regions */
1487static void
1488xnb_mbufc2pkt_2short(char *buffer, size_t buflen) {
1489	struct xnb_pkt pkt;
1490	size_t size1 = MHLEN - 5;
1491	size_t size2 = MHLEN - 15;
1492	int free_slots = 32;
1493	RING_IDX start = 14;
1494	struct mbuf *mbufc, *mbufc2;
1495
1496	mbufc = m_getm(NULL, size1, M_WAITOK, MT_DATA);
1497	XNB_ASSERT(mbufc != NULL);
1498	if (mbufc == NULL)
1499		return;
1500	mbufc->m_flags |= M_PKTHDR;
1501
1502	mbufc2 = m_getm(mbufc, size2, M_WAITOK, MT_DATA);
1503	XNB_ASSERT(mbufc2 != NULL);
1504	if (mbufc2 == NULL) {
1505		safe_m_freem(&mbufc);
1506		return;
1507	}
1508	mbufc2->m_pkthdr.len = size1 + size2;
1509	mbufc2->m_len = size1;
1510
1511	xnb_mbufc2pkt(mbufc2, &pkt, start, free_slots);
1512	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
1513	XNB_ASSERT(pkt.size == size1 + size2);
1514	XNB_ASSERT(pkt.car == start);
1515	/*
1516	 * The second m_getm may allocate a new mbuf and append
1517	 * it to the chain, or it may simply extend the first mbuf.
1518	 */
1519	if (mbufc2->m_next != NULL) {
1520		XNB_ASSERT(pkt.car_size == size1);
1521		XNB_ASSERT(pkt.list_len == 1);
1522		XNB_ASSERT(pkt.cdr == start + 1);
1523	}
1524
1525	safe_m_freem(&mbufc2);
1526}
1527
1528/** xnb_mbufc2pkt on a mbuf chain with >1 mbuf cluster */
1529static void
1530xnb_mbufc2pkt_long(char *buffer, size_t buflen) {
1531	struct xnb_pkt pkt;
1532	size_t size = 14 * MCLBYTES / 3;
1533	size_t size_remaining;
1534	int free_slots = 15;
1535	RING_IDX start = 3;
1536	struct mbuf *mbufc, *m;
1537
1538	mbufc = m_getm(NULL, size, M_WAITOK, MT_DATA);
1539	XNB_ASSERT(mbufc != NULL);
1540	if (mbufc == NULL)
1541		return;
1542	mbufc->m_flags |= M_PKTHDR;
1543
1544	mbufc->m_pkthdr.len = size;
1545	size_remaining = size;
1546	for (m = mbufc; m != NULL; m = m->m_next) {
1547		m->m_len = MAX(M_TRAILINGSPACE(m), size_remaining);
1548		size_remaining -= m->m_len;
1549	}
1550
1551	xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);
1552	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
1553	XNB_ASSERT(pkt.size == size);
1554	XNB_ASSERT(pkt.car == start);
1555	XNB_ASSERT(pkt.car_size = mbufc->m_len);
1556	/*
1557	 * There should be >1 response in the packet, and there is no
1558	 * extra info.
1559	 */
1560	XNB_ASSERT(! (pkt.flags & NETRXF_extra_info));
1561	XNB_ASSERT(pkt.cdr == pkt.car + 1);
1562
1563	safe_m_freem(&mbufc);
1564}
1565
1566/** xnb_mbufc2pkt on a mbuf chain with >1 mbuf cluster and extra info */
1567static void
1568xnb_mbufc2pkt_extra(char *buffer, size_t buflen) {
1569	struct xnb_pkt pkt;
1570	size_t size = 14 * MCLBYTES / 3;
1571	size_t size_remaining;
1572	int free_slots = 15;
1573	RING_IDX start = 3;
1574	struct mbuf *mbufc, *m;
1575
1576	mbufc = m_getm(NULL, size, M_WAITOK, MT_DATA);
1577	XNB_ASSERT(mbufc != NULL);
1578	if (mbufc == NULL)
1579		return;
1580
1581	mbufc->m_flags |= M_PKTHDR;
1582	mbufc->m_pkthdr.len = size;
1583	mbufc->m_pkthdr.csum_flags |= CSUM_TSO;
1584	mbufc->m_pkthdr.tso_segsz = TCP_MSS - 40;
1585	size_remaining = size;
1586	for (m = mbufc; m != NULL; m = m->m_next) {
1587		m->m_len = MAX(M_TRAILINGSPACE(m), size_remaining);
1588		size_remaining -= m->m_len;
1589	}
1590
1591	xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);
1592	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
1593	XNB_ASSERT(pkt.size == size);
1594	XNB_ASSERT(pkt.car == start);
1595	XNB_ASSERT(pkt.car_size = mbufc->m_len);
1596	/* There should be >1 response in the packet, there is extra info */
1597	XNB_ASSERT(pkt.flags & NETRXF_extra_info);
1598	XNB_ASSERT(pkt.flags & NETRXF_data_validated);
1599	XNB_ASSERT(pkt.cdr == pkt.car + 2);
1600	XNB_ASSERT(pkt.extra.u.gso.size = mbufc->m_pkthdr.tso_segsz);
1601	XNB_ASSERT(pkt.extra.type == XEN_NETIF_EXTRA_TYPE_GSO);
1602	XNB_ASSERT(! (pkt.extra.flags & XEN_NETIF_EXTRA_FLAG_MORE));
1603
1604	safe_m_freem(&mbufc);
1605}
1606
1607/** xnb_mbufc2pkt with insufficient space in the ring */
1608static void
1609xnb_mbufc2pkt_nospace(char *buffer, size_t buflen) {
1610	struct xnb_pkt pkt;
1611	size_t size = 14 * MCLBYTES / 3;
1612	size_t size_remaining;
1613	int free_slots = 2;
1614	RING_IDX start = 3;
1615	struct mbuf *mbufc, *m;
1616	int error;
1617
1618	mbufc = m_getm(NULL, size, M_WAITOK, MT_DATA);
1619	XNB_ASSERT(mbufc != NULL);
1620	if (mbufc == NULL)
1621		return;
1622	mbufc->m_flags |= M_PKTHDR;
1623
1624	mbufc->m_pkthdr.len = size;
1625	size_remaining = size;
1626	for (m = mbufc; m != NULL; m = m->m_next) {
1627		m->m_len = MAX(M_TRAILINGSPACE(m), size_remaining);
1628		size_remaining -= m->m_len;
1629	}
1630
1631	error = xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);
1632	XNB_ASSERT(error == EAGAIN);
1633	XNB_ASSERT(! xnb_pkt_is_valid(&pkt));
1634
1635	safe_m_freem(&mbufc);
1636}
1637
1638/**
1639 * xnb_rxpkt2gnttab on an empty packet.  Should return empty gnttab
1640 */
1641static void
1642xnb_rxpkt2gnttab_empty(char *buffer, size_t buflen)
1643{
1644	struct xnb_pkt pkt;
1645	int nr_entries;
1646	int free_slots = 60;
1647	struct mbuf *mbuf;
1648
1649	mbuf = m_get(M_WAITOK, MT_DATA);
1650
1651	xnb_mbufc2pkt(mbuf, &pkt, 0, free_slots);
1652	nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
1653			&xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
1654
1655	XNB_ASSERT(nr_entries == 0);
1656
1657	safe_m_freem(&mbuf);
1658}
1659
1660/** xnb_rxpkt2gnttab on a short packet without extra data */
1661static void
1662xnb_rxpkt2gnttab_short(char *buffer, size_t buflen) {
1663	struct xnb_pkt pkt;
1664	int nr_entries;
1665	size_t size = 128;
1666	int free_slots = 60;
1667	RING_IDX start = 9;
1668	struct netif_rx_request *req;
1669	struct mbuf *mbuf;
1670
1671	mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
1672	mbuf->m_flags |= M_PKTHDR;
1673	mbuf->m_pkthdr.len = size;
1674	mbuf->m_len = size;
1675
1676	xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
1677	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf,
1678			       xnb_unit_pvt.txf.req_prod_pvt);
1679	req->gref = 7;
1680
1681	nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
1682				      &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
1683
1684	XNB_ASSERT(nr_entries == 1);
1685	XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == size);
1686	/* flags should indicate gref's for dest */
1687	XNB_ASSERT(xnb_unit_pvt.gnttab[0].flags & GNTCOPY_dest_gref);
1688	XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.offset == 0);
1689	XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.domid == DOMID_SELF);
1690	XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.offset == virt_to_offset(
1691		   mtod(mbuf, vm_offset_t)));
1692	XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.u.gmfn ==
1693		   virt_to_mfn(mtod(mbuf, vm_offset_t)));
1694	XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.domid == DOMID_FIRST_RESERVED);
1695
1696	safe_m_freem(&mbuf);
1697}
1698
1699/**
1700 * xnb_rxpkt2gnttab on a packet with two different mbufs in a single chai
1701 */
1702static void
1703xnb_rxpkt2gnttab_2req(char *buffer, size_t buflen)
1704{
1705	struct xnb_pkt pkt;
1706	int nr_entries;
1707	int i, num_mbufs;
1708	size_t total_granted_size = 0;
1709	size_t size = MJUMPAGESIZE + 1;
1710	int free_slots = 60;
1711	RING_IDX start = 11;
1712	struct netif_rx_request *req;
1713	struct mbuf *mbuf, *m;
1714
1715	mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
1716	mbuf->m_flags |= M_PKTHDR;
1717	mbuf->m_pkthdr.len = size;
1718	mbuf->m_len = size;
1719
1720	xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
1721
1722	for (i = 0, m=mbuf; m != NULL; i++, m = m->m_next) {
1723		req = RING_GET_REQUEST(&xnb_unit_pvt.rxf,
1724		    xnb_unit_pvt.txf.req_prod_pvt);
1725		req->gref = i;
1726		req->id = 5;
1727	}
1728	num_mbufs = i;
1729
1730	nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
1731			&xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
1732
1733	XNB_ASSERT(nr_entries >= num_mbufs);
1734	for (i = 0; i < nr_entries; i++) {
1735		int end_offset = xnb_unit_pvt.gnttab[i].len +
1736			xnb_unit_pvt.gnttab[i].dest.offset;
1737		XNB_ASSERT(end_offset <= PAGE_SIZE);
1738		total_granted_size += xnb_unit_pvt.gnttab[i].len;
1739	}
1740	XNB_ASSERT(total_granted_size == size);
1741}
1742
1743/**
1744 * xnb_rxpkt2rsp on an empty packet.  Shouldn't make any response
1745 */
1746static void
1747xnb_rxpkt2rsp_empty(char *buffer, size_t buflen)
1748{
1749	struct xnb_pkt pkt;
1750	int nr_entries;
1751	int nr_reqs;
1752	int free_slots = 60;
1753	netif_rx_back_ring_t rxb_backup = xnb_unit_pvt.rxb;
1754	netif_rx_sring_t rxs_backup = *xnb_unit_pvt.rxs;
1755	struct mbuf *mbuf;
1756
1757	mbuf = m_get(M_WAITOK, MT_DATA);
1758
1759	xnb_mbufc2pkt(mbuf, &pkt, 0, free_slots);
1760	nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
1761			&xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
1762
1763	nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
1764	    &xnb_unit_pvt.rxb);
1765	XNB_ASSERT(nr_reqs == 0);
1766	XNB_ASSERT(
1767	    memcmp(&rxb_backup, &xnb_unit_pvt.rxb, sizeof(rxb_backup)) == 0);
1768	XNB_ASSERT(
1769	    memcmp(&rxs_backup, xnb_unit_pvt.rxs, sizeof(rxs_backup)) == 0);
1770
1771	safe_m_freem(&mbuf);
1772}
1773
1774/**
1775 * xnb_rxpkt2rsp on a short packet with no extras
1776 */
1777static void
1778xnb_rxpkt2rsp_short(char *buffer, size_t buflen)
1779{
1780	struct xnb_pkt pkt;
1781	int nr_entries, nr_reqs;
1782	size_t size = 128;
1783	int free_slots = 60;
1784	RING_IDX start = 5;
1785	struct netif_rx_request *req;
1786	struct netif_rx_response *rsp;
1787	struct mbuf *mbuf;
1788
1789	mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
1790	mbuf->m_flags |= M_PKTHDR;
1791	mbuf->m_pkthdr.len = size;
1792	mbuf->m_len = size;
1793
1794	xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
1795	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
1796	req->gref = 7;
1797	xnb_unit_pvt.rxb.req_cons = start;
1798	xnb_unit_pvt.rxb.rsp_prod_pvt = start;
1799	xnb_unit_pvt.rxs->req_prod = start + 1;
1800	xnb_unit_pvt.rxs->rsp_prod = start;
1801
1802	nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
1803			&xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
1804
1805	nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
1806	    &xnb_unit_pvt.rxb);
1807
1808	XNB_ASSERT(nr_reqs == 1);
1809	XNB_ASSERT(xnb_unit_pvt.rxb.rsp_prod_pvt == start + 1);
1810	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
1811	XNB_ASSERT(rsp->id == req->id);
1812	XNB_ASSERT(rsp->offset == 0);
1813	XNB_ASSERT((rsp->flags & (NETRXF_more_data | NETRXF_extra_info)) == 0);
1814	XNB_ASSERT(rsp->status == size);
1815
1816	safe_m_freem(&mbuf);
1817}
1818
1819/**
1820 * xnb_rxpkt2rsp with extra data
1821 */
1822static void
1823xnb_rxpkt2rsp_extra(char *buffer, size_t buflen)
1824{
1825	struct xnb_pkt pkt;
1826	int nr_entries, nr_reqs;
1827	size_t size = 14;
1828	int free_slots = 15;
1829	RING_IDX start = 3;
1830	uint16_t id = 49;
1831	uint16_t gref = 65;
1832	uint16_t mss = TCP_MSS - 40;
1833	struct mbuf *mbufc;
1834	struct netif_rx_request *req;
1835	struct netif_rx_response *rsp;
1836	struct netif_extra_info *ext;
1837
1838	mbufc = m_getm(NULL, size, M_WAITOK, MT_DATA);
1839	XNB_ASSERT(mbufc != NULL);
1840	if (mbufc == NULL)
1841		return;
1842
1843	mbufc->m_flags |= M_PKTHDR;
1844	mbufc->m_pkthdr.len = size;
1845	mbufc->m_pkthdr.csum_flags |= CSUM_TSO;
1846	mbufc->m_pkthdr.tso_segsz = mss;
1847	mbufc->m_len = size;
1848
1849	xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);
1850	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
1851	req->id = id;
1852	req->gref = gref;
1853	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start + 1);
1854	req->id = id + 1;
1855	req->gref = gref + 1;
1856	xnb_unit_pvt.rxb.req_cons = start;
1857	xnb_unit_pvt.rxb.rsp_prod_pvt = start;
1858	xnb_unit_pvt.rxs->req_prod = start + 2;
1859	xnb_unit_pvt.rxs->rsp_prod = start;
1860
1861	nr_entries = xnb_rxpkt2gnttab(&pkt, mbufc, xnb_unit_pvt.gnttab,
1862			&xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
1863
1864	nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
1865	    &xnb_unit_pvt.rxb);
1866
1867	XNB_ASSERT(nr_reqs == 2);
1868	XNB_ASSERT(xnb_unit_pvt.rxb.rsp_prod_pvt == start + 2);
1869	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
1870	XNB_ASSERT(rsp->id == id);
1871	XNB_ASSERT((rsp->flags & NETRXF_more_data) == 0);
1872	XNB_ASSERT((rsp->flags & NETRXF_extra_info));
1873	XNB_ASSERT((rsp->flags & NETRXF_data_validated));
1874	XNB_ASSERT((rsp->flags & NETRXF_csum_blank));
1875	XNB_ASSERT(rsp->status == size);
1876
1877	ext = (struct netif_extra_info*)
1878		RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start + 1);
1879	XNB_ASSERT(ext->type == XEN_NETIF_EXTRA_TYPE_GSO);
1880	XNB_ASSERT(! (ext->flags & XEN_NETIF_EXTRA_FLAG_MORE));
1881	XNB_ASSERT(ext->u.gso.size == mss);
1882	XNB_ASSERT(ext->u.gso.type == XEN_NETIF_EXTRA_TYPE_GSO);
1883
1884	safe_m_freem(&mbufc);
1885}
1886
1887/**
1888 * xnb_rxpkt2rsp on a packet with more than a pages's worth of data.  It should
1889 * generate two response slot
1890 */
1891static void
1892xnb_rxpkt2rsp_2slots(char *buffer, size_t buflen)
1893{
1894	struct xnb_pkt pkt;
1895	int nr_entries, nr_reqs;
1896	size_t size = PAGE_SIZE + 100;
1897	int free_slots = 3;
1898	uint16_t id1 = 17;
1899	uint16_t id2 = 37;
1900	uint16_t gref1 = 24;
1901	uint16_t gref2 = 34;
1902	RING_IDX start = 15;
1903	struct netif_rx_request *req;
1904	struct netif_rx_response *rsp;
1905	struct mbuf *mbuf;
1906
1907	mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
1908	mbuf->m_flags |= M_PKTHDR;
1909	mbuf->m_pkthdr.len = size;
1910	if (mbuf->m_next != NULL) {
1911		size_t first_len = MIN(M_TRAILINGSPACE(mbuf), size);
1912		mbuf->m_len = first_len;
1913		mbuf->m_next->m_len = size - first_len;
1914
1915	} else {
1916		mbuf->m_len = size;
1917	}
1918
1919	xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
1920	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
1921	req->gref = gref1;
1922	req->id = id1;
1923	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start + 1);
1924	req->gref = gref2;
1925	req->id = id2;
1926	xnb_unit_pvt.rxb.req_cons = start;
1927	xnb_unit_pvt.rxb.rsp_prod_pvt = start;
1928	xnb_unit_pvt.rxs->req_prod = start + 2;
1929	xnb_unit_pvt.rxs->rsp_prod = start;
1930
1931	nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
1932			&xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
1933
1934	nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
1935	    &xnb_unit_pvt.rxb);
1936
1937	XNB_ASSERT(nr_reqs == 2);
1938	XNB_ASSERT(xnb_unit_pvt.rxb.rsp_prod_pvt == start + 2);
1939	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
1940	XNB_ASSERT(rsp->id == id1);
1941	XNB_ASSERT(rsp->offset == 0);
1942	XNB_ASSERT((rsp->flags & NETRXF_extra_info) == 0);
1943	XNB_ASSERT(rsp->flags & NETRXF_more_data);
1944	XNB_ASSERT(rsp->status == PAGE_SIZE);
1945
1946	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start + 1);
1947	XNB_ASSERT(rsp->id == id2);
1948	XNB_ASSERT(rsp->offset == 0);
1949	XNB_ASSERT((rsp->flags & NETRXF_extra_info) == 0);
1950	XNB_ASSERT(! (rsp->flags & NETRXF_more_data));
1951	XNB_ASSERT(rsp->status == size - PAGE_SIZE);
1952
1953	safe_m_freem(&mbuf);
1954}
1955
1956/** xnb_rxpkt2rsp on a grant table with two sub-page entries */
1957static void
1958xnb_rxpkt2rsp_2short(char *buffer, size_t buflen) {
1959	struct xnb_pkt pkt;
1960	int nr_reqs, nr_entries;
1961	size_t size1 = MHLEN - 5;
1962	size_t size2 = MHLEN - 15;
1963	int free_slots = 32;
1964	RING_IDX start = 14;
1965	uint16_t id = 47;
1966	uint16_t gref = 54;
1967	struct netif_rx_request *req;
1968	struct netif_rx_response *rsp;
1969	struct mbuf *mbufc;
1970
1971	mbufc = m_getm(NULL, size1, M_WAITOK, MT_DATA);
1972	XNB_ASSERT(mbufc != NULL);
1973	if (mbufc == NULL)
1974		return;
1975	mbufc->m_flags |= M_PKTHDR;
1976
1977	m_getm(mbufc, size2, M_WAITOK, MT_DATA);
1978	XNB_ASSERT(mbufc->m_next != NULL);
1979	mbufc->m_pkthdr.len = size1 + size2;
1980	mbufc->m_len = size1;
1981	mbufc->m_next->m_len = size2;
1982
1983	xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);
1984
1985	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
1986	req->gref = gref;
1987	req->id = id;
1988	xnb_unit_pvt.rxb.req_cons = start;
1989	xnb_unit_pvt.rxb.rsp_prod_pvt = start;
1990	xnb_unit_pvt.rxs->req_prod = start + 1;
1991	xnb_unit_pvt.rxs->rsp_prod = start;
1992
1993	nr_entries = xnb_rxpkt2gnttab(&pkt, mbufc, xnb_unit_pvt.gnttab,
1994			&xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
1995
1996	nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
1997	    &xnb_unit_pvt.rxb);
1998
1999	XNB_ASSERT(nr_entries == 2);
2000	XNB_ASSERT(nr_reqs == 1);
2001	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
2002	XNB_ASSERT(rsp->id == id);
2003	XNB_ASSERT(rsp->status == size1 + size2);
2004	XNB_ASSERT(rsp->offset == 0);
2005	XNB_ASSERT(! (rsp->flags & (NETRXF_more_data | NETRXF_extra_info)));
2006
2007	safe_m_freem(&mbufc);
2008}
2009
2010/**
2011 * xnb_rxpkt2rsp on a long packet with a hypervisor gnttab_copy error
2012 * Note: this test will result in an error message being printed to the console
2013 * such as:
2014 * xnb(xnb_rxpkt2rsp:1720): Got error -1 for hypervisor gnttab_copy status
2015 */
2016static void
2017xnb_rxpkt2rsp_copyerror(char *buffer, size_t buflen)
2018{
2019	struct xnb_pkt pkt;
2020	int nr_entries, nr_reqs;
2021	int id = 7;
2022	int gref = 42;
2023	uint16_t canary = 6859;
2024	size_t size = 7 * MCLBYTES;
2025	int free_slots = 9;
2026	RING_IDX start = 2;
2027	struct netif_rx_request *req;
2028	struct netif_rx_response *rsp;
2029	struct mbuf *mbuf;
2030
2031	mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
2032	mbuf->m_flags |= M_PKTHDR;
2033	mbuf->m_pkthdr.len = size;
2034	mbuf->m_len = size;
2035
2036	xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
2037	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
2038	req->gref = gref;
2039	req->id = id;
2040	xnb_unit_pvt.rxb.req_cons = start;
2041	xnb_unit_pvt.rxb.rsp_prod_pvt = start;
2042	xnb_unit_pvt.rxs->req_prod = start + 1;
2043	xnb_unit_pvt.rxs->rsp_prod = start;
2044	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start + 1);
2045	req->gref = canary;
2046	req->id = canary;
2047
2048	nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
2049			&xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
2050	/* Inject the error*/
2051	xnb_unit_pvt.gnttab[2].status = GNTST_general_error;
2052
2053	nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
2054	    &xnb_unit_pvt.rxb);
2055
2056	XNB_ASSERT(nr_reqs == 1);
2057	XNB_ASSERT(xnb_unit_pvt.rxb.rsp_prod_pvt == start + 1);
2058	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
2059	XNB_ASSERT(rsp->id == id);
2060	XNB_ASSERT(rsp->status == NETIF_RSP_ERROR);
2061	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start + 1);
2062	XNB_ASSERT(req->gref == canary);
2063	XNB_ASSERT(req->id == canary);
2064
2065	safe_m_freem(&mbuf);
2066}
2067
2068#if defined(INET) || defined(INET6)
2069/**
2070 * xnb_add_mbuf_cksum on an ARP request packet
2071 */
2072static void
2073xnb_add_mbuf_cksum_arp(char *buffer, size_t buflen)
2074{
2075	const size_t pkt_len = sizeof(struct ether_header) +
2076		sizeof(struct ether_arp);
2077	struct mbuf *mbufc;
2078	struct ether_header *eh;
2079	struct ether_arp *ep;
2080	unsigned char pkt_orig[pkt_len];
2081
2082	mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
2083	/* Fill in an example arp request */
2084	eh = mtod(mbufc, struct ether_header*);
2085	eh->ether_dhost[0] = 0xff;
2086	eh->ether_dhost[1] = 0xff;
2087	eh->ether_dhost[2] = 0xff;
2088	eh->ether_dhost[3] = 0xff;
2089	eh->ether_dhost[4] = 0xff;
2090	eh->ether_dhost[5] = 0xff;
2091	eh->ether_shost[0] = 0x00;
2092	eh->ether_shost[1] = 0x15;
2093	eh->ether_shost[2] = 0x17;
2094	eh->ether_shost[3] = 0xe9;
2095	eh->ether_shost[4] = 0x30;
2096	eh->ether_shost[5] = 0x68;
2097	eh->ether_type = htons(ETHERTYPE_ARP);
2098	ep = (struct ether_arp*)(eh + 1);
2099	ep->ea_hdr.ar_hrd = htons(ARPHRD_ETHER);
2100	ep->ea_hdr.ar_pro = htons(ETHERTYPE_IP);
2101	ep->ea_hdr.ar_hln = 6;
2102	ep->ea_hdr.ar_pln = 4;
2103	ep->ea_hdr.ar_op = htons(ARPOP_REQUEST);
2104	ep->arp_sha[0] = 0x00;
2105	ep->arp_sha[1] = 0x15;
2106	ep->arp_sha[2] = 0x17;
2107	ep->arp_sha[3] = 0xe9;
2108	ep->arp_sha[4] = 0x30;
2109	ep->arp_sha[5] = 0x68;
2110	ep->arp_spa[0] = 0xc0;
2111	ep->arp_spa[1] = 0xa8;
2112	ep->arp_spa[2] = 0x0a;
2113	ep->arp_spa[3] = 0x04;
2114	bzero(&(ep->arp_tha), ETHER_ADDR_LEN);
2115	ep->arp_tpa[0] = 0xc0;
2116	ep->arp_tpa[1] = 0xa8;
2117	ep->arp_tpa[2] = 0x0a;
2118	ep->arp_tpa[3] = 0x06;
2119
2120	/* fill in the length field */
2121	mbufc->m_len = pkt_len;
2122	mbufc->m_pkthdr.len = pkt_len;
2123	/* indicate that the netfront uses hw-assisted checksums */
2124	mbufc->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID   |
2125				CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2126
2127	/* Make a backup copy of the packet */
2128	bcopy(mtod(mbufc, const void*), pkt_orig, pkt_len);
2129
2130	/* Function under test */
2131	xnb_add_mbuf_cksum(mbufc);
2132
2133	/* Verify that the packet's data did not change */
2134	XNB_ASSERT(bcmp(mtod(mbufc, const void*), pkt_orig, pkt_len) == 0);
2135	m_freem(mbufc);
2136}
2137
2138/**
2139 * Helper function that populates the ethernet header and IP header used by
2140 * some of the xnb_add_mbuf_cksum unit tests.  m must already be allocated
2141 * and must be large enough
2142 */
2143static void
2144xnb_fill_eh_and_ip(struct mbuf *m, uint16_t ip_len, uint16_t ip_id,
2145		   uint16_t ip_p, uint16_t ip_off, uint16_t ip_sum)
2146{
2147	struct ether_header *eh;
2148	struct ip *iph;
2149
2150	eh = mtod(m, struct ether_header*);
2151	eh->ether_dhost[0] = 0x00;
2152	eh->ether_dhost[1] = 0x16;
2153	eh->ether_dhost[2] = 0x3e;
2154	eh->ether_dhost[3] = 0x23;
2155	eh->ether_dhost[4] = 0x50;
2156	eh->ether_dhost[5] = 0x0b;
2157	eh->ether_shost[0] = 0x00;
2158	eh->ether_shost[1] = 0x16;
2159	eh->ether_shost[2] = 0x30;
2160	eh->ether_shost[3] = 0x00;
2161	eh->ether_shost[4] = 0x00;
2162	eh->ether_shost[5] = 0x00;
2163	eh->ether_type = htons(ETHERTYPE_IP);
2164	iph = (struct ip*)(eh + 1);
2165	iph->ip_hl = 0x5;	/* 5 dwords == 20 bytes */
2166	iph->ip_v = 4;		/* IP v4 */
2167	iph->ip_tos = 0;
2168	iph->ip_len = htons(ip_len);
2169	iph->ip_id = htons(ip_id);
2170	iph->ip_off = htons(ip_off);
2171	iph->ip_ttl = 64;
2172	iph->ip_p = ip_p;
2173	iph->ip_sum = htons(ip_sum);
2174	iph->ip_src.s_addr = htonl(0xc0a80a04);
2175	iph->ip_dst.s_addr = htonl(0xc0a80a05);
2176}
2177
2178/**
2179 * xnb_add_mbuf_cksum on an ICMP packet, based on a tcpdump of an actual
2180 * ICMP packet
2181 */
2182static void
2183xnb_add_mbuf_cksum_icmp(char *buffer, size_t buflen)
2184{
2185	const size_t icmp_len = 64;	/* set by ping(1) */
2186	const size_t pkt_len = sizeof(struct ether_header) +
2187		sizeof(struct ip) + icmp_len;
2188	struct mbuf *mbufc;
2189	struct ether_header *eh;
2190	struct ip *iph;
2191	struct icmp *icmph;
2192	unsigned char pkt_orig[icmp_len];
2193	uint32_t *tv_field;
2194	uint8_t *data_payload;
2195	int i;
2196	const uint16_t ICMP_CSUM = 0xaed7;
2197	const uint16_t IP_CSUM = 0xe533;
2198
2199	mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
2200	/* Fill in an example ICMP ping request */
2201	eh = mtod(mbufc, struct ether_header*);
2202	xnb_fill_eh_and_ip(mbufc, 84, 28, IPPROTO_ICMP, 0, 0);
2203	iph = (struct ip*)(eh + 1);
2204	icmph = (struct icmp*)(iph + 1);
2205	icmph->icmp_type = ICMP_ECHO;
2206	icmph->icmp_code = 0;
2207	icmph->icmp_cksum = htons(ICMP_CSUM);
2208	icmph->icmp_id = htons(31492);
2209	icmph->icmp_seq = htons(0);
2210	/*
2211	 * ping(1) uses bcopy to insert a native-endian timeval after icmp_seq.
2212	 * For this test, we will set the bytes individually for portability.
2213	 */
2214	tv_field = (uint32_t*)(&(icmph->icmp_hun));
2215	tv_field[0] = 0x4f02cfac;
2216	tv_field[1] = 0x0007c46a;
2217	/*
2218	 * Remainder of packet is an incrmenting 8 bit integer, starting with 8
2219	 */
2220	data_payload = (uint8_t*)(&tv_field[2]);
2221	for (i = 8; i < 37; i++) {
2222		*data_payload++ = i;
2223	}
2224
2225	/* fill in the length field */
2226	mbufc->m_len = pkt_len;
2227	mbufc->m_pkthdr.len = pkt_len;
2228	/* indicate that the netfront uses hw-assisted checksums */
2229	mbufc->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID   |
2230				CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2231
2232	bcopy(mtod(mbufc, const void*), pkt_orig, icmp_len);
2233	/* Function under test */
2234	xnb_add_mbuf_cksum(mbufc);
2235
2236	/* Check the IP checksum */
2237	XNB_ASSERT(iph->ip_sum == htons(IP_CSUM));
2238
2239	/* Check that the ICMP packet did not change */
2240	XNB_ASSERT(bcmp(icmph, pkt_orig, icmp_len));
2241	m_freem(mbufc);
2242}
2243
2244/**
2245 * xnb_add_mbuf_cksum on a UDP packet, based on a tcpdump of an actual
2246 * UDP packet
2247 */
2248static void
2249xnb_add_mbuf_cksum_udp(char *buffer, size_t buflen)
2250{
2251	const size_t udp_len = 16;
2252	const size_t pkt_len = sizeof(struct ether_header) +
2253		sizeof(struct ip) + udp_len;
2254	struct mbuf *mbufc;
2255	struct ether_header *eh;
2256	struct ip *iph;
2257	struct udphdr *udp;
2258	uint8_t *data_payload;
2259	const uint16_t IP_CSUM = 0xe56b;
2260	const uint16_t UDP_CSUM = 0xdde2;
2261
2262	mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
2263	/* Fill in an example UDP packet made by 'uname | nc -u <host> 2222 */
2264	eh = mtod(mbufc, struct ether_header*);
2265	xnb_fill_eh_and_ip(mbufc, 36, 4, IPPROTO_UDP, 0, 0xbaad);
2266	iph = (struct ip*)(eh + 1);
2267	udp = (struct udphdr*)(iph + 1);
2268	udp->uh_sport = htons(0x51ae);
2269	udp->uh_dport = htons(0x08ae);
2270	udp->uh_ulen = htons(udp_len);
2271	udp->uh_sum = htons(0xbaad);  /* xnb_add_mbuf_cksum will fill this in */
2272	data_payload = (uint8_t*)(udp + 1);
2273	data_payload[0] = 'F';
2274	data_payload[1] = 'r';
2275	data_payload[2] = 'e';
2276	data_payload[3] = 'e';
2277	data_payload[4] = 'B';
2278	data_payload[5] = 'S';
2279	data_payload[6] = 'D';
2280	data_payload[7] = '\n';
2281
2282	/* fill in the length field */
2283	mbufc->m_len = pkt_len;
2284	mbufc->m_pkthdr.len = pkt_len;
2285	/* indicate that the netfront uses hw-assisted checksums */
2286	mbufc->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID   |
2287				CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2288
2289	/* Function under test */
2290	xnb_add_mbuf_cksum(mbufc);
2291
2292	/* Check the checksums */
2293	XNB_ASSERT(iph->ip_sum == htons(IP_CSUM));
2294	XNB_ASSERT(udp->uh_sum == htons(UDP_CSUM));
2295
2296	m_freem(mbufc);
2297}
2298
2299/**
2300 * Helper function that populates a TCP packet used by all of the
2301 * xnb_add_mbuf_cksum tcp unit tests.  m must already be allocated and must be
2302 * large enough
2303 */
2304static void
2305xnb_fill_tcp(struct mbuf *m)
2306{
2307	struct ether_header *eh;
2308	struct ip *iph;
2309	struct tcphdr *tcp;
2310	uint32_t *options;
2311	uint8_t *data_payload;
2312
2313	/* Fill in an example TCP packet made by 'uname | nc <host> 2222' */
2314	eh = mtod(m, struct ether_header*);
2315	xnb_fill_eh_and_ip(m, 60, 8, IPPROTO_TCP, IP_DF, 0);
2316	iph = (struct ip*)(eh + 1);
2317	tcp = (struct tcphdr*)(iph + 1);
2318	tcp->th_sport = htons(0x9cd9);
2319	tcp->th_dport = htons(2222);
2320	tcp->th_seq = htonl(0x00f72b10);
2321	tcp->th_ack = htonl(0x7f37ba6c);
2322	tcp->th_x2 = 0;
2323	tcp->th_off = 8;
2324	tcp->th_flags = 0x18;
2325	tcp->th_win = htons(0x410);
2326	/* th_sum is incorrect; will be inserted by function under test */
2327	tcp->th_sum = htons(0xbaad);
2328	tcp->th_urp = htons(0);
2329	/*
2330	 * The following 12 bytes of options encode:
2331	 * [nop, nop, TS val 33247 ecr 3457687679]
2332	 */
2333	options = (uint32_t*)(tcp + 1);
2334	options[0] = htonl(0x0101080a);
2335	options[1] = htonl(0x000081df);
2336	options[2] = htonl(0xce18207f);
2337	data_payload = (uint8_t*)(&options[3]);
2338	data_payload[0] = 'F';
2339	data_payload[1] = 'r';
2340	data_payload[2] = 'e';
2341	data_payload[3] = 'e';
2342	data_payload[4] = 'B';
2343	data_payload[5] = 'S';
2344	data_payload[6] = 'D';
2345	data_payload[7] = '\n';
2346}
2347
2348/**
2349 * xnb_add_mbuf_cksum on a TCP packet, based on a tcpdump of an actual TCP
2350 * packet
2351 */
2352static void
2353xnb_add_mbuf_cksum_tcp(char *buffer, size_t buflen)
2354{
2355	const size_t payload_len = 8;
2356	const size_t tcp_options_len = 12;
2357	const size_t pkt_len = sizeof(struct ether_header) + sizeof(struct ip) +
2358	    sizeof(struct tcphdr) + tcp_options_len + payload_len;
2359	struct mbuf *mbufc;
2360	struct ether_header *eh;
2361	struct ip *iph;
2362	struct tcphdr *tcp;
2363	const uint16_t IP_CSUM = 0xa55a;
2364	const uint16_t TCP_CSUM = 0x2f64;
2365
2366	mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
2367	/* Fill in an example TCP packet made by 'uname | nc <host> 2222' */
2368	xnb_fill_tcp(mbufc);
2369	eh = mtod(mbufc, struct ether_header*);
2370	iph = (struct ip*)(eh + 1);
2371	tcp = (struct tcphdr*)(iph + 1);
2372
2373	/* fill in the length field */
2374	mbufc->m_len = pkt_len;
2375	mbufc->m_pkthdr.len = pkt_len;
2376	/* indicate that the netfront uses hw-assisted checksums */
2377	mbufc->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID   |
2378				CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2379
2380	/* Function under test */
2381	xnb_add_mbuf_cksum(mbufc);
2382
2383	/* Check the checksums */
2384	XNB_ASSERT(iph->ip_sum == htons(IP_CSUM));
2385	XNB_ASSERT(tcp->th_sum == htons(TCP_CSUM));
2386
2387	m_freem(mbufc);
2388}
2389
2390/**
2391 * xnb_add_mbuf_cksum on a TCP packet that does not use HW assisted checksums
2392 */
2393static void
2394xnb_add_mbuf_cksum_tcp_swcksum(char *buffer, size_t buflen)
2395{
2396	const size_t payload_len = 8;
2397	const size_t tcp_options_len = 12;
2398	const size_t pkt_len = sizeof(struct ether_header) + sizeof(struct ip) +
2399	    sizeof(struct tcphdr) + tcp_options_len + payload_len;
2400	struct mbuf *mbufc;
2401	struct ether_header *eh;
2402	struct ip *iph;
2403	struct tcphdr *tcp;
2404	/* Use deliberately bad checksums, and verify that they don't get */
2405	/* corrected by xnb_add_mbuf_cksum */
2406	const uint16_t IP_CSUM = 0xdead;
2407	const uint16_t TCP_CSUM = 0xbeef;
2408
2409	mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
2410	/* Fill in an example TCP packet made by 'uname | nc <host> 2222' */
2411	xnb_fill_tcp(mbufc);
2412	eh = mtod(mbufc, struct ether_header*);
2413	iph = (struct ip*)(eh + 1);
2414	iph->ip_sum = htons(IP_CSUM);
2415	tcp = (struct tcphdr*)(iph + 1);
2416	tcp->th_sum = htons(TCP_CSUM);
2417
2418	/* fill in the length field */
2419	mbufc->m_len = pkt_len;
2420	mbufc->m_pkthdr.len = pkt_len;
2421	/* indicate that the netfront does not use hw-assisted checksums */
2422	mbufc->m_pkthdr.csum_flags = 0;
2423
2424	/* Function under test */
2425	xnb_add_mbuf_cksum(mbufc);
2426
2427	/* Check that the checksums didn't change */
2428	XNB_ASSERT(iph->ip_sum == htons(IP_CSUM));
2429	XNB_ASSERT(tcp->th_sum == htons(TCP_CSUM));
2430
2431	m_freem(mbufc);
2432}
2433#endif /* INET || INET6 */
2434
2435/**
2436 * sscanf on unsigned chars
2437 */
2438static void
2439xnb_sscanf_hhu(char *buffer, size_t buflen)
2440{
2441	const char mystr[] = "137";
2442	uint8_t dest[12];
2443	int i;
2444
2445	for (i = 0; i < 12; i++)
2446		dest[i] = 'X';
2447
2448	XNB_ASSERT(sscanf(mystr, "%hhu", &dest[4]) == 1);
2449	for (i = 0; i < 12; i++)
2450		XNB_ASSERT(dest[i] == (i == 4 ? 137 : 'X'));
2451}
2452
2453/**
2454 * sscanf on signed chars
2455 */
2456static void
2457xnb_sscanf_hhd(char *buffer, size_t buflen)
2458{
2459	const char mystr[] = "-27";
2460	int8_t dest[12];
2461	int i;
2462
2463	for (i = 0; i < 12; i++)
2464		dest[i] = 'X';
2465
2466	XNB_ASSERT(sscanf(mystr, "%hhd", &dest[4]) == 1);
2467	for (i = 0; i < 12; i++)
2468		XNB_ASSERT(dest[i] == (i == 4 ? -27 : 'X'));
2469}
2470
2471/**
2472 * sscanf on signed long longs
2473 */
2474static void
2475xnb_sscanf_lld(char *buffer, size_t buflen)
2476{
2477	const char mystr[] = "-123456789012345";	/* about -2**47 */
2478	long long dest[3];
2479	int i;
2480
2481	for (i = 0; i < 3; i++)
2482		dest[i] = (long long)0xdeadbeefdeadbeef;
2483
2484	XNB_ASSERT(sscanf(mystr, "%lld", &dest[1]) == 1);
2485	for (i = 0; i < 3; i++)
2486		XNB_ASSERT(dest[i] == (i != 1 ? (long long)0xdeadbeefdeadbeef :
2487		    -123456789012345));
2488}
2489
2490/**
2491 * sscanf on unsigned long longs
2492 */
2493static void
2494xnb_sscanf_llu(char *buffer, size_t buflen)
2495{
2496	const char mystr[] = "12802747070103273189";
2497	unsigned long long dest[3];
2498	int i;
2499
2500	for (i = 0; i < 3; i++)
2501		dest[i] = (long long)0xdeadbeefdeadbeef;
2502
2503	XNB_ASSERT(sscanf(mystr, "%llu", &dest[1]) == 1);
2504	for (i = 0; i < 3; i++)
2505		XNB_ASSERT(dest[i] == (i != 1 ? (long long)0xdeadbeefdeadbeef :
2506		    12802747070103273189ull));
2507}
2508
2509/**
2510 * sscanf on unsigned short short n's
2511 */
2512static void
2513xnb_sscanf_hhn(char *buffer, size_t buflen)
2514{
2515	const char mystr[] =
2516	    "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f"
2517	    "202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f"
2518	    "404142434445464748494a4b4c4d4e4f505152535455565758595a5b5c5d5e5f";
2519	unsigned char dest[12];
2520	int i;
2521
2522	for (i = 0; i < 12; i++)
2523		dest[i] = (unsigned char)'X';
2524
2525	XNB_ASSERT(sscanf(mystr,
2526	    "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f"
2527	    "202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f"
2528	    "404142434445464748494a4b4c4d4e4f%hhn", &dest[4]) == 0);
2529	for (i = 0; i < 12; i++)
2530		XNB_ASSERT(dest[i] == (i == 4 ? 160 : 'X'));
2531}
2532