/******************************************************************************

  Copyright (c) 2013-2018, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/

#include <sys/limits.h>
#include <sys/time.h>

#include "ixl.h"

/********************************************************************
 * Manage DMA'able memory.
 *******************************************************************/
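/*
 * i40e_dmamap_cb - bus_dmamap_load() callback; records the bus address of
 * the single DMA segment in the caller-supplied bus_addr_t.
 */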
static void
i40e_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	if (error)
		return;
	*(bus_addr_t *)arg = segs->ds_addr;
}

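/*
 * i40e_allocate_virt_mem - allocate zeroed kernel memory for the shared
 * code; returns non-zero if the allocation fails.
 */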
i40e_status
i40e_allocate_virt_mem(struct i40e_hw *hw, struct i40e_virt_mem *mem, u32 size)
{
	mem->va = malloc(size, M_IXL, M_NOWAIT | M_ZERO);
	return (mem->va == NULL);
}

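/*
 * i40e_free_virt_mem - release memory obtained from i40e_allocate_virt_mem().
 */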
i40e_status
i40e_free_virt_mem(struct i40e_hw *hw, struct i40e_virt_mem *mem)
{
	free(mem->va, M_IXL);
	mem->va = NULL;

	return (I40E_SUCCESS);
}

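/*
 * i40e_allocate_dma_mem - allocate a DMA-able buffer for the shared code.
 *
 * This wraps the usual busdma(9) sequence: create a tag describing the
 * alignment and single-segment requirement, allocate zeroed memory, and
 * load the map so i40e_dmamap_cb() can record the bus address in mem->pa.
 * The memory type argument is ignored by this implementation.
 *
 * Typical use (sketch only; the size, alignment, and memory type shown are
 * illustrative, not values taken from this driver):
 *
 *	struct i40e_dma_mem mem;
 *
 *	if (i40e_allocate_dma_mem(hw, &mem, i40e_mem_reserved, 4096, 4096))
 *		return (ENOMEM);
 */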
i40e_status
i40e_allocate_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem,
	enum i40e_memory_type type __unused, u64 size, u32 alignment)
{
	device_t	dev = ((struct i40e_osdep *)hw->back)->dev;
	int		err;

	err = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
			       alignment, 0,	/* alignment, bounds */
			       BUS_SPACE_MAXADDR,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,	/* filter, filterarg */
			       size,	/* maxsize */
			       1,	/* nsegments */
			       size,	/* maxsegsize */
			       BUS_DMA_ALLOCNOW, /* flags */
			       NULL,	/* lockfunc */
			       NULL,	/* lockfuncarg */
			       &mem->tag);
	if (err != 0) {
		device_printf(dev,
		    "i40e_allocate_dma: bus_dma_tag_create failed, "
		    "error %d\n", err);
		goto fail_0;
	}
	err = bus_dmamem_alloc(mem->tag, (void **)&mem->va,
			     BUS_DMA_NOWAIT | BUS_DMA_ZERO, &mem->map);
	if (err != 0) {
		device_printf(dev,
		    "i40e_allocate_dma: bus_dmamem_alloc failed, "
		    "error %d\n", err);
		goto fail_1;
	}
	err = bus_dmamap_load(mem->tag, mem->map, mem->va,
			    size,
			    i40e_dmamap_cb,
			    &mem->pa,
			    BUS_DMA_NOWAIT);
	if (err != 0) {
		device_printf(dev,
		    "i40e_allocate_dma: bus_dmamap_load failed, "
		    "error %d\n", err);
		goto fail_2;
	}
	mem->size = size;
	bus_dmamap_sync(mem->tag, mem->map,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	return (I40E_SUCCESS);
fail_2:
	bus_dmamem_free(mem->tag, mem->va, mem->map);
fail_1:
	bus_dma_tag_destroy(mem->tag);
fail_0:
	mem->map = NULL;
	mem->tag = NULL;
	return (err);
}

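/*
 * i40e_free_dma_mem - unload, free, and destroy the busdma resources set up
 * by i40e_allocate_dma_mem().
 */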
i40e_status
i40e_free_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem)
{
	bus_dmamap_sync(mem->tag, mem->map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(mem->tag, mem->map);
	bus_dmamem_free(mem->tag, mem->va, mem->map);
	bus_dma_tag_destroy(mem->tag);
	return (I40E_SUCCESS);
}

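/*
 * The shared code's "spinlocks" are implemented with default (sleepable)
 * mutexes; MTX_DUPOK keeps WITNESS from warning when more than one of these
 * identically-named locks is held at once.
 */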
void
i40e_init_spinlock(struct i40e_spinlock *lock)
{
	mtx_init(&lock->mutex, "mutex",
	    "ixl spinlock", MTX_DEF | MTX_DUPOK);
}

void
i40e_acquire_spinlock(struct i40e_spinlock *lock)
{
	mtx_lock(&lock->mutex);
}

void
i40e_release_spinlock(struct i40e_spinlock *lock)
{
	mtx_unlock(&lock->mutex);
}

void
i40e_destroy_spinlock(struct i40e_spinlock *lock)
{
	if (mtx_initialized(&lock->mutex))
		mtx_destroy(&lock->mutex);
}

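/*
 * MSEC_2_TICKS scales a millisecond count by hz, clamping the result to a
 * minimum of one tick.
 */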
#ifndef MSEC_2_TICKS
#define MSEC_2_TICKS(m) max(1, (uint32_t)((hz == 1000) ? \
	  (m) : ((uint64_t)(m) * (uint64_t)hz)/(uint64_t)1000))
#endif

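/*
 * i40e_msec_pause - sleep (rather than busy-wait) for roughly the requested
 * number of milliseconds.
 */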
void
i40e_msec_pause(int msecs)
{
	pause("i40e_msec_pause", MSEC_2_TICKS(msecs));
}

/*
 * Helper function for debug statement printing
 */
void
i40e_debug_shared(struct i40e_hw *hw, enum i40e_debug_mask mask, char *fmt, ...)
{
	va_list args;
	device_t dev;

	if (!(mask & hw->debug_mask))
		return;

	dev = ((struct i40e_osdep *)hw->back)->dev;

	/* Re-implement device_printf() */
	device_print_prettyname(dev);
	va_start(args, fmt);
	vprintf(fmt, args);
	va_end(args);
}

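/*
 * ixl_vc_opcode_str - translate a virtchnl opcode into a human-readable
 * string for debug output.
 */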
const char *
ixl_vc_opcode_str(uint16_t op)
{
	switch (op) {
	case VIRTCHNL_OP_VERSION:
		return ("VERSION");
	case VIRTCHNL_OP_RESET_VF:
		return ("RESET_VF");
	case VIRTCHNL_OP_GET_VF_RESOURCES:
		return ("GET_VF_RESOURCES");
	case VIRTCHNL_OP_CONFIG_TX_QUEUE:
		return ("CONFIG_TX_QUEUE");
	case VIRTCHNL_OP_CONFIG_RX_QUEUE:
		return ("CONFIG_RX_QUEUE");
	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		return ("CONFIG_VSI_QUEUES");
	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
		return ("CONFIG_IRQ_MAP");
	case VIRTCHNL_OP_ENABLE_QUEUES:
		return ("ENABLE_QUEUES");
	case VIRTCHNL_OP_DISABLE_QUEUES:
		return ("DISABLE_QUEUES");
	case VIRTCHNL_OP_ADD_ETH_ADDR:
		return ("ADD_ETH_ADDR");
	case VIRTCHNL_OP_DEL_ETH_ADDR:
		return ("DEL_ETH_ADDR");
	case VIRTCHNL_OP_ADD_VLAN:
		return ("ADD_VLAN");
	case VIRTCHNL_OP_DEL_VLAN:
		return ("DEL_VLAN");
	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		return ("CONFIG_PROMISCUOUS_MODE");
	case VIRTCHNL_OP_GET_STATS:
		return ("GET_STATS");
	case VIRTCHNL_OP_RSVD:
		return ("RSVD");
	case VIRTCHNL_OP_EVENT:
		return ("EVENT");
	case VIRTCHNL_OP_CONFIG_RSS_KEY:
		return ("CONFIG_RSS_KEY");
	case VIRTCHNL_OP_CONFIG_RSS_LUT:
		return ("CONFIG_RSS_LUT");
	case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
		return ("GET_RSS_HENA_CAPS");
	case VIRTCHNL_OP_SET_RSS_HENA:
		return ("SET_RSS_HENA");
	default:
		return ("UNKNOWN");
	}
}

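/*
 * 16-bit PCI configuration space accessors used by the shared code.
 */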
u16
i40e_read_pci_cfg(struct i40e_hw *hw, u32 reg)
{
	u16 value;

	value = pci_read_config(((struct i40e_osdep *)hw->back)->dev,
	    reg, 2);

	return (value);
}

void
i40e_write_pci_cfg(struct i40e_hw *hw, u32 reg, u16 value)
{
	pci_write_config(((struct i40e_osdep *)hw->back)->dev,
	    reg, value, 2);
}