/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File : ecore_init_ops.c
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/dev/qlnx/qlnxe/ecore_init_ops.c 337519 2018-08-09 01:39:47Z davidcs $");

/* include the precompiled configuration values - only once */
#include "bcm_osal.h"
#include "ecore_hsi_common.h"
#include "ecore.h"
#include "ecore_hw.h"
#include "ecore_status.h"
#include "ecore_rt_defs.h"
#include "ecore_init_fw_funcs.h"

#ifndef CONFIG_ECORE_BINARY_FW
#ifdef CONFIG_ECORE_ZIPPED_FW
#include "ecore_init_values_zipped.h"
#else
#include "ecore_init_values.h"
#endif
#endif

#include "ecore_iro_values.h"
#include "ecore_sriov.h"
#include "ecore_gtt_values.h"
#include "reg_addr.h"
#include "ecore_init_ops.h"

#define ECORE_INIT_MAX_POLL_COUNT	100
#define ECORE_INIT_POLL_PERIOD_US	500

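/* Publish the firmware's internal RAM offsets (IRO) table, compiled in via
 * ecore_iro_values.h, so other ecore modules can resolve storm RAM
 * locations through p_dev->iro_arr.
 */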
void ecore_init_iro_array(struct ecore_dev *p_dev)
{
	p_dev->iro_arr = iro_arr;
}

/* Runtime configuration helpers */
void ecore_init_clear_rt_data(struct ecore_hwfn *p_hwfn)
{
	int i;

	for (i = 0; i < RUNTIME_ARRAY_SIZE; i++)
		p_hwfn->rt_data.b_valid[i] = false;
}

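/* Stage a single runtime register value; nothing is written to the chip
 * until the init tool replays the runtime array via an INIT_SRC_RUNTIME
 * write command (see ecore_init_rt()). Illustrative usage, with a made-up
 * offset constant standing in for a real one from ecore_rt_defs.h:
 *
 *	ecore_init_store_rt_reg(p_hwfn, EXAMPLE_RT_OFFSET, 0x1);
 */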
void ecore_init_store_rt_reg(struct ecore_hwfn *p_hwfn,
			     u32 rt_offset, u32 val)
{
	if (rt_offset >= RUNTIME_ARRAY_SIZE) {
		DP_ERR(p_hwfn,
		       "Avoid storing %u in rt_data at index %u since RUNTIME_ARRAY_SIZE is %u!\n",
		       val, rt_offset, RUNTIME_ARRAY_SIZE);
		return;
	}

	p_hwfn->rt_data.init_val[rt_offset] = val;
	p_hwfn->rt_data.b_valid[rt_offset] = true;
}

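/* Stage an aggregate (array) of runtime values starting at rt_offset.
 * 'size' is in bytes; the dword count is derived via size / sizeof(u32),
 * and every staged dword is marked valid individually.
 */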
void ecore_init_store_rt_agg(struct ecore_hwfn *p_hwfn,
			     u32 rt_offset, u32 *p_val,
			     osal_size_t size)
{
	osal_size_t i;

	if ((rt_offset + size - 1) >= RUNTIME_ARRAY_SIZE) {
		DP_ERR(p_hwfn,
		       "Avoid storing values in rt_data at indices %u-%u since RUNTIME_ARRAY_SIZE is %u!\n",
		       rt_offset, (u32)(rt_offset + size - 1),
		       RUNTIME_ARRAY_SIZE);
		return;
	}

	for (i = 0; i < size / sizeof(u32); i++) {
		p_hwfn->rt_data.init_val[rt_offset + i] = p_val[i];
		p_hwfn->rt_data.b_valid[rt_offset + i] = true;
	}
}

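/* Replay a staged runtime-array slice into the chip: scan rt_data for runs
 * of consecutive valid entries, pushing each run with a single DMAE
 * transaction when wide-bus access is required, or writing each valid
 * entry individually otherwise.
 */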
static enum _ecore_status_t ecore_init_rt(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 addr,
					  u16 rt_offset,
					  u16 size,
					  bool b_must_dmae)
{
	u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset];
	bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset];
	u16 i, segment;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* Since not all RT entries are initialized, go over the RT and
	 * for each segment of initialized values use DMAE.
	 */
	for (i = 0; i < size; i++) {
		if (!p_valid[i])
			continue;

		/* In case there isn't any wide-bus configuration here,
		 * simply write the data instead of using DMAE.
		 */
		if (!b_must_dmae) {
			ecore_wr(p_hwfn, p_ptt, addr + (i << 2),
				 p_init_val[i]);
			continue;
		}

		/* Start of a new segment */
		for (segment = 1; i + segment < size; segment++)
			if (!p_valid[i + segment])
				break;

		rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
					 (osal_uintptr_t)(p_init_val + i),
					 addr + (i << 2), segment,
					 OSAL_NULL /* default parameters */);
		if (rc != ECORE_SUCCESS)
			return rc;

		/* Jump over the entire segment, including invalid entry */
		i += segment;
	}

	return rc;
}

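/* Allocate the shadow runtime arrays (b_valid/init_val) on the hwfn; VFs
 * carry no runtime configuration of their own, so allocation is skipped
 * for them. ecore_init_free() below releases the same arrays.
 */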
enum _ecore_status_t ecore_init_alloc(struct ecore_hwfn *p_hwfn)
{
	struct ecore_rt_data *rt_data = &p_hwfn->rt_data;

	if (IS_VF(p_hwfn->p_dev))
		return ECORE_SUCCESS;

	rt_data->b_valid = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
				       sizeof(bool) * RUNTIME_ARRAY_SIZE);
	if (!rt_data->b_valid)
		return ECORE_NOMEM;

	rt_data->init_val = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
					sizeof(u32) * RUNTIME_ARRAY_SIZE);
	if (!rt_data->init_val) {
		OSAL_FREE(p_hwfn->p_dev, rt_data->b_valid);
		rt_data->b_valid = OSAL_NULL;
		return ECORE_NOMEM;
	}

	return ECORE_SUCCESS;
}

void ecore_init_free(struct ecore_hwfn *p_hwfn)
{
	OSAL_FREE(p_hwfn->p_dev, p_hwfn->rt_data.init_val);
	p_hwfn->rt_data.init_val = OSAL_NULL;
	OSAL_FREE(p_hwfn->p_dev, p_hwfn->rt_data.b_valid);
	p_hwfn->rt_data.b_valid = OSAL_NULL;
}

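/* Copy 'size' dwords from p_buf (starting at dmae_data_offset) into the
 * chip at 'addr'. Transfers that are short and not wide-bus, or that may
 * not use DMAE at all, fall back to per-dword register writes.
 */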
static enum _ecore_status_t ecore_init_array_dmae(struct ecore_hwfn *p_hwfn,
				  struct ecore_ptt *p_ptt,
				  u32 addr, u32 dmae_data_offset,
				  u32 size, const u32 *p_buf,
				  bool b_must_dmae, bool b_can_dmae)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* Perform DMAE only for lengthy enough sections or for wide-bus */
#ifndef ASIC_ONLY
	if ((CHIP_REV_IS_SLOW(p_hwfn->p_dev) && (size < 16)) ||
	    !b_can_dmae || (!b_must_dmae && (size < 16))) {
#else
	if (!b_can_dmae || (!b_must_dmae && (size < 16))) {
#endif
		const u32 *data = p_buf + dmae_data_offset;
		u32 i;

		for (i = 0; i < size; i++)
			ecore_wr(p_hwfn, p_ptt, addr + (i << 2), data[i]);
	} else {
		rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
					 (osal_uintptr_t)(p_buf +
							  dmae_data_offset),
					 addr, size,
					 OSAL_NULL /* default parameters */);
	}

	return rc;
}

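/* The two fill helpers below zero or fill a run of consecutive dwords:
 * ecore_init_fill_dmae() issues a single DMAE transaction whose
 * RW_REPL_SRC flag makes the engine replicate the (zeroed) source data,
 * while ecore_init_fill() simply writes the pattern one dword at a time.
 */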
static enum _ecore_status_t ecore_init_fill_dmae(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt,
						 u32 addr, u32 fill_count)
{
	static u32 zero_buffer[DMAE_MAX_RW_SIZE];
	struct ecore_dmae_params params;

	OSAL_MEMSET(zero_buffer, 0, sizeof(u32) * DMAE_MAX_RW_SIZE);

	OSAL_MEMSET(&params, 0, sizeof(params));
	params.flags = ECORE_DMAE_FLAG_RW_REPL_SRC;
	return ecore_dmae_host2grc(p_hwfn, p_ptt,
				   (osal_uintptr_t)(&(zero_buffer[0])),
				   addr, fill_count, &params);
}

static void ecore_init_fill(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt,
			    u32 addr, u32 fill, u32 fill_count)
{
	u32 i;

	for (i = 0; i < fill_count; i++, addr += sizeof(u32))
		ecore_wr(p_hwfn, p_ptt, addr, fill);
}

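/* Handle an INIT_SRC_ARRAY write command. The array header selects one of
 * three layouts: a compressed blob (inflated into p_hwfn->unzip_buf before
 * being written), a pattern repeated a given number of times, or a plain
 * dword array.
 */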
static enum _ecore_status_t ecore_init_cmd_array(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt,
						 struct init_write_op *cmd,
						 bool b_must_dmae,
						 bool b_can_dmae)
{
	u32 dmae_array_offset = OSAL_LE32_TO_CPU(cmd->args.array_offset);
	u32 data = OSAL_LE32_TO_CPU(cmd->data);
	u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
#ifdef CONFIG_ECORE_ZIPPED_FW
	u32 offset, output_len, input_len, max_size;
#endif
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	union init_array_hdr *hdr;
	const u32 *array_data;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 size;

	array_data = p_dev->fw_data->arr_data;

	hdr = (union init_array_hdr *)(array_data +
				       dmae_array_offset);
	data = OSAL_LE32_TO_CPU(hdr->raw.data);
	switch (GET_FIELD(data, INIT_ARRAY_RAW_HDR_TYPE)) {
	case INIT_ARR_ZIPPED:
#ifdef CONFIG_ECORE_ZIPPED_FW
		offset = dmae_array_offset + 1;
		input_len = GET_FIELD(data,
				      INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE);
		max_size = MAX_ZIPPED_SIZE * 4;
		OSAL_MEMSET(p_hwfn->unzip_buf, 0, max_size);

		output_len = OSAL_UNZIP_DATA(p_hwfn, input_len,
					     (u8 *)&array_data[offset],
					     max_size, (u8 *)p_hwfn->unzip_buf);
		if (output_len) {
			rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr, 0,
						   output_len,
						   p_hwfn->unzip_buf,
						   b_must_dmae, b_can_dmae);
		} else {
			DP_NOTICE(p_hwfn, true,
				  "Failed to unzip dmae data\n");
			rc = ECORE_INVAL;
		}
#else
		DP_NOTICE(p_hwfn, true,
			  "Using zipped firmware without config enabled\n");
		rc = ECORE_INVAL;
#endif
		break;
	case INIT_ARR_PATTERN:
	{
		u32 repeats = GET_FIELD(data,
					INIT_ARRAY_PATTERN_HDR_REPETITIONS);
		u32 i;

		size = GET_FIELD(data,
				 INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE);

		for (i = 0; i < repeats; i++, addr += size << 2) {
			rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr,
						   dmae_array_offset + 1,
						   size, array_data,
						   b_must_dmae, b_can_dmae);
			if (rc)
				break;
		}
		break;
	}
	case INIT_ARR_STANDARD:
		size = GET_FIELD(data,
				 INIT_ARRAY_STANDARD_HDR_SIZE);
		rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr,
					   dmae_array_offset + 1,
					   size, array_data,
					   b_must_dmae, b_can_dmae);
		break;
	}

	return rc;
}

/* init_ops write command */
static enum _ecore_status_t ecore_init_cmd_wr(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      struct init_write_op *p_cmd,
					      bool b_can_dmae)
{
	u32 data = OSAL_LE32_TO_CPU(p_cmd->data);
	bool b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS);
	u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* Sanitize */
	if (b_must_dmae && !b_can_dmae) {
		DP_NOTICE(p_hwfn, true,
			  "Need to write to %08x for Wide-bus but DMAE isn't allowed\n",
			  addr);
		return ECORE_INVAL;
	}

	switch (GET_FIELD(data, INIT_WRITE_OP_SOURCE)) {
	case INIT_SRC_INLINE:
		data = OSAL_LE32_TO_CPU(p_cmd->args.inline_val);
		ecore_wr(p_hwfn, p_ptt, addr, data);
		break;
	case INIT_SRC_ZEROS:
		data = OSAL_LE32_TO_CPU(p_cmd->args.zeros_count);
		if (b_must_dmae || (b_can_dmae && (data >= 64)))
			rc = ecore_init_fill_dmae(p_hwfn, p_ptt, addr, data);
		else
			ecore_init_fill(p_hwfn, p_ptt, addr, 0, data);
		break;
	case INIT_SRC_ARRAY:
		rc = ecore_init_cmd_array(p_hwfn, p_ptt, p_cmd,
					  b_must_dmae, b_can_dmae);
		break;
	case INIT_SRC_RUNTIME:
		rc = ecore_init_rt(p_hwfn, p_ptt, addr,
				   OSAL_LE16_TO_CPU(p_cmd->args.runtime.offset),
				   OSAL_LE16_TO_CPU(p_cmd->args.runtime.size),
				   b_must_dmae);
		break;
	}

	return rc;
}

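/* Comparison helpers backing the INIT_POLL_EQ/AND/OR poll types consumed
 * by ecore_init_cmd_rd() below.
 */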
static OSAL_INLINE bool comp_eq(u32 val, u32 expected_val)
{
	return (val == expected_val);
}

static OSAL_INLINE bool comp_and(u32 val, u32 expected_val)
{
	return (val & expected_val) == expected_val;
}

static OSAL_INLINE bool comp_or(u32 val, u32 expected_val)
{
	return (val | expected_val) > 0;
}

/* init_ops read/poll commands */
static void ecore_init_cmd_rd(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt,
			      struct init_read_op *cmd)
{
	bool (*comp_check)(u32 val, u32 expected_val);
	u32 delay = ECORE_INIT_POLL_PERIOD_US, val;
	u32 data, addr, poll;
	int i;

	data = OSAL_LE32_TO_CPU(cmd->op_data);
	addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2;
	poll = GET_FIELD(data, INIT_READ_OP_POLL_TYPE);

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		delay *= 100;
#endif

	val = ecore_rd(p_hwfn, p_ptt, addr);

	if (poll == INIT_POLL_NONE)
		return;

	switch (poll) {
	case INIT_POLL_EQ:
		comp_check = comp_eq;
		break;
	case INIT_POLL_OR:
		comp_check = comp_or;
		break;
	case INIT_POLL_AND:
		comp_check = comp_and;
		break;
	default:
		DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n",
		       cmd->op_data);
		return;
	}

	data = OSAL_LE32_TO_CPU(cmd->expected_val);
	for (i = 0;
	     i < ECORE_INIT_MAX_POLL_COUNT && !comp_check(val, data);
	     i++) {
		OSAL_UDELAY(delay);
		val = ecore_rd(p_hwfn, p_ptt, addr);
	}

	if (i == ECORE_INIT_MAX_POLL_COUNT)
		DP_ERR(p_hwfn, "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparison %08x)]\n",
		       addr,
		       OSAL_LE32_TO_CPU(cmd->expected_val), val,
		       OSAL_LE32_TO_CPU(cmd->op_data));
}

/* init_ops callbacks entry point */
static enum _ecore_status_t ecore_init_cmd_cb(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      struct init_callback_op *p_cmd)
{
	enum _ecore_status_t rc;

	switch (p_cmd->callback_id) {
	case DMAE_READY_CB:
		rc = ecore_dmae_sanity(p_hwfn, p_ptt, "engine_phase");
		break;
	default:
		DP_NOTICE(p_hwfn, false, "Unexpected init op callback ID %d\n",
			  p_cmd->callback_id);
		return ECORE_INVAL;
	}

	return rc;
}

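/* Recursively evaluate the prefix-encoded boolean expression stored in the
 * firmware's modes tree: each byte is either an operator (NOT/OR/AND)
 * whose operands follow it in the buffer, or a mode number tested against
 * the 'modes' bitmap. *p_offset is advanced past everything consumed.
 */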
static u8 ecore_init_cmd_mode_match(struct ecore_hwfn *p_hwfn,
				    u16 *p_offset, int modes)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	const u8 *modes_tree_buf;
	u8 arg1, arg2, tree_val;

	modes_tree_buf = p_dev->fw_data->modes_tree_buf;
	tree_val = modes_tree_buf[(*p_offset)++];
	switch (tree_val) {
	case INIT_MODE_OP_NOT:
		return ecore_init_cmd_mode_match(p_hwfn, p_offset, modes) ^ 1;
	case INIT_MODE_OP_OR:
		arg1 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
		arg2 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
		return arg1 | arg2;
	case INIT_MODE_OP_AND:
		arg1 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
		arg2 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
		return arg1 & arg2;
	default:
		tree_val -= MAX_INIT_MODE_OPS;
		return (modes & (1 << tree_val)) ? 1 : 0;
	}
}

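/* Return 0 when the IF_MODE condition holds (execution continues with the
 * next command), or the number of init commands to skip when it does not.
 */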
static u32 ecore_init_cmd_mode(struct ecore_hwfn *p_hwfn,
			       struct init_if_mode_op *p_cmd, int modes)
{
	u16 offset = OSAL_LE16_TO_CPU(p_cmd->modes_buf_offset);

	if (ecore_init_cmd_mode_match(p_hwfn, &offset, modes))
		return 0;
	else
		return GET_FIELD(OSAL_LE32_TO_CPU(p_cmd->op_data),
				 INIT_IF_MODE_OP_CMD_OFFSET);
}

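/* Likewise for IF_PHASE: return 0 when the current phase/phase_id matches,
 * or the number of init commands to skip otherwise.
 */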
static u32 ecore_init_cmd_phase(struct init_if_phase_op *p_cmd,
				u32 phase, u32 phase_id)
{
	u32 data = OSAL_LE32_TO_CPU(p_cmd->phase_data);
	u32 op_data = OSAL_LE32_TO_CPU(p_cmd->op_data);

	if (!(GET_FIELD(data, INIT_IF_PHASE_OP_PHASE) == phase &&
	      (GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == ANY_PHASE_ID ||
	       GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == phase_id)))
		return GET_FIELD(op_data, INIT_IF_PHASE_OP_CMD_OFFSET);
	else
		return 0;
}

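/* Main init-tool entry point: walk the firmware's init_ops command stream
 * and execute every command applicable to the given phase, phase_id and
 * mode bitmap. A caller would invoke it once per phase, roughly as follows
 * (phase constants come from the HSI headers; exact arguments depend on
 * the flow):
 *
 *	rc = ecore_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, modes);
 */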
enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    int phase,
				    int phase_id,
				    int modes)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	u32 cmd_num, num_init_ops;
	union init_op *init_ops;
	bool b_dmae = false;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	num_init_ops = p_dev->fw_data->init_ops_size;
	init_ops = p_dev->fw_data->init_ops;

#ifdef CONFIG_ECORE_ZIPPED_FW
	p_hwfn->unzip_buf = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC,
					MAX_ZIPPED_SIZE * 4);
	if (!p_hwfn->unzip_buf) {
		DP_NOTICE(p_hwfn, true, "Failed to allocate unzip buffer\n");
		return ECORE_NOMEM;
	}
#endif

	for (cmd_num = 0; cmd_num < num_init_ops; cmd_num++) {
		union init_op *cmd = &init_ops[cmd_num];
		u32 data = OSAL_LE32_TO_CPU(cmd->raw.op_data);

		switch (GET_FIELD(data, INIT_CALLBACK_OP_OP)) {
		case INIT_OP_WRITE:
			rc = ecore_init_cmd_wr(p_hwfn, p_ptt, &cmd->write,
					       b_dmae);
			break;

		case INIT_OP_READ:
			ecore_init_cmd_rd(p_hwfn, p_ptt, &cmd->read);
			break;

		case INIT_OP_IF_MODE:
			cmd_num += ecore_init_cmd_mode(p_hwfn, &cmd->if_mode,
						       modes);
			break;
		case INIT_OP_IF_PHASE:
			cmd_num += ecore_init_cmd_phase(&cmd->if_phase, phase,
							phase_id);
			b_dmae = GET_FIELD(data,
					   INIT_IF_PHASE_OP_DMAE_ENABLE);
			break;
		case INIT_OP_DELAY:
			/* ecore_init_run is always invoked from
			 * a sleepable context
			 */
			OSAL_UDELAY(cmd->delay.delay);
			break;

		case INIT_OP_CALLBACK:
			rc = ecore_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
			break;
		}

		if (rc)
			break;
	}
#ifdef CONFIG_ECORE_ZIPPED_FW
	OSAL_FREE(p_hwfn->p_dev, p_hwfn->unzip_buf);
	p_hwfn->unzip_buf = OSAL_NULL;
#endif
	return rc;
}

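/* Program the PXP global GTT windows from the compiled-in pxp_global_win
 * table; on non-ASIC platforms this also triggers and polls the PTT/GTT
 * initialization that the MFW normally performs.
 */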
void ecore_gtt_init(struct ecore_hwfn *p_hwfn,
		    struct ecore_ptt *p_ptt)
{
	u32 gtt_base;
	u32 i;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
		/* This is done by MFW on ASIC; regardless, this should only
		 * be done once per chip [i.e., common]. Implementation is
		 * not too bright, but it should work on the simple FPGA/EMUL
		 * scenarios.
		 */
		static bool initialized = false;
		int poll_cnt = 500;
		u32 val;

		/* initialize PTT/GTT (poll for completion) */
		if (!initialized) {
			ecore_wr(p_hwfn, p_ptt,
				 PGLUE_B_REG_START_INIT_PTT_GTT, 1);
			initialized = true;
		}

		do {
			/* ptt might be overridden by HW until this is done */
			OSAL_UDELAY(10);
			ecore_ptt_invalidate(p_hwfn);
			val = ecore_rd(p_hwfn, p_ptt,
				       PGLUE_B_REG_INIT_DONE_PTT_GTT);
		} while ((val != 1) && --poll_cnt);

		if (!poll_cnt)
			DP_ERR(p_hwfn, "PGLUE_B_REG_INIT_DONE didn't complete\n");
	}
#endif

	/* Set the global windows */
	gtt_base = PXP_PF_WINDOW_ADMIN_START + PXP_PF_WINDOW_ADMIN_GLOBAL_START;

	for (i = 0; i < OSAL_ARRAY_SIZE(pxp_global_win); i++)
		if (pxp_global_win[i])
			REG_WR(p_hwfn, gtt_base + i * PXP_GLOBAL_ENTRY_SIZE,
			       pxp_global_win[i]);
}

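/* Locate the init-values buffers inside a binary firmware image via its
 * bin_buffer_hdr table; when the firmware is statically linked instead,
 * point at the compiled-in arrays from ecore_init_values.h.
 */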
enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev,
#ifdef CONFIG_ECORE_BINARY_FW
					const u8 *fw_data)
#else
					const u8 OSAL_UNUSED *fw_data)
#endif
{
	struct ecore_fw_data *fw = p_dev->fw_data;

#ifdef CONFIG_ECORE_BINARY_FW
	struct bin_buffer_hdr *buf_hdr;
	u32 offset, len;

	if (!fw_data) {
		DP_NOTICE(p_dev, true, "Invalid fw data\n");
		return ECORE_INVAL;
	}

	buf_hdr = (struct bin_buffer_hdr *)fw_data;

	offset = buf_hdr[BIN_BUF_INIT_FW_VER_INFO].offset;
	fw->fw_ver_info = (struct fw_ver_info *)(fw_data + offset);

	offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
	fw->init_ops = (union init_op *)(fw_data + offset);

	offset = buf_hdr[BIN_BUF_INIT_VAL].offset;
	fw->arr_data = (u32 *)(fw_data + offset);

	offset = buf_hdr[BIN_BUF_INIT_MODE_TREE].offset;
	fw->modes_tree_buf = (u8 *)(fw_data + offset);
	len = buf_hdr[BIN_BUF_INIT_CMD].length;
	fw->init_ops_size = len / sizeof(struct init_raw_op);
#else
	fw->init_ops = (union init_op *)init_ops;
	fw->arr_data = (u32 *)init_val;
	fw->modes_tree_buf = (u8 *)modes_tree_buf;
	fw->init_ops_size = init_ops_size;
#endif

	return ECORE_SUCCESS;
}