/***********************license start***************
 * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights
 * reserved.
 *
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *
 *   * Redistributions in binary form must reproduce the above
 *     copyright notice, this list of conditions and the following
 *     disclaimer in the documentation and/or other materials provided
 *     with the distribution.
 *
 *   * Neither the name of Cavium Networks nor the names of
 *     its contributors may be used to endorse or promote products
 *     derived from this software without specific prior written
 *     permission.
 *
 * This Software, including technical data, may be subject to U.S. export control
 * laws, including the U.S. Export Administration Act and its associated
 * regulations, and may be subject to export or import regulations in other
 * countries.
 *
 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
 * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
 ***********************license end**************************************/


/**
 * @file
 *
 * Interface to the hardware Input Packet Data unit.
50210284Sjmallett * 51215990Sjmallett * <hr>$Revision: 49448 $<hr> 52210284Sjmallett */ 53210284Sjmallett 54210284Sjmallett 55210284Sjmallett#ifndef __CVMX_IPD_H__ 56210284Sjmallett#define __CVMX_IPD_H__ 57210284Sjmallett 58215990Sjmallett#ifdef CVMX_BUILD_FOR_LINUX_KERNEL 59215990Sjmallett#include <asm/octeon/cvmx.h> 60215990Sjmallett#include <asm/octeon/cvmx-config.h> 61215990Sjmallett#include <asm/octeon/cvmx-ipd-defs.h> 62215990Sjmallett#else 63215990Sjmallett# ifndef CVMX_DONT_INCLUDE_CONFIG 64215990Sjmallett# include "executive-config.h" 65215990Sjmallett# ifdef CVMX_ENABLE_PKO_FUNCTIONS 66215990Sjmallett# include "cvmx-config.h" 67215990Sjmallett# endif 68215990Sjmallett# endif 69210284Sjmallett#endif 70210284Sjmallett 71210284Sjmallett#ifdef __cplusplus 72210284Sjmallettextern "C" { 73210284Sjmallett#endif 74210284Sjmallett 75210284Sjmallett#ifndef CVMX_ENABLE_LEN_M8_FIX 76210284Sjmallett#define CVMX_ENABLE_LEN_M8_FIX 0 77210284Sjmallett#endif 78210284Sjmallett 79215990Sjmallett/* CSR typedefs have been moved to cvmx-ipd-defs.h */ 80210284Sjmallett 81215990Sjmalletttypedef cvmx_ipd_1st_mbuff_skip_t cvmx_ipd_mbuff_not_first_skip_t; 82215990Sjmalletttypedef cvmx_ipd_1st_next_ptr_back_t cvmx_ipd_second_next_ptr_back_t; 83210284Sjmallett 84210284Sjmallett 85210284Sjmallett/** 86210284Sjmallett * Configure IPD 87210284Sjmallett * 88210284Sjmallett * @param mbuff_size Packets buffer size in 8 byte words 89210284Sjmallett * @param first_mbuff_skip 90210284Sjmallett * Number of 8 byte words to skip in the first buffer 91210284Sjmallett * @param not_first_mbuff_skip 92210284Sjmallett * Number of 8 byte words to skip in each following buffer 93210284Sjmallett * @param first_back Must be same as first_mbuff_skip / 128 94210284Sjmallett * @param second_back 95210284Sjmallett * Must be same as not_first_mbuff_skip / 128 96210284Sjmallett * @param wqe_fpa_pool 97210284Sjmallett * FPA pool to get work entries from 98210284Sjmallett * @param cache_mode 99210284Sjmallett * 
@param back_pres_enable_flag 100215990Sjmallett * Enable or disable port back pressure at a global level. 101215990Sjmallett * This should always be 1 as more accurate control can be 102215990Sjmallett * found in IPD_PORTX_BP_PAGE_CNT[BP_ENB]. 103210284Sjmallett */ 104210284Sjmallettstatic inline void cvmx_ipd_config(uint64_t mbuff_size, 105210284Sjmallett uint64_t first_mbuff_skip, 106210284Sjmallett uint64_t not_first_mbuff_skip, 107210284Sjmallett uint64_t first_back, 108210284Sjmallett uint64_t second_back, 109210284Sjmallett uint64_t wqe_fpa_pool, 110210284Sjmallett cvmx_ipd_mode_t cache_mode, 111210284Sjmallett uint64_t back_pres_enable_flag 112210284Sjmallett ) 113210284Sjmallett{ 114215990Sjmallett cvmx_ipd_1st_mbuff_skip_t first_skip; 115210284Sjmallett cvmx_ipd_mbuff_not_first_skip_t not_first_skip; 116215990Sjmallett cvmx_ipd_packet_mbuff_size_t size; 117215990Sjmallett cvmx_ipd_1st_next_ptr_back_t first_back_struct; 118210284Sjmallett cvmx_ipd_second_next_ptr_back_t second_back_struct; 119215990Sjmallett cvmx_ipd_wqe_fpa_queue_t wqe_pool; 120210284Sjmallett cvmx_ipd_ctl_status_t ipd_ctl_reg; 121210284Sjmallett 122210284Sjmallett first_skip.u64 = 0; 123210284Sjmallett first_skip.s.skip_sz = first_mbuff_skip; 124210284Sjmallett cvmx_write_csr(CVMX_IPD_1ST_MBUFF_SKIP, first_skip.u64); 125210284Sjmallett 126210284Sjmallett not_first_skip.u64 = 0; 127210284Sjmallett not_first_skip.s.skip_sz = not_first_mbuff_skip; 128210284Sjmallett cvmx_write_csr(CVMX_IPD_NOT_1ST_MBUFF_SKIP, not_first_skip.u64); 129210284Sjmallett 130210284Sjmallett size.u64 = 0; 131210284Sjmallett size.s.mb_size = mbuff_size; 132210284Sjmallett cvmx_write_csr(CVMX_IPD_PACKET_MBUFF_SIZE, size.u64); 133210284Sjmallett 134210284Sjmallett first_back_struct.u64 = 0; 135210284Sjmallett first_back_struct.s.back = first_back; 136210284Sjmallett cvmx_write_csr(CVMX_IPD_1st_NEXT_PTR_BACK, first_back_struct.u64); 137210284Sjmallett 138210284Sjmallett second_back_struct.u64 = 0; 139210284Sjmallett 
second_back_struct.s.back = second_back; 140210284Sjmallett cvmx_write_csr(CVMX_IPD_2nd_NEXT_PTR_BACK,second_back_struct.u64); 141210284Sjmallett 142210284Sjmallett wqe_pool.u64 = 0; 143210284Sjmallett wqe_pool.s.wqe_pool = wqe_fpa_pool; 144210284Sjmallett cvmx_write_csr(CVMX_IPD_WQE_FPA_QUEUE, wqe_pool.u64); 145210284Sjmallett 146210284Sjmallett ipd_ctl_reg.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS); 147210284Sjmallett ipd_ctl_reg.s.opc_mode = cache_mode; 148210284Sjmallett ipd_ctl_reg.s.pbp_en = back_pres_enable_flag; 149210284Sjmallett cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_ctl_reg.u64); 150210284Sjmallett 151210284Sjmallett /* Note: the example RED code that used to be here has been moved to 152210284Sjmallett cvmx_helper_setup_red */ 153210284Sjmallett} 154210284Sjmallett 155210284Sjmallett 156210284Sjmallett/** 157210284Sjmallett * Enable IPD 158210284Sjmallett */ 159210284Sjmallettstatic inline void cvmx_ipd_enable(void) 160210284Sjmallett{ 161210284Sjmallett cvmx_ipd_ctl_status_t ipd_reg; 162210284Sjmallett ipd_reg.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS); 163210284Sjmallett if (ipd_reg.s.ipd_en) 164210284Sjmallett { 165210284Sjmallett cvmx_dprintf("Warning: Enabling IPD when IPD already enabled.\n"); 166210284Sjmallett } 167215990Sjmallett ipd_reg.s.ipd_en = 1; 168210284Sjmallett #if CVMX_ENABLE_LEN_M8_FIX 169210284Sjmallett if(!OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2)) { 170215990Sjmallett ipd_reg.s.len_m8 = 1; 171210284Sjmallett } 172210284Sjmallett #endif 173210284Sjmallett cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_reg.u64); 174210284Sjmallett} 175210284Sjmallett 176210284Sjmallett 177210284Sjmallett/** 178210284Sjmallett * Disable IPD 179210284Sjmallett */ 180210284Sjmallettstatic inline void cvmx_ipd_disable(void) 181210284Sjmallett{ 182210284Sjmallett cvmx_ipd_ctl_status_t ipd_reg; 183210284Sjmallett ipd_reg.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS); 184215990Sjmallett ipd_reg.s.ipd_en = 0; 185210284Sjmallett cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_reg.u64); 
186210284Sjmallett} 187210284Sjmallett 188210284Sjmallett#ifdef CVMX_ENABLE_PKO_FUNCTIONS 189210284Sjmallett/** 190215990Sjmallett * @INTERNAL 191215990Sjmallett * This function is called by cvmx_helper_shutdown() to extract 192215990Sjmallett * all FPA buffers out of the IPD and PIP. After this function 193215990Sjmallett * completes, all FPA buffers that were prefetched by IPD and PIP 194215990Sjmallett * wil be in the apropriate FPA pool. This functions does not reset 195215990Sjmallett * PIP or IPD as FPA pool zero must be empty before the reset can 196215990Sjmallett * be performed. WARNING: It is very important that IPD and PIP be 197215990Sjmallett * reset soon after a call to this function. 198210284Sjmallett */ 199215990Sjmallettstatic inline void __cvmx_ipd_free_ptr(void) 200210284Sjmallett{ 201210284Sjmallett /* Only CN38XXp{1,2} cannot read pointer out of the IPD */ 202215990Sjmallett if (!OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2)) { 203210284Sjmallett int no_wptr = 0; 204210284Sjmallett cvmx_ipd_ptr_count_t ipd_ptr_count; 205210284Sjmallett ipd_ptr_count.u64 = cvmx_read_csr(CVMX_IPD_PTR_COUNT); 206210284Sjmallett 207210284Sjmallett /* Handle Work Queue Entry in cn56xx and cn52xx */ 208210284Sjmallett if (octeon_has_feature(OCTEON_FEATURE_NO_WPTR)) { 209210284Sjmallett cvmx_ipd_ctl_status_t ipd_ctl_status; 210210284Sjmallett ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS); 211215990Sjmallett if (ipd_ctl_status.s.no_wptr) 212210284Sjmallett no_wptr = 1; 213210284Sjmallett } 214210284Sjmallett 215210284Sjmallett /* Free the prefetched WQE */ 216210284Sjmallett if (ipd_ptr_count.s.wqev_cnt) { 217210284Sjmallett cvmx_ipd_wqe_ptr_valid_t ipd_wqe_ptr_valid; 218210284Sjmallett ipd_wqe_ptr_valid.u64 = cvmx_read_csr(CVMX_IPD_WQE_PTR_VALID); 219210284Sjmallett if (no_wptr) 220210284Sjmallett cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_wqe_ptr_valid.s.ptr<<7), CVMX_FPA_PACKET_POOL, 0); 221210284Sjmallett else 222210284Sjmallett 
cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_wqe_ptr_valid.s.ptr<<7), CVMX_FPA_WQE_POOL, 0); 223210284Sjmallett } 224210284Sjmallett 225210284Sjmallett /* Free all WQE in the fifo */ 226210284Sjmallett if (ipd_ptr_count.s.wqe_pcnt) { 227210284Sjmallett int i; 228210284Sjmallett cvmx_ipd_pwp_ptr_fifo_ctl_t ipd_pwp_ptr_fifo_ctl; 229210284Sjmallett ipd_pwp_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL); 230210284Sjmallett for (i = 0; i < ipd_ptr_count.s.wqe_pcnt; i++) { 231210284Sjmallett ipd_pwp_ptr_fifo_ctl.s.cena = 0; 232210284Sjmallett ipd_pwp_ptr_fifo_ctl.s.raddr = ipd_pwp_ptr_fifo_ctl.s.max_cnts + (ipd_pwp_ptr_fifo_ctl.s.wraddr+i) % ipd_pwp_ptr_fifo_ctl.s.max_cnts; 233210284Sjmallett cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL, ipd_pwp_ptr_fifo_ctl.u64); 234210284Sjmallett ipd_pwp_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL); 235210284Sjmallett if (no_wptr) 236210284Sjmallett cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_pwp_ptr_fifo_ctl.s.ptr<<7), CVMX_FPA_PACKET_POOL, 0); 237210284Sjmallett else 238210284Sjmallett cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_pwp_ptr_fifo_ctl.s.ptr<<7), CVMX_FPA_WQE_POOL, 0); 239210284Sjmallett } 240210284Sjmallett ipd_pwp_ptr_fifo_ctl.s.cena = 1; 241210284Sjmallett cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL, ipd_pwp_ptr_fifo_ctl.u64); 242210284Sjmallett } 243210284Sjmallett 244210284Sjmallett /* Free the prefetched packet */ 245210284Sjmallett if (ipd_ptr_count.s.pktv_cnt) { 246210284Sjmallett cvmx_ipd_pkt_ptr_valid_t ipd_pkt_ptr_valid; 247210284Sjmallett ipd_pkt_ptr_valid.u64 = cvmx_read_csr(CVMX_IPD_PKT_PTR_VALID); 248215990Sjmallett cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_pkt_ptr_valid.s.ptr<<7), CVMX_FPA_PACKET_POOL, 0); 249210284Sjmallett } 250210284Sjmallett 251210284Sjmallett /* Free the per port prefetched packets */ 252210284Sjmallett if (1) { 253210284Sjmallett int i; 254210284Sjmallett cvmx_ipd_prc_port_ptr_fifo_ctl_t ipd_prc_port_ptr_fifo_ctl; 255210284Sjmallett 
ipd_prc_port_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL); 256210284Sjmallett 257210284Sjmallett for (i = 0; i < ipd_prc_port_ptr_fifo_ctl.s.max_pkt; i++) { 258210284Sjmallett ipd_prc_port_ptr_fifo_ctl.s.cena = 0; 259210284Sjmallett ipd_prc_port_ptr_fifo_ctl.s.raddr = i % ipd_prc_port_ptr_fifo_ctl.s.max_pkt; 260210284Sjmallett cvmx_write_csr(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL, ipd_prc_port_ptr_fifo_ctl.u64); 261210284Sjmallett ipd_prc_port_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL); 262210284Sjmallett cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_prc_port_ptr_fifo_ctl.s.ptr<<7), CVMX_FPA_PACKET_POOL, 0); 263210284Sjmallett } 264210284Sjmallett ipd_prc_port_ptr_fifo_ctl.s.cena = 1; 265210284Sjmallett cvmx_write_csr(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL, ipd_prc_port_ptr_fifo_ctl.u64); 266210284Sjmallett } 267210284Sjmallett 268210284Sjmallett /* Free all packets in the holding fifo */ 269210284Sjmallett if (ipd_ptr_count.s.pfif_cnt) { 270210284Sjmallett int i; 271210284Sjmallett cvmx_ipd_prc_hold_ptr_fifo_ctl_t ipd_prc_hold_ptr_fifo_ctl; 272210284Sjmallett 273210284Sjmallett ipd_prc_hold_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL); 274210284Sjmallett 275210284Sjmallett for (i = 0; i < ipd_ptr_count.s.pfif_cnt; i++) { 276210284Sjmallett ipd_prc_hold_ptr_fifo_ctl.s.cena = 0; 277210284Sjmallett ipd_prc_hold_ptr_fifo_ctl.s.raddr = (ipd_prc_hold_ptr_fifo_ctl.s.praddr + i) % ipd_prc_hold_ptr_fifo_ctl.s.max_pkt; 278210284Sjmallett cvmx_write_csr(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL, ipd_prc_hold_ptr_fifo_ctl.u64); 279210284Sjmallett ipd_prc_hold_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL); 280210284Sjmallett cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_prc_hold_ptr_fifo_ctl.s.ptr<<7), CVMX_FPA_PACKET_POOL, 0); 281210284Sjmallett } 282210284Sjmallett ipd_prc_hold_ptr_fifo_ctl.s.cena = 1; 283210284Sjmallett cvmx_write_csr(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL, ipd_prc_hold_ptr_fifo_ctl.u64); 284210284Sjmallett } 
285210284Sjmallett 286210284Sjmallett /* Free all packets in the fifo */ 287210284Sjmallett if (ipd_ptr_count.s.pkt_pcnt) { 288210284Sjmallett int i; 289210284Sjmallett cvmx_ipd_pwp_ptr_fifo_ctl_t ipd_pwp_ptr_fifo_ctl; 290210284Sjmallett ipd_pwp_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL); 291210284Sjmallett 292210284Sjmallett for (i = 0; i < ipd_ptr_count.s.pkt_pcnt; i++) { 293210284Sjmallett ipd_pwp_ptr_fifo_ctl.s.cena = 0; 294210284Sjmallett ipd_pwp_ptr_fifo_ctl.s.raddr = (ipd_pwp_ptr_fifo_ctl.s.praddr+i) % ipd_pwp_ptr_fifo_ctl.s.max_cnts; 295210284Sjmallett cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL, ipd_pwp_ptr_fifo_ctl.u64); 296210284Sjmallett ipd_pwp_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL); 297210284Sjmallett cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_pwp_ptr_fifo_ctl.s.ptr<<7), CVMX_FPA_PACKET_POOL, 0); 298210284Sjmallett } 299210284Sjmallett ipd_pwp_ptr_fifo_ctl.s.cena = 1; 300210284Sjmallett cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL, ipd_pwp_ptr_fifo_ctl.u64); 301210284Sjmallett } 302210284Sjmallett } 303210284Sjmallett} 304210284Sjmallett#endif 305210284Sjmallett 306210284Sjmallett#ifdef __cplusplus 307210284Sjmallett} 308210284Sjmallett#endif 309210284Sjmallett 310210284Sjmallett#endif /* __CVMX_IPD_H__ */ 311