/***********************license start***************
 * Copyright (c) 2003-2010  Cavium Networks (support@cavium.com). All rights
 * reserved.
 *
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *
 *   * Redistributions in binary form must reproduce the above
 *     copyright notice, this list of conditions and the following
 *     disclaimer in the documentation and/or other materials provided
 *     with the distribution.
 *
 *   * Neither the name of Cavium Networks nor the names of
 *     its contributors may be used to endorse or promote products
 *     derived from this software without specific prior written
 *     permission.
 *
 * This Software, including technical data, may be subject to U.S. export control
 * laws, including the U.S. Export Administration Act and its associated
 * regulations, and may be subject to export or import regulations in other
 * countries.
 *
 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
 * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
 ***********************license end**************************************/


/**
 * @file
 *
 * Interface to the hardware Packet Order / Work unit.
 *
 * New, starting with SDK 1.7.0, cvmx-pow supports a number of
 * extended consistency checks. The define
 * CVMX_ENABLE_POW_CHECKS controls the runtime insertion of POW
 * internal state checks to find common programming errors. If
 * CVMX_ENABLE_POW_CHECKS is not defined, checks are by default
 * enabled. For example, cvmx-pow will check for the following
 * program errors or POW state inconsistency.
 * - Requesting a POW operation with an active tag switch in
 *   progress.
 * - Waiting for a tag switch to complete for an excessively
 *   long period.
This is normally a sign of an error in locking 57210284Sjmallett * causing deadlock. 58210284Sjmallett * - Illegal tag switches from NULL_NULL. 59210284Sjmallett * - Illegal tag switches from NULL. 60210284Sjmallett * - Illegal deschedule request. 61210284Sjmallett * - WQE pointer not matching the one attached to the core by 62210284Sjmallett * the POW. 63210284Sjmallett * 64215990Sjmallett * <hr>$Revision: 49448 $<hr> 65210284Sjmallett */ 66210284Sjmallett 67210284Sjmallett#ifndef __CVMX_POW_H__ 68210284Sjmallett#define __CVMX_POW_H__ 69210284Sjmallett 70210284Sjmallett#include "cvmx-scratch.h" 71210284Sjmallett#include "cvmx-wqe.h" 72215990Sjmallett 73215990Sjmallett#ifndef CVMX_BUILD_FOR_LINUX_KERNEL 74210284Sjmallett#include "cvmx-warn.h" 75215990Sjmallett#endif 76210284Sjmallett 77210284Sjmallett#ifdef __cplusplus 78210284Sjmallettextern "C" { 79210284Sjmallett#endif 80210284Sjmallett 81210284Sjmallett/* Default to having all POW constancy checks turned on */ 82210284Sjmallett#ifndef CVMX_ENABLE_POW_CHECKS 83210284Sjmallett#define CVMX_ENABLE_POW_CHECKS 1 84210284Sjmallett#endif 85210284Sjmallett 86210284Sjmallett/** 87210284Sjmallett * Wait flag values for pow functions. 88210284Sjmallett */ 89210284Sjmalletttypedef enum 90210284Sjmallett{ 91210284Sjmallett CVMX_POW_WAIT = 1, 92210284Sjmallett CVMX_POW_NO_WAIT = 0, 93210284Sjmallett} cvmx_pow_wait_t; 94210284Sjmallett 95210284Sjmallett/** 96210284Sjmallett * POW tag operations. These are used in the data stored to the POW. 
97210284Sjmallett */ 98210284Sjmalletttypedef enum 99210284Sjmallett{ 100210284Sjmallett CVMX_POW_TAG_OP_SWTAG = 0L, /**< switch the tag (only) for this PP 101210284Sjmallett - the previous tag should be non-NULL in this case 102210284Sjmallett - tag switch response required 103210284Sjmallett - fields used: op, type, tag */ 104210284Sjmallett CVMX_POW_TAG_OP_SWTAG_FULL = 1L, /**< switch the tag for this PP, with full information 105210284Sjmallett - this should be used when the previous tag is NULL 106210284Sjmallett - tag switch response required 107210284Sjmallett - fields used: address, op, grp, type, tag */ 108210284Sjmallett CVMX_POW_TAG_OP_SWTAG_DESCH = 2L, /**< switch the tag (and/or group) for this PP and de-schedule 109210284Sjmallett - OK to keep the tag the same and only change the group 110210284Sjmallett - fields used: op, no_sched, grp, type, tag */ 111210284Sjmallett CVMX_POW_TAG_OP_DESCH = 3L, /**< just de-schedule 112210284Sjmallett - fields used: op, no_sched */ 113210284Sjmallett CVMX_POW_TAG_OP_ADDWQ = 4L, /**< create an entirely new work queue entry 114210284Sjmallett - fields used: address, op, qos, grp, type, tag */ 115210284Sjmallett CVMX_POW_TAG_OP_UPDATE_WQP_GRP = 5L,/**< just update the work queue pointer and grp for this PP 116210284Sjmallett - fields used: address, op, grp */ 117210284Sjmallett CVMX_POW_TAG_OP_SET_NSCHED = 6L, /**< set the no_sched bit on the de-schedule list 118210284Sjmallett - does nothing if the selected entry is not on the de-schedule list 119210284Sjmallett - does nothing if the stored work queue pointer does not match the address field 120210284Sjmallett - fields used: address, index, op 121210284Sjmallett Before issuing a *_NSCHED operation, SW must guarantee that all 122210284Sjmallett prior deschedules and set/clr NSCHED operations are complete and all 123210284Sjmallett prior switches are complete. The hardware provides the opsdone bit 124210284Sjmallett and swdone bit for SW polling. 
After issuing a *_NSCHED operation, 125210284Sjmallett SW must guarantee that the set/clr NSCHED is complete before 126210284Sjmallett any subsequent operations. */ 127210284Sjmallett CVMX_POW_TAG_OP_CLR_NSCHED = 7L, /**< clears the no_sched bit on the de-schedule list 128210284Sjmallett - does nothing if the selected entry is not on the de-schedule list 129210284Sjmallett - does nothing if the stored work queue pointer does not match the address field 130210284Sjmallett - fields used: address, index, op 131210284Sjmallett Before issuing a *_NSCHED operation, SW must guarantee that all 132210284Sjmallett prior deschedules and set/clr NSCHED operations are complete and all 133210284Sjmallett prior switches are complete. The hardware provides the opsdone bit 134210284Sjmallett and swdone bit for SW polling. After issuing a *_NSCHED operation, 135210284Sjmallett SW must guarantee that the set/clr NSCHED is complete before 136210284Sjmallett any subsequent operations. */ 137210284Sjmallett CVMX_POW_TAG_OP_NOP = 15L /**< do nothing */ 138210284Sjmallett} cvmx_pow_tag_op_t; 139210284Sjmallett 140210284Sjmallett/** 141210284Sjmallett * This structure defines the store data on a store to POW 142210284Sjmallett */ 143210284Sjmalletttypedef union 144210284Sjmallett{ 145210284Sjmallett uint64_t u64; 146210284Sjmallett struct 147210284Sjmallett { 148210284Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 149210284Sjmallett uint64_t no_sched : 1; /**< don't reschedule this entry. no_sched is used for CVMX_POW_TAG_OP_SWTAG_DESCH and CVMX_POW_TAG_OP_DESCH */ 150210284Sjmallett uint64_t unused : 2; 151210284Sjmallett uint64_t index :13; /**< contains index of entry for a CVMX_POW_TAG_OP_*_NSCHED */ 152210284Sjmallett cvmx_pow_tag_op_t op : 4; /**< the operation to perform */ 153210284Sjmallett uint64_t unused2 : 2; 154210284Sjmallett uint64_t qos : 3; /**< the QOS level for the packet. 
qos is only used for CVMX_POW_TAG_OP_ADDWQ */ 155210284Sjmallett uint64_t grp : 4; /**< the group that the work queue entry will be scheduled to grp is used for CVMX_POW_TAG_OP_ADDWQ, CVMX_POW_TAG_OP_SWTAG_FULL, CVMX_POW_TAG_OP_SWTAG_DESCH, and CVMX_POW_TAG_OP_UPDATE_WQP_GRP */ 156210284Sjmallett cvmx_pow_tag_type_t type : 3; /**< the type of the tag. type is used for everything except CVMX_POW_TAG_OP_DESCH, CVMX_POW_TAG_OP_UPDATE_WQP_GRP, and CVMX_POW_TAG_OP_*_NSCHED */ 157210284Sjmallett uint64_t tag :32; /**< the actual tag. tag is used for everything except CVMX_POW_TAG_OP_DESCH, CVMX_POW_TAG_OP_UPDATE_WQP_GRP, and CVMX_POW_TAG_OP_*_NSCHED */ 158210284Sjmallett#else 159210284Sjmallett uint64_t tag :32; 160210284Sjmallett cvmx_pow_tag_type_t type : 3; 161210284Sjmallett uint64_t grp : 4; 162210284Sjmallett uint64_t qos : 3; 163210284Sjmallett uint64_t unused2 : 2; 164210284Sjmallett cvmx_pow_tag_op_t op : 4; 165210284Sjmallett uint64_t index :13; 166210284Sjmallett uint64_t unused : 2; 167210284Sjmallett uint64_t no_sched : 1; 168210284Sjmallett#endif 169210284Sjmallett } s; 170210284Sjmallett} cvmx_pow_tag_req_t; 171210284Sjmallett 172210284Sjmallett/** 173210284Sjmallett * This structure describes the address to load stuff from POW 174210284Sjmallett */ 175210284Sjmalletttypedef union 176210284Sjmallett{ 177210284Sjmallett uint64_t u64; 178210284Sjmallett 179210284Sjmallett /** 180210284Sjmallett * Address for new work request loads (did<2:0> == 0) 181210284Sjmallett */ 182210284Sjmallett struct 183210284Sjmallett { 184210284Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 185210284Sjmallett uint64_t mem_region : 2; /**< Mips64 address region. 
Should be CVMX_IO_SEG */ 186210284Sjmallett uint64_t reserved_49_61 : 13; /**< Must be zero */ 187210284Sjmallett uint64_t is_io : 1; /**< Must be one */ 188210284Sjmallett uint64_t did : 8; /**< the ID of POW -- did<2:0> == 0 in this case */ 189210284Sjmallett uint64_t reserved_4_39 : 36; /**< Must be zero */ 190210284Sjmallett uint64_t wait : 1; /**< If set, don't return load response until work is available */ 191210284Sjmallett uint64_t reserved_0_2 : 3; /**< Must be zero */ 192210284Sjmallett#else 193210284Sjmallett uint64_t reserved_0_2 : 3; 194210284Sjmallett uint64_t wait : 1; 195210284Sjmallett uint64_t reserved_4_39 : 36; 196210284Sjmallett uint64_t did : 8; 197210284Sjmallett uint64_t is_io : 1; 198210284Sjmallett uint64_t reserved_49_61 : 13; 199210284Sjmallett uint64_t mem_region : 2; 200210284Sjmallett#endif 201210284Sjmallett } swork; 202210284Sjmallett 203210284Sjmallett /** 204210284Sjmallett * Address for loads to get POW internal status 205210284Sjmallett */ 206210284Sjmallett struct 207210284Sjmallett { 208210284Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 209210284Sjmallett uint64_t mem_region : 2; /**< Mips64 address region. 
Should be CVMX_IO_SEG */ 210210284Sjmallett uint64_t reserved_49_61 : 13; /**< Must be zero */ 211210284Sjmallett uint64_t is_io : 1; /**< Must be one */ 212210284Sjmallett uint64_t did : 8; /**< the ID of POW -- did<2:0> == 1 in this case */ 213210284Sjmallett uint64_t reserved_10_39 : 30; /**< Must be zero */ 214210284Sjmallett uint64_t coreid : 4; /**< The core id to get status for */ 215210284Sjmallett uint64_t get_rev : 1; /**< If set and get_cur is set, return reverse tag-list pointer rather than forward tag-list pointer */ 216210284Sjmallett uint64_t get_cur : 1; /**< If set, return current status rather than pending status */ 217210284Sjmallett uint64_t get_wqp : 1; /**< If set, get the work-queue pointer rather than tag/type */ 218210284Sjmallett uint64_t reserved_0_2 : 3; /**< Must be zero */ 219210284Sjmallett#else 220210284Sjmallett uint64_t reserved_0_2 : 3; 221210284Sjmallett uint64_t get_wqp : 1; 222210284Sjmallett uint64_t get_cur : 1; 223210284Sjmallett uint64_t get_rev : 1; 224210284Sjmallett uint64_t coreid : 4; 225210284Sjmallett uint64_t reserved_10_39 : 30; 226210284Sjmallett uint64_t did : 8; 227210284Sjmallett uint64_t is_io : 1; 228210284Sjmallett uint64_t reserved_49_61 : 13; 229210284Sjmallett uint64_t mem_region : 2; 230210284Sjmallett#endif 231210284Sjmallett } sstatus; 232210284Sjmallett 233210284Sjmallett /** 234210284Sjmallett * Address for memory loads to get POW internal state 235210284Sjmallett */ 236210284Sjmallett struct 237210284Sjmallett { 238210284Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 239210284Sjmallett uint64_t mem_region : 2; /**< Mips64 address region. 
Should be CVMX_IO_SEG */ 240210284Sjmallett uint64_t reserved_49_61 : 13; /**< Must be zero */ 241210284Sjmallett uint64_t is_io : 1; /**< Must be one */ 242210284Sjmallett uint64_t did : 8; /**< the ID of POW -- did<2:0> == 2 in this case */ 243210284Sjmallett uint64_t reserved_16_39 : 24; /**< Must be zero */ 244210284Sjmallett uint64_t index : 11; /**< POW memory index */ 245210284Sjmallett uint64_t get_des : 1; /**< If set, return deschedule information rather than the standard 246210284Sjmallett response for work-queue index (invalid if the work-queue entry is not on the 247210284Sjmallett deschedule list). */ 248210284Sjmallett uint64_t get_wqp : 1; /**< If set, get the work-queue pointer rather than tag/type (no effect when get_des set). */ 249210284Sjmallett uint64_t reserved_0_2 : 3; /**< Must be zero */ 250210284Sjmallett#else 251210284Sjmallett uint64_t reserved_0_2 : 3; 252210284Sjmallett uint64_t get_wqp : 1; 253210284Sjmallett uint64_t get_des : 1; 254210284Sjmallett uint64_t index : 11; 255210284Sjmallett uint64_t reserved_16_39 : 24; 256210284Sjmallett uint64_t did : 8; 257210284Sjmallett uint64_t is_io : 1; 258210284Sjmallett uint64_t reserved_49_61 : 13; 259210284Sjmallett uint64_t mem_region : 2; 260210284Sjmallett#endif 261210284Sjmallett } smemload; 262210284Sjmallett 263210284Sjmallett /** 264210284Sjmallett * Address for index/pointer loads 265210284Sjmallett */ 266210284Sjmallett struct 267210284Sjmallett { 268210284Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 269210284Sjmallett uint64_t mem_region : 2; /**< Mips64 address region. 
Should be CVMX_IO_SEG */ 270210284Sjmallett uint64_t reserved_49_61 : 13; /**< Must be zero */ 271210284Sjmallett uint64_t is_io : 1; /**< Must be one */ 272210284Sjmallett uint64_t did : 8; /**< the ID of POW -- did<2:0> == 3 in this case */ 273210284Sjmallett uint64_t reserved_9_39 : 31; /**< Must be zero */ 274210284Sjmallett uint64_t qosgrp : 4; /**< when {get_rmt ==0 AND get_des_get_tail == 0}, this field selects one of 275210284Sjmallett eight POW internal-input queues (0-7), one per QOS level; values 8-15 are 276210284Sjmallett illegal in this case; 277210284Sjmallett when {get_rmt ==0 AND get_des_get_tail == 1}, this field selects one of 278210284Sjmallett 16 deschedule lists (per group); 279210284Sjmallett when get_rmt ==1, this field selects one of 16 memory-input queue lists. 280210284Sjmallett The two memory-input queue lists associated with each QOS level are: 281210284Sjmallett - qosgrp = 0, qosgrp = 8: QOS0 282210284Sjmallett - qosgrp = 1, qosgrp = 9: QOS1 283210284Sjmallett - qosgrp = 2, qosgrp = 10: QOS2 284210284Sjmallett - qosgrp = 3, qosgrp = 11: QOS3 285210284Sjmallett - qosgrp = 4, qosgrp = 12: QOS4 286210284Sjmallett - qosgrp = 5, qosgrp = 13: QOS5 287210284Sjmallett - qosgrp = 6, qosgrp = 14: QOS6 288210284Sjmallett - qosgrp = 7, qosgrp = 15: QOS7 */ 289210284Sjmallett uint64_t get_des_get_tail: 1; /**< If set and get_rmt is clear, return deschedule list indexes 290210284Sjmallett rather than indexes for the specified qos level; if set and get_rmt is set, return 291210284Sjmallett the tail pointer rather than the head pointer for the specified qos level. */ 292210284Sjmallett uint64_t get_rmt : 1; /**< If set, return remote pointers rather than the local indexes for the specified qos level. 
*/ 293210284Sjmallett uint64_t reserved_0_2 : 3; /**< Must be zero */ 294210284Sjmallett#else 295210284Sjmallett uint64_t reserved_0_2 : 3; 296210284Sjmallett uint64_t get_rmt : 1; 297210284Sjmallett uint64_t get_des_get_tail: 1; 298210284Sjmallett uint64_t qosgrp : 4; 299210284Sjmallett uint64_t reserved_9_39 : 31; 300210284Sjmallett uint64_t did : 8; 301210284Sjmallett uint64_t is_io : 1; 302210284Sjmallett uint64_t reserved_49_61 : 13; 303210284Sjmallett uint64_t mem_region : 2; 304210284Sjmallett#endif 305210284Sjmallett } sindexload; 306210284Sjmallett 307210284Sjmallett /** 308210284Sjmallett * address for NULL_RD request (did<2:0> == 4) 309210284Sjmallett * when this is read, HW attempts to change the state to NULL if it is NULL_NULL 310210284Sjmallett * (the hardware cannot switch from NULL_NULL to NULL if a POW entry is not available - 311210284Sjmallett * software may need to recover by finishing another piece of work before a POW 312210284Sjmallett * entry can ever become available.) 313210284Sjmallett */ 314210284Sjmallett struct 315210284Sjmallett { 316210284Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 317210284Sjmallett uint64_t mem_region : 2; /**< Mips64 address region. 
Should be CVMX_IO_SEG */ 318210284Sjmallett uint64_t reserved_49_61 : 13; /**< Must be zero */ 319210284Sjmallett uint64_t is_io : 1; /**< Must be one */ 320210284Sjmallett uint64_t did : 8; /**< the ID of POW -- did<2:0> == 4 in this case */ 321210284Sjmallett uint64_t reserved_0_39 : 40; /**< Must be zero */ 322210284Sjmallett#else 323210284Sjmallett uint64_t reserved_0_39 : 40; 324210284Sjmallett uint64_t did : 8; 325210284Sjmallett uint64_t is_io : 1; 326210284Sjmallett uint64_t reserved_49_61 : 13; 327210284Sjmallett uint64_t mem_region : 2; 328210284Sjmallett#endif 329210284Sjmallett } snull_rd; 330210284Sjmallett} cvmx_pow_load_addr_t; 331210284Sjmallett 332210284Sjmallett/** 333210284Sjmallett * This structure defines the response to a load/SENDSINGLE to POW (except CSR reads) 334210284Sjmallett */ 335210284Sjmalletttypedef union 336210284Sjmallett{ 337210284Sjmallett uint64_t u64; 338210284Sjmallett 339210284Sjmallett /** 340210284Sjmallett * Response to new work request loads 341210284Sjmallett */ 342210284Sjmallett struct 343210284Sjmallett { 344210284Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 345210284Sjmallett uint64_t no_work : 1; /**< Set when no new work queue entry was returned. 346210284Sjmallett If there was de-scheduled work, the HW will definitely 347210284Sjmallett return it. When this bit is set, it could mean 348210284Sjmallett either mean: 349210284Sjmallett - There was no work, or 350210284Sjmallett - There was no work that the HW could find. This 351210284Sjmallett case can happen, regardless of the wait bit value 352210284Sjmallett in the original request, when there is work 353210284Sjmallett in the IQ's that is too deep down the list. 
*/ 354210284Sjmallett uint64_t reserved_40_62 : 23; /**< Must be zero */ 355210284Sjmallett uint64_t addr : 40; /**< 36 in O1 -- the work queue pointer */ 356210284Sjmallett#else 357210284Sjmallett uint64_t addr : 40; 358210284Sjmallett uint64_t reserved_40_62 : 23; 359210284Sjmallett uint64_t no_work : 1; 360210284Sjmallett#endif 361210284Sjmallett } s_work; 362210284Sjmallett 363210284Sjmallett /** 364210284Sjmallett * Result for a POW Status Load (when get_cur==0 and get_wqp==0) 365210284Sjmallett */ 366210284Sjmallett struct 367210284Sjmallett { 368210284Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 369210284Sjmallett uint64_t reserved_62_63 : 2; 370210284Sjmallett uint64_t pend_switch : 1; /**< Set when there is a pending non-NULL SWTAG or 371210284Sjmallett SWTAG_FULL, and the POW entry has not left the list for the original tag. */ 372210284Sjmallett uint64_t pend_switch_full: 1; /**< Set when SWTAG_FULL and pend_switch is set. */ 373210284Sjmallett uint64_t pend_switch_null: 1; /**< Set when there is a pending NULL SWTAG, or an implicit switch to NULL. */ 374210284Sjmallett uint64_t pend_desched : 1; /**< Set when there is a pending DESCHED or SWTAG_DESCHED. */ 375210284Sjmallett uint64_t pend_desched_switch: 1; /**< Set when there is a pending SWTAG_DESCHED and pend_desched is set. */ 376210284Sjmallett uint64_t pend_nosched : 1; /**< Set when nosched is desired and pend_desched is set. */ 377210284Sjmallett uint64_t pend_new_work : 1; /**< Set when there is a pending GET_WORK. */ 378210284Sjmallett uint64_t pend_new_work_wait: 1; /**< When pend_new_work is set, this bit indicates that the wait bit was set. */ 379210284Sjmallett uint64_t pend_null_rd : 1; /**< Set when there is a pending NULL_RD. */ 380210284Sjmallett uint64_t pend_nosched_clr: 1; /**< Set when there is a pending CLR_NSCHED. */ 381210284Sjmallett uint64_t reserved_51 : 1; 382210284Sjmallett uint64_t pend_index : 11; /**< This is the index when pend_nosched_clr is set. 
*/ 383210284Sjmallett uint64_t pend_grp : 4; /**< This is the new_grp when (pend_desched AND pend_desched_switch) is set. */ 384210284Sjmallett uint64_t reserved_34_35 : 2; 385210284Sjmallett uint64_t pend_type : 2; /**< This is the tag type when pend_switch or (pend_desched AND pend_desched_switch) are set. */ 386210284Sjmallett uint64_t pend_tag : 32; /**< - this is the tag when pend_switch or (pend_desched AND pend_desched_switch) are set. */ 387210284Sjmallett#else 388210284Sjmallett uint64_t pend_tag : 32; 389210284Sjmallett uint64_t pend_type : 2; 390210284Sjmallett uint64_t reserved_34_35 : 2; 391210284Sjmallett uint64_t pend_grp : 4; 392210284Sjmallett uint64_t pend_index : 11; 393210284Sjmallett uint64_t reserved_51 : 1; 394210284Sjmallett uint64_t pend_nosched_clr: 1; 395210284Sjmallett uint64_t pend_null_rd : 1; 396210284Sjmallett uint64_t pend_new_work_wait: 1; 397210284Sjmallett uint64_t pend_new_work : 1; 398210284Sjmallett uint64_t pend_nosched : 1; 399210284Sjmallett uint64_t pend_desched_switch: 1; 400210284Sjmallett uint64_t pend_desched : 1; 401210284Sjmallett uint64_t pend_switch_null: 1; 402210284Sjmallett uint64_t pend_switch_full: 1; 403210284Sjmallett uint64_t pend_switch : 1; 404210284Sjmallett uint64_t reserved_62_63 : 2; 405210284Sjmallett#endif 406210284Sjmallett } s_sstatus0; 407210284Sjmallett 408210284Sjmallett /** 409210284Sjmallett * Result for a POW Status Load (when get_cur==0 and get_wqp==1) 410210284Sjmallett */ 411210284Sjmallett struct 412210284Sjmallett { 413210284Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 414210284Sjmallett uint64_t reserved_62_63 : 2; 415210284Sjmallett uint64_t pend_switch : 1; /**< Set when there is a pending non-NULL SWTAG or 416210284Sjmallett SWTAG_FULL, and the POW entry has not left the list for the original tag. */ 417210284Sjmallett uint64_t pend_switch_full: 1; /**< Set when SWTAG_FULL and pend_switch is set. 
*/ 418210284Sjmallett uint64_t pend_switch_null: 1; /**< Set when there is a pending NULL SWTAG, or an implicit switch to NULL. */ 419210284Sjmallett uint64_t pend_desched : 1; /**< Set when there is a pending DESCHED or SWTAG_DESCHED. */ 420210284Sjmallett uint64_t pend_desched_switch: 1; /**< Set when there is a pending SWTAG_DESCHED and pend_desched is set. */ 421210284Sjmallett uint64_t pend_nosched : 1; /**< Set when nosched is desired and pend_desched is set. */ 422210284Sjmallett uint64_t pend_new_work : 1; /**< Set when there is a pending GET_WORK. */ 423210284Sjmallett uint64_t pend_new_work_wait: 1; /**< When pend_new_work is set, this bit indicates that the wait bit was set. */ 424210284Sjmallett uint64_t pend_null_rd : 1; /**< Set when there is a pending NULL_RD. */ 425210284Sjmallett uint64_t pend_nosched_clr: 1; /**< Set when there is a pending CLR_NSCHED. */ 426210284Sjmallett uint64_t reserved_51 : 1; 427210284Sjmallett uint64_t pend_index : 11; /**< This is the index when pend_nosched_clr is set. */ 428210284Sjmallett uint64_t pend_grp : 4; /**< This is the new_grp when (pend_desched AND pend_desched_switch) is set. */ 429210284Sjmallett uint64_t pend_wqp : 36; /**< This is the wqp when pend_nosched_clr is set. 
*/ 430210284Sjmallett#else 431210284Sjmallett uint64_t pend_wqp : 36; 432210284Sjmallett uint64_t pend_grp : 4; 433210284Sjmallett uint64_t pend_index : 11; 434210284Sjmallett uint64_t reserved_51 : 1; 435210284Sjmallett uint64_t pend_nosched_clr: 1; 436210284Sjmallett uint64_t pend_null_rd : 1; 437210284Sjmallett uint64_t pend_new_work_wait: 1; 438210284Sjmallett uint64_t pend_new_work : 1; 439210284Sjmallett uint64_t pend_nosched : 1; 440210284Sjmallett uint64_t pend_desched_switch: 1; 441210284Sjmallett uint64_t pend_desched : 1; 442210284Sjmallett uint64_t pend_switch_null: 1; 443210284Sjmallett uint64_t pend_switch_full: 1; 444210284Sjmallett uint64_t pend_switch : 1; 445210284Sjmallett uint64_t reserved_62_63 : 2; 446210284Sjmallett#endif 447210284Sjmallett } s_sstatus1; 448210284Sjmallett 449210284Sjmallett /** 450210284Sjmallett * Result for a POW Status Load (when get_cur==1, get_wqp==0, and get_rev==0) 451210284Sjmallett */ 452210284Sjmallett struct 453210284Sjmallett { 454210284Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 455210284Sjmallett uint64_t reserved_62_63 : 2; 456210284Sjmallett uint64_t link_index : 11; /**< Points to the next POW entry in the tag list when tail == 0 (and 457210284Sjmallett tag_type is not NULL or NULL_NULL). */ 458210284Sjmallett uint64_t index : 11; /**< The POW entry attached to the core. */ 459210284Sjmallett uint64_t grp : 4; /**< The group attached to the core (updated when new tag list entered on SWTAG_FULL). */ 460210284Sjmallett uint64_t head : 1; /**< Set when this POW entry is at the head of its tag list (also set when in 461210284Sjmallett the NULL or NULL_NULL state). */ 462210284Sjmallett uint64_t tail : 1; /**< Set when this POW entry is at the tail of its tag list (also set when in the 463210284Sjmallett NULL or NULL_NULL state). */ 464210284Sjmallett uint64_t tag_type : 2; /**< The tag type attached to the core (updated when new tag list 465210284Sjmallett entered on SWTAG, SWTAG_FULL, or SWTAG_DESCHED). 
*/ 466210284Sjmallett uint64_t tag : 32; /**< The tag attached to the core (updated when new tag list entered on 467210284Sjmallett SWTAG, SWTAG_FULL, or SWTAG_DESCHED). */ 468210284Sjmallett#else 469210284Sjmallett uint64_t tag : 32; 470210284Sjmallett uint64_t tag_type : 2; 471210284Sjmallett uint64_t tail : 1; 472210284Sjmallett uint64_t head : 1; 473210284Sjmallett uint64_t grp : 4; 474210284Sjmallett uint64_t index : 11; 475210284Sjmallett uint64_t link_index : 11; 476210284Sjmallett uint64_t reserved_62_63 : 2; 477210284Sjmallett#endif 478210284Sjmallett } s_sstatus2; 479210284Sjmallett 480210284Sjmallett /** 481210284Sjmallett * Result for a POW Status Load (when get_cur==1, get_wqp==0, and get_rev==1) 482210284Sjmallett */ 483210284Sjmallett struct 484210284Sjmallett { 485210284Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 486210284Sjmallett uint64_t reserved_62_63 : 2; 487210284Sjmallett uint64_t revlink_index : 11; /**< Points to the prior POW entry in the tag list when head == 0 488210284Sjmallett (and tag_type is not NULL or NULL_NULL). This field is unpredictable 489210284Sjmallett when the core's state is NULL or NULL_NULL. */ 490210284Sjmallett uint64_t index : 11; /**< The POW entry attached to the core. */ 491210284Sjmallett uint64_t grp : 4; /**< The group attached to the core (updated when new tag list entered on SWTAG_FULL). */ 492210284Sjmallett uint64_t head : 1; /**< Set when this POW entry is at the head of its tag list (also set when in 493210284Sjmallett the NULL or NULL_NULL state). */ 494210284Sjmallett uint64_t tail : 1; /**< Set when this POW entry is at the tail of its tag list (also set when in the 495210284Sjmallett NULL or NULL_NULL state). */ 496210284Sjmallett uint64_t tag_type : 2; /**< The tag type attached to the core (updated when new tag list 497210284Sjmallett entered on SWTAG, SWTAG_FULL, or SWTAG_DESCHED). 
*/ 498210284Sjmallett uint64_t tag : 32; /**< The tag attached to the core (updated when new tag list entered on 499210284Sjmallett SWTAG, SWTAG_FULL, or SWTAG_DESCHED). */ 500210284Sjmallett#else 501210284Sjmallett uint64_t tag : 32; 502210284Sjmallett uint64_t tag_type : 2; 503210284Sjmallett uint64_t tail : 1; 504210284Sjmallett uint64_t head : 1; 505210284Sjmallett uint64_t grp : 4; 506210284Sjmallett uint64_t index : 11; 507210284Sjmallett uint64_t revlink_index : 11; 508210284Sjmallett uint64_t reserved_62_63 : 2; 509210284Sjmallett#endif 510210284Sjmallett } s_sstatus3; 511210284Sjmallett 512210284Sjmallett /** 513210284Sjmallett * Result for a POW Status Load (when get_cur==1, get_wqp==1, and get_rev==0) 514210284Sjmallett */ 515210284Sjmallett struct 516210284Sjmallett { 517210284Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 518210284Sjmallett uint64_t reserved_62_63 : 2; 519210284Sjmallett uint64_t link_index : 11; /**< Points to the next POW entry in the tag list when tail == 0 (and 520210284Sjmallett tag_type is not NULL or NULL_NULL). */ 521210284Sjmallett uint64_t index : 11; /**< The POW entry attached to the core. */ 522210284Sjmallett uint64_t grp : 4; /**< The group attached to the core (updated when new tag list entered on SWTAG_FULL). */ 523210284Sjmallett uint64_t wqp : 36; /**< The wqp attached to the core (updated when new tag list entered on SWTAG_FULL). 
*/ 524210284Sjmallett#else 525210284Sjmallett uint64_t wqp : 36; 526210284Sjmallett uint64_t grp : 4; 527210284Sjmallett uint64_t index : 11; 528210284Sjmallett uint64_t link_index : 11; 529210284Sjmallett uint64_t reserved_62_63 : 2; 530210284Sjmallett#endif 531210284Sjmallett } s_sstatus4; 532210284Sjmallett 533210284Sjmallett /** 534210284Sjmallett * Result for a POW Status Load (when get_cur==1, get_wqp==1, and get_rev==1) 535210284Sjmallett */ 536210284Sjmallett struct 537210284Sjmallett { 538210284Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 539210284Sjmallett uint64_t reserved_62_63 : 2; 540210284Sjmallett uint64_t revlink_index : 11; /**< Points to the prior POW entry in the tag list when head == 0 541210284Sjmallett (and tag_type is not NULL or NULL_NULL). This field is unpredictable 542210284Sjmallett when the core's state is NULL or NULL_NULL. */ 543210284Sjmallett uint64_t index : 11; /**< The POW entry attached to the core. */ 544210284Sjmallett uint64_t grp : 4; /**< The group attached to the core (updated when new tag list entered on SWTAG_FULL). */ 545210284Sjmallett uint64_t wqp : 36; /**< The wqp attached to the core (updated when new tag list entered on SWTAG_FULL). */ 546210284Sjmallett#else 547210284Sjmallett uint64_t wqp : 36; 548210284Sjmallett uint64_t grp : 4; 549210284Sjmallett uint64_t index : 11; 550210284Sjmallett uint64_t revlink_index : 11; 551210284Sjmallett uint64_t reserved_62_63 : 2; 552210284Sjmallett#endif 553210284Sjmallett } s_sstatus5; 554210284Sjmallett 555210284Sjmallett /** 556210284Sjmallett * Result For POW Memory Load (get_des == 0 and get_wqp == 0) 557210284Sjmallett */ 558210284Sjmallett struct 559210284Sjmallett { 560210284Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 561210284Sjmallett uint64_t reserved_51_63 : 13; 562210284Sjmallett uint64_t next_index : 11; /**< The next entry in the input, free, descheduled_head list 563210284Sjmallett (unpredictable if entry is the tail of the list). 
*/ 564210284Sjmallett uint64_t grp : 4; /**< The group of the POW entry. */ 565210284Sjmallett uint64_t reserved_35 : 1; 566210284Sjmallett uint64_t tail : 1; /**< Set when this POW entry is at the tail of its tag list (also set when in the 567210284Sjmallett NULL or NULL_NULL state). */ 568210284Sjmallett uint64_t tag_type : 2; /**< The tag type of the POW entry. */ 569210284Sjmallett uint64_t tag : 32; /**< The tag of the POW entry. */ 570210284Sjmallett#else 571210284Sjmallett uint64_t tag : 32; 572210284Sjmallett uint64_t tag_type : 2; 573210284Sjmallett uint64_t tail : 1; 574210284Sjmallett uint64_t reserved_35 : 1; 575210284Sjmallett uint64_t grp : 4; 576210284Sjmallett uint64_t next_index : 11; 577210284Sjmallett uint64_t reserved_51_63 : 13; 578210284Sjmallett#endif 579210284Sjmallett } s_smemload0; 580210284Sjmallett 581210284Sjmallett /** 582210284Sjmallett * Result For POW Memory Load (get_des == 0 and get_wqp == 1) 583210284Sjmallett */ 584210284Sjmallett struct 585210284Sjmallett { 586210284Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 587210284Sjmallett uint64_t reserved_51_63 : 13; 588210284Sjmallett uint64_t next_index : 11; /**< The next entry in the input, free, descheduled_head list 589210284Sjmallett (unpredictable if entry is the tail of the list). */ 590210284Sjmallett uint64_t grp : 4; /**< The group of the POW entry. */ 591210284Sjmallett uint64_t wqp : 36; /**< The WQP held in the POW entry. 
*/ 592210284Sjmallett#else 593210284Sjmallett uint64_t wqp : 36; 594210284Sjmallett uint64_t grp : 4; 595210284Sjmallett uint64_t next_index : 11; 596210284Sjmallett uint64_t reserved_51_63 : 13; 597210284Sjmallett#endif 598210284Sjmallett } s_smemload1; 599210284Sjmallett 600210284Sjmallett /** 601210284Sjmallett * Result For POW Memory Load (get_des == 1) 602210284Sjmallett */ 603210284Sjmallett struct 604210284Sjmallett { 605210284Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 606210284Sjmallett uint64_t reserved_51_63 : 13; 607210284Sjmallett uint64_t fwd_index : 11; /**< The next entry in the tag list connected to the descheduled head. */ 608210284Sjmallett uint64_t grp : 4; /**< The group of the POW entry. */ 609210284Sjmallett uint64_t nosched : 1; /**< The nosched bit for the POW entry. */ 610210284Sjmallett uint64_t pend_switch : 1; /**< There is a pending tag switch */ 611210284Sjmallett uint64_t pend_type : 2; /**< The next tag type for the new tag list when pend_switch is set. */ 612210284Sjmallett uint64_t pend_tag : 32; /**< The next tag for the new tag list when pend_switch is set. */ 613210284Sjmallett#else 614210284Sjmallett uint64_t pend_tag : 32; 615210284Sjmallett uint64_t pend_type : 2; 616210284Sjmallett uint64_t pend_switch : 1; 617210284Sjmallett uint64_t nosched : 1; 618210284Sjmallett uint64_t grp : 4; 619210284Sjmallett uint64_t fwd_index : 11; 620210284Sjmallett uint64_t reserved_51_63 : 13; 621210284Sjmallett#endif 622210284Sjmallett } s_smemload2; 623210284Sjmallett 624210284Sjmallett /** 625210284Sjmallett * Result For POW Index/Pointer Load (get_rmt == 0/get_des_get_tail == 0) 626210284Sjmallett */ 627210284Sjmallett struct 628210284Sjmallett { 629210284Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 630210284Sjmallett uint64_t reserved_52_63 : 12; 631210284Sjmallett uint64_t free_val : 1; /**< - set when there is one or more POW entries on the free list. 
*/ 632210284Sjmallett uint64_t free_one : 1; /**< - set when there is exactly one POW entry on the free list. */ 633210284Sjmallett uint64_t reserved_49 : 1; 634210284Sjmallett uint64_t free_head : 11; /**< - when free_val is set, indicates the first entry on the free list. */ 635210284Sjmallett uint64_t reserved_37 : 1; 636210284Sjmallett uint64_t free_tail : 11; /**< - when free_val is set, indicates the last entry on the free list. */ 637210284Sjmallett uint64_t loc_val : 1; /**< - set when there is one or more POW entries on the input Q list selected by qosgrp. */ 638210284Sjmallett uint64_t loc_one : 1; /**< - set when there is exactly one POW entry on the input Q list selected by qosgrp. */ 639210284Sjmallett uint64_t reserved_23 : 1; 640210284Sjmallett uint64_t loc_head : 11; /**< - when loc_val is set, indicates the first entry on the input Q list selected by qosgrp. */ 641210284Sjmallett uint64_t reserved_11 : 1; 642210284Sjmallett uint64_t loc_tail : 11; /**< - when loc_val is set, indicates the last entry on the input Q list selected by qosgrp. 
*/ 643210284Sjmallett#else 644210284Sjmallett uint64_t loc_tail : 11; 645210284Sjmallett uint64_t reserved_11 : 1; 646210284Sjmallett uint64_t loc_head : 11; 647210284Sjmallett uint64_t reserved_23 : 1; 648210284Sjmallett uint64_t loc_one : 1; 649210284Sjmallett uint64_t loc_val : 1; 650210284Sjmallett uint64_t free_tail : 11; 651210284Sjmallett uint64_t reserved_37 : 1; 652210284Sjmallett uint64_t free_head : 11; 653210284Sjmallett uint64_t reserved_49 : 1; 654210284Sjmallett uint64_t free_one : 1; 655210284Sjmallett uint64_t free_val : 1; 656210284Sjmallett uint64_t reserved_52_63 : 12; 657210284Sjmallett#endif 658210284Sjmallett } sindexload0; 659210284Sjmallett 660210284Sjmallett /** 661210284Sjmallett * Result For POW Index/Pointer Load (get_rmt == 0/get_des_get_tail == 1) 662210284Sjmallett */ 663210284Sjmallett struct 664210284Sjmallett { 665210284Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 666210284Sjmallett uint64_t reserved_52_63 : 12; 667210284Sjmallett uint64_t nosched_val : 1; /**< - set when there is one or more POW entries on the nosched list. */ 668210284Sjmallett uint64_t nosched_one : 1; /**< - set when there is exactly one POW entry on the nosched list. */ 669210284Sjmallett uint64_t reserved_49 : 1; 670210284Sjmallett uint64_t nosched_head : 11; /**< - when nosched_val is set, indicates the first entry on the nosched list. */ 671210284Sjmallett uint64_t reserved_37 : 1; 672210284Sjmallett uint64_t nosched_tail : 11; /**< - when nosched_val is set, indicates the last entry on the nosched list. */ 673210284Sjmallett uint64_t des_val : 1; /**< - set when there is one or more descheduled heads on the descheduled list selected by qosgrp. */ 674210284Sjmallett uint64_t des_one : 1; /**< - set when there is exactly one descheduled head on the descheduled list selected by qosgrp. 
*/ 675210284Sjmallett uint64_t reserved_23 : 1; 676210284Sjmallett uint64_t des_head : 11; /**< - when des_val is set, indicates the first descheduled head on the descheduled list selected by qosgrp. */ 677210284Sjmallett uint64_t reserved_11 : 1; 678210284Sjmallett uint64_t des_tail : 11; /**< - when des_val is set, indicates the last descheduled head on the descheduled list selected by qosgrp. */ 679210284Sjmallett#else 680210284Sjmallett uint64_t des_tail : 11; 681210284Sjmallett uint64_t reserved_11 : 1; 682210284Sjmallett uint64_t des_head : 11; 683210284Sjmallett uint64_t reserved_23 : 1; 684210284Sjmallett uint64_t des_one : 1; 685210284Sjmallett uint64_t des_val : 1; 686210284Sjmallett uint64_t nosched_tail : 11; 687210284Sjmallett uint64_t reserved_37 : 1; 688210284Sjmallett uint64_t nosched_head : 11; 689210284Sjmallett uint64_t reserved_49 : 1; 690210284Sjmallett uint64_t nosched_one : 1; 691210284Sjmallett uint64_t nosched_val : 1; 692210284Sjmallett uint64_t reserved_52_63 : 12; 693210284Sjmallett#endif 694210284Sjmallett } sindexload1; 695210284Sjmallett 696210284Sjmallett /** 697210284Sjmallett * Result For POW Index/Pointer Load (get_rmt == 1/get_des_get_tail == 0) 698210284Sjmallett */ 699210284Sjmallett struct 700210284Sjmallett { 701210284Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 702210284Sjmallett uint64_t reserved_39_63 : 25; 703210284Sjmallett uint64_t rmt_is_head : 1; /**< Set when this DRAM list is the current head (i.e. is the next to 704210284Sjmallett be reloaded when the POW hardware reloads a POW entry from DRAM). The 705210284Sjmallett POW hardware alternates between the two DRAM lists associated with a QOS 706210284Sjmallett level when it reloads work from DRAM into the POW unit. */ 707210284Sjmallett uint64_t rmt_val : 1; /**< Set when the DRAM portion of the input Q list selected by qosgrp 708210284Sjmallett contains one or more pieces of work. 
*/ 709210284Sjmallett uint64_t rmt_one : 1; /**< Set when the DRAM portion of the input Q list selected by qosgrp 710210284Sjmallett contains exactly one piece of work. */ 711210284Sjmallett uint64_t rmt_head : 36; /**< When rmt_val is set, indicates the first piece of work on the 712210284Sjmallett DRAM input Q list selected by qosgrp. */ 713210284Sjmallett#else 714210284Sjmallett uint64_t rmt_head : 36; 715210284Sjmallett uint64_t rmt_one : 1; 716210284Sjmallett uint64_t rmt_val : 1; 717210284Sjmallett uint64_t rmt_is_head : 1; 718210284Sjmallett uint64_t reserved_39_63 : 25; 719210284Sjmallett#endif 720210284Sjmallett } sindexload2; 721210284Sjmallett 722210284Sjmallett /** 723210284Sjmallett * Result For POW Index/Pointer Load (get_rmt == 1/get_des_get_tail == 1) 724210284Sjmallett */ 725210284Sjmallett struct 726210284Sjmallett { 727210284Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 728210284Sjmallett uint64_t reserved_39_63 : 25; 729210284Sjmallett uint64_t rmt_is_head : 1; /**< - set when this DRAM list is the current head (i.e. is the next to 730210284Sjmallett be reloaded when the POW hardware reloads a POW entry from DRAM). The 731210284Sjmallett POW hardware alternates between the two DRAM lists associated with a QOS 732210284Sjmallett level when it reloads work from DRAM into the POW unit. */ 733210284Sjmallett uint64_t rmt_val : 1; /**< - set when the DRAM portion of the input Q list selected by qosgrp 734210284Sjmallett contains one or more pieces of work. */ 735210284Sjmallett uint64_t rmt_one : 1; /**< - set when the DRAM portion of the input Q list selected by qosgrp 736210284Sjmallett contains exactly one piece of work. */ 737210284Sjmallett uint64_t rmt_tail : 36; /**< - when rmt_val is set, indicates the last piece of work on the DRAM 738210284Sjmallett input Q list selected by qosgrp. 
*/ 739210284Sjmallett#else 740210284Sjmallett uint64_t rmt_tail : 36; 741210284Sjmallett uint64_t rmt_one : 1; 742210284Sjmallett uint64_t rmt_val : 1; 743210284Sjmallett uint64_t rmt_is_head : 1; 744210284Sjmallett uint64_t reserved_39_63 : 25; 745210284Sjmallett#endif 746210284Sjmallett } sindexload3; 747210284Sjmallett 748210284Sjmallett /** 749210284Sjmallett * Response to NULL_RD request loads 750210284Sjmallett */ 751210284Sjmallett struct 752210284Sjmallett { 753210284Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN 754210284Sjmallett uint64_t unused : 62; 755210284Sjmallett uint64_t state : 2; /**< of type cvmx_pow_tag_type_t. state is one of the following: 756210284Sjmallett - CVMX_POW_TAG_TYPE_ORDERED 757210284Sjmallett - CVMX_POW_TAG_TYPE_ATOMIC 758210284Sjmallett - CVMX_POW_TAG_TYPE_NULL 759210284Sjmallett - CVMX_POW_TAG_TYPE_NULL_NULL */ 760210284Sjmallett#else 761210284Sjmallett uint64_t state : 2; 762210284Sjmallett uint64_t unused : 62; 763210284Sjmallett#endif 764210284Sjmallett } s_null_rd; 765210284Sjmallett 766210284Sjmallett} cvmx_pow_tag_load_resp_t; 767210284Sjmallett 768210284Sjmallett/** 769210284Sjmallett * This structure describes the address used for stores to the POW. 770210284Sjmallett * The store address is meaningful on stores to the POW. The hardware assumes that an aligned 771210284Sjmallett * 64-bit store was used for all these stores. 772210284Sjmallett * Note the assumption that the work queue entry is aligned on an 8-byte 773210284Sjmallett * boundary (since the low-order 3 address bits must be zero). 774210284Sjmallett * Note that not all fields are used by all operations. 775210284Sjmallett * 776210284Sjmallett * NOTE: The following is the behavior of the pending switch bit at the PP 777210284Sjmallett * for POW stores (i.e. 
when did<7:3> == 0xc)
 *   - did<2:0> == 0      => pending switch bit is set
 *   - did<2:0> == 1      => no effect on the pending switch bit
 *   - did<2:0> == 3      => pending switch bit is cleared
 *   - did<2:0> == 7      => no effect on the pending switch bit
 *   - did<2:0> == others => must not be used
 *   - No other loads/stores have an effect on the pending switch bit
 *   - The switch bus from POW can clear the pending switch bit
 *
 * NOTE: did<2:0> == 2 is used by the HW for a special single-cycle ADDWQ command
 * that only contains the pointer). SW must never use did<2:0> == 2.
 */
typedef union
{
    /**
     * Unsigned 64 bit integer representation of store address
     */
    uint64_t u64;

    /** Bitfield view of the store address (layout mirrored for both endiannesses) */
    struct
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t    mem_reg        : 2;  /**< Memory region. Should be CVMX_IO_SEG in most cases */
        uint64_t    reserved_49_61 : 13; /**< Must be zero */
        uint64_t    is_io          : 1;  /**< Must be one */
        uint64_t    did            : 8;  /**< Device ID of POW. Note that different sub-dids are used. */
        uint64_t    reserved_36_39 : 4;  /**< Must be zero */
        uint64_t    addr           : 36; /**< Address field. addr<2:0> must be zero */
#else
        uint64_t    addr           : 36;
        uint64_t    reserved_36_39 : 4;
        uint64_t    did            : 8;
        uint64_t    is_io          : 1;
        uint64_t    reserved_49_61 : 13;
        uint64_t    mem_reg        : 2;
#endif
    } stag;
} cvmx_pow_tag_store_addr_t;

/**
 * decode of the store data when an IOBDMA SENDSINGLE is sent to POW
 */
typedef union
{
    uint64_t u64;

    struct
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t    scraddr : 8;  /**< the (64-bit word) location in scratchpad to write to (if len != 0) */
        uint64_t    len     : 8;  /**< the number of words in the response (0 => no response) */
        uint64_t    did     : 8;  /**< the ID of the device on the non-coherent bus */
        uint64_t    unused  :36;
        uint64_t    wait    : 1;  /**< if set, don't return load response until work is available */
        uint64_t    unused2 : 3;
#else
        uint64_t    unused2 : 3;
        uint64_t    wait    : 1;
        uint64_t    unused  :36;
        uint64_t    did     : 8;
        uint64_t    len     : 8;
        uint64_t    scraddr : 8;
#endif
    } s;

} cvmx_pow_iobdma_store_t;


/* CSR typedefs have been moved to cvmx-pow-defs.h */

/**
 * Get the POW tag for this core. This returns the current
 * tag type, tag, group, and POW entry index associated with
 * this core.
Index is only valid if the tag type isn't NULL_NULL.
 * If a tag switch is pending this routine returns the tag before
 * the tag switch, not after.
 *
 * @return Current tag
 */
static inline cvmx_pow_tag_req_t cvmx_pow_get_current_tag(void)
{
    cvmx_pow_load_addr_t load_addr;
    cvmx_pow_tag_load_resp_t load_resp;
    cvmx_pow_tag_req_t result;

    /* Build a POW status load address targeting this core's current state
       (get_cur=1; get_wqp/get_rev left 0, so the response uses the
       s_sstatus2 layout) */
    load_addr.u64 = 0;
    load_addr.sstatus.mem_region = CVMX_IO_SEG;
    load_addr.sstatus.is_io = 1;
    load_addr.sstatus.did = CVMX_OCT_DID_TAG_TAG1;
    load_addr.sstatus.coreid = cvmx_get_core_num();
    load_addr.sstatus.get_cur = 1;
    load_resp.u64 = cvmx_read_csr(load_addr.u64);
    /* Repack the hardware response into the generic tag request format */
    result.u64 = 0;
    result.s.grp = load_resp.s_sstatus2.grp;
    result.s.index = load_resp.s_sstatus2.index;
    result.s.type = (cvmx_pow_tag_type_t)load_resp.s_sstatus2.tag_type;
    result.s.tag = load_resp.s_sstatus2.tag;
    return result;
}


/**
 * Get the POW WQE for this core. This returns the work queue
 * entry currently associated with this core.
 *
 * @return WQE pointer
 */
static inline cvmx_wqe_t *cvmx_pow_get_current_wqp(void)
{
    cvmx_pow_load_addr_t load_addr;
    cvmx_pow_tag_load_resp_t load_resp;

    /* POW status load for this core, requesting the current entry (get_cur=1)
       together with its work queue pointer (get_wqp=1), so the response uses
       the s_sstatus4 layout */
    load_addr.u64 = 0;
    load_addr.sstatus.mem_region = CVMX_IO_SEG;
    load_addr.sstatus.is_io = 1;
    load_addr.sstatus.did = CVMX_OCT_DID_TAG_TAG1;
    load_addr.sstatus.coreid = cvmx_get_core_num();
    load_addr.sstatus.get_cur = 1;
    load_addr.sstatus.get_wqp = 1;
    load_resp.u64 = cvmx_read_csr(load_addr.u64);
    /* wqp is a 36-bit physical address; convert it to a usable pointer */
    return (cvmx_wqe_t*)cvmx_phys_to_ptr(load_resp.s_sstatus4.wqp);
}


/**
 * @INTERNAL
 * Print a warning if a tag switch is pending for this core
 *
 * @param function Function name checking for a pending tag switch
 */
static inline void __cvmx_pow_warn_if_pending_switch(const char *function)
{
    uint64_t switch_complete;
    /* CHORD reads the tag-switch completion bit; nonzero means no switch
       is in flight */
    CVMX_MF_CHORD(switch_complete);
    cvmx_warn_if(!switch_complete, "%s called with tag switch in progress\n", function);
}


/**
 * Waits for a tag switch to complete by polling the completion bit.
 * Note that switches to NULL complete immediately and do not need
 * to be waited for.
 */
static inline void cvmx_pow_tag_sw_wait(void)
{
    const uint64_t MAX_CYCLES = 1ull<<31;   /* ~2^31 cycles before warning of a possible deadlock */
    uint64_t switch_complete;
    uint64_t start_cycle = cvmx_get_cycle();
    while (1)
    {
        /* Poll the tag-switch completion bit; nonzero means the switch is done */
        CVMX_MF_CHORD(switch_complete);
        if (cvmx_unlikely(switch_complete))
            break;
        if (cvmx_unlikely(cvmx_get_cycle() > start_cycle + MAX_CYCLES))
        {
            cvmx_dprintf("WARNING: Tag switch is taking a long time, possible deadlock\n");
            /* Wrap start_cycle so start_cycle + MAX_CYCLES == UINT64_MAX and
               the timeout test can never fire again: warn at most once */
            start_cycle = -MAX_CYCLES-1;
        }
    }
}


/**
 * Synchronous work request.  Requests work from the POW.
 * This function does NOT wait for previous tag switches to complete,
 * so the caller must ensure that there is not a pending tag switch.
 *
 * @param wait   When set, call stalls until work becomes available, or times out.
 *               If not set, returns immediately.
 *
 * @return Returns the WQE pointer from POW. Returns NULL if no work was available.
 */
static inline cvmx_wqe_t * cvmx_pow_work_request_sync_nocheck(cvmx_pow_wait_t wait)
{
    cvmx_pow_load_addr_t ptr;
    cvmx_pow_tag_load_resp_t result;

    if (CVMX_ENABLE_POW_CHECKS)
        __cvmx_pow_warn_if_pending_switch(__FUNCTION__);

    /* A work request is performed as an I/O-segment load; the wait bit
       tells the POW whether to stall the load until work is available */
    ptr.u64 = 0;
    ptr.swork.mem_region = CVMX_IO_SEG;
    ptr.swork.is_io = 1;
    ptr.swork.did = CVMX_OCT_DID_TAG_SWTAG;
    ptr.swork.wait = wait;

    result.u64 = cvmx_read_csr(ptr.u64);

    /* no_work set means the POW had nothing for us (or the wait timed out) */
    if (result.s_work.no_work)
        return NULL;
    else
        return (cvmx_wqe_t*)cvmx_phys_to_ptr(result.s_work.addr);
}


/**
 * Synchronous work request.  Requests work from the POW.
 * This function waits for any previous tag switch to complete before
 * requesting the new work.
 *
 * @param wait   When set, call stalls until work becomes available, or times out.
 *               If not set, returns immediately.
 *
 * @return Returns the WQE pointer from POW. Returns NULL if no work was available.
 */
static inline cvmx_wqe_t * cvmx_pow_work_request_sync(cvmx_pow_wait_t wait)
{
    if (CVMX_ENABLE_POW_CHECKS)
        __cvmx_pow_warn_if_pending_switch(__FUNCTION__);

    /* Must not have a switch pending when requesting work */
    cvmx_pow_tag_sw_wait();
    return(cvmx_pow_work_request_sync_nocheck(wait));

}


/**
 * Synchronous null_rd request.  Requests a switch out of NULL_NULL POW state.
 * This function waits for any previous tag switch to complete before
 * requesting the null_rd.
 *
 * @return Returns the POW state of type cvmx_pow_tag_type_t.
 */
static inline cvmx_pow_tag_type_t cvmx_pow_work_request_null_rd(void)
{
    cvmx_pow_load_addr_t ptr;
    cvmx_pow_tag_load_resp_t result;

    if (CVMX_ENABLE_POW_CHECKS)
        __cvmx_pow_warn_if_pending_switch(__FUNCTION__);

    /* Must not have a switch pending when requesting work */
    cvmx_pow_tag_sw_wait();

    /* null_rd is issued as a load from the dedicated NULL_RD device ID */
    ptr.u64 = 0;
    ptr.snull_rd.mem_region = CVMX_IO_SEG;
    ptr.snull_rd.is_io = 1;
    ptr.snull_rd.did = CVMX_OCT_DID_TAG_NULL_RD;

    result.u64 = cvmx_read_csr(ptr.u64);

    /* The response uses the s_null_rd layout: only the 2-bit state field
       is meaningful */
    return (cvmx_pow_tag_type_t)result.s_null_rd.state;
}


/**
 * Asynchronous work request.
Work is requested from the POW unit, and should later 1025210284Sjmallett * be checked with function cvmx_pow_work_response_async. 1026210284Sjmallett * This function does NOT wait for previous tag switches to complete, 1027210284Sjmallett * so the caller must ensure that there is not a pending tag switch. 1028210284Sjmallett * 1029210284Sjmallett * @param scr_addr Scratch memory address that response will be returned to, 1030210284Sjmallett * which is either a valid WQE, or a response with the invalid bit set. 1031210284Sjmallett * Byte address, must be 8 byte aligned. 1032210284Sjmallett * @param wait 1 to cause response to wait for work to become available (or timeout) 1033210284Sjmallett * 0 to cause response to return immediately 1034210284Sjmallett */ 1035210284Sjmallettstatic inline void cvmx_pow_work_request_async_nocheck(int scr_addr, cvmx_pow_wait_t wait) 1036210284Sjmallett{ 1037210284Sjmallett cvmx_pow_iobdma_store_t data; 1038210284Sjmallett 1039210284Sjmallett if (CVMX_ENABLE_POW_CHECKS) 1040210284Sjmallett __cvmx_pow_warn_if_pending_switch(__FUNCTION__); 1041210284Sjmallett 1042210284Sjmallett /* scr_addr must be 8 byte aligned */ 1043210284Sjmallett data.s.scraddr = scr_addr >> 3; 1044210284Sjmallett data.s.len = 1; 1045210284Sjmallett data.s.did = CVMX_OCT_DID_TAG_SWTAG; 1046210284Sjmallett data.s.wait = wait; 1047210284Sjmallett cvmx_send_single(data.u64); 1048210284Sjmallett} 1049210284Sjmallett/** 1050210284Sjmallett * Asynchronous work request. Work is requested from the POW unit, and should later 1051210284Sjmallett * be checked with function cvmx_pow_work_response_async. 1052210284Sjmallett * This function waits for any previous tag switch to complete before 1053210284Sjmallett * requesting the new work. 1054210284Sjmallett * 1055210284Sjmallett * @param scr_addr Scratch memory address that response will be returned to, 1056210284Sjmallett * which is either a valid WQE, or a response with the invalid bit set. 
 *                  Byte address, must be 8 byte aligned.
 * @param wait 1 to cause response to wait for work to become available (or timeout)
 *             0 to cause response to return immediately
 */
static inline void cvmx_pow_work_request_async(int scr_addr, cvmx_pow_wait_t wait)
{
    if (CVMX_ENABLE_POW_CHECKS)
        __cvmx_pow_warn_if_pending_switch(__FUNCTION__);

    /* Must not have a switch pending when requesting work */
    cvmx_pow_tag_sw_wait();
    cvmx_pow_work_request_async_nocheck(scr_addr, wait);
}


/**
 * Gets result of asynchronous work request.  Performs a IOBDMA sync
 * to wait for the response.
 *
 * @param scr_addr Scratch memory address to get result from
 *                  Byte address, must be 8 byte aligned.
 * @return Returns the WQE from the scratch register, or NULL if no work was available.
 */
static inline cvmx_wqe_t * cvmx_pow_work_response_async(int scr_addr)
{
    cvmx_pow_tag_load_resp_t result;

    /* Make sure the IOBDMA response has landed in the scratchpad before
       reading it */
    CVMX_SYNCIOBDMA;
    result.u64 = cvmx_scratch_read64(scr_addr);

    /* no_work set means the POW had nothing for us (or the wait timed out) */
    if (result.s_work.no_work)
        return NULL;
    else
        return (cvmx_wqe_t*)cvmx_phys_to_ptr(result.s_work.addr);
}


/**
 * Checks if a work queue entry pointer returned by a work
 * request is valid.  It may be invalid due to no work
 * being available or due to a timeout.
1098210284Sjmallett * 1099210284Sjmallett * @param wqe_ptr pointer to a work queue entry returned by the POW 1100210284Sjmallett * 1101210284Sjmallett * @return 0 if pointer is valid 1102210284Sjmallett * 1 if invalid (no work was returned) 1103210284Sjmallett */ 1104210284Sjmallettstatic inline uint64_t cvmx_pow_work_invalid(cvmx_wqe_t *wqe_ptr) 1105210284Sjmallett{ 1106210284Sjmallett return (wqe_ptr == NULL); 1107210284Sjmallett} 1108210284Sjmallett 1109210284Sjmallett 1110210284Sjmallett 1111210284Sjmallett/** 1112210284Sjmallett * Starts a tag switch to the provided tag value and tag type. Completion for 1113210284Sjmallett * the tag switch must be checked for separately. 1114210284Sjmallett * This function does NOT update the 1115210284Sjmallett * work queue entry in dram to match tag value and type, so the application must 1116210284Sjmallett * keep track of these if they are important to the application. 1117210284Sjmallett * This tag switch command must not be used for switches to NULL, as the tag 1118210284Sjmallett * switch pending bit will be set by the switch request, but never cleared by the 1119210284Sjmallett * hardware. 1120210284Sjmallett * 1121210284Sjmallett * NOTE: This should not be used when switching from a NULL tag. Use 1122210284Sjmallett * cvmx_pow_tag_sw_full() instead. 1123210284Sjmallett * 1124210284Sjmallett * This function does no checks, so the caller must ensure that any previous tag 1125210284Sjmallett * switch has completed. 
 *
 * @param tag      new tag value
 * @param tag_type new tag type (ordered or atomic)
 */
static inline void cvmx_pow_tag_sw_nocheck(uint32_t tag, cvmx_pow_tag_type_t tag_type)
{
    cvmx_addr_t ptr;
    cvmx_pow_tag_req_t tag_req;

    if (CVMX_ENABLE_POW_CHECKS)
    {
        cvmx_pow_tag_req_t current_tag;
        __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
        current_tag = cvmx_pow_get_current_tag();
        /* Sanity checks: plain SWTAG must not be used from (or to) the NULL
           states, and switching to the identical tag is a likely bug */
        cvmx_warn_if(current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL, "%s called with NULL_NULL tag\n", __FUNCTION__);
        cvmx_warn_if(current_tag.s.type == CVMX_POW_TAG_TYPE_NULL, "%s called with NULL tag\n", __FUNCTION__);
        cvmx_warn_if((current_tag.s.type == tag_type) && (current_tag.s.tag == tag), "%s called to perform a tag switch to the same tag\n", __FUNCTION__);
        cvmx_warn_if(tag_type == CVMX_POW_TAG_TYPE_NULL, "%s called to perform a tag switch to NULL. Use cvmx_pow_tag_sw_null() instead\n", __FUNCTION__);
    }

    /* Note that WQE in DRAM is not updated here, as the POW does not read from DRAM
    ** once the WQE is in flight.  See hardware manual for complete details.
    ** It is the application's responsibility to keep track of the current tag
    ** value if that is important.
    */

    tag_req.u64 = 0;
    tag_req.s.op = CVMX_POW_TAG_OP_SWTAG;
    tag_req.s.tag = tag;
    tag_req.s.type = tag_type;

    ptr.u64 = 0;
    ptr.sio.mem_region = CVMX_IO_SEG;
    ptr.sio.is_io = 1;
    ptr.sio.did = CVMX_OCT_DID_TAG_SWTAG;

    /* once this store arrives at POW, it will attempt the switch
       software must wait for the switch to complete separately */
    cvmx_write_io(ptr.u64, tag_req.u64);
}


/**
 * Starts a tag switch to the provided tag value and tag type.  Completion for
 * the tag switch must be checked for separately.
 * This function does NOT update the
 * work queue entry in dram to match tag value and type, so the application must
 * keep track of these if they are important to the application.
 * This tag switch command must not be used for switches to NULL, as the tag
 * switch pending bit will be set by the switch request, but never cleared by the
 * hardware.
 *
 * NOTE: This should not be used when switching from a NULL tag.  Use
 * cvmx_pow_tag_sw_full() instead.
 *
 * This function waits for any previous tag switch to complete, and also
 * displays an error on tag switches to NULL.
 *
 * @param tag      new tag value
 * @param tag_type new tag type (ordered or atomic)
 */
static inline void cvmx_pow_tag_sw(uint32_t tag, cvmx_pow_tag_type_t tag_type)
{
    if (CVMX_ENABLE_POW_CHECKS)
        __cvmx_pow_warn_if_pending_switch(__FUNCTION__);

    /* Note that WQE in DRAM is not updated here, as the POW does not read from DRAM
    ** once the WQE is in flight.  See hardware manual for complete details.
    ** It is the application's responsibility to keep track of the current tag
    ** value if that is important.
    */

    /* Ensure that there is not a pending tag switch, as a tag switch cannot be started
    ** if a previous switch is still pending.  */
    cvmx_pow_tag_sw_wait();
    cvmx_pow_tag_sw_nocheck(tag, tag_type);
}


/**
 * Starts a tag switch to the provided tag value and tag type.  Completion for
 * the tag switch must be checked for separately.
 * This function does NOT update the
 * work queue entry in dram to match tag value and type, so the application must
 * keep track of these if they are important to the application.
 * This tag switch command must not be used for switches to NULL, as the tag
 * switch pending bit will be set by the switch request, but never cleared by the
 * hardware.
 *
 * This function must be used for tag switches from NULL.
1217210284Sjmallett * 1218210284Sjmallett * This function does no checks, so the caller must ensure that any previous tag 1219210284Sjmallett * switch has completed. 1220210284Sjmallett * 1221210284Sjmallett * @param wqp pointer to work queue entry to submit. This entry is updated to match the other parameters 1222210284Sjmallett * @param tag tag value to be assigned to work queue entry 1223210284Sjmallett * @param tag_type type of tag 1224210284Sjmallett * @param group group value for the work queue entry. 1225210284Sjmallett */ 1226210284Sjmallettstatic inline void cvmx_pow_tag_sw_full_nocheck(cvmx_wqe_t *wqp, uint32_t tag, cvmx_pow_tag_type_t tag_type, uint64_t group) 1227210284Sjmallett{ 1228210284Sjmallett cvmx_addr_t ptr; 1229210284Sjmallett cvmx_pow_tag_req_t tag_req; 1230210284Sjmallett 1231210284Sjmallett if (CVMX_ENABLE_POW_CHECKS) 1232210284Sjmallett { 1233210284Sjmallett cvmx_pow_tag_req_t current_tag; 1234210284Sjmallett __cvmx_pow_warn_if_pending_switch(__FUNCTION__); 1235210284Sjmallett current_tag = cvmx_pow_get_current_tag(); 1236210284Sjmallett cvmx_warn_if(current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL, "%s called with NULL_NULL tag\n", __FUNCTION__); 1237210284Sjmallett cvmx_warn_if((current_tag.s.type == tag_type) && (current_tag.s.tag == tag), "%s called to perform a tag switch to the same tag\n", __FUNCTION__); 1238210284Sjmallett cvmx_warn_if(tag_type == CVMX_POW_TAG_TYPE_NULL, "%s called to perform a tag switch to NULL. Use cvmx_pow_tag_sw_null() instead\n", __FUNCTION__); 1239210284Sjmallett if (wqp != cvmx_phys_to_ptr(0x80)) 1240210284Sjmallett cvmx_warn_if(wqp != cvmx_pow_get_current_wqp(), "%s passed WQE(%p) doesn't match the address in the POW(%p)\n", __FUNCTION__, wqp, cvmx_pow_get_current_wqp()); 1241210284Sjmallett } 1242210284Sjmallett 1243210284Sjmallett /* Note that WQE in DRAM is not updated here, as the POW does not read from DRAM 1244210284Sjmallett ** once the WQE is in flight. See hardware manual for complete details. 
1245210284Sjmallett ** It is the application's responsibility to keep track of the current tag 1246210284Sjmallett ** value if that is important. 1247210284Sjmallett */ 1248210284Sjmallett 1249210284Sjmallett tag_req.u64 = 0; 1250210284Sjmallett tag_req.s.op = CVMX_POW_TAG_OP_SWTAG_FULL; 1251210284Sjmallett tag_req.s.tag = tag; 1252210284Sjmallett tag_req.s.type = tag_type; 1253210284Sjmallett tag_req.s.grp = group; 1254210284Sjmallett 1255210284Sjmallett ptr.u64 = 0; 1256210284Sjmallett ptr.sio.mem_region = CVMX_IO_SEG; 1257210284Sjmallett ptr.sio.is_io = 1; 1258210284Sjmallett ptr.sio.did = CVMX_OCT_DID_TAG_SWTAG; 1259210284Sjmallett ptr.sio.offset = CAST64(wqp); 1260210284Sjmallett 1261210284Sjmallett /* once this store arrives at POW, it will attempt the switch 1262210284Sjmallett software must wait for the switch to complete separately */ 1263210284Sjmallett cvmx_write_io(ptr.u64, tag_req.u64); 1264210284Sjmallett} 1265210284Sjmallett 1266210284Sjmallett 1267210284Sjmallett/** 1268210284Sjmallett * Starts a tag switch to the provided tag value and tag type. Completion for 1269210284Sjmallett * the tag switch must be checked for separately. 1270210284Sjmallett * This function does NOT update the 1271210284Sjmallett * work queue entry in dram to match tag value and type, so the application must 1272210284Sjmallett * keep track of these if they are important to the application. 1273210284Sjmallett * This tag switch command must not be used for switches to NULL, as the tag 1274210284Sjmallett * switch pending bit will be set by the switch request, but never cleared by the 1275210284Sjmallett * hardware. 1276210284Sjmallett * 1277210284Sjmallett * This function must be used for tag switches from NULL. 1278210284Sjmallett * 1279210284Sjmallett * This function waits for any pending tag switches to complete 1280210284Sjmallett * before requesting the tag switch. 1281210284Sjmallett * 1282210284Sjmallett * @param wqp pointer to work queue entry to submit. 
This entry is updated to match the other parameters 1283210284Sjmallett * @param tag tag value to be assigned to work queue entry 1284210284Sjmallett * @param tag_type type of tag 1285210284Sjmallett * @param group group value for the work queue entry. 1286210284Sjmallett */ 1287210284Sjmallettstatic inline void cvmx_pow_tag_sw_full(cvmx_wqe_t *wqp, uint32_t tag, cvmx_pow_tag_type_t tag_type, uint64_t group) 1288210284Sjmallett{ 1289210284Sjmallett if (CVMX_ENABLE_POW_CHECKS) 1290210284Sjmallett __cvmx_pow_warn_if_pending_switch(__FUNCTION__); 1291210284Sjmallett 1292210284Sjmallett /* Ensure that there is not a pending tag switch, as a tag switch cannot be started 1293210284Sjmallett ** if a previous switch is still pending. */ 1294210284Sjmallett cvmx_pow_tag_sw_wait(); 1295210284Sjmallett cvmx_pow_tag_sw_full_nocheck(wqp, tag, tag_type, group); 1296210284Sjmallett} 1297210284Sjmallett 1298210284Sjmallett 1299210284Sjmallett/** 1300210284Sjmallett * Switch to a NULL tag, which ends any ordering or 1301210284Sjmallett * synchronization provided by the POW for the current 1302210284Sjmallett * work queue entry. This operation completes immediatly, 1303210284Sjmallett * so completetion should not be waited for. 1304210284Sjmallett * This function does NOT wait for previous tag switches to complete, 1305210284Sjmallett * so the caller must ensure that any previous tag switches have completed. 
1306210284Sjmallett */ 1307210284Sjmallettstatic inline void cvmx_pow_tag_sw_null_nocheck(void) 1308210284Sjmallett{ 1309210284Sjmallett cvmx_addr_t ptr; 1310210284Sjmallett cvmx_pow_tag_req_t tag_req; 1311210284Sjmallett 1312210284Sjmallett if (CVMX_ENABLE_POW_CHECKS) 1313210284Sjmallett { 1314210284Sjmallett cvmx_pow_tag_req_t current_tag; 1315210284Sjmallett __cvmx_pow_warn_if_pending_switch(__FUNCTION__); 1316210284Sjmallett current_tag = cvmx_pow_get_current_tag(); 1317210284Sjmallett cvmx_warn_if(current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL, "%s called with NULL_NULL tag\n", __FUNCTION__); 1318210284Sjmallett cvmx_warn_if(current_tag.s.type == CVMX_POW_TAG_TYPE_NULL, "%s called when we already have a NULL tag\n", __FUNCTION__); 1319210284Sjmallett } 1320210284Sjmallett 1321210284Sjmallett tag_req.u64 = 0; 1322210284Sjmallett tag_req.s.op = CVMX_POW_TAG_OP_SWTAG; 1323210284Sjmallett tag_req.s.type = CVMX_POW_TAG_TYPE_NULL; 1324210284Sjmallett 1325210284Sjmallett 1326210284Sjmallett ptr.u64 = 0; 1327210284Sjmallett ptr.sio.mem_region = CVMX_IO_SEG; 1328210284Sjmallett ptr.sio.is_io = 1; 1329210284Sjmallett ptr.sio.did = CVMX_OCT_DID_TAG_TAG1; 1330210284Sjmallett 1331210284Sjmallett 1332210284Sjmallett cvmx_write_io(ptr.u64, tag_req.u64); 1333210284Sjmallett 1334210284Sjmallett /* switch to NULL completes immediately */ 1335210284Sjmallett} 1336210284Sjmallett 1337210284Sjmallett/** 1338210284Sjmallett * Switch to a NULL tag, which ends any ordering or 1339210284Sjmallett * synchronization provided by the POW for the current 1340210284Sjmallett * work queue entry. This operation completes immediatly, 1341210284Sjmallett * so completetion should not be waited for. 1342210284Sjmallett * This function waits for any pending tag switches to complete 1343210284Sjmallett * before requesting the switch to NULL. 
1344210284Sjmallett */ 1345210284Sjmallettstatic inline void cvmx_pow_tag_sw_null(void) 1346210284Sjmallett{ 1347210284Sjmallett if (CVMX_ENABLE_POW_CHECKS) 1348210284Sjmallett __cvmx_pow_warn_if_pending_switch(__FUNCTION__); 1349210284Sjmallett 1350210284Sjmallett /* Ensure that there is not a pending tag switch, as a tag switch cannot be started 1351210284Sjmallett ** if a previous switch is still pending. */ 1352210284Sjmallett cvmx_pow_tag_sw_wait(); 1353210284Sjmallett cvmx_pow_tag_sw_null_nocheck(); 1354210284Sjmallett 1355210284Sjmallett /* switch to NULL completes immediately */ 1356210284Sjmallett} 1357210284Sjmallett 1358210284Sjmallett 1359210284Sjmallett 1360210284Sjmallett/** 1361210284Sjmallett * Submits work to an input queue. This function updates the work queue entry in DRAM to match 1362210284Sjmallett * the arguments given. 1363210284Sjmallett * Note that the tag provided is for the work queue entry submitted, and is unrelated to the tag that 1364210284Sjmallett * the core currently holds. 1365210284Sjmallett * 1366210284Sjmallett * @param wqp pointer to work queue entry to submit. This entry is updated to match the other parameters 1367210284Sjmallett * @param tag tag value to be assigned to work queue entry 1368210284Sjmallett * @param tag_type type of tag 1369210284Sjmallett * @param qos Input queue to add to. 1370210284Sjmallett * @param grp group value for the work queue entry. 
1371210284Sjmallett */ 1372210284Sjmallettstatic inline void cvmx_pow_work_submit(cvmx_wqe_t *wqp, uint32_t tag, cvmx_pow_tag_type_t tag_type, uint64_t qos, uint64_t grp) 1373210284Sjmallett{ 1374210284Sjmallett cvmx_addr_t ptr; 1375210284Sjmallett cvmx_pow_tag_req_t tag_req; 1376210284Sjmallett 1377210284Sjmallett wqp->qos = qos; 1378210284Sjmallett wqp->tag = tag; 1379210284Sjmallett wqp->tag_type = tag_type; 1380210284Sjmallett wqp->grp = grp; 1381210284Sjmallett 1382210284Sjmallett tag_req.u64 = 0; 1383210284Sjmallett tag_req.s.op = CVMX_POW_TAG_OP_ADDWQ; 1384210284Sjmallett tag_req.s.type = tag_type; 1385210284Sjmallett tag_req.s.tag = tag; 1386210284Sjmallett tag_req.s.qos = qos; 1387210284Sjmallett tag_req.s.grp = grp; 1388210284Sjmallett 1389210284Sjmallett 1390210284Sjmallett ptr.u64 = 0; 1391210284Sjmallett ptr.sio.mem_region = CVMX_IO_SEG; 1392210284Sjmallett ptr.sio.is_io = 1; 1393210284Sjmallett ptr.sio.did = CVMX_OCT_DID_TAG_TAG1; 1394210284Sjmallett ptr.sio.offset = cvmx_ptr_to_phys(wqp); 1395210284Sjmallett 1396210284Sjmallett /* SYNC write to memory before the work submit. This is necessary 1397210284Sjmallett ** as POW may read values from DRAM at this time */ 1398210284Sjmallett CVMX_SYNCWS; 1399210284Sjmallett cvmx_write_io(ptr.u64, tag_req.u64); 1400210284Sjmallett} 1401210284Sjmallett 1402210284Sjmallett 1403210284Sjmallett 1404210284Sjmallett/** 1405210284Sjmallett * This function sets the group mask for a core. The group mask 1406210284Sjmallett * indicates which groups each core will accept work from. There are 1407210284Sjmallett * 16 groups. 1408210284Sjmallett * 1409210284Sjmallett * @param core_num core to apply mask to 1410210284Sjmallett * @param mask Group mask. There are 16 groups, so only bits 0-15 are valid, 1411210284Sjmallett * representing groups 0-15. 1412210284Sjmallett * Each 1 bit in the mask enables the core to accept work from 1413210284Sjmallett * the corresponding group. 
1414210284Sjmallett */ 1415210284Sjmallettstatic inline void cvmx_pow_set_group_mask(uint64_t core_num, uint64_t mask) 1416210284Sjmallett{ 1417210284Sjmallett cvmx_pow_pp_grp_mskx_t grp_msk; 1418210284Sjmallett 1419210284Sjmallett grp_msk.u64 = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(core_num)); 1420210284Sjmallett grp_msk.s.grp_msk = mask; 1421210284Sjmallett cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(core_num), grp_msk.u64); 1422210284Sjmallett} 1423210284Sjmallett 1424210284Sjmallett/** 1425210284Sjmallett * This function sets POW static priorities for a core. Each input queue has 1426210284Sjmallett * an associated priority value. 1427210284Sjmallett * 1428210284Sjmallett * @param core_num core to apply priorities to 1429210284Sjmallett * @param priority Vector of 8 priorities, one per POW Input Queue (0-7). 1430210284Sjmallett * Highest priority is 0 and lowest is 7. A priority value 1431210284Sjmallett * of 0xF instructs POW to skip the Input Queue when 1432210284Sjmallett * scheduling to this specific core. 1433210284Sjmallett * NOTE: priorities should not have gaps in values, meaning 1434210284Sjmallett * {0,1,1,1,1,1,1,1} is a valid configuration while 1435210284Sjmallett * {0,2,2,2,2,2,2,2} is not. 
1436210284Sjmallett */ 1437210284Sjmallettstatic inline void cvmx_pow_set_priority(uint64_t core_num, const uint8_t priority[]) 1438210284Sjmallett{ 1439210284Sjmallett /* POW priorities are supported on CN5xxx and later */ 1440210284Sjmallett if (!OCTEON_IS_MODEL(OCTEON_CN3XXX)) 1441210284Sjmallett { 1442210284Sjmallett cvmx_pow_pp_grp_mskx_t grp_msk; 1443210284Sjmallett 1444210284Sjmallett grp_msk.u64 = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(core_num)); 1445210284Sjmallett grp_msk.s.qos0_pri = priority[0]; 1446210284Sjmallett grp_msk.s.qos1_pri = priority[1]; 1447210284Sjmallett grp_msk.s.qos2_pri = priority[2]; 1448210284Sjmallett grp_msk.s.qos3_pri = priority[3]; 1449210284Sjmallett grp_msk.s.qos4_pri = priority[4]; 1450210284Sjmallett grp_msk.s.qos5_pri = priority[5]; 1451210284Sjmallett grp_msk.s.qos6_pri = priority[6]; 1452210284Sjmallett grp_msk.s.qos7_pri = priority[7]; 1453210284Sjmallett 1454210284Sjmallett /* Detect gaps between priorities and flag error */ 1455210284Sjmallett { 1456210284Sjmallett int i; 1457210284Sjmallett uint32_t prio_mask = 0; 1458210284Sjmallett 1459210284Sjmallett for(i=0; i<8; i++) 1460210284Sjmallett if (priority[i] != 0xF) 1461210284Sjmallett prio_mask |= 1<<priority[i]; 1462210284Sjmallett 1463210284Sjmallett if ( prio_mask ^ ((1<<cvmx_pop(prio_mask)) - 1)) 1464210284Sjmallett { 1465210284Sjmallett cvmx_dprintf("ERROR: POW static priorities should be contiguous (0x%llx)\n", (unsigned long long)prio_mask); 1466210284Sjmallett return; 1467210284Sjmallett } 1468210284Sjmallett } 1469210284Sjmallett 1470210284Sjmallett cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(core_num), grp_msk.u64); 1471210284Sjmallett } 1472210284Sjmallett} 1473210284Sjmallett 1474210284Sjmallett/** 1475210284Sjmallett * Performs a tag switch and then an immediate deschedule. This completes 1476210284Sjmallett * immediatly, so completion must not be waited for. This function does NOT 1477210284Sjmallett * update the wqe in DRAM to match arguments. 
1478210284Sjmallett * 1479210284Sjmallett * This function does NOT wait for any prior tag switches to complete, so the 1480210284Sjmallett * calling code must do this. 1481210284Sjmallett * 1482210284Sjmallett * Note the following CAVEAT of the Octeon HW behavior when 1483210284Sjmallett * re-scheduling DE-SCHEDULEd items whose (next) state is 1484210284Sjmallett * ORDERED: 1485210284Sjmallett * - If there are no switches pending at the time that the 1486210284Sjmallett * HW executes the de-schedule, the HW will only re-schedule 1487210284Sjmallett * the head of the FIFO associated with the given tag. This 1488210284Sjmallett * means that in many respects, the HW treats this ORDERED 1489210284Sjmallett * tag as an ATOMIC tag. Note that in the SWTAG_DESCH 1490210284Sjmallett * case (to an ORDERED tag), the HW will do the switch 1491210284Sjmallett * before the deschedule whenever it is possible to do 1492210284Sjmallett * the switch immediately, so it may often look like 1493210284Sjmallett * this case. 1494210284Sjmallett * - If there is a pending switch to ORDERED at the time 1495210284Sjmallett * the HW executes the de-schedule, the HW will perform 1496210284Sjmallett * the switch at the time it re-schedules, and will be 1497210284Sjmallett * able to reschedule any/all of the entries with the 1498210284Sjmallett * same tag. 1499210284Sjmallett * Due to this behavior, the RECOMMENDATION to software is 1500210284Sjmallett * that they have a (next) state of ATOMIC when they 1501210284Sjmallett * DE-SCHEDULE. If an ORDERED tag is what was really desired, 1502210284Sjmallett * SW can choose to immediately switch to an ORDERED tag 1503210284Sjmallett * after the work (that has an ATOMIC tag) is re-scheduled. 1504210284Sjmallett * Note that since there are never any tag switches pending 1505210284Sjmallett * when the HW re-schedules, this switch can be IMMEDIATE upon 1506210284Sjmallett * the reception of the pointer during the re-schedule. 
1507210284Sjmallett * 1508210284Sjmallett * @param tag New tag value 1509210284Sjmallett * @param tag_type New tag type 1510210284Sjmallett * @param group New group value 1511210284Sjmallett * @param no_sched Control whether this work queue entry will be rescheduled. 1512210284Sjmallett * - 1 : don't schedule this work 1513210284Sjmallett * - 0 : allow this work to be scheduled. 1514210284Sjmallett */ 1515210284Sjmallettstatic inline void cvmx_pow_tag_sw_desched_nocheck(uint32_t tag, cvmx_pow_tag_type_t tag_type, uint64_t group, uint64_t no_sched) 1516210284Sjmallett{ 1517210284Sjmallett cvmx_addr_t ptr; 1518210284Sjmallett cvmx_pow_tag_req_t tag_req; 1519210284Sjmallett 1520210284Sjmallett if (CVMX_ENABLE_POW_CHECKS) 1521210284Sjmallett { 1522210284Sjmallett cvmx_pow_tag_req_t current_tag; 1523210284Sjmallett __cvmx_pow_warn_if_pending_switch(__FUNCTION__); 1524210284Sjmallett current_tag = cvmx_pow_get_current_tag(); 1525210284Sjmallett cvmx_warn_if(current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL, "%s called with NULL_NULL tag\n", __FUNCTION__); 1526210284Sjmallett cvmx_warn_if(current_tag.s.type == CVMX_POW_TAG_TYPE_NULL, "%s called with NULL tag. 
Deschedule not allowed from NULL state\n", __FUNCTION__); 1527210284Sjmallett cvmx_warn_if((current_tag.s.type != CVMX_POW_TAG_TYPE_ATOMIC) && (tag_type != CVMX_POW_TAG_TYPE_ATOMIC), "%s called where neither the before or after tag is ATOMIC\n", __FUNCTION__); 1528210284Sjmallett } 1529210284Sjmallett 1530210284Sjmallett tag_req.u64 = 0; 1531210284Sjmallett tag_req.s.op = CVMX_POW_TAG_OP_SWTAG_DESCH; 1532210284Sjmallett tag_req.s.tag = tag; 1533210284Sjmallett tag_req.s.type = tag_type; 1534210284Sjmallett tag_req.s.grp = group; 1535210284Sjmallett tag_req.s.no_sched = no_sched; 1536210284Sjmallett 1537210284Sjmallett ptr.u64 = 0; 1538210284Sjmallett ptr.sio.mem_region = CVMX_IO_SEG; 1539210284Sjmallett ptr.sio.is_io = 1; 1540210284Sjmallett ptr.sio.did = CVMX_OCT_DID_TAG_TAG3; 1541210284Sjmallett 1542215990Sjmallett cvmx_write_io(ptr.u64, tag_req.u64); /* since TAG3 is used, this store will clear the local pending switch bit */ 1543210284Sjmallett} 1544210284Sjmallett/** 1545210284Sjmallett * Performs a tag switch and then an immediate deschedule. This completes 1546210284Sjmallett * immediatly, so completion must not be waited for. This function does NOT 1547210284Sjmallett * update the wqe in DRAM to match arguments. 1548210284Sjmallett * 1549210284Sjmallett * This function waits for any prior tag switches to complete, so the 1550210284Sjmallett * calling code may call this function with a pending tag switch. 1551210284Sjmallett * 1552210284Sjmallett * Note the following CAVEAT of the Octeon HW behavior when 1553210284Sjmallett * re-scheduling DE-SCHEDULEd items whose (next) state is 1554210284Sjmallett * ORDERED: 1555210284Sjmallett * - If there are no switches pending at the time that the 1556210284Sjmallett * HW executes the de-schedule, the HW will only re-schedule 1557210284Sjmallett * the head of the FIFO associated with the given tag. 
This 1558210284Sjmallett * means that in many respects, the HW treats this ORDERED 1559210284Sjmallett * tag as an ATOMIC tag. Note that in the SWTAG_DESCH 1560210284Sjmallett * case (to an ORDERED tag), the HW will do the switch 1561210284Sjmallett * before the deschedule whenever it is possible to do 1562210284Sjmallett * the switch immediately, so it may often look like 1563210284Sjmallett * this case. 1564210284Sjmallett * - If there is a pending switch to ORDERED at the time 1565210284Sjmallett * the HW executes the de-schedule, the HW will perform 1566210284Sjmallett * the switch at the time it re-schedules, and will be 1567210284Sjmallett * able to reschedule any/all of the entries with the 1568210284Sjmallett * same tag. 1569210284Sjmallett * Due to this behavior, the RECOMMENDATION to software is 1570210284Sjmallett * that they have a (next) state of ATOMIC when they 1571210284Sjmallett * DE-SCHEDULE. If an ORDERED tag is what was really desired, 1572210284Sjmallett * SW can choose to immediately switch to an ORDERED tag 1573210284Sjmallett * after the work (that has an ATOMIC tag) is re-scheduled. 1574210284Sjmallett * Note that since there are never any tag switches pending 1575210284Sjmallett * when the HW re-schedules, this switch can be IMMEDIATE upon 1576210284Sjmallett * the reception of the pointer during the re-schedule. 1577210284Sjmallett * 1578210284Sjmallett * @param tag New tag value 1579210284Sjmallett * @param tag_type New tag type 1580210284Sjmallett * @param group New group value 1581210284Sjmallett * @param no_sched Control whether this work queue entry will be rescheduled. 1582210284Sjmallett * - 1 : don't schedule this work 1583210284Sjmallett * - 0 : allow this work to be scheduled. 
1584210284Sjmallett */ 1585210284Sjmallettstatic inline void cvmx_pow_tag_sw_desched(uint32_t tag, cvmx_pow_tag_type_t tag_type, uint64_t group, uint64_t no_sched) 1586210284Sjmallett{ 1587210284Sjmallett if (CVMX_ENABLE_POW_CHECKS) 1588210284Sjmallett __cvmx_pow_warn_if_pending_switch(__FUNCTION__); 1589210284Sjmallett 1590210284Sjmallett /* Need to make sure any writes to the work queue entry are complete */ 1591210284Sjmallett CVMX_SYNCWS; 1592210284Sjmallett /* Ensure that there is not a pending tag switch, as a tag switch cannot be started 1593210284Sjmallett ** if a previous switch is still pending. */ 1594210284Sjmallett cvmx_pow_tag_sw_wait(); 1595210284Sjmallett cvmx_pow_tag_sw_desched_nocheck(tag, tag_type, group, no_sched); 1596210284Sjmallett} 1597210284Sjmallett 1598210284Sjmallett 1599210284Sjmallett 1600210284Sjmallett 1601210284Sjmallett 1602210284Sjmallett/** 1603210284Sjmallett * Descchedules the current work queue entry. 1604210284Sjmallett * 1605210284Sjmallett * @param no_sched no schedule flag value to be set on the work queue entry. If this is set 1606210284Sjmallett * the entry will not be rescheduled. 1607210284Sjmallett */ 1608210284Sjmallettstatic inline void cvmx_pow_desched(uint64_t no_sched) 1609210284Sjmallett{ 1610210284Sjmallett cvmx_addr_t ptr; 1611210284Sjmallett cvmx_pow_tag_req_t tag_req; 1612210284Sjmallett 1613210284Sjmallett if (CVMX_ENABLE_POW_CHECKS) 1614210284Sjmallett { 1615210284Sjmallett cvmx_pow_tag_req_t current_tag; 1616210284Sjmallett __cvmx_pow_warn_if_pending_switch(__FUNCTION__); 1617210284Sjmallett current_tag = cvmx_pow_get_current_tag(); 1618210284Sjmallett cvmx_warn_if(current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL, "%s called with NULL_NULL tag\n", __FUNCTION__); 1619210284Sjmallett cvmx_warn_if(current_tag.s.type == CVMX_POW_TAG_TYPE_NULL, "%s called with NULL tag. 
Deschedule not expected from NULL state\n", __FUNCTION__); 1620210284Sjmallett } 1621210284Sjmallett 1622210284Sjmallett /* Need to make sure any writes to the work queue entry are complete */ 1623210284Sjmallett CVMX_SYNCWS; 1624210284Sjmallett 1625210284Sjmallett tag_req.u64 = 0; 1626210284Sjmallett tag_req.s.op = CVMX_POW_TAG_OP_DESCH; 1627210284Sjmallett tag_req.s.no_sched = no_sched; 1628210284Sjmallett 1629210284Sjmallett ptr.u64 = 0; 1630210284Sjmallett ptr.sio.mem_region = CVMX_IO_SEG; 1631210284Sjmallett ptr.sio.is_io = 1; 1632210284Sjmallett ptr.sio.did = CVMX_OCT_DID_TAG_TAG3; 1633210284Sjmallett 1634215990Sjmallett cvmx_write_io(ptr.u64, tag_req.u64); /* since TAG3 is used, this store will clear the local pending switch bit */ 1635210284Sjmallett} 1636210284Sjmallett 1637210284Sjmallett 1638210284Sjmallett 1639210284Sjmallett 1640210284Sjmallett 1641210284Sjmallett 1642210284Sjmallett 1643210284Sjmallett/*********************************************************************************************** 1644210284Sjmallett** Define usage of bits within the 32 bit tag values. 1645210284Sjmallett***********************************************************************************************/ 1646210284Sjmallett 1647210284Sjmallett/* 1648210284Sjmallett * Number of bits of the tag used by software. The SW bits 1649210284Sjmallett * are always a contiguous block of the high starting at bit 31. 1650210284Sjmallett * The hardware bits are always the low bits. By default, the top 8 bits 1651210284Sjmallett * of the tag are reserved for software, and the low 24 are set by the IPD unit. 1652210284Sjmallett */ 1653210284Sjmallett#define CVMX_TAG_SW_BITS (8) 1654210284Sjmallett#define CVMX_TAG_SW_SHIFT (32 - CVMX_TAG_SW_BITS) 1655210284Sjmallett 1656210284Sjmallett/* Below is the list of values for the top 8 bits of the tag. 
*/ 1657210284Sjmallett#define CVMX_TAG_SW_BITS_INTERNAL 0x1 /* Tag values with top byte of this value are reserved for internal executive uses */ 1658210284Sjmallett/* The executive divides the remaining 24 bits as follows: 1659210284Sjmallett** * the upper 8 bits (bits 23 - 16 of the tag) define a subgroup 1660210284Sjmallett** * the lower 16 bits (bits 15 - 0 of the tag) define are the value with the subgroup 1661210284Sjmallett** Note that this section describes the format of tags generated by software - refer to the 1662210284Sjmallett** hardware documentation for a description of the tags values generated by the packet input 1663210284Sjmallett** hardware. 1664210284Sjmallett** Subgroups are defined here */ 1665210284Sjmallett#define CVMX_TAG_SUBGROUP_MASK 0xFFFF /* Mask for the value portion of the tag */ 1666210284Sjmallett#define CVMX_TAG_SUBGROUP_SHIFT 16 1667210284Sjmallett#define CVMX_TAG_SUBGROUP_PKO 0x1 1668210284Sjmallett 1669210284Sjmallett 1670210284Sjmallett/* End of executive tag subgroup definitions */ 1671210284Sjmallett 1672210284Sjmallett/* The remaining values software bit values 0x2 - 0xff are available for application use */ 1673210284Sjmallett 1674210284Sjmallett 1675210284Sjmallett 1676210284Sjmallett/** 1677210284Sjmallett * This function creates a 32 bit tag value from the two values provided. 1678210284Sjmallett * 1679210284Sjmallett * @param sw_bits The upper bits (number depends on configuration) are set to this value. The remainder of 1680210284Sjmallett * bits are set by the hw_bits parameter. 1681210284Sjmallett * @param hw_bits The lower bits (number depends on configuration) are set to this value. The remainder of 1682210284Sjmallett * bits are set by the sw_bits parameter. 1683210284Sjmallett * 1684210284Sjmallett * @return 32 bit value of the combined hw and sw bits. 
1685210284Sjmallett */ 1686210284Sjmallettstatic inline uint32_t cvmx_pow_tag_compose(uint64_t sw_bits, uint64_t hw_bits) 1687210284Sjmallett{ 1688210284Sjmallett return((((sw_bits & cvmx_build_mask(CVMX_TAG_SW_BITS)) << CVMX_TAG_SW_SHIFT) | (hw_bits & cvmx_build_mask(32 - CVMX_TAG_SW_BITS)))); 1689210284Sjmallett} 1690210284Sjmallett/** 1691210284Sjmallett * Extracts the bits allocated for software use from the tag 1692210284Sjmallett * 1693210284Sjmallett * @param tag 32 bit tag value 1694210284Sjmallett * 1695210284Sjmallett * @return N bit software tag value, where N is configurable with the CVMX_TAG_SW_BITS define 1696210284Sjmallett */ 1697210284Sjmallettstatic inline uint32_t cvmx_pow_tag_get_sw_bits(uint64_t tag) 1698210284Sjmallett{ 1699210284Sjmallett return((tag >> (32 - CVMX_TAG_SW_BITS)) & cvmx_build_mask(CVMX_TAG_SW_BITS)); 1700210284Sjmallett} 1701210284Sjmallett/** 1702210284Sjmallett * 1703210284Sjmallett * Extracts the bits allocated for hardware use from the tag 1704210284Sjmallett * 1705210284Sjmallett * @param tag 32 bit tag value 1706210284Sjmallett * 1707210284Sjmallett * @return (32 - N) bit software tag value, where N is configurable with the CVMX_TAG_SW_BITS define 1708210284Sjmallett */ 1709210284Sjmallettstatic inline uint32_t cvmx_pow_tag_get_hw_bits(uint64_t tag) 1710210284Sjmallett{ 1711210284Sjmallett return(tag & cvmx_build_mask(32 - CVMX_TAG_SW_BITS)); 1712210284Sjmallett} 1713210284Sjmallett 1714210284Sjmallett/** 1715210284Sjmallett * Store the current POW internal state into the supplied 1716210284Sjmallett * buffer. It is recommended that you pass a buffer of at least 1717210284Sjmallett * 128KB. The format of the capture may change based on SDK 1718210284Sjmallett * version and Octeon chip. 
1719210284Sjmallett * 1720210284Sjmallett * @param buffer Buffer to store capture into 1721210284Sjmallett * @param buffer_size 1722210284Sjmallett * The size of the supplied buffer 1723210284Sjmallett * 1724210284Sjmallett * @return Zero on sucess, negative on failure 1725210284Sjmallett */ 1726210284Sjmallettextern int cvmx_pow_capture(void *buffer, int buffer_size); 1727210284Sjmallett 1728210284Sjmallett/** 1729210284Sjmallett * Dump a POW capture to the console in a human readable format. 1730210284Sjmallett * 1731210284Sjmallett * @param buffer POW capture from cvmx_pow_capture() 1732210284Sjmallett * @param buffer_size 1733210284Sjmallett * Size of the buffer 1734210284Sjmallett */ 1735210284Sjmallettextern void cvmx_pow_display(void *buffer, int buffer_size); 1736210284Sjmallett 1737210284Sjmallett/** 1738210284Sjmallett * Return the number of POW entries supported by this chip 1739210284Sjmallett * 1740210284Sjmallett * @return Number of POW entries 1741210284Sjmallett */ 1742210284Sjmallettextern int cvmx_pow_get_num_entries(void); 1743210284Sjmallett 1744210284Sjmallett 1745210284Sjmallett#ifdef __cplusplus 1746210284Sjmallett} 1747210284Sjmallett#endif 1748210284Sjmallett 1749215990Sjmallett#endif /* __CVMX_POW_H__ */ 1750