/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SCSI_SCSI_CMND_H
#define _SCSI_SCSI_CMND_H
#include <linux/dma-mapping.h>
#include <linux/blkdev.h>
#include <linux/t10-pi.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/scatterlist.h>
#include <scsi/scsi_device.h>
struct Scsi_Host;
/*
* MAX_COMMAND_SIZE is:
* the longest fixed-length SCSI CDB as per the SCSI standard.
* Fixed-length means commands whose size can be determined from the
* opcode alone, because the CDB does not carry a length specifier (unlike
* the VARIABLE_LENGTH_CMD (0x7f) command). This is not exactly true:
* the SCSI standard also defines extended commands and vendor-specific
* commands that can be bigger than 16 bytes. The kernel supports these
* using the same infrastructure used for VARLEN CDBs. So in effect,
* MAX_COMMAND_SIZE is the largest command size scsi-ml supports without
* a cmd_len being specified by the ULD.
*/
#define MAX_COMMAND_SIZE 16
struct scsi_data_buffer {
struct sg_table table;
unsigned length;
};
/* embedded in scsi_cmnd */
struct scsi_pointer {
char *ptr; /* data pointer */
int this_residual; /* left in this buffer */
struct scatterlist *buffer; /* which buffer */
int buffers_residual; /* how many buffers left */
dma_addr_t dma_handle;
volatile int Status;
volatile int Message;
volatile int have_data_in;
volatile int sent_command;
volatile int phase;
};
/* for scmd->flags */
#define SCMD_TAGGED (1 << 0)
#define SCMD_INITIALIZED (1 << 1)
#define SCMD_LAST (1 << 2)
/*
* libata uses SCSI EH to fetch sense data for successful commands.
* SCSI EH should not overwrite scmd->result when SCMD_FORCE_EH_SUCCESS is set.
*/
#define SCMD_FORCE_EH_SUCCESS (1 << 3)
#define SCMD_FAIL_IF_RECOVERING (1 << 4)
/* flags preserved across unprep / reprep */
#define SCMD_PRESERVED_FLAGS (SCMD_INITIALIZED | SCMD_FAIL_IF_RECOVERING)
/* for scmd->state */
#define SCMD_STATE_COMPLETE 0
#define SCMD_STATE_INFLIGHT 1
enum scsi_cmnd_submitter {
SUBMITTED_BY_BLOCK_LAYER = 0,
SUBMITTED_BY_SCSI_ERROR_HANDLER = 1,
SUBMITTED_BY_SCSI_RESET_IOCTL = 2,
} __packed;
struct scsi_cmnd {
struct scsi_device *device;
struct list_head eh_entry; /* entry for the host eh_abort_list/eh_cmd_q */
struct delayed_work abort_work;
struct rcu_head rcu;
int eh_eflags; /* Used by error handler */
int budget_token;
/*
* This is set to jiffies as it was when the command was first
* allocated. It is used to time how long the command has
* been outstanding
*/
unsigned long jiffies_at_alloc;
int retries;
int allowed;
unsigned char prot_op;
unsigned char prot_type;
unsigned char prot_flags;
enum scsi_cmnd_submitter submitter;
unsigned short cmd_len;
enum dma_data_direction sc_data_direction;
unsigned char cmnd[32]; /* SCSI CDB */
/* These elements define the operation we ultimately want to perform */
struct scsi_data_buffer sdb;
struct scsi_data_buffer *prot_sdb;
unsigned underflow; /* Return error if less than
this amount is transferred */
unsigned transfersize; /* How much we are guaranteed to
transfer with each SCSI transfer
(i.e., between disconnects /
reconnects); probably == sector
size */
unsigned resid_len; /* residual count */
unsigned sense_len;
unsigned char *sense_buffer;
/* obtained by REQUEST SENSE when
* CHECK CONDITION is received on original
* command (auto-sense). Length must be
* SCSI_SENSE_BUFFERSIZE bytes. */
int flags; /* Command flags */
unsigned long state; /* Command completion state */
unsigned int extra_len; /* length of alignment and padding */
/*
* The fields below can be modified by the LLD but the fields above
* must not be modified.
*/
unsigned char *host_scribble; /* The host adapter is allowed to
* call scsi_malloc and get some memory
* and hang it here. The host adapter
* is also expected to call scsi_free
* to release this memory. (The memory
* obtained by scsi_malloc is guaranteed
* to be at an address < 16Mb). */
int result; /* Status code from lower level driver */
};
/* Variant of blk_mq_rq_from_pdu() that verifies the type of its argument. */
static inline struct request *scsi_cmd_to_rq(struct scsi_cmnd *scmd)
{
return blk_mq_rq_from_pdu(scmd);
}
/*
* Return the driver private allocation behind the command.
* Only works if cmd_size is set in the host template.
*/
static inline void *scsi_cmd_priv(struct scsi_cmnd *cmd)
{
return cmd + 1;
}
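/*
 * Usage sketch (not part of this header): an LLD that sets
 * .cmd_size = sizeof(struct example_cmd_priv) in its scsi_host_template
 * can recover its per-command data with scsi_cmd_priv(). The names
 * "example_cmd_priv" and "example_queuecommand" are hypothetical.
 */
struct example_cmd_priv {
	int hw_tag;
};

static inline int example_queuecommand(struct Scsi_Host *host,
				       struct scsi_cmnd *cmd)
{
	struct example_cmd_priv *priv = scsi_cmd_priv(cmd);

	/* the block layer tag of the request backing this command */
	priv->hw_tag = scsi_cmd_to_rq(cmd)->tag;
	return 0;
}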
void scsi_done(struct scsi_cmnd *cmd);
void scsi_done_direct(struct scsi_cmnd *cmd);
extern void scsi_finish_command(struct scsi_cmnd *cmd);
extern void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count,
size_t *offset, size_t *len);
extern void scsi_kunmap_atomic_sg(void *virt);
blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd);
void scsi_free_sgtables(struct scsi_cmnd *cmd);
#ifdef CONFIG_SCSI_DMA
extern int scsi_dma_map(struct scsi_cmnd *cmd);
extern void scsi_dma_unmap(struct scsi_cmnd *cmd);
#else /* !CONFIG_SCSI_DMA */
static inline int scsi_dma_map(struct scsi_cmnd *cmd) { return -ENOSYS; }
static inline void scsi_dma_unmap(struct scsi_cmnd *cmd) { }
#endif /* !CONFIG_SCSI_DMA */
static inline unsigned scsi_sg_count(struct scsi_cmnd *cmd)
{
return cmd->sdb.table.nents;
}
static inline struct scatterlist *scsi_sglist(struct scsi_cmnd *cmd)
{
return cmd->sdb.table.sgl;
}
static inline unsigned scsi_bufflen(struct scsi_cmnd *cmd)
{
return cmd->sdb.length;
}
static inline void scsi_set_resid(struct scsi_cmnd *cmd, unsigned int resid)
{
cmd->resid_len = resid;
}
static inline unsigned int scsi_get_resid(struct scsi_cmnd *cmd)
{
return cmd->resid_len;
}
#define scsi_for_each_sg(cmd, sg, nseg, __i) \
for_each_sg(scsi_sglist(cmd), sg, nseg, __i)
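/*
 * DMA mapping sketch (hypothetical LLD helper): map the command's
 * scatterlist, program each mapped segment into the HBA, then unmap
 * on completion.
 */
static inline int example_map_data(struct scsi_cmnd *cmd)
{
	struct scatterlist *sg;
	int i, count;

	count = scsi_dma_map(cmd);	/* mapped nents, 0 if no data, < 0 on error */
	if (count <= 0)
		return count;
	scsi_for_each_sg(cmd, sg, count, i) {
		/* hand sg_dma_address(sg) / sg_dma_len(sg) to the hardware */
	}
	scsi_dma_unmap(cmd);
	return 0;
}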
static inline int scsi_sg_copy_from_buffer(struct scsi_cmnd *cmd,
const void *buf, int buflen)
{
return sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd),
buf, buflen);
}
static inline int scsi_sg_copy_to_buffer(struct scsi_cmnd *cmd,
void *buf, int buflen)
{
return sg_copy_to_buffer(scsi_sglist(cmd), scsi_sg_count(cmd),
buf, buflen);
}
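/*
 * Copy sketch: an emulated target might fill the command's data buffer
 * from a kernel buffer like this ("response" and "len" are hypothetical).
 */
static inline void example_fill_response(struct scsi_cmnd *cmd,
					 const void *response, int len)
{
	int copied = scsi_sg_copy_from_buffer(cmd, response, len);

	scsi_set_resid(cmd, scsi_bufflen(cmd) - copied);
}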
static inline sector_t scsi_get_sector(struct scsi_cmnd *scmd)
{
return blk_rq_pos(scsi_cmd_to_rq(scmd));
}
static inline sector_t scsi_get_lba(struct scsi_cmnd *scmd)
{
unsigned int shift = ilog2(scmd->device->sector_size) - SECTOR_SHIFT;
return blk_rq_pos(scsi_cmd_to_rq(scmd)) >> shift;
}
static inline unsigned int scsi_logical_block_count(struct scsi_cmnd *scmd)
{
unsigned int shift = ilog2(scmd->device->sector_size);
return blk_rq_bytes(scsi_cmd_to_rq(scmd)) >> shift;
}
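/*
 * Worked example: with a 4096-byte logical block size, ilog2(4096) = 12,
 * so scsi_get_lba() shifts the 512-byte block layer position right by
 * 12 - SECTOR_SHIFT = 3 (i.e. divides by 8) to get the logical block
 * address.
 */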
/*
* The operations below are hints that tell the controller driver how
* to handle I/Os with DIF or similar types of protection information.
*/
enum scsi_prot_operations {
/* Normal I/O */
SCSI_PROT_NORMAL = 0,
/* OS-HBA: Protected, HBA-Target: Unprotected */
SCSI_PROT_READ_INSERT,
SCSI_PROT_WRITE_STRIP,
/* OS-HBA: Unprotected, HBA-Target: Protected */
SCSI_PROT_READ_STRIP,
SCSI_PROT_WRITE_INSERT,
/* OS-HBA: Protected, HBA-Target: Protected */
SCSI_PROT_READ_PASS,
SCSI_PROT_WRITE_PASS,
};
static inline void scsi_set_prot_op(struct scsi_cmnd *scmd, unsigned char op)
{
scmd->prot_op = op;
}
static inline unsigned char scsi_get_prot_op(struct scsi_cmnd *scmd)
{
return scmd->prot_op;
}
enum scsi_prot_flags {
SCSI_PROT_TRANSFER_PI = 1 << 0,
SCSI_PROT_GUARD_CHECK = 1 << 1,
SCSI_PROT_REF_CHECK = 1 << 2,
SCSI_PROT_REF_INCREMENT = 1 << 3,
SCSI_PROT_IP_CHECKSUM = 1 << 4,
};
/*
* The controller usually does not know anything about the target it
* is communicating with. However, when DIX is enabled the controller
* must know the target type so it can verify the protection
* information passed along with the I/O.
*/
enum scsi_prot_target_type {
SCSI_PROT_DIF_TYPE0 = 0,
SCSI_PROT_DIF_TYPE1,
SCSI_PROT_DIF_TYPE2,
SCSI_PROT_DIF_TYPE3,
};
static inline void scsi_set_prot_type(struct scsi_cmnd *scmd, unsigned char type)
{
scmd->prot_type = type;
}
static inline unsigned char scsi_get_prot_type(struct scsi_cmnd *scmd)
{
return scmd->prot_type;
}
static inline u32 scsi_prot_ref_tag(struct scsi_cmnd *scmd)
{
struct request *rq = blk_mq_rq_from_pdu(scmd);
return t10_pi_ref_tag(rq);
}
static inline unsigned int scsi_prot_interval(struct scsi_cmnd *scmd)
{
return scmd->device->sector_size;
}
static inline unsigned scsi_prot_sg_count(struct scsi_cmnd *cmd)
{
return cmd->prot_sdb ? cmd->prot_sdb->table.nents : 0;
}
static inline struct scatterlist *scsi_prot_sglist(struct scsi_cmnd *cmd)
{
return cmd->prot_sdb ? cmd->prot_sdb->table.sgl : NULL;
}
static inline struct scsi_data_buffer *scsi_prot(struct scsi_cmnd *cmd)
{
return cmd->prot_sdb;
}
#define scsi_for_each_prot_sg(cmd, sg, nseg, __i) \
for_each_sg(scsi_prot_sglist(cmd), sg, nseg, __i)
static inline void set_status_byte(struct scsi_cmnd *cmd, char status)
{
cmd->result = (cmd->result & 0xffffff00) | status;
}
static inline u8 get_status_byte(struct scsi_cmnd *cmd)
{
return cmd->result & 0xff;
}
static inline void set_host_byte(struct scsi_cmnd *cmd, char status)
{
cmd->result = (cmd->result & 0xff00ffff) | (status << 16);
}
static inline u8 get_host_byte(struct scsi_cmnd *cmd)
{
return (cmd->result >> 16) & 0xff;
}
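/*
 * Completion sketch (hypothetical LLD helper): report the outcome in the
 * host byte and hand the command back to the midlayer.
 */
static inline void example_complete_cmd(struct scsi_cmnd *cmd, bool ok)
{
	set_host_byte(cmd, ok ? DID_OK : DID_ERROR);
	scsi_done(cmd);
}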
/**
* scsi_msg_to_host_byte() - translate message byte
* @cmd: the SCSI command
* @msg: the SCSI parallel message byte to translate
*
* Translate the SCSI parallel message byte to a matching
* host byte setting. A message of COMMAND_COMPLETE indicates
* a successful command execution; any other message indicates
* an error. As the messages themselves only have a meaning
* for the SCSI parallel protocol, this function translates
* them into a matching host byte value for SCSI EH.
*/
static inline void scsi_msg_to_host_byte(struct scsi_cmnd *cmd, u8 msg)
{
switch (msg) {
case COMMAND_COMPLETE:
break;
case ABORT_TASK_SET:
set_host_byte(cmd, DID_ABORT);
break;
case TARGET_RESET:
set_host_byte(cmd, DID_RESET);
break;
default:
set_host_byte(cmd, DID_ERROR);
break;
}
}
static inline unsigned scsi_transfer_length(struct scsi_cmnd *scmd)
{
unsigned int xfer_len = scmd->sdb.length;
unsigned int prot_interval = scsi_prot_interval(scmd);
if (scmd->prot_flags & SCSI_PROT_TRANSFER_PI)
xfer_len += (xfer_len >> ilog2(prot_interval)) * 8;
return xfer_len;
}
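/*
 * Worked example: a 4096-byte transfer at a 512-byte protection interval
 * with SCSI_PROT_TRANSFER_PI set yields a wire length of
 * 4096 + (4096 >> 9) * 8 = 4096 + 64 = 4160 bytes.
 */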
extern void scsi_build_sense(struct scsi_cmnd *scmd, int desc,
u8 key, u8 asc, u8 ascq);
struct request *scsi_alloc_request(struct request_queue *q, blk_opf_t opf,
blk_mq_req_flags_t flags);
#endif /* _SCSI_SCSI_CMND_H */
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2015, Intel Corporation.
*/
#ifndef __IA_CSS_HB_PARAM_H
#define __IA_CSS_HB_PARAM_H
#include "type_support.h"
#ifndef PIPE_GENERATION
#define __INLINE_HMEM__
#include "hmem.h"
#endif
#include "ia_css_bh_types.h"
/* AE (3A Support) */
struct sh_css_isp_bh_params {
/* coefficients to calculate Y */
s32 y_coef_r;
s32 y_coef_g;
s32 y_coef_b;
};
/* This should be hmem_data_t, but that breaks the pipe generator */
struct sh_css_isp_bh_hmem_params {
u32 bh[ISP_HIST_COMPONENTS][IA_CSS_HMEM_BH_UNIT_SIZE];
};
#endif /* __IA_CSS_HB_PARAM_H */
/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_SUBDEV_H__
#define __NVKM_SUBDEV_H__
#include <core/device.h>
enum nvkm_subdev_type {
#define NVKM_LAYOUT_ONCE(t,s,p,...) t,
#define NVKM_LAYOUT_INST NVKM_LAYOUT_ONCE
#include <core/layout.h>
#undef NVKM_LAYOUT_INST
#undef NVKM_LAYOUT_ONCE
NVKM_SUBDEV_NR
};
struct nvkm_subdev {
const struct nvkm_subdev_func *func;
struct nvkm_device *device;
enum nvkm_subdev_type type;
int inst;
char name[16];
u32 debug;
struct {
refcount_t refcount;
struct mutex mutex;
bool enabled;
} use;
struct nvkm_inth inth;
struct list_head head;
void **pself;
bool oneinit;
};
struct nvkm_subdev_func {
void *(*dtor)(struct nvkm_subdev *);
int (*preinit)(struct nvkm_subdev *);
int (*oneinit)(struct nvkm_subdev *);
int (*info)(struct nvkm_subdev *, u64 mthd, u64 *data);
int (*init)(struct nvkm_subdev *);
int (*fini)(struct nvkm_subdev *, bool suspend);
void (*intr)(struct nvkm_subdev *);
};
extern const char *nvkm_subdev_type[NVKM_SUBDEV_NR];
int nvkm_subdev_new_(const struct nvkm_subdev_func *, struct nvkm_device *, enum nvkm_subdev_type,
int inst, struct nvkm_subdev **);
void __nvkm_subdev_ctor(const struct nvkm_subdev_func *, struct nvkm_device *,
enum nvkm_subdev_type, int inst, struct nvkm_subdev *);
static inline void
nvkm_subdev_ctor(const struct nvkm_subdev_func *func, struct nvkm_device *device,
enum nvkm_subdev_type type, int inst, struct nvkm_subdev *subdev)
{
__nvkm_subdev_ctor(func, device, type, inst, subdev);
mutex_init(&subdev->use.mutex);
}
void nvkm_subdev_disable(struct nvkm_device *, enum nvkm_subdev_type, int inst);
void nvkm_subdev_del(struct nvkm_subdev **);
int nvkm_subdev_ref(struct nvkm_subdev *);
void nvkm_subdev_unref(struct nvkm_subdev *);
int nvkm_subdev_preinit(struct nvkm_subdev *);
int nvkm_subdev_oneinit(struct nvkm_subdev *);
int nvkm_subdev_init(struct nvkm_subdev *);
int nvkm_subdev_fini(struct nvkm_subdev *, bool suspend);
int nvkm_subdev_info(struct nvkm_subdev *, u64, u64 *);
void nvkm_subdev_intr(struct nvkm_subdev *);
/* subdev logging */
#define nvkm_printk_ok(s,u,l) \
((CONFIG_NOUVEAU_DEBUG >= (l)) && ((s)->debug >= (l) || ((u) && (u)->debug >= (l))))
#define nvkm_printk___(s,u,l,p,f,a...) do { \
if (nvkm_printk_ok((s), (u), (l))) { \
if ((u) && (u) != (s)) \
dev_##p((s)->device->dev, "%s(%s):"f, (s)->name, (u)->name, ##a); \
else \
dev_##p((s)->device->dev, "%s:"f, (s)->name, ##a); \
} \
} while (0)
#define nvkm_printk__(s,l,p,f,a...) nvkm_printk___((s), (s), (l), p, f, ##a)
#define nvkm_printk_(s,l,p,f,a...) nvkm_printk__((s), (l), p, " "f, ##a)
#define nvkm_printk(s,l,p,f,a...) nvkm_printk_((s), NV_DBG_##l, p, f, ##a)
#define nvkm_fatal(s,f,a...) nvkm_printk((s), FATAL, crit, f, ##a)
#define nvkm_error(s,f,a...) nvkm_printk((s), ERROR, err, f, ##a)
#define nvkm_warn(s,f,a...) nvkm_printk((s), WARN, notice, f, ##a)
#define nvkm_info(s,f,a...) nvkm_printk((s), INFO, info, f, ##a)
#define nvkm_debug(s,f,a...) nvkm_printk((s), DEBUG, info, f, ##a)
#define nvkm_trace(s,f,a...) nvkm_printk((s), TRACE, info, f, ##a)
#define nvkm_spam(s,f,a...) nvkm_printk((s), SPAM, dbg, f, ##a)
#define nvkm_error_ratelimited(s,f,a...) nvkm_printk((s), ERROR, err_ratelimited, f, ##a)
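/*
 * Usage sketch (hypothetical values): a message is emitted only when
 * nvkm_printk_ok() passes, i.e. both the compile-time CONFIG_NOUVEAU_DEBUG
 * level and the subdev's runtime "debug" level allow it:
 *
 *	nvkm_debug(subdev, "init completed in %lldus\n", duration);
 *	nvkm_error_ratelimited(subdev, "unhandled intr %08x\n", stat);
 */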
#endif
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __ASM_UACCESS_ASM_H__
#define __ASM_UACCESS_ASM_H__
#include <asm/asm-offsets.h>
#include <asm/domain.h>
#include <asm/page.h>
#include <asm/thread_info.h>
.macro csdb
#ifdef CONFIG_THUMB2_KERNEL
.inst.w 0xf3af8014
#else
.inst 0xe320f014
#endif
.endm
.macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
#ifndef CONFIG_CPU_USE_DOMAINS
adds \tmp, \addr, #\size - 1
sbcscc \tmp, \tmp, \limit
bcs \bad
#ifdef CONFIG_CPU_SPECTRE
movcs \addr, #0
csdb
#endif
#endif
.endm
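@ Usage sketch: a getuser.S-style bounds check of a 1-byte user access,
@ branching to the fault handler on failure (register choice hypothetical):
@
@	check_uaccess r0, 1, r1, r2, __get_user_bad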
.macro uaccess_mask_range_ptr, addr:req, size:req, limit:req, tmp:req
#ifdef CONFIG_CPU_SPECTRE
sub \tmp, \limit, #1
subs \tmp, \tmp, \addr @ tmp = limit - 1 - addr
addhs \tmp, \tmp, #1 @ if (tmp >= 0) {
subshs \tmp, \tmp, \size @ tmp = limit - (addr + size) }
movlo \addr, #0 @ if (tmp < 0) addr = NULL
csdb
#endif
.endm
#if defined(CONFIG_CPU_SW_DOMAIN_PAN)
.macro uaccess_disable, tmp, isb=1
/*
* Whenever we re-enter userspace, the domains should always be
* set appropriately.
*/
mov \tmp, #DACR_UACCESS_DISABLE
mcr p15, 0, \tmp, c3, c0, 0 @ Set domain register
.if \isb
instr_sync
.endif
.endm
.macro uaccess_enable, tmp, isb=1
/*
* Whenever we re-enter userspace, the domains should always be
* set appropriately.
*/
mov \tmp, #DACR_UACCESS_ENABLE
mcr p15, 0, \tmp, c3, c0, 0
.if \isb
instr_sync
.endif
.endm
#elif defined(CONFIG_CPU_TTBR0_PAN)
.macro uaccess_disable, tmp, isb=1
/*
* Disable TTBR0 page table walks (EDP0 = 1), use the reserved ASID
* from TTBR1 (A1 = 1) and enable TTBR1 page table walks for kernel
* addresses by reducing TTBR0 range to 32MB (T0SZ = 7).
*/
mrc p15, 0, \tmp, c2, c0, 2 @ read TTBCR
orr \tmp, \tmp, #TTBCR_EPD0 | TTBCR_T0SZ_MASK
orr \tmp, \tmp, #TTBCR_A1
mcr p15, 0, \tmp, c2, c0, 2 @ write TTBCR
.if \isb
instr_sync
.endif
.endm
.macro uaccess_enable, tmp, isb=1
/*
* Enable TTBR0 page table walks (T0SZ = 0, EDP0 = 0) and ASID from
* TTBR0 (A1 = 0).
*/
mrc p15, 0, \tmp, c2, c0, 2 @ read TTBCR
bic \tmp, \tmp, #TTBCR_EPD0 | TTBCR_T0SZ_MASK
bic \tmp, \tmp, #TTBCR_A1
mcr p15, 0, \tmp, c2, c0, 2 @ write TTBCR
.if \isb
instr_sync
.endif
.endm
#else
.macro uaccess_disable, tmp, isb=1
.endm
.macro uaccess_enable, tmp, isb=1
.endm
#endif
#if defined(CONFIG_CPU_SW_DOMAIN_PAN) || defined(CONFIG_CPU_USE_DOMAINS)
#define DACR(x...) x
#else
#define DACR(x...)
#endif
#ifdef CONFIG_CPU_TTBR0_PAN
#define PAN(x...) x
#else
#define PAN(x...)
#endif
/*
* Save the address limit on entry to a privileged exception.
*
* If we are using the DACR for kernel access by the user accessors
* (CONFIG_CPU_USE_DOMAINS=y), always reset the DACR kernel domain
* back to client mode, whether or not \disable is set.
*
* If we are using SW PAN, set the DACR user domain to no access
* if \disable is set.
*/
.macro uaccess_entry, tsk, tmp0, tmp1, tmp2, disable
DACR( mrc p15, 0, \tmp0, c3, c0, 0)
DACR( str \tmp0, [sp, #SVC_DACR])
PAN( mrc p15, 0, \tmp0, c2, c0, 2)
PAN( str \tmp0, [sp, #SVC_TTBCR])
.if \disable && IS_ENABLED(CONFIG_CPU_SW_DOMAIN_PAN)
/* kernel=client, user=no access */
mov \tmp2, #DACR_UACCESS_DISABLE
mcr p15, 0, \tmp2, c3, c0, 0
instr_sync
.elseif IS_ENABLED(CONFIG_CPU_USE_DOMAINS)
/* kernel=client */
bic \tmp2, \tmp0, #domain_mask(DOMAIN_KERNEL)
orr \tmp2, \tmp2, #domain_val(DOMAIN_KERNEL, DOMAIN_CLIENT)
mcr p15, 0, \tmp2, c3, c0, 0
instr_sync
.endif
.endm
/* Restore the user access state previously saved by uaccess_entry */
.macro uaccess_exit, tsk, tmp0, tmp1
DACR( ldr \tmp0, [sp, #SVC_DACR])
DACR( mcr p15, 0, \tmp0, c3, c0, 0)
PAN( ldr \tmp0, [sp, #SVC_TTBCR])
PAN( mcr p15, 0, \tmp0, c2, c0, 2)
.endm
#undef DACR
#undef PAN
#endif /* __ASM_UACCESS_ASM_H__ */
// SPDX-License-Identifier: GPL-2.0-only
/*
* PHY drivers for the sungem ethernet driver.
*
* This file could be shared with other drivers.
*
* (c) 2002-2007, Benjamin Herrenschmidt ([email protected])
*
* TODO:
* - Add support for PHYs that provide an IRQ line
* - Eventually move the entire polling state machine in
* there (out of the eth driver), so that it can easily be
* skipped on PHYs that implement it in hardware.
* - On LXT971 & BCM5201, Apple uses some chip specific regs
* to read the link status. Figure out why and if it makes
* sense to do the same (magic aneg ?)
* - Apple has some additional power management code for some
* Broadcom PHYs that they "hide" from the OpenSource version
* of darwin, still need to reverse engineer that
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/sungem_phy.h>
/* Link modes of the BCM5400 PHY */
static const int phy_BCM5400_link_table[8][3] = {
{ 0, 0, 0 }, /* No link */
{ 0, 0, 0 }, /* 10BT Half Duplex */
{ 1, 0, 0 }, /* 10BT Full Duplex */
{ 0, 1, 0 }, /* 100BT Half Duplex */
{ 0, 1, 0 }, /* 100BT Half Duplex */
{ 1, 1, 0 }, /* 100BT Full Duplex*/
{ 1, 0, 1 }, /* 1000BT */
{ 1, 0, 1 }, /* 1000BT */
};
static inline int __sungem_phy_read(struct mii_phy* phy, int id, int reg)
{
return phy->mdio_read(phy->dev, id, reg);
}
static inline void __sungem_phy_write(struct mii_phy* phy, int id, int reg, int val)
{
phy->mdio_write(phy->dev, id, reg, val);
}
static inline int sungem_phy_read(struct mii_phy* phy, int reg)
{
return phy->mdio_read(phy->dev, phy->mii_id, reg);
}
static inline void sungem_phy_write(struct mii_phy* phy, int reg, int val)
{
phy->mdio_write(phy->dev, phy->mii_id, reg, val);
}
static int reset_one_mii_phy(struct mii_phy* phy, int phy_id)
{
u16 val;
int limit = 10000;
val = __sungem_phy_read(phy, phy_id, MII_BMCR);
val &= ~(BMCR_ISOLATE | BMCR_PDOWN);
val |= BMCR_RESET;
__sungem_phy_write(phy, phy_id, MII_BMCR, val);
udelay(100);
while (--limit) {
val = __sungem_phy_read(phy, phy_id, MII_BMCR);
if ((val & BMCR_RESET) == 0)
break;
udelay(10);
}
if ((val & BMCR_ISOLATE) && limit > 0)
__sungem_phy_write(phy, phy_id, MII_BMCR, val & ~BMCR_ISOLATE);
return limit <= 0;
}
static int bcm5201_init(struct mii_phy* phy)
{
u16 data;
data = sungem_phy_read(phy, MII_BCM5201_MULTIPHY);
data &= ~MII_BCM5201_MULTIPHY_SUPERISOLATE;
sungem_phy_write(phy, MII_BCM5201_MULTIPHY, data);
sungem_phy_write(phy, MII_BCM5201_INTERRUPT, 0);
return 0;
}
static int bcm5201_suspend(struct mii_phy* phy)
{
sungem_phy_write(phy, MII_BCM5201_INTERRUPT, 0);
sungem_phy_write(phy, MII_BCM5201_MULTIPHY, MII_BCM5201_MULTIPHY_SUPERISOLATE);
return 0;
}
static int bcm5221_init(struct mii_phy* phy)
{
u16 data;
data = sungem_phy_read(phy, MII_BCM5221_TEST);
sungem_phy_write(phy, MII_BCM5221_TEST,
data | MII_BCM5221_TEST_ENABLE_SHADOWS);
data = sungem_phy_read(phy, MII_BCM5221_SHDOW_AUX_STAT2);
sungem_phy_write(phy, MII_BCM5221_SHDOW_AUX_STAT2,
data | MII_BCM5221_SHDOW_AUX_STAT2_APD);
data = sungem_phy_read(phy, MII_BCM5221_SHDOW_AUX_MODE4);
sungem_phy_write(phy, MII_BCM5221_SHDOW_AUX_MODE4,
data | MII_BCM5221_SHDOW_AUX_MODE4_CLKLOPWR);
data = sungem_phy_read(phy, MII_BCM5221_TEST);
sungem_phy_write(phy, MII_BCM5221_TEST,
data & ~MII_BCM5221_TEST_ENABLE_SHADOWS);
return 0;
}
static int bcm5221_suspend(struct mii_phy* phy)
{
u16 data;
data = sungem_phy_read(phy, MII_BCM5221_TEST);
sungem_phy_write(phy, MII_BCM5221_TEST,
data | MII_BCM5221_TEST_ENABLE_SHADOWS);
data = sungem_phy_read(phy, MII_BCM5221_SHDOW_AUX_MODE4);
sungem_phy_write(phy, MII_BCM5221_SHDOW_AUX_MODE4,
data | MII_BCM5221_SHDOW_AUX_MODE4_IDDQMODE);
return 0;
}
static int bcm5241_init(struct mii_phy* phy)
{
u16 data;
data = sungem_phy_read(phy, MII_BCM5221_TEST);
sungem_phy_write(phy, MII_BCM5221_TEST,
data | MII_BCM5221_TEST_ENABLE_SHADOWS);
data = sungem_phy_read(phy, MII_BCM5221_SHDOW_AUX_STAT2);
sungem_phy_write(phy, MII_BCM5221_SHDOW_AUX_STAT2,
data | MII_BCM5221_SHDOW_AUX_STAT2_APD);
data = sungem_phy_read(phy, MII_BCM5221_SHDOW_AUX_MODE4);
sungem_phy_write(phy, MII_BCM5221_SHDOW_AUX_MODE4,
data & ~MII_BCM5241_SHDOW_AUX_MODE4_STANDBYPWR);
data = sungem_phy_read(phy, MII_BCM5221_TEST);
sungem_phy_write(phy, MII_BCM5221_TEST,
data & ~MII_BCM5221_TEST_ENABLE_SHADOWS);
return 0;
}
static int bcm5241_suspend(struct mii_phy* phy)
{
u16 data;
data = sungem_phy_read(phy, MII_BCM5221_TEST);
sungem_phy_write(phy, MII_BCM5221_TEST,
data | MII_BCM5221_TEST_ENABLE_SHADOWS);
data = sungem_phy_read(phy, MII_BCM5221_SHDOW_AUX_MODE4);
sungem_phy_write(phy, MII_BCM5221_SHDOW_AUX_MODE4,
data | MII_BCM5241_SHDOW_AUX_MODE4_STANDBYPWR);
return 0;
}
static int bcm5400_init(struct mii_phy* phy)
{
u16 data;
/* Configure for gigabit full duplex */
data = sungem_phy_read(phy, MII_BCM5400_AUXCONTROL);
data |= MII_BCM5400_AUXCONTROL_PWR10BASET;
sungem_phy_write(phy, MII_BCM5400_AUXCONTROL, data);
data = sungem_phy_read(phy, MII_BCM5400_GB_CONTROL);
data |= MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP;
sungem_phy_write(phy, MII_BCM5400_GB_CONTROL, data);
udelay(100);
/* Reset and configure cascaded 10/100 PHY */
(void)reset_one_mii_phy(phy, 0x1f);
data = __sungem_phy_read(phy, 0x1f, MII_BCM5201_MULTIPHY);
data |= MII_BCM5201_MULTIPHY_SERIALMODE;
__sungem_phy_write(phy, 0x1f, MII_BCM5201_MULTIPHY, data);
data = sungem_phy_read(phy, MII_BCM5400_AUXCONTROL);
data &= ~MII_BCM5400_AUXCONTROL_PWR10BASET;
sungem_phy_write(phy, MII_BCM5400_AUXCONTROL, data);
return 0;
}
static int bcm5400_suspend(struct mii_phy* phy)
{
#if 0 /* Commented out in Darwin... does someone have the docs? */
sungem_phy_write(phy, MII_BMCR, BMCR_PDOWN);
#endif
return 0;
}
static int bcm5401_init(struct mii_phy* phy)
{
u16 data;
int rev;
rev = sungem_phy_read(phy, MII_PHYSID2) & 0x000f;
if (rev == 0 || rev == 3) {
/* Some revisions of 5401 appear to need this
* initialisation sequence to disable, according
* to OF, "tap power management"
*
* WARNING ! OF and Darwin don't agree on the
* register addresses. OF seems to interpret the
* register numbers below as decimal
*
* Note: This should (and does) match tg3_init_5401phy_dsp
* in the tg3.c driver. -DaveM
*/
sungem_phy_write(phy, 0x18, 0x0c20);
sungem_phy_write(phy, 0x17, 0x0012);
sungem_phy_write(phy, 0x15, 0x1804);
sungem_phy_write(phy, 0x17, 0x0013);
sungem_phy_write(phy, 0x15, 0x1204);
sungem_phy_write(phy, 0x17, 0x8006);
sungem_phy_write(phy, 0x15, 0x0132);
sungem_phy_write(phy, 0x17, 0x8006);
sungem_phy_write(phy, 0x15, 0x0232);
sungem_phy_write(phy, 0x17, 0x201f);
sungem_phy_write(phy, 0x15, 0x0a20);
}
/* Configure for gigabit full duplex */
data = sungem_phy_read(phy, MII_BCM5400_GB_CONTROL);
data |= MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP;
sungem_phy_write(phy, MII_BCM5400_GB_CONTROL, data);
udelay(10);
/* Reset and configure cascaded 10/100 PHY */
(void)reset_one_mii_phy(phy, 0x1f);
data = __sungem_phy_read(phy, 0x1f, MII_BCM5201_MULTIPHY);
data |= MII_BCM5201_MULTIPHY_SERIALMODE;
__sungem_phy_write(phy, 0x1f, MII_BCM5201_MULTIPHY, data);
return 0;
}
static int bcm5401_suspend(struct mii_phy* phy)
{
#if 0 /* Commented out in Darwin... does someone have the docs? */
sungem_phy_write(phy, MII_BMCR, BMCR_PDOWN);
#endif
return 0;
}
static int bcm5411_init(struct mii_phy* phy)
{
u16 data;
/* Here's some more Apple black magic to set up
* some voltage-related settings.
*/
sungem_phy_write(phy, 0x1c, 0x8c23);
sungem_phy_write(phy, 0x1c, 0x8ca3);
sungem_phy_write(phy, 0x1c, 0x8c23);
/* Here, Apple seems to want to reset it, do
* it as well
*/
sungem_phy_write(phy, MII_BMCR, BMCR_RESET);
sungem_phy_write(phy, MII_BMCR, 0x1340);
data = sungem_phy_read(phy, MII_BCM5400_GB_CONTROL);
data |= MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP;
sungem_phy_write(phy, MII_BCM5400_GB_CONTROL, data);
udelay(10);
/* Reset and configure cascaded 10/100 PHY */
(void)reset_one_mii_phy(phy, 0x1f);
return 0;
}
static int genmii_setup_aneg(struct mii_phy *phy, u32 advertise)
{
u16 ctl, adv;
phy->autoneg = 1;
phy->speed = SPEED_10;
phy->duplex = DUPLEX_HALF;
phy->pause = 0;
phy->advertising = advertise;
/* Setup standard advertise */
adv = sungem_phy_read(phy, MII_ADVERTISE);
adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
if (advertise & ADVERTISED_10baseT_Half)
adv |= ADVERTISE_10HALF;
if (advertise & ADVERTISED_10baseT_Full)
adv |= ADVERTISE_10FULL;
if (advertise & ADVERTISED_100baseT_Half)
adv |= ADVERTISE_100HALF;
if (advertise & ADVERTISED_100baseT_Full)
adv |= ADVERTISE_100FULL;
sungem_phy_write(phy, MII_ADVERTISE, adv);
/* Start/Restart aneg */
ctl = sungem_phy_read(phy, MII_BMCR);
ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
sungem_phy_write(phy, MII_BMCR, ctl);
return 0;
}
static int genmii_setup_forced(struct mii_phy *phy, int speed, int fd)
{
u16 ctl;
phy->autoneg = 0;
phy->speed = speed;
phy->duplex = fd;
phy->pause = 0;
ctl = sungem_phy_read(phy, MII_BMCR);
ctl &= ~(BMCR_FULLDPLX|BMCR_SPEED100|BMCR_ANENABLE);
/* First reset the PHY */
sungem_phy_write(phy, MII_BMCR, ctl | BMCR_RESET);
/* Select speed & duplex */
switch(speed) {
case SPEED_10:
break;
case SPEED_100:
ctl |= BMCR_SPEED100;
break;
case SPEED_1000:
default:
return -EINVAL;
}
if (fd == DUPLEX_FULL)
ctl |= BMCR_FULLDPLX;
sungem_phy_write(phy, MII_BMCR, ctl);
return 0;
}
static int genmii_poll_link(struct mii_phy *phy)
{
u16 status;
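/* BMSR latches link-down events: the first read clears the latched
 * status, the second returns the current link state.
 */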
(void)sungem_phy_read(phy, MII_BMSR);
status = sungem_phy_read(phy, MII_BMSR);
if ((status & BMSR_LSTATUS) == 0)
return 0;
if (phy->autoneg && !(status & BMSR_ANEGCOMPLETE))
return 0;
return 1;
}
static int genmii_read_link(struct mii_phy *phy)
{
u16 lpa;
if (phy->autoneg) {
lpa = sungem_phy_read(phy, MII_LPA);
if (lpa & (LPA_10FULL | LPA_100FULL))
phy->duplex = DUPLEX_FULL;
else
phy->duplex = DUPLEX_HALF;
if (lpa & (LPA_100FULL | LPA_100HALF))
phy->speed = SPEED_100;
else
phy->speed = SPEED_10;
phy->pause = 0;
}
/* On non-aneg, we assume what we put in BMCR is the speed,
* though magic-aneg shouldn't prevent this case from occurring
*/
return 0;
}
static int generic_suspend(struct mii_phy* phy)
{
sungem_phy_write(phy, MII_BMCR, BMCR_PDOWN);
return 0;
}
static int bcm5421_init(struct mii_phy* phy)
{
u16 data;
unsigned int id;
id = (sungem_phy_read(phy, MII_PHYSID1) << 16 | sungem_phy_read(phy, MII_PHYSID2));
/* Revision 0 of 5421 needs some fixups */
if (id == 0x002060e0) {
/* This is borrowed from MacOS
*/
sungem_phy_write(phy, 0x18, 0x1007);
data = sungem_phy_read(phy, 0x18);
sungem_phy_write(phy, 0x18, data | 0x0400);
sungem_phy_write(phy, 0x18, 0x0007);
data = sungem_phy_read(phy, 0x18);
sungem_phy_write(phy, 0x18, data | 0x0800);
sungem_phy_write(phy, 0x17, 0x000a);
data = sungem_phy_read(phy, 0x15);
sungem_phy_write(phy, 0x15, data | 0x0200);
}
/* Pick up some init code from OF for K2 version */
if ((id & 0xfffffff0) == 0x002062e0) {
sungem_phy_write(phy, 4, 0x01e1);
sungem_phy_write(phy, 9, 0x0300);
}
/* Check if we can enable automatic low power */
#ifdef CONFIG_PPC_PMAC
if (phy->platform_data) {
struct device_node *np = of_get_parent(phy->platform_data);
int can_low_power = 1;
if (np == NULL || of_get_property(np, "no-autolowpower", NULL))
can_low_power = 0;
of_node_put(np);
if (can_low_power) {
/* Enable automatic low-power */
sungem_phy_write(phy, 0x1c, 0x9002);
sungem_phy_write(phy, 0x1c, 0xa821);
sungem_phy_write(phy, 0x1c, 0x941d);
}
}
#endif /* CONFIG_PPC_PMAC */
return 0;
}
static int bcm54xx_setup_aneg(struct mii_phy *phy, u32 advertise)
{
u16 ctl, adv;
phy->autoneg = 1;
phy->speed = SPEED_10;
phy->duplex = DUPLEX_HALF;
phy->pause = 0;
phy->advertising = advertise;
/* Setup standard advertise */
adv = sungem_phy_read(phy, MII_ADVERTISE);
adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
if (advertise & ADVERTISED_10baseT_Half)
adv |= ADVERTISE_10HALF;
if (advertise & ADVERTISED_10baseT_Full)
adv |= ADVERTISE_10FULL;
if (advertise & ADVERTISED_100baseT_Half)
adv |= ADVERTISE_100HALF;
if (advertise & ADVERTISED_100baseT_Full)
adv |= ADVERTISE_100FULL;
if (advertise & ADVERTISED_Pause)
adv |= ADVERTISE_PAUSE_CAP;
if (advertise & ADVERTISED_Asym_Pause)
adv |= ADVERTISE_PAUSE_ASYM;
sungem_phy_write(phy, MII_ADVERTISE, adv);
/* Setup 1000BT advertise */
adv = sungem_phy_read(phy, MII_1000BASETCONTROL);
adv &= ~(MII_1000BASETCONTROL_FULLDUPLEXCAP|MII_1000BASETCONTROL_HALFDUPLEXCAP);
if (advertise & SUPPORTED_1000baseT_Half)
adv |= MII_1000BASETCONTROL_HALFDUPLEXCAP;
if (advertise & SUPPORTED_1000baseT_Full)
adv |= MII_1000BASETCONTROL_FULLDUPLEXCAP;
sungem_phy_write(phy, MII_1000BASETCONTROL, adv);
/* Start/Restart aneg */
ctl = sungem_phy_read(phy, MII_BMCR);
ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
sungem_phy_write(phy, MII_BMCR, ctl);
return 0;
}
static int bcm54xx_setup_forced(struct mii_phy *phy, int speed, int fd)
{
u16 ctl;
phy->autoneg = 0;
phy->speed = speed;
phy->duplex = fd;
phy->pause = 0;
ctl = sungem_phy_read(phy, MII_BMCR);
ctl &= ~(BMCR_FULLDPLX|BMCR_SPEED100|BMCR_SPD2|BMCR_ANENABLE);
/* First reset the PHY */
sungem_phy_write(phy, MII_BMCR, ctl | BMCR_RESET);
/* Select speed & duplex */
switch(speed) {
case SPEED_10:
break;
case SPEED_100:
ctl |= BMCR_SPEED100;
break;
case SPEED_1000:
ctl |= BMCR_SPD2;
}
if (fd == DUPLEX_FULL)
ctl |= BMCR_FULLDPLX;
// XXX Should we set the sungem to GII now on 1000BT ?
sungem_phy_write(phy, MII_BMCR, ctl);
return 0;
}
static int bcm54xx_read_link(struct mii_phy *phy)
{
int link_mode;
u16 val;
if (phy->autoneg) {
val = sungem_phy_read(phy, MII_BCM5400_AUXSTATUS);
link_mode = ((val & MII_BCM5400_AUXSTATUS_LINKMODE_MASK) >>
MII_BCM5400_AUXSTATUS_LINKMODE_SHIFT);
phy->duplex = phy_BCM5400_link_table[link_mode][0] ?
DUPLEX_FULL : DUPLEX_HALF;
phy->speed = phy_BCM5400_link_table[link_mode][2] ?
SPEED_1000 :
(phy_BCM5400_link_table[link_mode][1] ?
SPEED_100 : SPEED_10);
val = sungem_phy_read(phy, MII_LPA);
phy->pause = (phy->duplex == DUPLEX_FULL) &&
((val & LPA_PAUSE) != 0);
}
/* On non-aneg, we assume what we put in BMCR is the speed,
* though magic-aneg shouldn't prevent this case from occurring
*/
return 0;
}
static int marvell88e1111_init(struct mii_phy* phy)
{
u16 rev;
/* magic init sequence for rev 0 */
rev = sungem_phy_read(phy, MII_PHYSID2) & 0x000f;
if (rev == 0) {
sungem_phy_write(phy, 0x1d, 0x000a);
sungem_phy_write(phy, 0x1e, 0x0821);
sungem_phy_write(phy, 0x1d, 0x0006);
sungem_phy_write(phy, 0x1e, 0x8600);
sungem_phy_write(phy, 0x1d, 0x000b);
sungem_phy_write(phy, 0x1e, 0x0100);
sungem_phy_write(phy, 0x1d, 0x0004);
sungem_phy_write(phy, 0x1e, 0x4850);
}
return 0;
}
#define BCM5421_MODE_MASK (1 << 5)
static int bcm5421_poll_link(struct mii_phy* phy)
{
u32 phy_reg;
int mode;
/* find out in what mode we are */
sungem_phy_write(phy, MII_NCONFIG, 0x1000);
phy_reg = sungem_phy_read(phy, MII_NCONFIG);
mode = (phy_reg & BCM5421_MODE_MASK) >> 5;
if (mode == BCM54XX_COPPER)
return genmii_poll_link(phy);
/* try to find out whether we have a link */
sungem_phy_write(phy, MII_NCONFIG, 0x2000);
phy_reg = sungem_phy_read(phy, MII_NCONFIG);
if (phy_reg & 0x0020)
return 0;
else
return 1;
}
static int bcm5421_read_link(struct mii_phy* phy)
{
u32 phy_reg;
int mode;
/* find out in what mode we are */
sungem_phy_write(phy, MII_NCONFIG, 0x1000);
phy_reg = sungem_phy_read(phy, MII_NCONFIG);
mode = (phy_reg & BCM5421_MODE_MASK) >> 5;
if (mode == BCM54XX_COPPER)
return bcm54xx_read_link(phy);
phy->speed = SPEED_1000;
/* find out whether we are running half- or full duplex */
sungem_phy_write(phy, MII_NCONFIG, 0x2000);
phy_reg = sungem_phy_read(phy, MII_NCONFIG);
if ((phy_reg & 0x0080) >> 7)
phy->duplex = DUPLEX_HALF;
else
phy->duplex = DUPLEX_FULL;
return 0;
}
static int bcm5421_enable_fiber(struct mii_phy* phy, int autoneg)
{
/* enable fiber mode */
sungem_phy_write(phy, MII_NCONFIG, 0x9020);
/* LEDs active in both modes, autosense prio = fiber */
sungem_phy_write(phy, MII_NCONFIG, 0x945f);
if (!autoneg) {
/* switch off fibre autoneg */
sungem_phy_write(phy, MII_NCONFIG, 0xfc01);
sungem_phy_write(phy, 0x0b, 0x0004);
}
phy->autoneg = autoneg;
return 0;
}
#define BCM5461_FIBER_LINK (1 << 2)
#define BCM5461_MODE_MASK (3 << 1)
static int bcm5461_poll_link(struct mii_phy* phy)
{
u32 phy_reg;
int mode;
/* find out in what mode we are */
sungem_phy_write(phy, MII_NCONFIG, 0x7c00);
phy_reg = sungem_phy_read(phy, MII_NCONFIG);
mode = (phy_reg & BCM5461_MODE_MASK) >> 1;
if (mode == BCM54XX_COPPER)
return genmii_poll_link(phy);
/* find out whether we have a link */
sungem_phy_write(phy, MII_NCONFIG, 0x7000);
phy_reg = sungem_phy_read(phy, MII_NCONFIG);
if (phy_reg & BCM5461_FIBER_LINK)
return 1;
else
return 0;
}
#define BCM5461_FIBER_DUPLEX (1 << 3)
static int bcm5461_read_link(struct mii_phy* phy)
{
u32 phy_reg;
int mode;
/* find out in what mode we are */
sungem_phy_write(phy, MII_NCONFIG, 0x7c00);
phy_reg = sungem_phy_read(phy, MII_NCONFIG);
mode = (phy_reg & BCM5461_MODE_MASK) >> 1;
if (mode == BCM54XX_COPPER) {
return bcm54xx_read_link(phy);
}
phy->speed = SPEED_1000;
/* find out whether we are running half- or full duplex */
sungem_phy_write(phy, MII_NCONFIG, 0x7000);
phy_reg = sungem_phy_read(phy, MII_NCONFIG);
if (phy_reg & BCM5461_FIBER_DUPLEX)
phy->duplex = DUPLEX_FULL;
else
phy->duplex = DUPLEX_HALF;
return 0;
}
static int bcm5461_enable_fiber(struct mii_phy* phy, int autoneg)
{
/* select fiber mode, enable 1000 base-X registers */
sungem_phy_write(phy, MII_NCONFIG, 0xfc0b);
if (autoneg) {
/* enable fiber with autonegotiation (0x1140 sets BMCR_ANENABLE) */
sungem_phy_write(phy, MII_ADVERTISE, 0x01e0);
sungem_phy_write(phy, MII_BMCR, 0x1140);
} else {
/* enable fiber without autonegotiation */
sungem_phy_write(phy, MII_BMCR, 0x0140);
}
phy->autoneg = autoneg;
return 0;
}
static int marvell_setup_aneg(struct mii_phy *phy, u32 advertise)
{
u16 ctl, adv;
phy->autoneg = 1;
phy->speed = SPEED_10;
phy->duplex = DUPLEX_HALF;
phy->pause = 0;
phy->advertising = advertise;
/* Setup standard advertise */
adv = sungem_phy_read(phy, MII_ADVERTISE);
adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
if (advertise & ADVERTISED_10baseT_Half)
adv |= ADVERTISE_10HALF;
if (advertise & ADVERTISED_10baseT_Full)
adv |= ADVERTISE_10FULL;
if (advertise & ADVERTISED_100baseT_Half)
adv |= ADVERTISE_100HALF;
if (advertise & ADVERTISED_100baseT_Full)
adv |= ADVERTISE_100FULL;
if (advertise & ADVERTISED_Pause)
adv |= ADVERTISE_PAUSE_CAP;
if (advertise & ADVERTISED_Asym_Pause)
adv |= ADVERTISE_PAUSE_ASYM;
sungem_phy_write(phy, MII_ADVERTISE, adv);
/* Setup 1000BT advertise & enable crossover detect
* XXX How do we advertise 1000BT ? Darwin source is
* confusing here, they read from specific control and
* write to control... Someone has specs for those
* beasts ?
*/
adv = sungem_phy_read(phy, MII_M1011_PHY_SPEC_CONTROL);
adv |= MII_M1011_PHY_SPEC_CONTROL_AUTO_MDIX;
adv &= ~(MII_1000BASETCONTROL_FULLDUPLEXCAP |
MII_1000BASETCONTROL_HALFDUPLEXCAP);
if (advertise & SUPPORTED_1000baseT_Half)
adv |= MII_1000BASETCONTROL_HALFDUPLEXCAP;
if (advertise & SUPPORTED_1000baseT_Full)
adv |= MII_1000BASETCONTROL_FULLDUPLEXCAP;
sungem_phy_write(phy, MII_1000BASETCONTROL, adv);
/* Start/Restart aneg */
ctl = sungem_phy_read(phy, MII_BMCR);
ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
sungem_phy_write(phy, MII_BMCR, ctl);
return 0;
}
static int marvell_setup_forced(struct mii_phy *phy, int speed, int fd)
{
u16 ctl, ctl2;
phy->autoneg = 0;
phy->speed = speed;
phy->duplex = fd;
phy->pause = 0;
ctl = sungem_phy_read(phy, MII_BMCR);
ctl &= ~(BMCR_FULLDPLX|BMCR_SPEED100|BMCR_SPD2|BMCR_ANENABLE);
ctl |= BMCR_RESET;
/* Select speed & duplex */
switch(speed) {
case SPEED_10:
break;
case SPEED_100:
ctl |= BMCR_SPEED100;
break;
/* I'm not sure about the one below, again, Darwin source is
* quite confusing and I lack chip specs
*/
case SPEED_1000:
ctl |= BMCR_SPD2;
}
if (fd == DUPLEX_FULL)
ctl |= BMCR_FULLDPLX;
/* Disable crossover. Again, the way Apple does it is strange,
* though I don't assume they are wrong ;)
*/
ctl2 = sungem_phy_read(phy, MII_M1011_PHY_SPEC_CONTROL);
ctl2 &= ~(MII_M1011_PHY_SPEC_CONTROL_MANUAL_MDIX |
MII_M1011_PHY_SPEC_CONTROL_AUTO_MDIX |
MII_1000BASETCONTROL_FULLDUPLEXCAP |
MII_1000BASETCONTROL_HALFDUPLEXCAP);
if (speed == SPEED_1000)
ctl2 |= (fd == DUPLEX_FULL) ?
MII_1000BASETCONTROL_FULLDUPLEXCAP :
MII_1000BASETCONTROL_HALFDUPLEXCAP;
sungem_phy_write(phy, MII_1000BASETCONTROL, ctl2);
// XXX Should we set the sungem to GII now on 1000BT ?
sungem_phy_write(phy, MII_BMCR, ctl);
return 0;
}
static int marvell_read_link(struct mii_phy *phy)
{
u16 status, pmask;
if (phy->autoneg) {
status = sungem_phy_read(phy, MII_M1011_PHY_SPEC_STATUS);
if ((status & MII_M1011_PHY_SPEC_STATUS_RESOLVED) == 0)
return -EAGAIN;
if (status & MII_M1011_PHY_SPEC_STATUS_1000)
phy->speed = SPEED_1000;
else if (status & MII_M1011_PHY_SPEC_STATUS_100)
phy->speed = SPEED_100;
else
phy->speed = SPEED_10;
if (status & MII_M1011_PHY_SPEC_STATUS_FULLDUPLEX)
phy->duplex = DUPLEX_FULL;
else
phy->duplex = DUPLEX_HALF;
pmask = MII_M1011_PHY_SPEC_STATUS_TX_PAUSE |
MII_M1011_PHY_SPEC_STATUS_RX_PAUSE;
phy->pause = (status & pmask) == pmask;
}
/* On non-aneg, we assume what we put in BMCR is the speed,
* though magic-aneg shouldn't prevent this case from occurring
*/
return 0;
}
#define MII_BASIC_FEATURES \
(SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \
SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \
SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII | \
SUPPORTED_Pause)
/* On gigabit-capable PHYs, we advertise Pause support but not asym pause
* support for now, as I'm not sure it's supported and Darwin doesn't do
* it either. --BenH.
*/
#define MII_GBIT_FEATURES \
(MII_BASIC_FEATURES | \
SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)
/* Broadcom BCM 5201 */
static const struct mii_phy_ops bcm5201_phy_ops = {
.init = bcm5201_init,
.suspend = bcm5201_suspend,
.setup_aneg = genmii_setup_aneg,
.setup_forced = genmii_setup_forced,
.poll_link = genmii_poll_link,
.read_link = genmii_read_link,
};
static const struct mii_phy_def bcm5201_phy_def = {
.phy_id = 0x00406210,
.phy_id_mask = 0xfffffff0,
.name = "BCM5201",
.features = MII_BASIC_FEATURES,
.magic_aneg = 1,
.ops = &bcm5201_phy_ops
};
/* Broadcom BCM 5221 */
static const struct mii_phy_ops bcm5221_phy_ops = {
.suspend = bcm5221_suspend,
.init = bcm5221_init,
.setup_aneg = genmii_setup_aneg,
.setup_forced = genmii_setup_forced,
.poll_link = genmii_poll_link,
.read_link = genmii_read_link,
};
static const struct mii_phy_def bcm5221_phy_def = {
.phy_id = 0x004061e0,
.phy_id_mask = 0xfffffff0,
.name = "BCM5221",
.features = MII_BASIC_FEATURES,
.magic_aneg = 1,
.ops = &bcm5221_phy_ops
};
/* Broadcom BCM 5241 */
static const struct mii_phy_ops bcm5241_phy_ops = {
.suspend = bcm5241_suspend,
.init = bcm5241_init,
.setup_aneg = genmii_setup_aneg,
.setup_forced = genmii_setup_forced,
.poll_link = genmii_poll_link,
.read_link = genmii_read_link,
};
static const struct mii_phy_def bcm5241_phy_def = {
.phy_id = 0x0143bc30,
.phy_id_mask = 0xfffffff0,
.name = "BCM5241",
.features = MII_BASIC_FEATURES,
.magic_aneg = 1,
.ops = &bcm5241_phy_ops
};
/* Broadcom BCM 5400 */
static const struct mii_phy_ops bcm5400_phy_ops = {
.init = bcm5400_init,
.suspend = bcm5400_suspend,
.setup_aneg = bcm54xx_setup_aneg,
.setup_forced = bcm54xx_setup_forced,
.poll_link = genmii_poll_link,
.read_link = bcm54xx_read_link,
};
static const struct mii_phy_def bcm5400_phy_def = {
.phy_id = 0x00206040,
.phy_id_mask = 0xfffffff0,
.name = "BCM5400",
.features = MII_GBIT_FEATURES,
.magic_aneg = 1,
.ops = &bcm5400_phy_ops
};
/* Broadcom BCM 5401 */
static const struct mii_phy_ops bcm5401_phy_ops = {
.init = bcm5401_init,
.suspend = bcm5401_suspend,
.setup_aneg = bcm54xx_setup_aneg,
.setup_forced = bcm54xx_setup_forced,
.poll_link = genmii_poll_link,
.read_link = bcm54xx_read_link,
};
static const struct mii_phy_def bcm5401_phy_def = {
.phy_id = 0x00206050,
.phy_id_mask = 0xfffffff0,
.name = "BCM5401",
.features = MII_GBIT_FEATURES,
.magic_aneg = 1,
.ops = &bcm5401_phy_ops
};
/* Broadcom BCM 5411 */
static const struct mii_phy_ops bcm5411_phy_ops = {
.init = bcm5411_init,
.suspend = generic_suspend,
.setup_aneg = bcm54xx_setup_aneg,
.setup_forced = bcm54xx_setup_forced,
.poll_link = genmii_poll_link,
.read_link = bcm54xx_read_link,
};
static const struct mii_phy_def bcm5411_phy_def = {
.phy_id = 0x00206070,
.phy_id_mask = 0xfffffff0,
.name = "BCM5411",
.features = MII_GBIT_FEATURES,
.magic_aneg = 1,
.ops = &bcm5411_phy_ops
};
/* Broadcom BCM 5421 */
static const struct mii_phy_ops bcm5421_phy_ops = {
.init = bcm5421_init,
.suspend = generic_suspend,
.setup_aneg = bcm54xx_setup_aneg,
.setup_forced = bcm54xx_setup_forced,
.poll_link = bcm5421_poll_link,
.read_link = bcm5421_read_link,
.enable_fiber = bcm5421_enable_fiber,
};
static const struct mii_phy_def bcm5421_phy_def = {
.phy_id = 0x002060e0,
.phy_id_mask = 0xfffffff0,
.name = "BCM5421",
.features = MII_GBIT_FEATURES,
.magic_aneg = 1,
.ops = &bcm5421_phy_ops
};
/* Broadcom BCM 5421 built-in K2 */
static const struct mii_phy_ops bcm5421k2_phy_ops = {
.init = bcm5421_init,
.suspend = generic_suspend,
.setup_aneg = bcm54xx_setup_aneg,
.setup_forced = bcm54xx_setup_forced,
.poll_link = genmii_poll_link,
.read_link = bcm54xx_read_link,
};
static const struct mii_phy_def bcm5421k2_phy_def = {
.phy_id = 0x002062e0,
.phy_id_mask = 0xfffffff0,
.name = "BCM5421-K2",
.features = MII_GBIT_FEATURES,
.magic_aneg = 1,
.ops = &bcm5421k2_phy_ops
};
static const struct mii_phy_ops bcm5461_phy_ops = {
.init = bcm5421_init,
.suspend = generic_suspend,
.setup_aneg = bcm54xx_setup_aneg,
.setup_forced = bcm54xx_setup_forced,
.poll_link = bcm5461_poll_link,
.read_link = bcm5461_read_link,
.enable_fiber = bcm5461_enable_fiber,
};
static const struct mii_phy_def bcm5461_phy_def = {
.phy_id = 0x002060c0,
.phy_id_mask = 0xfffffff0,
.name = "BCM5461",
.features = MII_GBIT_FEATURES,
.magic_aneg = 1,
.ops = &bcm5461_phy_ops
};
/* Broadcom BCM 5462 built-in Vesta */
static const struct mii_phy_ops bcm5462V_phy_ops = {
.init = bcm5421_init,
.suspend = generic_suspend,
.setup_aneg = bcm54xx_setup_aneg,
.setup_forced = bcm54xx_setup_forced,
.poll_link = genmii_poll_link,
.read_link = bcm54xx_read_link,
};
static const struct mii_phy_def bcm5462V_phy_def = {
.phy_id = 0x002060d0,
.phy_id_mask = 0xfffffff0,
.name = "BCM5462-Vesta",
.features = MII_GBIT_FEATURES,
.magic_aneg = 1,
.ops = &bcm5462V_phy_ops
};
/* Marvell 88E1101 and 88E1111 */
static const struct mii_phy_ops marvell88e1101_phy_ops = {
.suspend = generic_suspend,
.setup_aneg = marvell_setup_aneg,
.setup_forced = marvell_setup_forced,
.poll_link = genmii_poll_link,
.read_link = marvell_read_link
};
static const struct mii_phy_ops marvell88e1111_phy_ops = {
.init = marvell88e1111_init,
.suspend = generic_suspend,
.setup_aneg = marvell_setup_aneg,
.setup_forced = marvell_setup_forced,
.poll_link = genmii_poll_link,
.read_link = marvell_read_link
};
/* two revs in Darwin for the 88e1101 ... I could use a datasheet
* to get the proper names...
*/
static const struct mii_phy_def marvell88e1101v1_phy_def = {
.phy_id = 0x01410c20,
.phy_id_mask = 0xfffffff0,
.name = "Marvell 88E1101v1",
.features = MII_GBIT_FEATURES,
.magic_aneg = 1,
.ops = &marvell88e1101_phy_ops
};
static const struct mii_phy_def marvell88e1101v2_phy_def = {
.phy_id = 0x01410c60,
.phy_id_mask = 0xfffffff0,
.name = "Marvell 88E1101v2",
.features = MII_GBIT_FEATURES,
.magic_aneg = 1,
.ops = &marvell88e1101_phy_ops
};
static const struct mii_phy_def marvell88e1111_phy_def = {
.phy_id = 0x01410cc0,
.phy_id_mask = 0xfffffff0,
.name = "Marvell 88E1111",
.features = MII_GBIT_FEATURES,
.magic_aneg = 1,
.ops = &marvell88e1111_phy_ops
};
/* Generic implementation for most 10/100 PHYs */
static const struct mii_phy_ops generic_phy_ops = {
.setup_aneg = genmii_setup_aneg,
.setup_forced = genmii_setup_forced,
.poll_link = genmii_poll_link,
.read_link = genmii_read_link
};
static const struct mii_phy_def genmii_phy_def = {
.phy_id = 0x00000000,
.phy_id_mask = 0x00000000,
.name = "Generic MII",
.features = MII_BASIC_FEATURES,
.magic_aneg = 0,
.ops = &generic_phy_ops
};
static const struct mii_phy_def *mii_phy_table[] = {
&bcm5201_phy_def,
&bcm5221_phy_def,
&bcm5241_phy_def,
&bcm5400_phy_def,
&bcm5401_phy_def,
&bcm5411_phy_def,
&bcm5421_phy_def,
&bcm5421k2_phy_def,
&bcm5461_phy_def,
&bcm5462V_phy_def,
&marvell88e1101v1_phy_def,
&marvell88e1101v2_phy_def,
&marvell88e1111_phy_def,
&genmii_phy_def,
NULL
};
int sungem_phy_probe(struct mii_phy *phy, int mii_id)
{
const struct mii_phy_def *def;
int rc;
u32 id;
int i;
/* We do not reset the mii_phy structure as the driver
* may re-probe the PHY regularly
*/
phy->mii_id = mii_id;
/* Take PHY out of isolate mode and reset it. */
rc = reset_one_mii_phy(phy, mii_id);
if (rc)
goto fail;
/* Read ID and find matching entry */
id = (sungem_phy_read(phy, MII_PHYSID1) << 16 | sungem_phy_read(phy, MII_PHYSID2));
printk(KERN_DEBUG KBUILD_MODNAME ": " "PHY ID: %x, addr: %x\n",
id, mii_id);
for (i=0; (def = mii_phy_table[i]) != NULL; i++)
if ((id & def->phy_id_mask) == def->phy_id)
break;
/* Should never be NULL (we have a generic entry), but... */
if (def == NULL)
goto fail;
phy->def = def;
return 0;
fail:
phy->speed = 0;
phy->duplex = 0;
phy->pause = 0;
phy->advertising = 0;
return -ENODEV;
}
EXPORT_SYMBOL(sungem_phy_probe);
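/*
 * Usage sketch (hypothetical driver state "gp"): the Ethernet driver
 * fills in phy->dev, phy->mdio_read and phy->mdio_write, then scans the
 * MII bus for a responding address:
 *
 *	for (addr = 0; addr < 32; addr++)
 *		if (sungem_phy_probe(&gp->phy_mii, addr) == 0)
 *			break;
 */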
MODULE_DESCRIPTION("PHY drivers for the sungem Ethernet MAC driver");
MODULE_LICENSE("GPL");
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2021 Intel Corporation
*/
#ifndef _INTEL_REGION_TTM_H_
#define _INTEL_REGION_TTM_H_
#include <linux/types.h>
#include "i915_selftest.h"
struct drm_i915_private;
struct intel_memory_region;
struct ttm_resource;
struct ttm_device_funcs;
int intel_region_ttm_device_init(struct drm_i915_private *dev_priv);
void intel_region_ttm_device_fini(struct drm_i915_private *dev_priv);
int intel_region_ttm_init(struct intel_memory_region *mem);
int intel_region_ttm_fini(struct intel_memory_region *mem);
struct i915_refct_sgt *
intel_region_ttm_resource_to_rsgt(struct intel_memory_region *mem,
struct ttm_resource *res,
u32 page_alignment);
void intel_region_ttm_resource_free(struct intel_memory_region *mem,
struct ttm_resource *res);
int intel_region_to_ttm_type(const struct intel_memory_region *mem);
struct ttm_device_funcs *i915_ttm_driver(void);
#ifdef CONFIG_DRM_I915_SELFTEST
struct ttm_resource *
intel_region_ttm_resource_alloc(struct intel_memory_region *mem,
resource_size_t offset,
resource_size_t size,
unsigned int flags);
#endif
#endif /* _INTEL_REGION_TTM_H_ */
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017 BayLibre, SAS.
* Author: Jerome Brunet <[email protected]>
*/
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
#include <sound/soc.h>
#define DRV_NAME "simple-amplifier"
struct simple_amp {
struct gpio_desc *gpiod_enable;
};
static int drv_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *control, int event)
{
struct snd_soc_component *c = snd_soc_dapm_to_component(w->dapm);
struct simple_amp *priv = snd_soc_component_get_drvdata(c);
int val;
switch (event) {
case SND_SOC_DAPM_POST_PMU:
val = 1;
break;
case SND_SOC_DAPM_PRE_PMD:
val = 0;
break;
default:
WARN(1, "Unexpected event");
return -EINVAL;
}
gpiod_set_value_cansleep(priv->gpiod_enable, val);
return 0;
}
static const struct snd_soc_dapm_widget simple_amp_dapm_widgets[] = {
SND_SOC_DAPM_INPUT("INL"),
SND_SOC_DAPM_INPUT("INR"),
SND_SOC_DAPM_OUT_DRV_E("DRV", SND_SOC_NOPM, 0, 0, NULL, 0, drv_event,
(SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD)),
SND_SOC_DAPM_OUTPUT("OUTL"),
SND_SOC_DAPM_OUTPUT("OUTR"),
SND_SOC_DAPM_REGULATOR_SUPPLY("VCC", 20, 0),
};
static const struct snd_soc_dapm_route simple_amp_dapm_routes[] = {
{ "DRV", NULL, "INL" },
{ "DRV", NULL, "INR" },
{ "OUTL", NULL, "VCC" },
{ "OUTR", NULL, "VCC" },
{ "OUTL", NULL, "DRV" },
{ "OUTR", NULL, "DRV" },
};
static const struct snd_soc_component_driver simple_amp_component_driver = {
.dapm_widgets = simple_amp_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(simple_amp_dapm_widgets),
.dapm_routes = simple_amp_dapm_routes,
.num_dapm_routes = ARRAY_SIZE(simple_amp_dapm_routes),
};
static int simple_amp_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct simple_amp *priv;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (priv == NULL)
return -ENOMEM;
platform_set_drvdata(pdev, priv);
priv->gpiod_enable = devm_gpiod_get_optional(dev, "enable",
GPIOD_OUT_LOW);
if (IS_ERR(priv->gpiod_enable))
return dev_err_probe(dev, PTR_ERR(priv->gpiod_enable),
"Failed to get 'enable' gpio");
return devm_snd_soc_register_component(dev,
&simple_amp_component_driver,
NULL, 0);
}
#ifdef CONFIG_OF
static const struct of_device_id simple_amp_ids[] = {
{ .compatible = "dioo,dio2125", },
{ .compatible = "simple-audio-amplifier", },
{ }
};
MODULE_DEVICE_TABLE(of, simple_amp_ids);
#endif
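/*
 * Device tree sketch (hypothetical node, following the
 * simple-audio-amplifier binding matched above):
 *
 *	analog-amplifier {
 *		compatible = "simple-audio-amplifier";
 *		enable-gpios = <&gpio 3 GPIO_ACTIVE_HIGH>;
 *		VCC-supply = <&reg_5v>;
 *	};
 */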
static struct platform_driver simple_amp_driver = {
.driver = {
.name = DRV_NAME,
.of_match_table = of_match_ptr(simple_amp_ids),
},
.probe = simple_amp_probe,
};
module_platform_driver(simple_amp_driver);
MODULE_DESCRIPTION("ASoC Simple Audio Amplifier driver");
MODULE_AUTHOR("Jerome Brunet <[email protected]>");
MODULE_LICENSE("GPL");
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_CMPXCHG_GRB_H
#define __ASM_SH_CMPXCHG_GRB_H
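/*
 * Note on the sequences below: they follow the SH "gUSA" (user-space
 * atomicity) convention. r0 is loaded with the end address of the
 * critical section ("mova 1f, r0") and r15 holds a small negative value
 * while inside it ("LOGIN"); if the sequence is interrupted, the kernel
 * detects the negative r15 and restarts it from the beginning, giving
 * atomicity without disabling interrupts. "LOGOUT" restores r15.
 */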
static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
{
unsigned long retval;
__asm__ __volatile__ (
" .align 2 \n\t"
" mova 1f, r0 \n\t" /* r0 = end point */
" nop \n\t"
" mov r15, r1 \n\t" /* r1 = saved sp */
" mov #-4, r15 \n\t" /* LOGIN */
" mov.l @%1, %0 \n\t" /* load old value */
" mov.l %2, @%1 \n\t" /* store new value */
"1: mov r1, r15 \n\t" /* LOGOUT */
: "=&r" (retval),
"+r" (m),
"+r" (val) /* inhibit r15 overloading */
:
: "memory", "r0", "r1");
return retval;
}
static inline unsigned long xchg_u16(volatile u16 *m, unsigned long val)
{
unsigned long retval;
__asm__ __volatile__ (
" .align 2 \n\t"
" mova 1f, r0 \n\t" /* r0 = end point */
" mov r15, r1 \n\t" /* r1 = saved sp */
" mov #-6, r15 \n\t" /* LOGIN */
" mov.w @%1, %0 \n\t" /* load old value */
" extu.w %0, %0 \n\t" /* extend as unsigned */
" mov.w %2, @%1 \n\t" /* store new value */
"1: mov r1, r15 \n\t" /* LOGOUT */
: "=&r" (retval),
"+r" (m),
"+r" (val) /* inhibit r15 overloading */
:
: "memory" , "r0", "r1");
return retval;
}
static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
{
unsigned long retval;
__asm__ __volatile__ (
" .align 2 \n\t"
" mova 1f, r0 \n\t" /* r0 = end point */
" mov r15, r1 \n\t" /* r1 = saved sp */
" mov #-6, r15 \n\t" /* LOGIN */
" mov.b @%1, %0 \n\t" /* load old value */
" extu.b %0, %0 \n\t" /* extend as unsigned */
" mov.b %2, @%1 \n\t" /* store new value */
"1: mov r1, r15 \n\t" /* LOGOUT */
: "=&r" (retval),
"+r" (m),
"+r" (val) /* inhibit r15 overloading */
:
: "memory" , "r0", "r1");
return retval;
}
static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old,
unsigned long new)
{
unsigned long retval;
__asm__ __volatile__ (
" .align 2 \n\t"
" mova 1f, r0 \n\t" /* r0 = end point */
" nop \n\t"
" mov r15, r1 \n\t" /* r1 = saved sp */
" mov #-8, r15 \n\t" /* LOGIN */
" mov.l @%3, %0 \n\t" /* load old value */
" cmp/eq %0, %1 \n\t"
" bf 1f \n\t" /* if not equal */
" mov.l %2, @%3 \n\t" /* store new value */
"1: mov r1, r15 \n\t" /* LOGOUT */
: "=&r" (retval),
"+r" (old), "+r" (new) /* old or new can be r15 */
: "r" (m)
: "memory" , "r0", "r1", "t");
return retval;
}
#endif /* __ASM_SH_CMPXCHG_GRB_H */
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
*/
#ifndef ML26124_H
#define ML26124_H
/* Clock Control Register */
#define ML26124_SMPLING_RATE 0x00
#define ML26124_PLLNL 0x02
#define ML26124_PLLNH 0x04
#define ML26124_PLLML 0x06
#define ML26124_PLLMH 0x08
#define ML26124_PLLDIV 0x0a
#define ML26124_CLK_EN 0x0c
#define ML26124_CLK_CTL 0x0e
/* System Control Register */
#define ML26124_SW_RST 0x10
#define ML26124_REC_PLYBAK_RUN 0x12
#define ML26124_MIC_TIM 0x14
/* Power Management Register */
#define ML26124_PW_REF_PW_MNG 0x20
#define ML26124_PW_IN_PW_MNG 0x22
#define ML26124_PW_DAC_PW_MNG 0x24
#define ML26124_PW_SPAMP_PW_MNG 0x26
#define ML26124_PW_LOUT_PW_MNG 0x28
#define ML26124_PW_VOUT_PW_MNG 0x2a
#define ML26124_PW_ZCCMP_PW_MNG 0x2e
/* Analog Reference Control Register */
#define ML26124_PW_MICBIAS_VOL 0x30
/* Input/Output Amplifier Control Register */
#define ML26124_PW_MIC_IN_VOL 0x32
#define ML26124_PW_MIC_BOST_VOL 0x38
#define ML26124_PW_SPK_AMP_VOL 0x3a
#define ML26124_PW_AMP_VOL_FUNC 0x48
#define ML26124_PW_AMP_VOL_FADE 0x4a
/* Analog Path Control Register */
#define ML26124_SPK_AMP_OUT 0x54
#define ML26124_MIC_IF_CTL 0x5a
#define ML26124_MIC_SELECT 0xe8
/* Audio Interface Control Register */
#define ML26124_SAI_TRANS_CTL 0x60
#define ML26124_SAI_RCV_CTL 0x62
#define ML26124_SAI_MODE_SEL 0x64
/* DSP Control Register */
#define ML26124_FILTER_EN 0x66
#define ML26124_DVOL_CTL 0x68
#define ML26124_MIXER_VOL_CTL 0x6a
#define ML26124_RECORD_DIG_VOL 0x6c
#define ML26124_PLBAK_DIG_VOL 0x70
#define ML26124_DIGI_BOOST_VOL 0x72
#define ML26124_EQ_GAIN_BRAND0 0x74
#define ML26124_EQ_GAIN_BRAND1 0x76
#define ML26124_EQ_GAIN_BRAND2 0x78
#define ML26124_EQ_GAIN_BRAND3 0x7a
#define ML26124_EQ_GAIN_BRAND4 0x7c
#define ML26124_HPF2_CUTOFF 0x7e
#define ML26124_EQBRAND0_F0L 0x80
#define ML26124_EQBRAND0_F0H 0x82
#define ML26124_EQBRAND0_F1L 0x84
#define ML26124_EQBRAND0_F1H 0x86
#define ML26124_EQBRAND1_F0L 0x88
#define ML26124_EQBRAND1_F0H 0x8a
#define ML26124_EQBRAND1_F1L 0x8c
#define ML26124_EQBRAND1_F1H 0x8e
#define ML26124_EQBRAND2_F0L 0x90
#define ML26124_EQBRAND2_F0H 0x92
#define ML26124_EQBRAND2_F1L 0x94
#define ML26124_EQBRAND2_F1H 0x96
#define ML26124_EQBRAND3_F0L 0x98
#define ML26124_EQBRAND3_F0H 0x9a
#define ML26124_EQBRAND3_F1L 0x9c
#define ML26124_EQBRAND3_F1H 0x9e
#define ML26124_EQBRAND4_F0L 0xa0
#define ML26124_EQBRAND4_F0H 0xa2
#define ML26124_EQBRAND4_F1L 0xa4
#define ML26124_EQBRAND4_F1H 0xa6
/* ALC Control Register */
#define ML26124_ALC_MODE 0xb0
#define ML26124_ALC_ATTACK_TIM 0xb2
#define ML26124_ALC_DECAY_TIM 0xb4
#define ML26124_ALC_HOLD_TIM 0xb6
#define ML26124_ALC_TARGET_LEV 0xb8
#define ML26124_ALC_MAXMIN_GAIN 0xba
#define ML26124_NOIS_GATE_THRSH 0xbc
#define ML26124_ALC_ZERO_TIMOUT 0xbe
/* Playback Limiter Control Register */
#define ML26124_PL_ATTACKTIME 0xc0
#define ML26124_PL_DECAYTIME 0xc2
#define ML26124_PL_TARGETTIME 0xc4
#define ML26124_PL_MAXMIN_GAIN 0xc6
#define ML26124_PLYBAK_BOST_VOL 0xc8
#define ML26124_PL_0CROSS_TIMOUT 0xca
/* Video Amplifier Control Register */
#define ML26124_VIDEO_AMP_GAIN_CTL 0xd0
#define ML26124_VIDEO_AMP_SETUP1 0xd2
#define ML26124_VIDEO_AMP_CTL2 0xd4
/* Clock select for machine driver */
#define ML26124_USE_PLL 0
#define ML26124_USE_MCLKI_256FS 1
#define ML26124_USE_MCLKI_512FS 2
#define ML26124_USE_MCLKI_1024FS 3
/* Register Mask */
#define ML26124_R0_MASK 0xf
#define ML26124_R2_MASK 0xff
#define ML26124_R4_MASK 0x1
#define ML26124_R6_MASK 0xf
#define ML26124_R8_MASK 0x3f
#define ML26124_Ra_MASK 0x1f
#define ML26124_Rc_MASK 0x1f
#define ML26124_Re_MASK 0x7
#define ML26124_R10_MASK 0x1
#define ML26124_R12_MASK 0x17
#define ML26124_R14_MASK 0x3f
#define ML26124_R20_MASK 0x47
#define ML26124_R22_MASK 0xa
#define ML26124_R24_MASK 0x2
#define ML26124_R26_MASK 0x1f
#define ML26124_R28_MASK 0x2
#define ML26124_R2a_MASK 0x2
#define ML26124_R2e_MASK 0x2
#define ML26124_R30_MASK 0x7
#define ML26124_R32_MASK 0x3f
#define ML26124_R38_MASK 0x38
#define ML26124_R3a_MASK 0x3f
#define ML26124_R48_MASK 0x3
#define ML26124_R4a_MASK 0x7
#define ML26124_R54_MASK 0x2a
#define ML26124_R5a_MASK 0x3
#define ML26124_Re8_MASK 0x3
#define ML26124_R60_MASK 0xff
#define ML26124_R62_MASK 0xff
#define ML26124_R64_MASK 0x1
#define ML26124_R66_MASK 0xff
#define ML26124_R68_MASK 0x3b
#define ML26124_R6a_MASK 0xf3
#define ML26124_R6c_MASK 0xff
#define ML26124_R70_MASK 0xff
#define ML26124_MCLKEN BIT(0)
#define ML26124_PLLEN BIT(1)
#define ML26124_PLLOE BIT(2)
#define ML26124_MCLKOE BIT(3)
#define ML26124_BLT_ALL_ON 0x1f
#define ML26124_BLT_PREAMP_ON 0x13
#define ML26124_MICBEN_ON BIT(2)
enum ml26124_regs {
ML26124_MCLK = 0,
};
enum ml26124_clk_in {
ML26124_USE_PLLOUT = 0,
ML26124_USE_MCLKI,
};
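/*
 * Illustrative sketch (not part of the original header): a machine driver
 * would typically hand one of the clock selectors above to the codec DAI
 * when configuring clocking. The clk_id mapping and the rate below are
 * hypothetical:
 *
 *	ret = snd_soc_dai_set_sysclk(codec_dai, ML26124_USE_PLLOUT,
 *				     12288000, SND_SOC_CLOCK_IN);
 *	if (ret < 0)
 *		return ret;
 */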
#endif
|
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2023 Advanced Micro Devices, Inc. */
#ifndef _LM_H_
#define _LM_H_
#include <linux/fs.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/types.h>
#include <linux/pds/pds_common.h>
#include <linux/pds/pds_adminq.h>
struct pds_vfio_lm_file {
struct file *filep;
struct mutex lock; /* protect live migration data file */
u64 size; /* Size with valid data */
u64 alloc_size; /* Total allocated size. Always >= size */
void *page_mem; /* memory allocated for pages */
struct page **pages; /* Backing pages for file */
unsigned long long npages;
struct sg_table sg_table; /* SG table for backing pages */
struct pds_lm_sg_elem *sgl; /* DMA mapping */
dma_addr_t sgl_addr;
u16 num_sge;
struct scatterlist *last_offset_sg; /* Iterator */
unsigned int sg_last_entry;
unsigned long last_offset;
bool disabled;
};
struct pds_vfio_pci_device;
struct file *
pds_vfio_step_device_state_locked(struct pds_vfio_pci_device *pds_vfio,
enum vfio_device_mig_state next);
void pds_vfio_put_save_file(struct pds_vfio_pci_device *pds_vfio);
void pds_vfio_put_restore_file(struct pds_vfio_pci_device *pds_vfio);
#endif /* _LM_H_ */
|
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
/*
* Copyright (c) 2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2023, Linaro Limited
*/
#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_BLAIR_H
#define _DT_BINDINGS_CLK_QCOM_GPU_CC_BLAIR_H
/* GPU CC clocks */
#define GPU_CC_PLL0 0
#define GPU_CC_PLL1 1
#define GPU_CC_AHB_CLK 2
#define GPU_CC_CX_GFX3D_CLK 3
#define GPU_CC_CX_GFX3D_SLV_CLK 4
#define GPU_CC_CX_GMU_CLK 5
#define GPU_CC_CX_SNOC_DVM_CLK 6
#define GPU_CC_CXO_AON_CLK 7
#define GPU_CC_CXO_CLK 8
#define GPU_CC_GMU_CLK_SRC 9
#define GPU_CC_GX_CXO_CLK 10
#define GPU_CC_GX_GFX3D_CLK 11
#define GPU_CC_GX_GFX3D_CLK_SRC 12
#define GPU_CC_GX_GMU_CLK 13
#define GPU_CC_SLEEP_CLK 14
/* GDSCs */
#define GPU_CX_GDSC 0
#define GPU_GX_GDSC 1
/* Resets */
#define GPU_GX_BCR 0
#define GPU_ACD_BCR 1
#define GPU_GX_ACD_MISC_BCR 2
#endif
|
// SPDX-License-Identifier: GPL-2.0
/*
* HT16K33 driver
*
* Author: Robin van der Gracht <[email protected]>
*
* Copyright: (C) 2016 Protonic Holland.
* Copyright (C) 2021 Glider bv
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
#include <linux/property.h>
#include <linux/fb.h>
#include <linux/backlight.h>
#include <linux/container_of.h>
#include <linux/input.h>
#include <linux/input/matrix_keypad.h>
#include <linux/leds.h>
#include <linux/workqueue.h>
#include <linux/mm.h>
#include <linux/map_to_7segment.h>
#include <linux/map_to_14segment.h>
#include <linux/unaligned.h>
#include "line-display.h"
/* Registers */
#define REG_SYSTEM_SETUP 0x20
#define REG_SYSTEM_SETUP_OSC_ON BIT(0)
#define REG_DISPLAY_SETUP 0x80
#define REG_DISPLAY_SETUP_ON BIT(0)
#define REG_DISPLAY_SETUP_BLINK_OFF (0 << 1)
#define REG_DISPLAY_SETUP_BLINK_2HZ (1 << 1)
#define REG_DISPLAY_SETUP_BLINK_1HZ (2 << 1)
#define REG_DISPLAY_SETUP_BLINK_0HZ5 (3 << 1)
#define REG_ROWINT_SET 0xA0
#define REG_ROWINT_SET_INT_EN BIT(0)
#define REG_ROWINT_SET_INT_ACT_HIGH BIT(1)
#define REG_BRIGHTNESS 0xE0
/* Defines */
#define DRIVER_NAME "ht16k33"
#define MIN_BRIGHTNESS 0x1
#define MAX_BRIGHTNESS 0x10
#define HT16K33_MATRIX_LED_MAX_COLS 8
#define HT16K33_MATRIX_LED_MAX_ROWS 16
#define HT16K33_MATRIX_KEYPAD_MAX_COLS 3
#define HT16K33_MATRIX_KEYPAD_MAX_ROWS 12
#define BYTES_PER_ROW (HT16K33_MATRIX_LED_MAX_ROWS / 8)
#define HT16K33_FB_SIZE (HT16K33_MATRIX_LED_MAX_COLS * BYTES_PER_ROW)
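/* i.e. 8 columns * 2 bytes per column = 16 bytes of display RAM */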
enum display_type {
DISP_MATRIX = 0,
DISP_QUAD_7SEG,
DISP_QUAD_14SEG,
};
struct ht16k33_keypad {
struct i2c_client *client;
struct input_dev *dev;
uint32_t cols;
uint32_t rows;
uint32_t row_shift;
uint32_t debounce_ms;
uint16_t last_key_state[HT16K33_MATRIX_KEYPAD_MAX_COLS];
wait_queue_head_t wait;
bool stopped;
};
struct ht16k33_fbdev {
struct fb_info *info;
uint32_t refresh_rate;
uint8_t *buffer;
uint8_t *cache;
};
struct ht16k33_priv {
struct i2c_client *client;
struct delayed_work work;
struct led_classdev led;
struct ht16k33_keypad keypad;
union {
struct ht16k33_fbdev fbdev;
struct linedisp linedisp;
};
enum display_type type;
uint8_t blink;
};
#define ht16k33_work_to_priv(p) \
container_of(p, struct ht16k33_priv, work.work)
#define ht16k33_led_to_priv(p) \
container_of(p, struct ht16k33_priv, led)
#define ht16k33_linedisp_to_priv(p) \
container_of(p, struct ht16k33_priv, linedisp)
static const struct fb_fix_screeninfo ht16k33_fb_fix = {
.id = DRIVER_NAME,
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_MONO10,
.xpanstep = 0,
.ypanstep = 0,
.ywrapstep = 0,
.line_length = HT16K33_MATRIX_LED_MAX_ROWS,
.accel = FB_ACCEL_NONE,
};
static const struct fb_var_screeninfo ht16k33_fb_var = {
.xres = HT16K33_MATRIX_LED_MAX_ROWS,
.yres = HT16K33_MATRIX_LED_MAX_COLS,
.xres_virtual = HT16K33_MATRIX_LED_MAX_ROWS,
.yres_virtual = HT16K33_MATRIX_LED_MAX_COLS,
.bits_per_pixel = 1,
.red = { 0, 1, 0 },
.green = { 0, 1, 0 },
.blue = { 0, 1, 0 },
.left_margin = 0,
.right_margin = 0,
.upper_margin = 0,
.lower_margin = 0,
.vmode = FB_VMODE_NONINTERLACED,
};
static int ht16k33_display_on(struct ht16k33_priv *priv)
{
uint8_t data = REG_DISPLAY_SETUP | REG_DISPLAY_SETUP_ON | priv->blink;
return i2c_smbus_write_byte(priv->client, data);
}
static int ht16k33_display_off(struct ht16k33_priv *priv)
{
return i2c_smbus_write_byte(priv->client, REG_DISPLAY_SETUP);
}
static int ht16k33_brightness_set(struct ht16k33_priv *priv,
unsigned int brightness)
{
int err;
if (brightness == 0) {
priv->blink = REG_DISPLAY_SETUP_BLINK_OFF;
return ht16k33_display_off(priv);
}
err = ht16k33_display_on(priv);
if (err)
return err;
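/* Map brightness levels 1..MAX_BRIGHTNESS onto the chip's dimming steps 0..15 */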
return i2c_smbus_write_byte(priv->client,
REG_BRIGHTNESS | (brightness - 1));
}
static int ht16k33_brightness_set_blocking(struct led_classdev *led_cdev,
enum led_brightness brightness)
{
struct ht16k33_priv *priv = ht16k33_led_to_priv(led_cdev);
return ht16k33_brightness_set(priv, brightness);
}
static int ht16k33_blink_set(struct led_classdev *led_cdev,
unsigned long *delay_on, unsigned long *delay_off)
{
struct ht16k33_priv *priv = ht16k33_led_to_priv(led_cdev);
unsigned int delay;
uint8_t blink;
int err;
if (!*delay_on && !*delay_off) {
blink = REG_DISPLAY_SETUP_BLINK_1HZ;
delay = 1000;
} else if (*delay_on <= 750) {
blink = REG_DISPLAY_SETUP_BLINK_2HZ;
delay = 500;
} else if (*delay_on <= 1500) {
blink = REG_DISPLAY_SETUP_BLINK_1HZ;
delay = 1000;
} else {
blink = REG_DISPLAY_SETUP_BLINK_0HZ5;
delay = 2000;
}
err = i2c_smbus_write_byte(priv->client,
REG_DISPLAY_SETUP | REG_DISPLAY_SETUP_ON |
blink);
if (err)
return err;
priv->blink = blink;
*delay_on = *delay_off = delay;
return 0;
}
static void ht16k33_fb_queue(struct ht16k33_priv *priv)
{
struct ht16k33_fbdev *fbdev = &priv->fbdev;
schedule_delayed_work(&priv->work, HZ / fbdev->refresh_rate);
}
/*
 * Compare the cached frame with the current buffer and write only the
 * changed byte range to the HT16K33 display RAM.
 */
static void ht16k33_fb_update(struct work_struct *work)
{
struct ht16k33_priv *priv = ht16k33_work_to_priv(work);
struct ht16k33_fbdev *fbdev = &priv->fbdev;
uint8_t *p1, *p2;
int len, pos = 0, first = -1;
p1 = fbdev->cache;
p2 = fbdev->buffer;
/* Search for the first byte with changes */
while (pos < HT16K33_FB_SIZE && first < 0) {
if (*(p1++) - *(p2++))
first = pos;
pos++;
}
/* No changes found */
if (first < 0)
goto requeue;
len = HT16K33_FB_SIZE - first;
p1 = fbdev->cache + HT16K33_FB_SIZE - 1;
p2 = fbdev->buffer + HT16K33_FB_SIZE - 1;
/* Determine i2c transfer length */
while (len > 1) {
if (*(p1--) - *(p2--))
break;
len--;
}
p1 = fbdev->cache + first;
p2 = fbdev->buffer + first;
if (!i2c_smbus_write_i2c_block_data(priv->client, first, len, p2))
memcpy(p1, p2, len);
requeue:
ht16k33_fb_queue(priv);
}
static int ht16k33_initialize(struct ht16k33_priv *priv)
{
uint8_t data[HT16K33_FB_SIZE];
uint8_t byte;
int err;
/* Clear RAM (8 * 16 bits) */
memset(data, 0, sizeof(data));
err = i2c_smbus_write_block_data(priv->client, 0, sizeof(data), data);
if (err)
return err;
/* Turn on internal oscillator */
byte = REG_SYSTEM_SETUP_OSC_ON | REG_SYSTEM_SETUP;
err = i2c_smbus_write_byte(priv->client, byte);
if (err)
return err;
/* Configure INT pin */
byte = REG_ROWINT_SET | REG_ROWINT_SET_INT_ACT_HIGH;
if (priv->client->irq > 0)
byte |= REG_ROWINT_SET_INT_EN;
return i2c_smbus_write_byte(priv->client, byte);
}
static int ht16k33_bl_update_status(struct backlight_device *bl)
{
const int brightness = backlight_get_brightness(bl);
struct ht16k33_priv *priv = bl_get_data(bl);
return ht16k33_brightness_set(priv, brightness);
}
static const struct backlight_ops ht16k33_bl_ops = {
.update_status = ht16k33_bl_update_status,
};
/*
* Blank events will be passed to the actual device handling the backlight when
* we return zero here.
*/
static int ht16k33_blank(int blank, struct fb_info *info)
{
return 0;
}
static int ht16k33_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
struct ht16k33_priv *priv = info->par;
struct page *pages = virt_to_page(priv->fbdev.buffer);
vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
return vm_map_pages_zero(vma, &pages, 1);
}
static const struct fb_ops ht16k33_fb_ops = {
.owner = THIS_MODULE,
__FB_DEFAULT_SYSMEM_OPS_RDWR,
.fb_blank = ht16k33_blank,
__FB_DEFAULT_SYSMEM_OPS_DRAW,
.fb_mmap = ht16k33_mmap,
};
/*
 * This reads the key state from the keypad and reports changes to the
 * input subsystem.
 * Returns true if a key is pressed.
 */
static bool ht16k33_keypad_scan(struct ht16k33_keypad *keypad)
{
const unsigned short *keycodes = keypad->dev->keycode;
u16 new_state[HT16K33_MATRIX_KEYPAD_MAX_COLS];
__le16 data[HT16K33_MATRIX_KEYPAD_MAX_COLS];
unsigned long bits_changed;
int row, col, code;
int rc;
bool pressed = false;
rc = i2c_smbus_read_i2c_block_data(keypad->client, 0x40,
sizeof(data), (u8 *)data);
if (rc != sizeof(data)) {
dev_err(&keypad->client->dev,
"Failed to read key data, rc=%d\n", rc);
return false;
}
for (col = 0; col < keypad->cols; col++) {
new_state[col] = le16_to_cpu(data[col]);
if (new_state[col])
pressed = true;
bits_changed = keypad->last_key_state[col] ^ new_state[col];
for_each_set_bit(row, &bits_changed, BITS_PER_LONG) {
code = MATRIX_SCAN_CODE(row, col, keypad->row_shift);
input_event(keypad->dev, EV_MSC, MSC_SCAN, code);
input_report_key(keypad->dev, keycodes[code],
new_state[col] & BIT(row));
}
}
input_sync(keypad->dev);
memcpy(keypad->last_key_state, new_state, sizeof(u16) * keypad->cols);
return pressed;
}
static irqreturn_t ht16k33_keypad_irq_thread(int irq, void *dev)
{
struct ht16k33_keypad *keypad = dev;
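/* Re-scan at the debounce interval until no keys remain pressed */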
do {
wait_event_timeout(keypad->wait, keypad->stopped,
msecs_to_jiffies(keypad->debounce_ms));
if (keypad->stopped)
break;
} while (ht16k33_keypad_scan(keypad));
return IRQ_HANDLED;
}
static int ht16k33_keypad_start(struct input_dev *dev)
{
struct ht16k33_keypad *keypad = input_get_drvdata(dev);
keypad->stopped = false;
mb();
enable_irq(keypad->client->irq);
return 0;
}
static void ht16k33_keypad_stop(struct input_dev *dev)
{
struct ht16k33_keypad *keypad = input_get_drvdata(dev);
keypad->stopped = true;
mb();
wake_up(&keypad->wait);
disable_irq(keypad->client->irq);
}
static void ht16k33_seg7_update(struct work_struct *work)
{
struct ht16k33_priv *priv = ht16k33_work_to_priv(work);
struct linedisp_map *map = priv->linedisp.map;
char *s = priv->linedisp.buf;
uint8_t buf[9];
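/*
 * Layout note (per the Adafruit quad 7-segment modules this driver
 * supports): the four digits live at display RAM addresses 0, 2, 6 and 8;
 * address 4 drives the colon and is left cleared here.
 */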
buf[0] = map_to_seg7(&map->map.seg7, *s++);
buf[1] = 0;
buf[2] = map_to_seg7(&map->map.seg7, *s++);
buf[3] = 0;
buf[4] = 0;
buf[5] = 0;
buf[6] = map_to_seg7(&map->map.seg7, *s++);
buf[7] = 0;
buf[8] = map_to_seg7(&map->map.seg7, *s++);
i2c_smbus_write_i2c_block_data(priv->client, 0, ARRAY_SIZE(buf), buf);
}
static void ht16k33_seg14_update(struct work_struct *work)
{
struct ht16k33_priv *priv = ht16k33_work_to_priv(work);
struct linedisp_map *map = priv->linedisp.map;
char *s = priv->linedisp.buf;
uint8_t buf[8];
put_unaligned_le16(map_to_seg14(&map->map.seg14, *s++), buf + 0);
put_unaligned_le16(map_to_seg14(&map->map.seg14, *s++), buf + 2);
put_unaligned_le16(map_to_seg14(&map->map.seg14, *s++), buf + 4);
put_unaligned_le16(map_to_seg14(&map->map.seg14, *s++), buf + 6);
i2c_smbus_write_i2c_block_data(priv->client, 0, ARRAY_SIZE(buf), buf);
}
static int ht16k33_linedisp_get_map_type(struct linedisp *linedisp)
{
struct ht16k33_priv *priv = ht16k33_linedisp_to_priv(linedisp);
switch (priv->type) {
case DISP_QUAD_7SEG:
INIT_DELAYED_WORK(&priv->work, ht16k33_seg7_update);
return LINEDISP_MAP_SEG7;
case DISP_QUAD_14SEG:
INIT_DELAYED_WORK(&priv->work, ht16k33_seg14_update);
return LINEDISP_MAP_SEG14;
default:
return -EINVAL;
}
}
static void ht16k33_linedisp_update(struct linedisp *linedisp)
{
struct ht16k33_priv *priv = ht16k33_linedisp_to_priv(linedisp);
schedule_delayed_work(&priv->work, 0);
}
static const struct linedisp_ops ht16k33_linedisp_ops = {
.get_map_type = ht16k33_linedisp_get_map_type,
.update = ht16k33_linedisp_update,
};
static int ht16k33_led_probe(struct device *dev, struct led_classdev *led,
unsigned int brightness)
{
struct led_init_data init_data = {};
int err;
/* The LED is optional */
init_data.fwnode = device_get_named_child_node(dev, "led");
if (!init_data.fwnode)
return 0;
init_data.devicename = "auxdisplay";
init_data.devname_mandatory = true;
led->brightness_set_blocking = ht16k33_brightness_set_blocking;
led->blink_set = ht16k33_blink_set;
led->flags = LED_CORE_SUSPENDRESUME;
led->brightness = brightness;
led->max_brightness = MAX_BRIGHTNESS;
err = devm_led_classdev_register_ext(dev, led, &init_data);
fwnode_handle_put(init_data.fwnode);
if (err)
dev_err(dev, "Failed to register LED\n");
return err;
}
static int ht16k33_keypad_probe(struct i2c_client *client,
struct ht16k33_keypad *keypad)
{
struct device *dev = &client->dev;
u32 rows = HT16K33_MATRIX_KEYPAD_MAX_ROWS;
u32 cols = HT16K33_MATRIX_KEYPAD_MAX_COLS;
int err;
keypad->client = client;
init_waitqueue_head(&keypad->wait);
keypad->dev = devm_input_allocate_device(dev);
if (!keypad->dev)
return -ENOMEM;
input_set_drvdata(keypad->dev, keypad);
keypad->dev->name = DRIVER_NAME"-keypad";
keypad->dev->id.bustype = BUS_I2C;
keypad->dev->open = ht16k33_keypad_start;
keypad->dev->close = ht16k33_keypad_stop;
if (!device_property_read_bool(dev, "linux,no-autorepeat"))
__set_bit(EV_REP, keypad->dev->evbit);
err = device_property_read_u32(dev, "debounce-delay-ms",
&keypad->debounce_ms);
if (err) {
dev_err(dev, "key debounce delay not specified\n");
return err;
}
err = matrix_keypad_parse_properties(dev, &rows, &cols);
if (err)
return err;
if (rows > HT16K33_MATRIX_KEYPAD_MAX_ROWS ||
cols > HT16K33_MATRIX_KEYPAD_MAX_COLS) {
dev_err(dev, "%u rows or %u cols out of range in DT\n", rows,
cols);
return -ERANGE;
}
keypad->rows = rows;
keypad->cols = cols;
keypad->row_shift = get_count_order(cols);
err = matrix_keypad_build_keymap(NULL, NULL, rows, cols, NULL,
keypad->dev);
if (err) {
dev_err(dev, "failed to build keymap\n");
return err;
}
err = devm_request_threaded_irq(dev, client->irq, NULL,
ht16k33_keypad_irq_thread,
IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
DRIVER_NAME, keypad);
if (err) {
dev_err(dev, "irq request failed %d, error %d\n", client->irq,
err);
return err;
}
ht16k33_keypad_stop(keypad->dev);
return input_register_device(keypad->dev);
}
static int ht16k33_fbdev_probe(struct device *dev, struct ht16k33_priv *priv,
uint32_t brightness)
{
struct ht16k33_fbdev *fbdev = &priv->fbdev;
struct backlight_device *bl = NULL;
int err;
if (priv->led.dev) {
err = ht16k33_brightness_set(priv, brightness);
if (err)
return err;
} else {
/* backwards compatibility with DT lacking an led subnode */
struct backlight_properties bl_props;
memset(&bl_props, 0, sizeof(struct backlight_properties));
bl_props.type = BACKLIGHT_RAW;
bl_props.max_brightness = MAX_BRIGHTNESS;
bl = devm_backlight_device_register(dev, DRIVER_NAME"-bl", dev,
priv, &ht16k33_bl_ops,
&bl_props);
if (IS_ERR(bl)) {
dev_err(dev, "failed to register backlight\n");
return PTR_ERR(bl);
}
bl->props.brightness = brightness;
ht16k33_bl_update_status(bl);
}
/* Framebuffer (2 bytes per column) */
BUILD_BUG_ON(PAGE_SIZE < HT16K33_FB_SIZE);
fbdev->buffer = (unsigned char *) get_zeroed_page(GFP_KERNEL);
if (!fbdev->buffer)
return -ENOMEM;
fbdev->cache = devm_kmalloc(dev, HT16K33_FB_SIZE, GFP_KERNEL);
if (!fbdev->cache) {
err = -ENOMEM;
goto err_fbdev_buffer;
}
fbdev->info = framebuffer_alloc(0, dev);
if (!fbdev->info) {
err = -ENOMEM;
goto err_fbdev_buffer;
}
err = device_property_read_u32(dev, "refresh-rate-hz",
&fbdev->refresh_rate);
if (err) {
dev_err(dev, "refresh rate not specified\n");
goto err_fbdev_info;
}
fb_bl_default_curve(fbdev->info, 0, MIN_BRIGHTNESS, MAX_BRIGHTNESS);
INIT_DELAYED_WORK(&priv->work, ht16k33_fb_update);
fbdev->info->fbops = &ht16k33_fb_ops;
fbdev->info->flags |= FBINFO_VIRTFB;
fbdev->info->screen_buffer = fbdev->buffer;
fbdev->info->screen_size = HT16K33_FB_SIZE;
fbdev->info->fix = ht16k33_fb_fix;
fbdev->info->var = ht16k33_fb_var;
fbdev->info->bl_dev = bl;
fbdev->info->pseudo_palette = NULL;
fbdev->info->par = priv;
err = register_framebuffer(fbdev->info);
if (err)
goto err_fbdev_info;
ht16k33_fb_queue(priv);
return 0;
err_fbdev_info:
framebuffer_release(fbdev->info);
err_fbdev_buffer:
free_page((unsigned long) fbdev->buffer);
return err;
}
static int ht16k33_seg_probe(struct device *dev, struct ht16k33_priv *priv,
uint32_t brightness)
{
struct linedisp *linedisp = &priv->linedisp;
int err;
err = ht16k33_brightness_set(priv, brightness);
if (err)
return err;
return linedisp_register(linedisp, dev, 4, &ht16k33_linedisp_ops);
}
static int ht16k33_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct ht16k33_priv *priv;
uint32_t dft_brightness;
int err;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
dev_err(dev, "i2c_check_functionality error\n");
return -EIO;
}
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->client = client;
priv->type = (uintptr_t)i2c_get_match_data(client);
i2c_set_clientdata(client, priv);
err = ht16k33_initialize(priv);
if (err)
return err;
err = device_property_read_u32(dev, "default-brightness-level",
&dft_brightness);
if (err) {
dft_brightness = MAX_BRIGHTNESS;
} else if (dft_brightness > MAX_BRIGHTNESS) {
dev_warn(dev,
"invalid default brightness level: %u, using %u\n",
dft_brightness, MAX_BRIGHTNESS);
dft_brightness = MAX_BRIGHTNESS;
}
/* LED */
err = ht16k33_led_probe(dev, &priv->led, dft_brightness);
if (err)
return err;
/* Keypad */
if (client->irq > 0) {
err = ht16k33_keypad_probe(client, &priv->keypad);
if (err)
return err;
}
switch (priv->type) {
case DISP_MATRIX:
/* Frame Buffer Display */
err = ht16k33_fbdev_probe(dev, priv, dft_brightness);
break;
case DISP_QUAD_7SEG:
case DISP_QUAD_14SEG:
/* Segment Display */
err = ht16k33_seg_probe(dev, priv, dft_brightness);
break;
default:
return -EINVAL;
}
return err;
}
static void ht16k33_remove(struct i2c_client *client)
{
struct ht16k33_priv *priv = i2c_get_clientdata(client);
struct ht16k33_fbdev *fbdev = &priv->fbdev;
cancel_delayed_work_sync(&priv->work);
switch (priv->type) {
case DISP_MATRIX:
unregister_framebuffer(fbdev->info);
framebuffer_release(fbdev->info);
free_page((unsigned long)fbdev->buffer);
break;
case DISP_QUAD_7SEG:
case DISP_QUAD_14SEG:
linedisp_unregister(&priv->linedisp);
break;
default:
break;
}
}
static const struct i2c_device_id ht16k33_i2c_match[] = {
{ "3108", DISP_QUAD_7SEG },
{ "3130", DISP_QUAD_14SEG },
{ "ht16k33", DISP_MATRIX },
{ }
};
MODULE_DEVICE_TABLE(i2c, ht16k33_i2c_match);
static const struct of_device_id ht16k33_of_match[] = {
{
/* 0.56" 4-Digit 7-Segment FeatherWing Display (Red) */
.compatible = "adafruit,3108", .data = (void *)DISP_QUAD_7SEG,
}, {
/* 0.54" Quad Alphanumeric FeatherWing Display (Red) */
.compatible = "adafruit,3130", .data = (void *)DISP_QUAD_14SEG,
}, {
/* Generic, assumed Dot-Matrix Display */
.compatible = "holtek,ht16k33", .data = (void *)DISP_MATRIX,
},
{ }
};
MODULE_DEVICE_TABLE(of, ht16k33_of_match);
static struct i2c_driver ht16k33_driver = {
.probe = ht16k33_probe,
.remove = ht16k33_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = ht16k33_of_match,
},
.id_table = ht16k33_i2c_match,
};
module_i2c_driver(ht16k33_driver);
MODULE_DESCRIPTION("Holtek HT16K33 driver");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS("LINEDISP");
MODULE_AUTHOR("Robin van der Gracht <[email protected]>");
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Performance counter support for POWER8 processors.
*
* Copyright 2014 Sukadev Bhattiprolu, IBM Corporation.
*/
/*
* Power8 event codes.
*/
EVENT(PM_CYC, 0x0001e)
EVENT(PM_GCT_NOSLOT_CYC, 0x100f8)
EVENT(PM_CMPLU_STALL, 0x4000a)
EVENT(PM_INST_CMPL, 0x00002)
EVENT(PM_BRU_FIN, 0x10068)
EVENT(PM_BR_MPRED_CMPL, 0x400f6)
/* All L1 D cache load references counted at finish, gated by reject */
EVENT(PM_LD_REF_L1, 0x100ee)
/* Load Missed L1 */
EVENT(PM_LD_MISS_L1, 0x3e054)
/* Store Missed L1 */
EVENT(PM_ST_MISS_L1, 0x300f0)
/* L1 cache data prefetches */
EVENT(PM_L1_PREF, 0x0d8b8)
/* Instruction fetches from L1 */
EVENT(PM_INST_FROM_L1, 0x04080)
/* Demand iCache Miss */
EVENT(PM_L1_ICACHE_MISS, 0x200fd)
/* Instruction Demand sectors written into IL1 */
EVENT(PM_L1_DEMAND_WRITE, 0x0408c)
/* Instruction prefetch written into IL1 */
EVENT(PM_IC_PREF_WRITE, 0x0408e)
/* The data cache was reloaded from local core's L3 due to a demand load */
EVENT(PM_DATA_FROM_L3, 0x4c042)
/* Demand LD - L3 Miss (not L2 hit and not L3 hit) */
EVENT(PM_DATA_FROM_L3MISS, 0x300fe)
/* All successful D-side store dispatches for this thread */
EVENT(PM_L2_ST, 0x17080)
/* All successful D-side store dispatches for this thread that were L2 Miss */
EVENT(PM_L2_ST_MISS, 0x17082)
/* Total HW L3 prefetches (load + store) */
EVENT(PM_L3_PREF_ALL, 0x4e052)
/* Data PTEG reload */
EVENT(PM_DTLB_MISS, 0x300fc)
/* ITLB Reloaded */
EVENT(PM_ITLB_MISS, 0x400fc)
/* Run_Instructions */
EVENT(PM_RUN_INST_CMPL, 0x500fa)
/* Alternate event code for PM_RUN_INST_CMPL */
EVENT(PM_RUN_INST_CMPL_ALT, 0x400fa)
/* Run_cycles */
EVENT(PM_RUN_CYC, 0x600f4)
/* Alternate event code for Run_cycles */
EVENT(PM_RUN_CYC_ALT, 0x200f4)
/* Marked store completed */
EVENT(PM_MRK_ST_CMPL, 0x10134)
/* Alternate event code for Marked store completed */
EVENT(PM_MRK_ST_CMPL_ALT, 0x301e2)
/* Marked two path branch */
EVENT(PM_BR_MRK_2PATH, 0x10138)
/* Alternate event code for PM_BR_MRK_2PATH */
EVENT(PM_BR_MRK_2PATH_ALT, 0x40138)
/* L3 castouts in Mepf state */
EVENT(PM_L3_CO_MEPF, 0x18082)
/* Alternate event code for PM_L3_CO_MEPF */
EVENT(PM_L3_CO_MEPF_ALT, 0x3e05e)
/* Data cache was reloaded from a location other than L2 due to a marked load */
EVENT(PM_MRK_DATA_FROM_L2MISS, 0x1d14e)
/* Alternate event code for PM_MRK_DATA_FROM_L2MISS */
EVENT(PM_MRK_DATA_FROM_L2MISS_ALT, 0x401e8)
/* Alternate event code for PM_CMPLU_STALL */
EVENT(PM_CMPLU_STALL_ALT, 0x1e054)
/* Two path branch */
EVENT(PM_BR_2PATH, 0x20036)
/* Alternate event code for PM_BR_2PATH */
EVENT(PM_BR_2PATH_ALT, 0x40036)
/* # PPC Dispatched */
EVENT(PM_INST_DISP, 0x200f2)
/* Alternate event code for PM_INST_DISP */
EVENT(PM_INST_DISP_ALT, 0x300f2)
/* Marked filter Match */
EVENT(PM_MRK_FILT_MATCH, 0x2013c)
/* Alternate event code for PM_MRK_FILT_MATCH */
EVENT(PM_MRK_FILT_MATCH_ALT, 0x3012e)
/* Alternate event code for PM_LD_MISS_L1 */
EVENT(PM_LD_MISS_L1_ALT, 0x400f0)
/*
* Memory Access Event -- mem_access
* Primary PMU event used here is PM_MRK_INST_CMPL, along with
* Random Load/Store Facility Sampling (RIS) in Random sampling mode (MMCRA[SM]).
*/
EVENT(MEM_ACCESS, 0x10401e0)
|
/*
* Driver header file for the ST Microelectronics SPEAr pinmux
*
* Copyright (C) 2012 ST Microelectronics
* Viresh Kumar <[email protected]>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#ifndef __PINMUX_SPEAR_H__
#define __PINMUX_SPEAR_H__
#include <linux/gpio/driver.h>
#include <linux/io.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/regmap.h>
#include <linux/types.h>
struct platform_device;
struct device;
struct spear_pmx;
/**
* struct spear_pmx_mode - SPEAr pmx mode
* @name: name of pmx mode
* @mode: mode id
* @reg: register for configuring this mode
* @mask: mask of this mode in reg
* @val: val to be configured at reg after doing (val & mask)
*/
struct spear_pmx_mode {
const char *const name;
u16 mode;
u16 reg;
u16 mask;
u32 val;
};
/**
* struct spear_muxreg - SPEAr mux reg configuration
* @reg: register offset
* @mask: mask bits
* @val: val to be written on mask bits
*/
struct spear_muxreg {
u16 reg;
u32 mask;
u32 val;
};
struct spear_gpio_pingroup {
const unsigned *pins;
unsigned npins;
struct spear_muxreg *muxregs;
u8 nmuxregs;
};
/* ste: set to enable */
#define DEFINE_MUXREG(__pins, __muxreg, __mask, __ste) \
static struct spear_muxreg __pins##_muxregs[] = { \
{ \
.reg = __muxreg, \
.mask = __mask, \
.val = __ste ? __mask : 0, \
}, \
}
#define DEFINE_2_MUXREG(__pins, __muxreg1, __muxreg2, __mask, __ste1, __ste2) \
static struct spear_muxreg __pins##_muxregs[] = { \
{ \
.reg = __muxreg1, \
.mask = __mask, \
.val = __ste1 ? __mask : 0, \
}, { \
.reg = __muxreg2, \
.mask = __mask, \
.val = __ste2 ? __mask : 0, \
}, \
}
#define GPIO_PINGROUP(__pins) \
{ \
.pins = __pins, \
.npins = ARRAY_SIZE(__pins), \
.muxregs = __pins##_muxregs, \
.nmuxregs = ARRAY_SIZE(__pins##_muxregs), \
}
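/*
 * Illustrative sketch (not part of the original header), using hypothetical
 * pin numbers, register offset and mask: GPIO_PINGROUP(__pins) references
 * the __pins##_muxregs array that DEFINE_MUXREG() creates, so the two are
 * paired like this:
 *
 *	static const unsigned demo_pins[] = { 10, 11 };
 *	DEFINE_MUXREG(demo_pins, 0x28, 0x3, true);
 *
 *	static struct spear_gpio_pingroup demo_gpio_pingroups[] = {
 *		GPIO_PINGROUP(demo_pins),
 *	};
 */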
/**
* struct spear_modemux - SPEAr mode mux configuration
* @modes: mode ids supported by this group of muxregs
* @nmuxregs: number of muxreg configurations to be done for modes
* @muxregs: array of muxreg configurations to be done for modes
*/
struct spear_modemux {
u16 modes;
u8 nmuxregs;
struct spear_muxreg *muxregs;
};
/**
* struct spear_pingroup - SPEAr pin group configurations
* @name: name of pin group
* @pins: array containing pin numbers
* @npins: size of pins array
* @modemuxs: array of modemux configurations for this pin group
* @nmodemuxs: size of array modemuxs
*
* A representation of a group of pins in the SPEAr pin controller. Each group
* allows some parameter or parameters to be configured.
*/
struct spear_pingroup {
const char *name;
const unsigned *pins;
unsigned npins;
struct spear_modemux *modemuxs;
unsigned nmodemuxs;
};
/**
* struct spear_function - SPEAr pinctrl mux function
* @name: The name of the function, exported to pinctrl core.
* @groups: An array of pin groups that may select this function.
* @ngroups: The number of entries in @groups.
*/
struct spear_function {
const char *name;
const char *const *groups;
unsigned ngroups;
};
/**
* struct spear_pinctrl_machdata - SPEAr pin controller machine driver
* configuration
* @pins: An array describing all pins the pin controller affects.
* All pins which are also GPIOs must be listed first within the array,
* and be numbered identically to the GPIO controller's numbering.
* @npins: The number of entries in @pins.
* @functions: An array describing all mux functions the SoC supports.
* @nfunctions: The number of entries in @functions.
* @groups: An array describing all pin groups the SoC supports.
* @ngroups: The number of entries in @groups.
* @gpio_pingroups: gpio pingroups
* @ngpio_pingroups: gpio pingroups count
*
* @modes_supported: Does SoC support modes
* @mode: mode configured from probe
* @pmx_modes: array of modes supported by SoC
* @npmx_modes: number of entries in pmx_modes.
*/
struct spear_pinctrl_machdata {
const struct pinctrl_pin_desc *pins;
unsigned npins;
struct spear_function **functions;
unsigned nfunctions;
struct spear_pingroup **groups;
unsigned ngroups;
struct spear_gpio_pingroup *gpio_pingroups;
void (*gpio_request_endisable)(struct spear_pmx *pmx, int offset,
bool enable);
unsigned ngpio_pingroups;
bool modes_supported;
u16 mode;
struct spear_pmx_mode **pmx_modes;
unsigned npmx_modes;
};
/**
* struct spear_pmx - SPEAr pinctrl mux
* @dev: pointer to struct dev of platform_device registered
* @pctl: pointer to struct pinctrl_dev
* @machdata: pointer to SoC or machine specific structure
* @regmap: regmap of pinmux controller
*/
struct spear_pmx {
struct device *dev;
struct pinctrl_dev *pctl;
struct spear_pinctrl_machdata *machdata;
struct regmap *regmap;
};
/* exported routines */
static inline u32 pmx_readl(struct spear_pmx *pmx, u32 reg)
{
u32 val;
regmap_read(pmx->regmap, reg, &val);
return val;
}
static inline void pmx_writel(struct spear_pmx *pmx, u32 val, u32 reg)
{
regmap_write(pmx->regmap, reg, val);
}
void pmx_init_addr(struct spear_pinctrl_machdata *machdata, u16 reg);
void pmx_init_gpio_pingroup_addr(struct spear_gpio_pingroup *gpio_pingroup,
unsigned count, u16 reg);
int spear_pinctrl_probe(struct platform_device *pdev,
struct spear_pinctrl_machdata *machdata);
#define SPEAR_PIN_0_TO_101 \
PINCTRL_PIN(0, "PLGPIO0"), \
PINCTRL_PIN(1, "PLGPIO1"), \
PINCTRL_PIN(2, "PLGPIO2"), \
PINCTRL_PIN(3, "PLGPIO3"), \
PINCTRL_PIN(4, "PLGPIO4"), \
PINCTRL_PIN(5, "PLGPIO5"), \
PINCTRL_PIN(6, "PLGPIO6"), \
PINCTRL_PIN(7, "PLGPIO7"), \
PINCTRL_PIN(8, "PLGPIO8"), \
PINCTRL_PIN(9, "PLGPIO9"), \
PINCTRL_PIN(10, "PLGPIO10"), \
PINCTRL_PIN(11, "PLGPIO11"), \
PINCTRL_PIN(12, "PLGPIO12"), \
PINCTRL_PIN(13, "PLGPIO13"), \
PINCTRL_PIN(14, "PLGPIO14"), \
PINCTRL_PIN(15, "PLGPIO15"), \
PINCTRL_PIN(16, "PLGPIO16"), \
PINCTRL_PIN(17, "PLGPIO17"), \
PINCTRL_PIN(18, "PLGPIO18"), \
PINCTRL_PIN(19, "PLGPIO19"), \
PINCTRL_PIN(20, "PLGPIO20"), \
PINCTRL_PIN(21, "PLGPIO21"), \
PINCTRL_PIN(22, "PLGPIO22"), \
PINCTRL_PIN(23, "PLGPIO23"), \
PINCTRL_PIN(24, "PLGPIO24"), \
PINCTRL_PIN(25, "PLGPIO25"), \
PINCTRL_PIN(26, "PLGPIO26"), \
PINCTRL_PIN(27, "PLGPIO27"), \
PINCTRL_PIN(28, "PLGPIO28"), \
PINCTRL_PIN(29, "PLGPIO29"), \
PINCTRL_PIN(30, "PLGPIO30"), \
PINCTRL_PIN(31, "PLGPIO31"), \
PINCTRL_PIN(32, "PLGPIO32"), \
PINCTRL_PIN(33, "PLGPIO33"), \
PINCTRL_PIN(34, "PLGPIO34"), \
PINCTRL_PIN(35, "PLGPIO35"), \
PINCTRL_PIN(36, "PLGPIO36"), \
PINCTRL_PIN(37, "PLGPIO37"), \
PINCTRL_PIN(38, "PLGPIO38"), \
PINCTRL_PIN(39, "PLGPIO39"), \
PINCTRL_PIN(40, "PLGPIO40"), \
PINCTRL_PIN(41, "PLGPIO41"), \
PINCTRL_PIN(42, "PLGPIO42"), \
PINCTRL_PIN(43, "PLGPIO43"), \
PINCTRL_PIN(44, "PLGPIO44"), \
PINCTRL_PIN(45, "PLGPIO45"), \
PINCTRL_PIN(46, "PLGPIO46"), \
PINCTRL_PIN(47, "PLGPIO47"), \
PINCTRL_PIN(48, "PLGPIO48"), \
PINCTRL_PIN(49, "PLGPIO49"), \
PINCTRL_PIN(50, "PLGPIO50"), \
PINCTRL_PIN(51, "PLGPIO51"), \
PINCTRL_PIN(52, "PLGPIO52"), \
PINCTRL_PIN(53, "PLGPIO53"), \
PINCTRL_PIN(54, "PLGPIO54"), \
PINCTRL_PIN(55, "PLGPIO55"), \
PINCTRL_PIN(56, "PLGPIO56"), \
PINCTRL_PIN(57, "PLGPIO57"), \
PINCTRL_PIN(58, "PLGPIO58"), \
PINCTRL_PIN(59, "PLGPIO59"), \
PINCTRL_PIN(60, "PLGPIO60"), \
PINCTRL_PIN(61, "PLGPIO61"), \
PINCTRL_PIN(62, "PLGPIO62"), \
PINCTRL_PIN(63, "PLGPIO63"), \
PINCTRL_PIN(64, "PLGPIO64"), \
PINCTRL_PIN(65, "PLGPIO65"), \
PINCTRL_PIN(66, "PLGPIO66"), \
PINCTRL_PIN(67, "PLGPIO67"), \
PINCTRL_PIN(68, "PLGPIO68"), \
PINCTRL_PIN(69, "PLGPIO69"), \
PINCTRL_PIN(70, "PLGPIO70"), \
PINCTRL_PIN(71, "PLGPIO71"), \
PINCTRL_PIN(72, "PLGPIO72"), \
PINCTRL_PIN(73, "PLGPIO73"), \
PINCTRL_PIN(74, "PLGPIO74"), \
PINCTRL_PIN(75, "PLGPIO75"), \
PINCTRL_PIN(76, "PLGPIO76"), \
PINCTRL_PIN(77, "PLGPIO77"), \
PINCTRL_PIN(78, "PLGPIO78"), \
PINCTRL_PIN(79, "PLGPIO79"), \
PINCTRL_PIN(80, "PLGPIO80"), \
PINCTRL_PIN(81, "PLGPIO81"), \
PINCTRL_PIN(82, "PLGPIO82"), \
PINCTRL_PIN(83, "PLGPIO83"), \
PINCTRL_PIN(84, "PLGPIO84"), \
PINCTRL_PIN(85, "PLGPIO85"), \
PINCTRL_PIN(86, "PLGPIO86"), \
PINCTRL_PIN(87, "PLGPIO87"), \
PINCTRL_PIN(88, "PLGPIO88"), \
PINCTRL_PIN(89, "PLGPIO89"), \
PINCTRL_PIN(90, "PLGPIO90"), \
PINCTRL_PIN(91, "PLGPIO91"), \
PINCTRL_PIN(92, "PLGPIO92"), \
PINCTRL_PIN(93, "PLGPIO93"), \
PINCTRL_PIN(94, "PLGPIO94"), \
PINCTRL_PIN(95, "PLGPIO95"), \
PINCTRL_PIN(96, "PLGPIO96"), \
PINCTRL_PIN(97, "PLGPIO97"), \
PINCTRL_PIN(98, "PLGPIO98"), \
PINCTRL_PIN(99, "PLGPIO99"), \
PINCTRL_PIN(100, "PLGPIO100"), \
PINCTRL_PIN(101, "PLGPIO101")
#define SPEAR_PIN_102_TO_245 \
PINCTRL_PIN(102, "PLGPIO102"), \
PINCTRL_PIN(103, "PLGPIO103"), \
PINCTRL_PIN(104, "PLGPIO104"), \
PINCTRL_PIN(105, "PLGPIO105"), \
PINCTRL_PIN(106, "PLGPIO106"), \
PINCTRL_PIN(107, "PLGPIO107"), \
PINCTRL_PIN(108, "PLGPIO108"), \
PINCTRL_PIN(109, "PLGPIO109"), \
PINCTRL_PIN(110, "PLGPIO110"), \
PINCTRL_PIN(111, "PLGPIO111"), \
PINCTRL_PIN(112, "PLGPIO112"), \
PINCTRL_PIN(113, "PLGPIO113"), \
PINCTRL_PIN(114, "PLGPIO114"), \
PINCTRL_PIN(115, "PLGPIO115"), \
PINCTRL_PIN(116, "PLGPIO116"), \
PINCTRL_PIN(117, "PLGPIO117"), \
PINCTRL_PIN(118, "PLGPIO118"), \
PINCTRL_PIN(119, "PLGPIO119"), \
PINCTRL_PIN(120, "PLGPIO120"), \
PINCTRL_PIN(121, "PLGPIO121"), \
PINCTRL_PIN(122, "PLGPIO122"), \
PINCTRL_PIN(123, "PLGPIO123"), \
PINCTRL_PIN(124, "PLGPIO124"), \
PINCTRL_PIN(125, "PLGPIO125"), \
PINCTRL_PIN(126, "PLGPIO126"), \
PINCTRL_PIN(127, "PLGPIO127"), \
PINCTRL_PIN(128, "PLGPIO128"), \
PINCTRL_PIN(129, "PLGPIO129"), \
PINCTRL_PIN(130, "PLGPIO130"), \
PINCTRL_PIN(131, "PLGPIO131"), \
PINCTRL_PIN(132, "PLGPIO132"), \
PINCTRL_PIN(133, "PLGPIO133"), \
PINCTRL_PIN(134, "PLGPIO134"), \
PINCTRL_PIN(135, "PLGPIO135"), \
PINCTRL_PIN(136, "PLGPIO136"), \
PINCTRL_PIN(137, "PLGPIO137"), \
PINCTRL_PIN(138, "PLGPIO138"), \
PINCTRL_PIN(139, "PLGPIO139"), \
PINCTRL_PIN(140, "PLGPIO140"), \
PINCTRL_PIN(141, "PLGPIO141"), \
PINCTRL_PIN(142, "PLGPIO142"), \
PINCTRL_PIN(143, "PLGPIO143"), \
PINCTRL_PIN(144, "PLGPIO144"), \
PINCTRL_PIN(145, "PLGPIO145"), \
PINCTRL_PIN(146, "PLGPIO146"), \
PINCTRL_PIN(147, "PLGPIO147"), \
PINCTRL_PIN(148, "PLGPIO148"), \
PINCTRL_PIN(149, "PLGPIO149"), \
PINCTRL_PIN(150, "PLGPIO150"), \
PINCTRL_PIN(151, "PLGPIO151"), \
PINCTRL_PIN(152, "PLGPIO152"), \
PINCTRL_PIN(153, "PLGPIO153"), \
PINCTRL_PIN(154, "PLGPIO154"), \
PINCTRL_PIN(155, "PLGPIO155"), \
PINCTRL_PIN(156, "PLGPIO156"), \
PINCTRL_PIN(157, "PLGPIO157"), \
PINCTRL_PIN(158, "PLGPIO158"), \
PINCTRL_PIN(159, "PLGPIO159"), \
PINCTRL_PIN(160, "PLGPIO160"), \
PINCTRL_PIN(161, "PLGPIO161"), \
PINCTRL_PIN(162, "PLGPIO162"), \
PINCTRL_PIN(163, "PLGPIO163"), \
PINCTRL_PIN(164, "PLGPIO164"), \
PINCTRL_PIN(165, "PLGPIO165"), \
PINCTRL_PIN(166, "PLGPIO166"), \
PINCTRL_PIN(167, "PLGPIO167"), \
PINCTRL_PIN(168, "PLGPIO168"), \
PINCTRL_PIN(169, "PLGPIO169"), \
PINCTRL_PIN(170, "PLGPIO170"), \
PINCTRL_PIN(171, "PLGPIO171"), \
PINCTRL_PIN(172, "PLGPIO172"), \
PINCTRL_PIN(173, "PLGPIO173"), \
PINCTRL_PIN(174, "PLGPIO174"), \
PINCTRL_PIN(175, "PLGPIO175"), \
PINCTRL_PIN(176, "PLGPIO176"), \
PINCTRL_PIN(177, "PLGPIO177"), \
PINCTRL_PIN(178, "PLGPIO178"), \
PINCTRL_PIN(179, "PLGPIO179"), \
PINCTRL_PIN(180, "PLGPIO180"), \
PINCTRL_PIN(181, "PLGPIO181"), \
PINCTRL_PIN(182, "PLGPIO182"), \
PINCTRL_PIN(183, "PLGPIO183"), \
PINCTRL_PIN(184, "PLGPIO184"), \
PINCTRL_PIN(185, "PLGPIO185"), \
PINCTRL_PIN(186, "PLGPIO186"), \
PINCTRL_PIN(187, "PLGPIO187"), \
PINCTRL_PIN(188, "PLGPIO188"), \
PINCTRL_PIN(189, "PLGPIO189"), \
PINCTRL_PIN(190, "PLGPIO190"), \
PINCTRL_PIN(191, "PLGPIO191"), \
PINCTRL_PIN(192, "PLGPIO192"), \
PINCTRL_PIN(193, "PLGPIO193"), \
PINCTRL_PIN(194, "PLGPIO194"), \
PINCTRL_PIN(195, "PLGPIO195"), \
PINCTRL_PIN(196, "PLGPIO196"), \
PINCTRL_PIN(197, "PLGPIO197"), \
PINCTRL_PIN(198, "PLGPIO198"), \
PINCTRL_PIN(199, "PLGPIO199"), \
PINCTRL_PIN(200, "PLGPIO200"), \
PINCTRL_PIN(201, "PLGPIO201"), \
PINCTRL_PIN(202, "PLGPIO202"), \
PINCTRL_PIN(203, "PLGPIO203"), \
PINCTRL_PIN(204, "PLGPIO204"), \
PINCTRL_PIN(205, "PLGPIO205"), \
PINCTRL_PIN(206, "PLGPIO206"), \
PINCTRL_PIN(207, "PLGPIO207"), \
PINCTRL_PIN(208, "PLGPIO208"), \
PINCTRL_PIN(209, "PLGPIO209"), \
PINCTRL_PIN(210, "PLGPIO210"), \
PINCTRL_PIN(211, "PLGPIO211"), \
PINCTRL_PIN(212, "PLGPIO212"), \
PINCTRL_PIN(213, "PLGPIO213"), \
PINCTRL_PIN(214, "PLGPIO214"), \
PINCTRL_PIN(215, "PLGPIO215"), \
PINCTRL_PIN(216, "PLGPIO216"), \
PINCTRL_PIN(217, "PLGPIO217"), \
PINCTRL_PIN(218, "PLGPIO218"), \
PINCTRL_PIN(219, "PLGPIO219"), \
PINCTRL_PIN(220, "PLGPIO220"), \
PINCTRL_PIN(221, "PLGPIO221"), \
PINCTRL_PIN(222, "PLGPIO222"), \
PINCTRL_PIN(223, "PLGPIO223"), \
PINCTRL_PIN(224, "PLGPIO224"), \
PINCTRL_PIN(225, "PLGPIO225"), \
PINCTRL_PIN(226, "PLGPIO226"), \
PINCTRL_PIN(227, "PLGPIO227"), \
PINCTRL_PIN(228, "PLGPIO228"), \
PINCTRL_PIN(229, "PLGPIO229"), \
PINCTRL_PIN(230, "PLGPIO230"), \
PINCTRL_PIN(231, "PLGPIO231"), \
PINCTRL_PIN(232, "PLGPIO232"), \
PINCTRL_PIN(233, "PLGPIO233"), \
PINCTRL_PIN(234, "PLGPIO234"), \
PINCTRL_PIN(235, "PLGPIO235"), \
PINCTRL_PIN(236, "PLGPIO236"), \
PINCTRL_PIN(237, "PLGPIO237"), \
PINCTRL_PIN(238, "PLGPIO238"), \
PINCTRL_PIN(239, "PLGPIO239"), \
PINCTRL_PIN(240, "PLGPIO240"), \
PINCTRL_PIN(241, "PLGPIO241"), \
PINCTRL_PIN(242, "PLGPIO242"), \
PINCTRL_PIN(243, "PLGPIO243"), \
PINCTRL_PIN(244, "PLGPIO244"), \
PINCTRL_PIN(245, "PLGPIO245")
#endif /* __PINMUX_SPEAR_H__ */
|
/*
* Copyright 2012-16 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef DRIVERS_GPU_DRM_AMD_DC_DEV_DC_GPIO_HPD_REGS_H_
#define DRIVERS_GPU_DRM_AMD_DC_DEV_DC_GPIO_HPD_REGS_H_
#include "gpio_regs.h"
#define ONE_MORE_0 1
#define ONE_MORE_1 2
#define ONE_MORE_2 3
#define ONE_MORE_3 4
#define ONE_MORE_4 5
#define ONE_MORE_5 6
#define HPD_GPIO_REG_LIST_ENTRY(type, cd, id) \
.type ## _reg = REG(DC_GPIO_HPD_## type),\
.type ## _mask = DC_GPIO_HPD_ ## type ## __DC_GPIO_HPD ## id ## _ ## type ## _MASK,\
.type ## _shift = DC_GPIO_HPD_ ## type ## __DC_GPIO_HPD ## id ## _ ## type ## __SHIFT
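/*
 * For reference, HPD_GPIO_REG_LIST_ENTRY(MASK, cd, 1) pastes tokens into:
 *
 *	.MASK_reg = REG(DC_GPIO_HPD_MASK),
 *	.MASK_mask = DC_GPIO_HPD_MASK__DC_GPIO_HPD1_MASK_MASK,
 *	.MASK_shift = DC_GPIO_HPD_MASK__DC_GPIO_HPD1_MASK__SHIFT
 */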
#define HPD_GPIO_REG_LIST(id) \
{\
HPD_GPIO_REG_LIST_ENTRY(MASK, cd, id),\
HPD_GPIO_REG_LIST_ENTRY(A, cd, id),\
HPD_GPIO_REG_LIST_ENTRY(EN, cd, id),\
HPD_GPIO_REG_LIST_ENTRY(Y, cd, id)\
}
#define HPD_REG_LIST(id) \
HPD_GPIO_REG_LIST(ONE_MORE_ ## id), \
.int_status = REGI(DC_HPD_INT_STATUS, HPD, id),\
.toggle_filt_cntl = REGI(DC_HPD_TOGGLE_FILT_CNTL, HPD, id)
#define HPD_MASK_SH_LIST(mask_sh) \
SF_HPD(DC_HPD_INT_STATUS, DC_HPD_SENSE_DELAYED, mask_sh),\
SF_HPD(DC_HPD_INT_STATUS, DC_HPD_SENSE, mask_sh),\
SF_HPD(DC_HPD_TOGGLE_FILT_CNTL, DC_HPD_CONNECT_INT_DELAY, mask_sh),\
SF_HPD(DC_HPD_TOGGLE_FILT_CNTL, DC_HPD_DISCONNECT_INT_DELAY, mask_sh)
struct hpd_registers {
struct gpio_registers gpio;
uint32_t int_status;
uint32_t toggle_filt_cntl;
};
struct hpd_sh_mask {
/* int_status */
uint32_t DC_HPD_SENSE_DELAYED;
uint32_t DC_HPD_SENSE;
/* toggle_filt_cntl */
uint32_t DC_HPD_CONNECT_INT_DELAY;
uint32_t DC_HPD_DISCONNECT_INT_DELAY;
};
#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_GPIO_HPD_REGS_H_ */
|
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_MFD_TPS6586X_H
#define __LINUX_MFD_TPS6586X_H
#define TPS6586X_SLEW_RATE_INSTANTLY 0x00
#define TPS6586X_SLEW_RATE_110UV 0x01
#define TPS6586X_SLEW_RATE_220UV 0x02
#define TPS6586X_SLEW_RATE_440UV 0x03
#define TPS6586X_SLEW_RATE_880UV 0x04
#define TPS6586X_SLEW_RATE_1760UV 0x05
#define TPS6586X_SLEW_RATE_3520UV 0x06
#define TPS6586X_SLEW_RATE_7040UV 0x07
#define TPS6586X_SLEW_RATE_SET 0x08
#define TPS6586X_SLEW_RATE_MASK 0x07
/* VERSION CRC */
#define TPS658621A 0x15
#define TPS658621CD 0x2c
#define TPS658623 0x1b
#define TPS658624 0x0a
#define TPS658640 0x01
#define TPS658640v2 0x02
#define TPS658643 0x03
enum {
TPS6586X_ID_SYS,
TPS6586X_ID_SM_0,
TPS6586X_ID_SM_1,
TPS6586X_ID_SM_2,
TPS6586X_ID_LDO_0,
TPS6586X_ID_LDO_1,
TPS6586X_ID_LDO_2,
TPS6586X_ID_LDO_3,
TPS6586X_ID_LDO_4,
TPS6586X_ID_LDO_5,
TPS6586X_ID_LDO_6,
TPS6586X_ID_LDO_7,
TPS6586X_ID_LDO_8,
TPS6586X_ID_LDO_9,
TPS6586X_ID_LDO_RTC,
TPS6586X_ID_MAX_REGULATOR,
};
enum {
TPS6586X_INT_PLDO_0,
TPS6586X_INT_PLDO_1,
TPS6586X_INT_PLDO_2,
TPS6586X_INT_PLDO_3,
TPS6586X_INT_PLDO_4,
TPS6586X_INT_PLDO_5,
TPS6586X_INT_PLDO_6,
TPS6586X_INT_PLDO_7,
TPS6586X_INT_COMP_DET,
TPS6586X_INT_ADC,
TPS6586X_INT_PLDO_8,
TPS6586X_INT_PLDO_9,
TPS6586X_INT_PSM_0,
TPS6586X_INT_PSM_1,
TPS6586X_INT_PSM_2,
TPS6586X_INT_PSM_3,
TPS6586X_INT_RTC_ALM1,
TPS6586X_INT_ACUSB_OVP,
TPS6586X_INT_USB_DET,
TPS6586X_INT_AC_DET,
TPS6586X_INT_BAT_DET,
TPS6586X_INT_CHG_STAT,
TPS6586X_INT_CHG_TEMP,
TPS6586X_INT_PP,
TPS6586X_INT_RESUME,
TPS6586X_INT_LOW_SYS,
TPS6586X_INT_RTC_ALM2,
};
struct tps6586x_settings {
int slew_rate;
};
struct tps6586x_subdev_info {
int id;
const char *name;
void *platform_data;
struct device_node *of_node;
};
struct tps6586x_platform_data {
int num_subdevs;
struct tps6586x_subdev_info *subdevs;
int gpio_base;
int irq_base;
bool pm_off;
struct regulator_init_data *reg_init_data[TPS6586X_ID_MAX_REGULATOR];
};
/*
* NOTE: the functions below are not intended for use outside
* of the TPS6586X sub-device drivers
*/
extern int tps6586x_write(struct device *dev, int reg, uint8_t val);
extern int tps6586x_writes(struct device *dev, int reg, int len, uint8_t *val);
extern int tps6586x_read(struct device *dev, int reg, uint8_t *val);
extern int tps6586x_reads(struct device *dev, int reg, int len, uint8_t *val);
extern int tps6586x_set_bits(struct device *dev, int reg, uint8_t bit_mask);
extern int tps6586x_clr_bits(struct device *dev, int reg, uint8_t bit_mask);
extern int tps6586x_update(struct device *dev, int reg, uint8_t val,
uint8_t mask);
extern int tps6586x_irq_get_virq(struct device *dev, int irq);
extern int tps6586x_get_version(struct device *dev);
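/*
 * Illustrative sketch (not part of the original header): a sub-device driver
 * doing a read-modify-write of the slew-rate field might use the following,
 * where "reg" stands in for that device's (hypothetical) control register:
 *
 *	ret = tps6586x_update(dev->parent, reg,
 *			      TPS6586X_SLEW_RATE_SET | TPS6586X_SLEW_RATE_1760UV,
 *			      TPS6586X_SLEW_RATE_SET | TPS6586X_SLEW_RATE_MASK);
 */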
#endif /*__LINUX_MFD_TPS6586X_H */
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
*
* Copyright (C) 2005 Oracle. All rights reserved.
*/
/* This quorum hack is only here until we transition to some more rational
* approach that is driven from userspace. Honest. No foolin'.
*
* Imagine two nodes lose network connectivity to each other but they're still
* up and operating in every other way. Presumably a network timeout indicates
* that a node is broken and should be recovered. They can't both recover each
* other and both carry on without serialising their access to the file system.
* They need to decide who is authoritative. Now extend that problem to
* arbitrary groups of nodes losing connectivity between each other.
*
* So we declare that a node which has given up on connecting to a majority
* of nodes who are still heartbeating will fence itself.
*
* There are huge opportunities for races here. After we give up on a node's
* connection we need to wait long enough to give heartbeat an opportunity
* to declare the node as truly dead. We also need to be careful with the
* race between when we see a node start heartbeating and when we connect
* to it.
*
* So nodes that are in this transition put a hold on the quorum decision
* with a counter. As they fall out of this transition they drop the count
* and if they're the last, they fire off the decision.
*/
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/reboot.h>
#include "heartbeat.h"
#include "nodemanager.h"
#define MLOG_MASK_PREFIX ML_QUORUM
#include "masklog.h"
#include "quorum.h"
static struct o2quo_state {
spinlock_t qs_lock;
struct work_struct qs_work;
int qs_pending;
int qs_heartbeating;
unsigned long qs_hb_bm[BITS_TO_LONGS(O2NM_MAX_NODES)];
int qs_connected;
unsigned long qs_conn_bm[BITS_TO_LONGS(O2NM_MAX_NODES)];
int qs_holds;
unsigned long qs_hold_bm[BITS_TO_LONGS(O2NM_MAX_NODES)];
} o2quo_state;
/* this is horribly heavy-handed. It should instead flip the file
* system RO and call some userspace script. */
static void o2quo_fence_self(void)
{
/* panic spins with interrupts enabled. with preemption enabled,
* threads can still schedule, etc, etc */
o2hb_stop_all_regions();
switch (o2nm_single_cluster->cl_fence_method) {
case O2NM_FENCE_PANIC:
panic("*** ocfs2 is very sorry to be fencing this system by "
"panicking ***\n");
break;
default:
WARN_ON(o2nm_single_cluster->cl_fence_method >=
O2NM_FENCE_METHODS);
fallthrough;
case O2NM_FENCE_RESET:
printk(KERN_ERR "*** ocfs2 is very sorry to be fencing this "
"system by restarting ***\n");
emergency_restart();
break;
}
}
/* Indicate that a timeout occurred on a heartbeat region write. The
* other nodes in the cluster may consider us dead at that time so we
* want to "fence" ourselves so that we don't scribble on the disk
* after they think they've recovered us. This can't solve all
* problems related to writeout after recovery but this hack can at
* least close some of those gaps. When we have real fencing, this can
* go away as our node would be fenced externally before other nodes
* begin recovery. */
void o2quo_disk_timeout(void)
{
o2quo_fence_self();
}
static void o2quo_make_decision(struct work_struct *work)
{
int quorum;
int lowest_hb, lowest_reachable = 0, fence = 0;
struct o2quo_state *qs = &o2quo_state;
spin_lock_bh(&qs->qs_lock);
lowest_hb = find_first_bit(qs->qs_hb_bm, O2NM_MAX_NODES);
if (lowest_hb != O2NM_MAX_NODES)
lowest_reachable = test_bit(lowest_hb, qs->qs_conn_bm);
mlog(0, "heartbeating: %d, connected: %d, "
"lowest: %d (%sreachable)\n", qs->qs_heartbeating,
qs->qs_connected, lowest_hb, lowest_reachable ? "" : "un");
if (!test_bit(o2nm_this_node(), qs->qs_hb_bm) ||
qs->qs_heartbeating == 1)
goto out;
if (qs->qs_heartbeating & 1) {
/* the odd-numbered cluster case is straightforward --
* if we can't talk to the majority we're hosed */
quorum = (qs->qs_heartbeating + 1)/2;
if (qs->qs_connected < quorum) {
mlog(ML_ERROR, "fencing this node because it is "
"only connected to %u nodes and %u is needed "
"to make a quorum out of %u heartbeating nodes\n",
qs->qs_connected, quorum,
qs->qs_heartbeating);
fence = 1;
}
} else {
/* the even-numbered cluster adds the possibility of each half
* of the cluster being able to talk amongst themselves. in
* that case we're hosed if we can't talk to the group that has
* the lowest-numbered node */
quorum = qs->qs_heartbeating / 2;
if (qs->qs_connected < quorum) {
mlog(ML_ERROR, "fencing this node because it is "
"only connected to %u nodes and %u is needed "
"to make a quorum out of %u heartbeating nodes\n",
qs->qs_connected, quorum,
qs->qs_heartbeating);
fence = 1;
}
else if ((qs->qs_connected == quorum) &&
!lowest_reachable) {
mlog(ML_ERROR, "fencing this node because it is "
"connected to a half-quorum of %u out of %u "
"nodes which doesn't include the lowest active "
"node %u\n", quorum, qs->qs_heartbeating,
lowest_hb);
fence = 1;
}
}
out:
if (fence) {
spin_unlock_bh(&qs->qs_lock);
o2quo_fence_self();
} else {
mlog(ML_NOTICE, "not fencing this node, heartbeating: %d, "
"connected: %d, lowest: %d (%sreachable)\n",
qs->qs_heartbeating, qs->qs_connected, lowest_hb,
lowest_reachable ? "" : "un");
spin_unlock_bh(&qs->qs_lock);
}
}
static void o2quo_set_hold(struct o2quo_state *qs, u8 node)
{
assert_spin_locked(&qs->qs_lock);
if (!test_and_set_bit(node, qs->qs_hold_bm)) {
qs->qs_holds++;
mlog_bug_on_msg(qs->qs_holds == O2NM_MAX_NODES,
"node %u\n", node);
mlog(0, "node %u, %d total\n", node, qs->qs_holds);
}
}
static void o2quo_clear_hold(struct o2quo_state *qs, u8 node)
{
assert_spin_locked(&qs->qs_lock);
if (test_and_clear_bit(node, qs->qs_hold_bm)) {
mlog(0, "node %u, %d total\n", node, qs->qs_holds - 1);
if (--qs->qs_holds == 0) {
if (qs->qs_pending) {
qs->qs_pending = 0;
schedule_work(&qs->qs_work);
}
}
mlog_bug_on_msg(qs->qs_holds < 0, "node %u, holds %d\n",
node, qs->qs_holds);
}
}
/* as a node comes up we delay the quorum decision until we know the fate of
* the connection. the hold will be dropped in conn_up or hb_down. it might be
* perpetuated by conn_err until hb_down. if we already have a conn, we might
* be dropping a hold that conn_up got. */
void o2quo_hb_up(u8 node)
{
struct o2quo_state *qs = &o2quo_state;
spin_lock_bh(&qs->qs_lock);
qs->qs_heartbeating++;
mlog_bug_on_msg(qs->qs_heartbeating == O2NM_MAX_NODES,
"node %u\n", node);
mlog_bug_on_msg(test_bit(node, qs->qs_hb_bm), "node %u\n", node);
set_bit(node, qs->qs_hb_bm);
mlog(0, "node %u, %d total\n", node, qs->qs_heartbeating);
if (!test_bit(node, qs->qs_conn_bm))
o2quo_set_hold(qs, node);
else
o2quo_clear_hold(qs, node);
spin_unlock_bh(&qs->qs_lock);
}
/* hb going down releases any holds we might have had due to this node from
* conn_up, conn_err, or hb_up */
void o2quo_hb_down(u8 node)
{
struct o2quo_state *qs = &o2quo_state;
spin_lock_bh(&qs->qs_lock);
qs->qs_heartbeating--;
mlog_bug_on_msg(qs->qs_heartbeating < 0,
"node %u, %d heartbeating\n",
node, qs->qs_heartbeating);
mlog_bug_on_msg(!test_bit(node, qs->qs_hb_bm), "node %u\n", node);
clear_bit(node, qs->qs_hb_bm);
mlog(0, "node %u, %d total\n", node, qs->qs_heartbeating);
o2quo_clear_hold(qs, node);
spin_unlock_bh(&qs->qs_lock);
}
/* this tells us that we've decided that the node is still heartbeating
* even though we've lost its conn. it must only be called after conn_err
* and indicates that we must now make a quorum decision in the future,
* though we might be doing so after waiting for holds to drain. Here
* we'll be dropping the hold from conn_err. */
void o2quo_hb_still_up(u8 node)
{
struct o2quo_state *qs = &o2quo_state;
spin_lock_bh(&qs->qs_lock);
mlog(0, "node %u\n", node);
qs->qs_pending = 1;
o2quo_clear_hold(qs, node);
spin_unlock_bh(&qs->qs_lock);
}
/* This is analogous to hb_up. as a node's connection comes up we delay the
* quorum decision until we see it heartbeating. the hold will be dropped in
* hb_up or hb_down. it might be perpetuated by conn_err until hb_down. if
* it's already heartbeating we might be dropping a hold that conn_up got.
*/
void o2quo_conn_up(u8 node)
{
struct o2quo_state *qs = &o2quo_state;
spin_lock_bh(&qs->qs_lock);
qs->qs_connected++;
mlog_bug_on_msg(qs->qs_connected == O2NM_MAX_NODES,
"node %u\n", node);
mlog_bug_on_msg(test_bit(node, qs->qs_conn_bm), "node %u\n", node);
set_bit(node, qs->qs_conn_bm);
mlog(0, "node %u, %d total\n", node, qs->qs_connected);
if (!test_bit(node, qs->qs_hb_bm))
o2quo_set_hold(qs, node);
else
o2quo_clear_hold(qs, node);
spin_unlock_bh(&qs->qs_lock);
}
/* we've decided that we won't ever be connecting to the node again. if it's
* still heartbeating we grab a hold that will delay decisions until either the
* node stops heartbeating from hb_down or the caller decides that the node is
* still up and calls still_up */
void o2quo_conn_err(u8 node)
{
struct o2quo_state *qs = &o2quo_state;
spin_lock_bh(&qs->qs_lock);
if (test_bit(node, qs->qs_conn_bm)) {
qs->qs_connected--;
mlog_bug_on_msg(qs->qs_connected < 0,
"node %u, connected %d\n",
node, qs->qs_connected);
clear_bit(node, qs->qs_conn_bm);
if (test_bit(node, qs->qs_hb_bm))
o2quo_set_hold(qs, node);
}
mlog(0, "node %u, %d total\n", node, qs->qs_connected);
spin_unlock_bh(&qs->qs_lock);
}
void o2quo_init(void)
{
struct o2quo_state *qs = &o2quo_state;
spin_lock_init(&qs->qs_lock);
INIT_WORK(&qs->qs_work, o2quo_make_decision);
}
void o2quo_exit(void)
{
struct o2quo_state *qs = &o2quo_state;
flush_work(&qs->qs_work);
}
|
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2022 Intel Corporation
*/
#ifndef _XE_UC_TYPES_H_
#define _XE_UC_TYPES_H_
#include "xe_gsc_types.h"
#include "xe_guc_types.h"
#include "xe_huc_types.h"
#include "xe_wopcm_types.h"
/**
* struct xe_uc - XE micro controllers
*/
struct xe_uc {
/** @guc: Graphics micro controller */
struct xe_guc guc;
/** @huc: HuC */
struct xe_huc huc;
/** @gsc: Graphics Security Controller */
struct xe_gsc gsc;
/** @wopcm: WOPCM */
struct xe_wopcm wopcm;
};
#endif
|
// SPDX-License-Identifier: GPL-2.0-or-later
#include "alloc_api.h"
static int alloc_test_flags = TEST_F_NONE;
static inline const char * const get_memblock_alloc_name(int flags)
{
if (flags & TEST_F_RAW)
return "memblock_alloc_raw";
return "memblock_alloc";
}
static inline void *run_memblock_alloc(phys_addr_t size, phys_addr_t align)
{
if (alloc_test_flags & TEST_F_RAW)
return memblock_alloc_raw(size, align);
return memblock_alloc(size, align);
}
/*
* A simple test that tries to allocate a small memory region.
* Expect to allocate an aligned region near the end of the available memory.
*/
static int alloc_top_down_simple_check(void)
{
struct memblock_region *rgn = &memblock.reserved.regions[0];
void *allocated_ptr = NULL;
phys_addr_t size = SZ_2;
phys_addr_t expected_start;
PREFIX_PUSH();
setup_memblock();
expected_start = memblock_end_of_DRAM() - SMP_CACHE_BYTES;
allocated_ptr = run_memblock_alloc(size, SMP_CACHE_BYTES);
ASSERT_NE(allocated_ptr, NULL);
assert_mem_content(allocated_ptr, size, alloc_test_flags);
ASSERT_EQ(rgn->size, size);
ASSERT_EQ(rgn->base, expected_start);
ASSERT_EQ(memblock.reserved.cnt, 1);
ASSERT_EQ(memblock.reserved.total_size, size);
test_pass_pop();
return 0;
}
/*
* A test that tries to allocate memory next to a reserved region that starts at
* the misaligned address. Expect to create two separate entries, with the new
* entry aligned to the provided alignment:
*
* +
* | +--------+ +--------|
* | | rgn2 | | rgn1 |
* +------------+--------+---------+--------+
* ^
* |
* Aligned address boundary
*
* The allocation direction is top-down and region arrays are sorted from lower
* to higher addresses, so the new region will be the first entry in
* memblock.reserved array. The previously reserved region does not get modified.
* Region counter and total size get updated.
*/
static int alloc_top_down_disjoint_check(void)
{
/* After allocation, this will point to the "old" region */
struct memblock_region *rgn1 = &memblock.reserved.regions[1];
struct memblock_region *rgn2 = &memblock.reserved.regions[0];
struct region r1;
void *allocated_ptr = NULL;
phys_addr_t r2_size = SZ_16;
/* Use custom alignment */
phys_addr_t alignment = SMP_CACHE_BYTES * 2;
phys_addr_t total_size;
phys_addr_t expected_start;
PREFIX_PUSH();
setup_memblock();
r1.base = memblock_end_of_DRAM() - SZ_2;
r1.size = SZ_2;
total_size = r1.size + r2_size;
expected_start = memblock_end_of_DRAM() - alignment;
memblock_reserve(r1.base, r1.size);
allocated_ptr = run_memblock_alloc(r2_size, alignment);
ASSERT_NE(allocated_ptr, NULL);
assert_mem_content(allocated_ptr, r2_size, alloc_test_flags);
ASSERT_EQ(rgn1->size, r1.size);
ASSERT_EQ(rgn1->base, r1.base);
ASSERT_EQ(rgn2->size, r2_size);
ASSERT_EQ(rgn2->base, expected_start);
ASSERT_EQ(memblock.reserved.cnt, 2);
ASSERT_EQ(memblock.reserved.total_size, total_size);
test_pass_pop();
return 0;
}
/*
* A test that tries to allocate memory when there is enough space at the end
* of the previously reserved block (i.e. first fit):
*
* | +--------+--------------|
* | | r1 | r2 |
* +--------------+--------+--------------+
*
* Expect a merge of both regions. Only the region size gets updated.
*/
static int alloc_top_down_before_check(void)
{
struct memblock_region *rgn = &memblock.reserved.regions[0];
void *allocated_ptr = NULL;
/*
* The first region ends at the aligned address to test region merging
*/
phys_addr_t r1_size = SMP_CACHE_BYTES;
phys_addr_t r2_size = SZ_512;
phys_addr_t total_size = r1_size + r2_size;
PREFIX_PUSH();
setup_memblock();
memblock_reserve(memblock_end_of_DRAM() - total_size, r1_size);
allocated_ptr = run_memblock_alloc(r2_size, SMP_CACHE_BYTES);
ASSERT_NE(allocated_ptr, NULL);
assert_mem_content(allocated_ptr, r2_size, alloc_test_flags);
ASSERT_EQ(rgn->size, total_size);
ASSERT_EQ(rgn->base, memblock_end_of_DRAM() - total_size);
ASSERT_EQ(memblock.reserved.cnt, 1);
ASSERT_EQ(memblock.reserved.total_size, total_size);
test_pass_pop();
return 0;
}
/*
* A test that tries to allocate memory when there is not enough space at the
* end of the previously reserved block (i.e. second fit):
*
* | +-----------+------+ |
* | | r2 | r1 | |
* +------------+-----------+------+-----+
*
* Expect a merge of both regions. Both the base address and size of the region
* get updated.
*/
static int alloc_top_down_after_check(void)
{
struct memblock_region *rgn = &memblock.reserved.regions[0];
struct region r1;
void *allocated_ptr = NULL;
phys_addr_t r2_size = SZ_512;
phys_addr_t total_size;
PREFIX_PUSH();
setup_memblock();
/*
* The first region starts at an aligned address to test region merging
*/
r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES;
r1.size = SZ_8;
total_size = r1.size + r2_size;
memblock_reserve(r1.base, r1.size);
allocated_ptr = run_memblock_alloc(r2_size, SMP_CACHE_BYTES);
ASSERT_NE(allocated_ptr, NULL);
assert_mem_content(allocated_ptr, r2_size, alloc_test_flags);
ASSERT_EQ(rgn->size, total_size);
ASSERT_EQ(rgn->base, r1.base - r2_size);
ASSERT_EQ(memblock.reserved.cnt, 1);
ASSERT_EQ(memblock.reserved.total_size, total_size);
test_pass_pop();
return 0;
}
/*
* A test that tries to allocate memory when there are two reserved regions with
* a gap too small to fit the new region:
*
* | +--------+----------+ +------|
* | | r3 | r2 | | r1 |
* +-------+--------+----------+---+------+
*
* Expect to allocate a region before the one that starts at the lower address,
* and merge them into one. The region counter and total size fields get
* updated.
*/
static int alloc_top_down_second_fit_check(void)
{
struct memblock_region *rgn = &memblock.reserved.regions[0];
struct region r1, r2;
void *allocated_ptr = NULL;
phys_addr_t r3_size = SZ_1K;
phys_addr_t total_size;
PREFIX_PUSH();
setup_memblock();
r1.base = memblock_end_of_DRAM() - SZ_512;
r1.size = SZ_512;
r2.base = r1.base - SZ_512;
r2.size = SZ_256;
total_size = r1.size + r2.size + r3_size;
memblock_reserve(r1.base, r1.size);
memblock_reserve(r2.base, r2.size);
allocated_ptr = run_memblock_alloc(r3_size, SMP_CACHE_BYTES);
ASSERT_NE(allocated_ptr, NULL);
assert_mem_content(allocated_ptr, r3_size, alloc_test_flags);
ASSERT_EQ(rgn->size, r2.size + r3_size);
ASSERT_EQ(rgn->base, r2.base - r3_size);
ASSERT_EQ(memblock.reserved.cnt, 2);
ASSERT_EQ(memblock.reserved.total_size, total_size);
test_pass_pop();
return 0;
}
/*
* A test that tries to allocate memory when there are two reserved regions with
* a gap big enough to accommodate the new region:
*
* | +--------+--------+--------+ |
* | | r2 | r3 | r1 | |
* +-----+--------+--------+--------+-----+
*
* Expect to merge all of them, creating one big entry in memblock.reserved
* array. The region counter and total size fields get updated.
*/
static int alloc_in_between_generic_check(void)
{
struct memblock_region *rgn = &memblock.reserved.regions[0];
struct region r1, r2;
void *allocated_ptr = NULL;
phys_addr_t gap_size = SMP_CACHE_BYTES;
phys_addr_t r3_size = SZ_64;
/*
* Calculate region sizes so there's just enough space for the new entry
*/
phys_addr_t rgn_size = (MEM_SIZE - (2 * gap_size + r3_size)) / 2;
phys_addr_t total_size;
PREFIX_PUSH();
setup_memblock();
r1.size = rgn_size;
r1.base = memblock_end_of_DRAM() - (gap_size + rgn_size);
r2.size = rgn_size;
r2.base = memblock_start_of_DRAM() + gap_size;
total_size = r1.size + r2.size + r3_size;
memblock_reserve(r1.base, r1.size);
memblock_reserve(r2.base, r2.size);
allocated_ptr = run_memblock_alloc(r3_size, SMP_CACHE_BYTES);
ASSERT_NE(allocated_ptr, NULL);
assert_mem_content(allocated_ptr, r3_size, alloc_test_flags);
ASSERT_EQ(rgn->size, total_size);
ASSERT_EQ(rgn->base, r1.base - r2.size - r3_size);
ASSERT_EQ(memblock.reserved.cnt, 1);
ASSERT_EQ(memblock.reserved.total_size, total_size);
test_pass_pop();
return 0;
}
/*
* A test that tries to allocate memory when the memory is filled with reserved
* regions with memory gaps too small to fit the new region:
*
* +-------+
* | new |
* +--+----+
* | +-----+ +-----+ +-----+ |
* | | res | | res | | res | |
* +----+-----+----+-----+----+-----+----+
*
* Expect no allocation to happen.
*/
static int alloc_small_gaps_generic_check(void)
{
void *allocated_ptr = NULL;
phys_addr_t region_size = SZ_1K;
phys_addr_t gap_size = SZ_256;
phys_addr_t region_end;
PREFIX_PUSH();
setup_memblock();
region_end = memblock_start_of_DRAM();
while (region_end < memblock_end_of_DRAM()) {
memblock_reserve(region_end + gap_size, region_size);
region_end += gap_size + region_size;
}
allocated_ptr = run_memblock_alloc(region_size, SMP_CACHE_BYTES);
ASSERT_EQ(allocated_ptr, NULL);
test_pass_pop();
return 0;
}
/*
* A test that tries to allocate memory when all memory is reserved.
* Expect no allocation to happen.
*/
static int alloc_all_reserved_generic_check(void)
{
void *allocated_ptr = NULL;
PREFIX_PUSH();
setup_memblock();
/* Simulate full memory */
memblock_reserve(memblock_start_of_DRAM(), MEM_SIZE);
allocated_ptr = run_memblock_alloc(SZ_256, SMP_CACHE_BYTES);
ASSERT_EQ(allocated_ptr, NULL);
test_pass_pop();
return 0;
}
/*
* A test that tries to allocate memory when the memory is almost full,
* with not enough space left for the new region:
*
* +-------+
* | new |
* +-------+
* |-----------------------------+ |
* | reserved | |
* +-----------------------------+---+
*
* Expect no allocation to happen.
*/
static int alloc_no_space_generic_check(void)
{
void *allocated_ptr = NULL;
phys_addr_t available_size = SZ_256;
phys_addr_t reserved_size = MEM_SIZE - available_size;
PREFIX_PUSH();
setup_memblock();
/* Simulate almost-full memory */
memblock_reserve(memblock_start_of_DRAM(), reserved_size);
allocated_ptr = run_memblock_alloc(SZ_1K, SMP_CACHE_BYTES);
ASSERT_EQ(allocated_ptr, NULL);
test_pass_pop();
return 0;
}
/*
* A test that tries to allocate memory when the memory is almost full,
* but there is just enough space left:
*
* |---------------------------+---------|
* | reserved | new |
* +---------------------------+---------+
*
* Expect to allocate memory and merge all the regions. The total size field
* gets updated.
*/
static int alloc_limited_space_generic_check(void)
{
struct memblock_region *rgn = &memblock.reserved.regions[0];
void *allocated_ptr = NULL;
phys_addr_t available_size = SZ_256;
phys_addr_t reserved_size = MEM_SIZE - available_size;
PREFIX_PUSH();
setup_memblock();
/* Simulate almost-full memory */
memblock_reserve(memblock_start_of_DRAM(), reserved_size);
allocated_ptr = run_memblock_alloc(available_size, SMP_CACHE_BYTES);
ASSERT_NE(allocated_ptr, NULL);
assert_mem_content(allocated_ptr, available_size, alloc_test_flags);
ASSERT_EQ(rgn->size, MEM_SIZE);
ASSERT_EQ(rgn->base, memblock_start_of_DRAM());
ASSERT_EQ(memblock.reserved.cnt, 1);
ASSERT_EQ(memblock.reserved.total_size, MEM_SIZE);
test_pass_pop();
return 0;
}
/*
* A test that tries to allocate memory when there is no available memory
* registered (i.e. memblock.memory has only a dummy entry).
* Expect no allocation to happen.
*/
static int alloc_no_memory_generic_check(void)
{
struct memblock_region *rgn = &memblock.reserved.regions[0];
void *allocated_ptr = NULL;
PREFIX_PUSH();
reset_memblock_regions();
allocated_ptr = run_memblock_alloc(SZ_1K, SMP_CACHE_BYTES);
ASSERT_EQ(allocated_ptr, NULL);
ASSERT_EQ(rgn->size, 0);
ASSERT_EQ(rgn->base, 0);
ASSERT_EQ(memblock.reserved.total_size, 0);
test_pass_pop();
return 0;
}
/*
* A test that tries to allocate a region that is larger than the total size of
* available memory (memblock.memory):
*
* +-----------------------------------+
* | new |
* +-----------------------------------+
* | |
* | |
* +---------------------------------+
*
* Expect no allocation to happen.
*/
static int alloc_too_large_generic_check(void)
{
struct memblock_region *rgn = &memblock.reserved.regions[0];
void *allocated_ptr = NULL;
PREFIX_PUSH();
setup_memblock();
allocated_ptr = run_memblock_alloc(MEM_SIZE + SZ_2, SMP_CACHE_BYTES);
ASSERT_EQ(allocated_ptr, NULL);
ASSERT_EQ(rgn->size, 0);
ASSERT_EQ(rgn->base, 0);
ASSERT_EQ(memblock.reserved.total_size, 0);
test_pass_pop();
return 0;
}
/*
* A simple test that tries to allocate a small memory region.
* Expect to allocate an aligned region at the beginning of the available
* memory.
*/
static int alloc_bottom_up_simple_check(void)
{
struct memblock_region *rgn = &memblock.reserved.regions[0];
void *allocated_ptr = NULL;
PREFIX_PUSH();
setup_memblock();
allocated_ptr = run_memblock_alloc(SZ_2, SMP_CACHE_BYTES);
ASSERT_NE(allocated_ptr, NULL);
assert_mem_content(allocated_ptr, SZ_2, alloc_test_flags);
ASSERT_EQ(rgn->size, SZ_2);
ASSERT_EQ(rgn->base, memblock_start_of_DRAM());
ASSERT_EQ(memblock.reserved.cnt, 1);
ASSERT_EQ(memblock.reserved.total_size, SZ_2);
test_pass_pop();
return 0;
}
/*
* A test that tries to allocate memory next to a reserved region that starts
* at a misaligned address. Expect to create two separate entries, with the new
* entry aligned to the provided alignment:
*
* +
* | +----------+ +----------+ |
* | | rgn1 | | rgn2 | |
* +----+----------+---+----------+-----+
* ^
* |
* Aligned address boundary
*
* The allocation direction is bottom-up, so the new region will be the second
* entry in the memblock.reserved array. The previously reserved region does not
* get modified. Region counter and total size get updated.
*/
static int alloc_bottom_up_disjoint_check(void)
{
struct memblock_region *rgn1 = &memblock.reserved.regions[0];
struct memblock_region *rgn2 = &memblock.reserved.regions[1];
struct region r1;
void *allocated_ptr = NULL;
phys_addr_t r2_size = SZ_16;
/* Use custom alignment */
phys_addr_t alignment = SMP_CACHE_BYTES * 2;
phys_addr_t total_size;
phys_addr_t expected_start;
PREFIX_PUSH();
setup_memblock();
r1.base = memblock_start_of_DRAM() + SZ_2;
r1.size = SZ_2;
total_size = r1.size + r2_size;
expected_start = memblock_start_of_DRAM() + alignment;
memblock_reserve(r1.base, r1.size);
allocated_ptr = run_memblock_alloc(r2_size, alignment);
ASSERT_NE(allocated_ptr, NULL);
assert_mem_content(allocated_ptr, r2_size, alloc_test_flags);
ASSERT_EQ(rgn1->size, r1.size);
ASSERT_EQ(rgn1->base, r1.base);
ASSERT_EQ(rgn2->size, r2_size);
ASSERT_EQ(rgn2->base, expected_start);
ASSERT_EQ(memblock.reserved.cnt, 2);
ASSERT_EQ(memblock.reserved.total_size, total_size);
test_pass_pop();
return 0;
}
/*
* A test that tries to allocate memory when there is enough space at
* the beginning of the previously reserved block (i.e. first fit):
*
* |------------------+--------+ |
* | r1 | r2 | |
* +------------------+--------+---------+
*
* Expect a merge of both regions. Only the region size gets updated.
*/
static int alloc_bottom_up_before_check(void)
{
struct memblock_region *rgn = &memblock.reserved.regions[0];
void *allocated_ptr = NULL;
phys_addr_t r1_size = SZ_512;
phys_addr_t r2_size = SZ_128;
phys_addr_t total_size = r1_size + r2_size;
PREFIX_PUSH();
setup_memblock();
memblock_reserve(memblock_start_of_DRAM() + r1_size, r2_size);
allocated_ptr = run_memblock_alloc(r1_size, SMP_CACHE_BYTES);
ASSERT_NE(allocated_ptr, NULL);
assert_mem_content(allocated_ptr, r1_size, alloc_test_flags);
ASSERT_EQ(rgn->size, total_size);
ASSERT_EQ(rgn->base, memblock_start_of_DRAM());
ASSERT_EQ(memblock.reserved.cnt, 1);
ASSERT_EQ(memblock.reserved.total_size, total_size);
test_pass_pop();
return 0;
}
/*
* A test that tries to allocate memory when there is not enough space at
* the beginning of the previously reserved block (i.e. second fit):
*
* | +--------+--------------+ |
* | | r1 | r2 | |
* +----+--------+--------------+---------+
*
* Expect a merge of both regions. Only the region size gets updated.
*/
static int alloc_bottom_up_after_check(void)
{
struct memblock_region *rgn = &memblock.reserved.regions[0];
struct region r1;
void *allocated_ptr = NULL;
phys_addr_t r2_size = SZ_512;
phys_addr_t total_size;
PREFIX_PUSH();
setup_memblock();
/*
* The first region starts at an aligned address to test region merging
*/
r1.base = memblock_start_of_DRAM() + SMP_CACHE_BYTES;
r1.size = SZ_64;
total_size = r1.size + r2_size;
memblock_reserve(r1.base, r1.size);
allocated_ptr = run_memblock_alloc(r2_size, SMP_CACHE_BYTES);
ASSERT_NE(allocated_ptr, NULL);
assert_mem_content(allocated_ptr, r2_size, alloc_test_flags);
ASSERT_EQ(rgn->size, total_size);
ASSERT_EQ(rgn->base, r1.base);
ASSERT_EQ(memblock.reserved.cnt, 1);
ASSERT_EQ(memblock.reserved.total_size, total_size);
test_pass_pop();
return 0;
}
/*
* A test that tries to allocate memory when there are two reserved regions, the
* first one starting at the beginning of the available memory, with a gap too
* small to fit the new region:
*
* |------------+ +--------+--------+ |
* | r1 | | r2 | r3 | |
* +------------+-----+--------+--------+--+
*
* Expect to allocate after the second region, which starts at the higher
* address, and merge them into one. The region counter and total size fields
* get updated.
*/
static int alloc_bottom_up_second_fit_check(void)
{
struct memblock_region *rgn = &memblock.reserved.regions[1];
struct region r1, r2;
void *allocated_ptr = NULL;
phys_addr_t r3_size = SZ_1K;
phys_addr_t total_size;
PREFIX_PUSH();
setup_memblock();
r1.base = memblock_start_of_DRAM();
r1.size = SZ_512;
r2.base = r1.base + r1.size + SZ_512;
r2.size = SZ_256;
total_size = r1.size + r2.size + r3_size;
memblock_reserve(r1.base, r1.size);
memblock_reserve(r2.base, r2.size);
allocated_ptr = run_memblock_alloc(r3_size, SMP_CACHE_BYTES);
ASSERT_NE(allocated_ptr, NULL);
assert_mem_content(allocated_ptr, r3_size, alloc_test_flags);
ASSERT_EQ(rgn->size, r2.size + r3_size);
ASSERT_EQ(rgn->base, r2.base);
ASSERT_EQ(memblock.reserved.cnt, 2);
ASSERT_EQ(memblock.reserved.total_size, total_size);
test_pass_pop();
return 0;
}
/* Test case wrappers */
static int alloc_simple_check(void)
{
test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_top_down_simple_check();
memblock_set_bottom_up(true);
alloc_bottom_up_simple_check();
return 0;
}
static int alloc_disjoint_check(void)
{
test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_top_down_disjoint_check();
memblock_set_bottom_up(true);
alloc_bottom_up_disjoint_check();
return 0;
}
static int alloc_before_check(void)
{
test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_top_down_before_check();
memblock_set_bottom_up(true);
alloc_bottom_up_before_check();
return 0;
}
static int alloc_after_check(void)
{
test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_top_down_after_check();
memblock_set_bottom_up(true);
alloc_bottom_up_after_check();
return 0;
}
static int alloc_in_between_check(void)
{
test_print("\tRunning %s...\n", __func__);
run_top_down(alloc_in_between_generic_check);
run_bottom_up(alloc_in_between_generic_check);
return 0;
}
static int alloc_second_fit_check(void)
{
test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_top_down_second_fit_check();
memblock_set_bottom_up(true);
alloc_bottom_up_second_fit_check();
return 0;
}
static int alloc_small_gaps_check(void)
{
test_print("\tRunning %s...\n", __func__);
run_top_down(alloc_small_gaps_generic_check);
run_bottom_up(alloc_small_gaps_generic_check);
return 0;
}
static int alloc_all_reserved_check(void)
{
test_print("\tRunning %s...\n", __func__);
run_top_down(alloc_all_reserved_generic_check);
run_bottom_up(alloc_all_reserved_generic_check);
return 0;
}
static int alloc_no_space_check(void)
{
test_print("\tRunning %s...\n", __func__);
run_top_down(alloc_no_space_generic_check);
run_bottom_up(alloc_no_space_generic_check);
return 0;
}
static int alloc_limited_space_check(void)
{
test_print("\tRunning %s...\n", __func__);
run_top_down(alloc_limited_space_generic_check);
run_bottom_up(alloc_limited_space_generic_check);
return 0;
}
static int alloc_no_memory_check(void)
{
test_print("\tRunning %s...\n", __func__);
run_top_down(alloc_no_memory_generic_check);
run_bottom_up(alloc_no_memory_generic_check);
return 0;
}
static int alloc_too_large_check(void)
{
test_print("\tRunning %s...\n", __func__);
run_top_down(alloc_too_large_generic_check);
run_bottom_up(alloc_too_large_generic_check);
return 0;
}
static int memblock_alloc_checks_internal(int flags)
{
const char *func = get_memblock_alloc_name(flags);
alloc_test_flags = flags;
prefix_reset();
prefix_push(func);
test_print("Running %s tests...\n", func);
reset_memblock_attributes();
dummy_physical_memory_init();
alloc_simple_check();
alloc_disjoint_check();
alloc_before_check();
alloc_after_check();
alloc_second_fit_check();
alloc_small_gaps_check();
alloc_in_between_check();
alloc_all_reserved_check();
alloc_no_space_check();
alloc_limited_space_check();
alloc_no_memory_check();
alloc_too_large_check();
dummy_physical_memory_cleanup();
prefix_pop();
return 0;
}
int memblock_alloc_checks(void)
{
memblock_alloc_checks_internal(TEST_F_NONE);
memblock_alloc_checks_internal(TEST_F_RAW);
return 0;
}
|
/* SPDX-License-Identifier: BSD-3-Clause */
#ifndef _VMBUS_BUF_H_
#define _VMBUS_BUF_H_
#include <stdbool.h>
#include <stdint.h>
#define __packed __attribute__((__packed__))
#define unlikely(x) __builtin_expect(!!(x), 0)
#define ICMSGHDRFLAG_TRANSACTION 1
#define ICMSGHDRFLAG_REQUEST 2
#define ICMSGHDRFLAG_RESPONSE 4
#define IC_VERSION_NEGOTIATION_MAX_VER_COUNT 100
#define ICMSG_HDR (sizeof(struct vmbuspipe_hdr) + sizeof(struct icmsg_hdr))
#define ICMSG_NEGOTIATE_PKT_SIZE(icframe_vercnt, icmsg_vercnt) \
(ICMSG_HDR + sizeof(struct icmsg_negotiate) + \
(((icframe_vercnt) + (icmsg_vercnt)) * sizeof(struct ic_version)))
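/*
 * Worked example (illustrative): negotiating with 2 framework versions and
 * 2 message versions gives
 *
 *	ICMSG_NEGOTIATE_PKT_SIZE(2, 2)
 *		== ICMSG_HDR + sizeof(struct icmsg_negotiate)
 *		   + 4 * sizeof(struct ic_version)
 *
 * i.e. the fixed headers plus one struct ic_version per offered version.
 */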
/*
* Channel packets
*/
/* Channel packet flags */
#define VMBUS_CHANPKT_TYPE_INBAND 0x0006
#define VMBUS_CHANPKT_TYPE_RXBUF 0x0007
#define VMBUS_CHANPKT_TYPE_GPA 0x0009
#define VMBUS_CHANPKT_TYPE_COMP 0x000b
#define VMBUS_CHANPKT_FLAG_NONE 0
#define VMBUS_CHANPKT_FLAG_RC 0x0001 /* report completion */
#define VMBUS_CHANPKT_SIZE_SHIFT 3
#define VMBUS_CHANPKT_SIZE_ALIGN BIT(VMBUS_CHANPKT_SIZE_SHIFT)
#define VMBUS_CHANPKT_HLEN_MIN \
(sizeof(struct vmbus_chanpkt_hdr) >> VMBUS_CHANPKT_SIZE_SHIFT)
/*
* Buffer ring
*/
struct vmbus_bufring {
volatile uint32_t windex;
volatile uint32_t rindex;
/*
* Interrupt mask {0,1}
*
* For the TX bufring, the host sets this to 1 when it is processing
* the TX bufring, so that we can safely skip the TX event
* notification to the host.
*
* For the RX bufring, once we set this to 1, the host will not
* dispatch further interrupts to us, even if there is data pending
* in the RX bufring. This effectively disables the interrupt of the
* channel to which this RX bufring is attached.
*/
volatile uint32_t imask;
/*
* Win8 uses some of the reserved bits to implement
* interrupt driven flow management. On the send side
* we can request that the receiver interrupt the sender
* when the ring transitions from being full to being able
* to handle a message of size "pending_send_sz".
*
* Add necessary state for this enhancement.
*/
volatile uint32_t pending_send;
uint32_t reserved1[12];
union {
struct {
uint32_t feat_pending_send_sz:1;
};
uint32_t value;
} feature_bits;
/* Pad it to rte_mem_page_size() so that data starts on page boundary */
uint8_t reserved2[4028];
/*
* Ring data starts here + RingDataStartOffset
* !!! DO NOT place any fields below this !!!
*/
uint8_t data[];
} __packed;
struct vmbus_br {
struct vmbus_bufring *vbr;
uint32_t dsize;
uint32_t windex; /* next available location */
};
struct vmbus_chanpkt_hdr {
uint16_t type; /* VMBUS_CHANPKT_TYPE_ */
uint16_t hlen; /* header len, in 8 bytes */
uint16_t tlen; /* total len, in 8 bytes */
uint16_t flags; /* VMBUS_CHANPKT_FLAG_ */
uint64_t xactid;
} __packed;
struct vmbus_chanpkt {
struct vmbus_chanpkt_hdr hdr;
} __packed;
struct vmbuspipe_hdr {
unsigned int flags;
unsigned int msgsize;
} __packed;
struct ic_version {
unsigned short major;
unsigned short minor;
} __packed;
struct icmsg_negotiate {
unsigned short icframe_vercnt;
unsigned short icmsg_vercnt;
unsigned int reserved;
struct ic_version icversion_data[]; /* flexible array member */
} __packed;
struct icmsg_hdr {
struct ic_version icverframe;
unsigned short icmsgtype;
struct ic_version icvermsg;
unsigned short icmsgsize;
unsigned int status;
unsigned char ictransaction_id;
unsigned char icflags;
unsigned char reserved[2];
} __packed;
int rte_vmbus_chan_recv_raw(struct vmbus_br *rxbr, void *data, uint32_t *len);
int rte_vmbus_chan_send(struct vmbus_br *txbr, uint16_t type, void *data,
uint32_t dlen, uint32_t flags);
void vmbus_br_setup(struct vmbus_br *br, void *buf, unsigned int blen);
void *vmbus_uio_map(int *fd, int size);
/* Amount of space available for write */
static inline uint32_t vmbus_br_availwrite(const struct vmbus_br *br, uint32_t windex)
{
uint32_t rindex = br->vbr->rindex;
if (windex >= rindex)
return br->dsize - (windex - rindex);
else
return rindex - windex;
}
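/*
 * Worked example (illustrative): with dsize == 4096, rindex == 512 and
 * windex == 1024, the writer is ahead of the reader, so the free space
 * wraps around the end of the ring:
 *
 *	avail = 4096 - (1024 - 512) == 3584 bytes
 *
 * If the writer has wrapped (windex < rindex), only rindex - windex bytes
 * remain before the writer would catch up with the reader.
 */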
static inline uint32_t vmbus_br_availread(const struct vmbus_br *br)
{
return br->dsize - vmbus_br_availwrite(br, br->vbr->windex);
}
#endif /* !_VMBUS_BUF_H_ */
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2023 Red Hat
*/
#ifndef UDS_RADIX_SORT_H
#define UDS_RADIX_SORT_H
/*
* Radix sort is implemented using an American Flag sort, an unstable, in-place 8-bit radix
* exchange sort. This is adapted from the algorithm in the paper by Peter M. McIlroy, Keith
* Bostic, and M. Douglas McIlroy, "Engineering Radix Sort".
*
* http://www.usenix.org/publications/compsystems/1993/win_mcilroy.pdf
*/
struct radix_sorter;
int __must_check uds_make_radix_sorter(unsigned int count, struct radix_sorter **sorter);
void uds_free_radix_sorter(struct radix_sorter *sorter);
int __must_check uds_radix_sort(struct radix_sorter *sorter, const unsigned char *keys[],
unsigned int count, unsigned short length);
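/*
 * Minimal usage sketch (illustrative only; error handling elided, and COUNT
 * and KEY_LENGTH are caller-defined):
 *
 *	struct radix_sorter *sorter;
 *	const unsigned char *keys[COUNT];
 *
 *	result = uds_make_radix_sorter(COUNT, &sorter);
 *	result = uds_radix_sort(sorter, keys, COUNT, KEY_LENGTH);
 *	uds_free_radix_sorter(sorter);
 *
 * All keys are assumed to be at least KEY_LENGTH bytes long.
 */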
#endif /* UDS_RADIX_SORT_H */
|
// SPDX-License-Identifier: MIT
#include "nouveau_drv.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
#include "nouveau_dma.h"
#include "nouveau_exec.h"
#include "nouveau_abi16.h"
#include "nouveau_chan.h"
#include "nouveau_sched.h"
#include "nouveau_uvmm.h"
/**
* DOC: Overview
*
* Nouveau's VM_BIND / EXEC UAPI consists of three ioctls: DRM_NOUVEAU_VM_INIT,
* DRM_NOUVEAU_VM_BIND and DRM_NOUVEAU_EXEC.
*
* To use the UAPI, a user client must first initialize the VA space using the
* DRM_NOUVEAU_VM_INIT ioctl, specifying which region of the VA space should be
* managed by the kernel and which by the UMD.
*
* The DRM_NOUVEAU_VM_BIND ioctl provides clients an interface to manage the
* userspace-manageable portion of the VA space. It provides operations to map
* and unmap memory. Mappings may be flagged as sparse. Sparse mappings are not
* backed by a GEM object and the kernel will ignore GEM handles provided
* alongside a sparse mapping.
*
* Userspace may request memory backed mappings either within or outside of the
* bounds (but not crossing those bounds) of a previously mapped sparse
* mapping. Subsequently requested memory backed mappings within a sparse
* mapping will take precedence over the corresponding range of the sparse
* mapping. If such memory backed mappings are unmapped the kernel will make
* sure that the corresponding sparse mapping will take their place again.
* Requests to unmap a sparse mapping that still contains memory backed mappings
* will result in those memory backed mappings being unmapped first.
*
* Unmap requests are not bound to the range of existing mappings and can even
* overlap the bounds of sparse mappings. For such a request the kernel will
* make sure to unmap all memory backed mappings within the given range,
* splitting up memory backed mappings which are only partially contained
* within the given range. Unmap requests with the sparse flag set must match
* the range of a previously mapped sparse mapping exactly though.
*
* While the kernel generally permits arbitrary sequences and ranges of memory
* backed mappings being mapped and unmapped, either within a single or multiple
* VM_BIND ioctl calls, there are some restrictions for sparse mappings.
*
* The kernel does not permit the following:
* - unmap non-existent sparse mappings
* - unmap a sparse mapping and map a new sparse mapping overlapping the range
* of the previously unmapped sparse mapping within the same VM_BIND ioctl
* - unmap a sparse mapping and map new memory backed mappings overlapping the
* range of the previously unmapped sparse mapping within the same VM_BIND
* ioctl
*
* When using the VM_BIND ioctl to request the kernel to map memory to a given
* virtual address in the GPU's VA space there is no guarantee that the actual
* mappings are created in the GPU's MMU. If the given memory is swapped out
* at the time the bind operation is executed, the kernel will stash the mapping
* details into its internal allocator and create the actual MMU mappings once
* the memory is swapped back in. While this is transparent for userspace, it is
* guaranteed that all the backing memory is swapped back in and all the memory
* mappings, as requested by userspace previously, are actually mapped once the
* DRM_NOUVEAU_EXEC ioctl is called to submit an exec job.
*
* A VM_BIND job can be executed either synchronously or asynchronously. If
* executed asynchronously, userspace may provide a list of syncobjs this job
* will wait for and/or a list of syncobjs the kernel will signal once the
* VM_BIND job finishes execution. If executed synchronously, the ioctl will
* block until the bind job is finished. For synchronous jobs the kernel does
* not permit any syncobjs to be submitted.
*
* To execute a push buffer the UAPI provides the DRM_NOUVEAU_EXEC ioctl. EXEC
* jobs are always executed asynchronously, and, like VM_BIND jobs, provide
* the option to synchronize them with syncobjs.
*
* Besides that, EXEC jobs can be scheduled for a specified channel to execute on.
*
* Since VM_BIND jobs update the GPU's VA space on job submit, EXEC jobs have
* an up-to-date view of the VA space. However, the actual mappings might still
* be pending. Hence, EXEC jobs require the fences of the corresponding VM_BIND
* jobs they depend on to be attached to them.
*/
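/*
 * Illustrative call sequence from a userspace client (a sketch only; the
 * exact request structures live in the UAPI headers):
 *
 *	1. DRM_NOUVEAU_VM_INIT - split the VA space into a kernel-managed
 *	   and a UMD-managed region
 *	2. DRM_NOUVEAU_VM_BIND - map memory backed (or sparse) ranges within
 *	   the UMD-managed region, optionally asynchronously with syncobjs
 *	3. DRM_NOUVEAU_EXEC    - submit push buffers; at this point all
 *	   previously requested mappings are guaranteed to be in place
 */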
static int
nouveau_exec_job_submit(struct nouveau_job *job,
struct drm_gpuvm_exec *vme)
{
struct nouveau_exec_job *exec_job = to_nouveau_exec_job(job);
struct nouveau_cli *cli = job->cli;
struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(cli);
int ret;
/* Create a new fence, but do not emit yet. */
ret = nouveau_fence_create(&exec_job->fence, exec_job->chan);
if (ret)
return ret;
nouveau_uvmm_lock(uvmm);
ret = drm_gpuvm_exec_lock(vme);
if (ret) {
nouveau_uvmm_unlock(uvmm);
return ret;
}
nouveau_uvmm_unlock(uvmm);
ret = drm_gpuvm_exec_validate(vme);
if (ret) {
drm_gpuvm_exec_unlock(vme);
return ret;
}
return 0;
}
static void
nouveau_exec_job_armed_submit(struct nouveau_job *job,
struct drm_gpuvm_exec *vme)
{
drm_gpuvm_exec_resv_add_fence(vme, job->done_fence,
job->resv_usage, job->resv_usage);
drm_gpuvm_exec_unlock(vme);
}
static struct dma_fence *
nouveau_exec_job_run(struct nouveau_job *job)
{
struct nouveau_exec_job *exec_job = to_nouveau_exec_job(job);
struct nouveau_channel *chan = exec_job->chan;
struct nouveau_fence *fence = exec_job->fence;
int i, ret;
ret = nouveau_dma_wait(chan, exec_job->push.count + 1, 16);
if (ret) {
NV_PRINTK(err, job->cli, "nv50cal_space: %d\n", ret);
return ERR_PTR(ret);
}
for (i = 0; i < exec_job->push.count; i++) {
struct drm_nouveau_exec_push *p = &exec_job->push.s[i];
bool no_prefetch = p->flags & DRM_NOUVEAU_EXEC_PUSH_NO_PREFETCH;
nv50_dma_push(chan, p->va, p->va_len, no_prefetch);
}
ret = nouveau_fence_emit(fence);
if (ret) {
nouveau_fence_unref(&exec_job->fence);
NV_PRINTK(err, job->cli, "error fencing pushbuf: %d\n", ret);
WIND_RING(chan);
return ERR_PTR(ret);
}
/* The fence was emitted successfully, set the job's fence pointer to
* NULL in order to avoid freeing it up when the job is cleaned up.
*/
exec_job->fence = NULL;
return &fence->base;
}
static void
nouveau_exec_job_free(struct nouveau_job *job)
{
struct nouveau_exec_job *exec_job = to_nouveau_exec_job(job);
nouveau_job_done(job);
nouveau_job_free(job);
kfree(exec_job->fence);
kfree(exec_job->push.s);
kfree(exec_job);
}
static enum drm_gpu_sched_stat
nouveau_exec_job_timeout(struct nouveau_job *job)
{
struct nouveau_exec_job *exec_job = to_nouveau_exec_job(job);
struct nouveau_channel *chan = exec_job->chan;
if (unlikely(!atomic_read(&chan->killed)))
nouveau_channel_kill(chan);
NV_PRINTK(warn, job->cli, "job timeout, channel %d killed!\n",
chan->chid);
return DRM_GPU_SCHED_STAT_NOMINAL;
}
static const struct nouveau_job_ops nouveau_exec_job_ops = {
.submit = nouveau_exec_job_submit,
.armed_submit = nouveau_exec_job_armed_submit,
.run = nouveau_exec_job_run,
.free = nouveau_exec_job_free,
.timeout = nouveau_exec_job_timeout,
};
int
nouveau_exec_job_init(struct nouveau_exec_job **pjob,
struct nouveau_exec_job_args *__args)
{
struct nouveau_exec_job *job;
struct nouveau_job_args args = {};
int i, ret;
for (i = 0; i < __args->push.count; i++) {
struct drm_nouveau_exec_push *p = &__args->push.s[i];
if (unlikely(p->va_len > NV50_DMA_PUSH_MAX_LENGTH)) {
NV_PRINTK(err, nouveau_cli(__args->file_priv),
"pushbuf size exceeds limit: 0x%x max 0x%x\n",
p->va_len, NV50_DMA_PUSH_MAX_LENGTH);
return -EINVAL;
}
}
job = *pjob = kzalloc(sizeof(*job), GFP_KERNEL);
if (!job)
return -ENOMEM;
job->push.count = __args->push.count;
if (__args->push.count) {
job->push.s = kmemdup(__args->push.s,
sizeof(*__args->push.s) *
__args->push.count,
GFP_KERNEL);
if (!job->push.s) {
ret = -ENOMEM;
goto err_free_job;
}
}
args.file_priv = __args->file_priv;
job->chan = __args->chan;
args.sched = __args->sched;
/* Plus one to account for the HW fence. */
args.credits = job->push.count + 1;
args.in_sync.count = __args->in_sync.count;
args.in_sync.s = __args->in_sync.s;
args.out_sync.count = __args->out_sync.count;
args.out_sync.s = __args->out_sync.s;
args.ops = &nouveau_exec_job_ops;
args.resv_usage = DMA_RESV_USAGE_WRITE;
ret = nouveau_job_init(&job->base, &args);
if (ret)
goto err_free_pushs;
return 0;
err_free_pushs:
kfree(job->push.s);
err_free_job:
kfree(job);
*pjob = NULL;
return ret;
}
static int
nouveau_exec(struct nouveau_exec_job_args *args)
{
struct nouveau_exec_job *job;
int ret;
ret = nouveau_exec_job_init(&job, args);
if (ret)
return ret;
ret = nouveau_job_submit(&job->base);
if (ret)
goto err_job_fini;
return 0;
err_job_fini:
nouveau_job_fini(&job->base);
return ret;
}
static int
nouveau_exec_ucopy(struct nouveau_exec_job_args *args,
struct drm_nouveau_exec *req)
{
struct drm_nouveau_sync **s;
u32 inc = req->wait_count;
u64 ins = req->wait_ptr;
u32 outc = req->sig_count;
u64 outs = req->sig_ptr;
u32 pushc = req->push_count;
u64 pushs = req->push_ptr;
int ret;
if (pushc) {
args->push.count = pushc;
args->push.s = u_memcpya(pushs, pushc, sizeof(*args->push.s));
if (IS_ERR(args->push.s))
return PTR_ERR(args->push.s);
}
if (inc) {
s = &args->in_sync.s;
args->in_sync.count = inc;
*s = u_memcpya(ins, inc, sizeof(**s));
if (IS_ERR(*s)) {
ret = PTR_ERR(*s);
goto err_free_pushs;
}
}
if (outc) {
s = &args->out_sync.s;
args->out_sync.count = outc;
*s = u_memcpya(outs, outc, sizeof(**s));
if (IS_ERR(*s)) {
ret = PTR_ERR(*s);
goto err_free_ins;
}
}
return 0;
err_free_pushs:
u_free(args->push.s);
err_free_ins:
u_free(args->in_sync.s);
return ret;
}
static void
nouveau_exec_ufree(struct nouveau_exec_job_args *args)
{
u_free(args->push.s);
u_free(args->in_sync.s);
u_free(args->out_sync.s);
}
int
nouveau_exec_ioctl_exec(struct drm_device *dev,
void *data,
struct drm_file *file_priv)
{
struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_abi16_chan *chan16;
struct nouveau_channel *chan = NULL;
struct nouveau_exec_job_args args = {};
struct drm_nouveau_exec *req = data;
int push_max, ret = 0;
if (unlikely(!abi16))
return -ENOMEM;
/* abi16 locks already */
if (unlikely(!nouveau_cli_uvmm(cli)))
return nouveau_abi16_put(abi16, -ENOSYS);
list_for_each_entry(chan16, &abi16->channels, head) {
if (chan16->chan->chid == req->channel) {
chan = chan16->chan;
break;
}
}
if (!chan)
return nouveau_abi16_put(abi16, -ENOENT);
if (unlikely(atomic_read(&chan->killed)))
return nouveau_abi16_put(abi16, -ENODEV);
if (!chan->dma.ib_max)
return nouveau_abi16_put(abi16, -ENOSYS);
push_max = nouveau_exec_push_max_from_ib_max(chan->dma.ib_max);
if (unlikely(req->push_count > push_max)) {
NV_PRINTK(err, cli, "pushbuf push count exceeds limit: %d max %d\n",
req->push_count, push_max);
return nouveau_abi16_put(abi16, -EINVAL);
}
ret = nouveau_exec_ucopy(&args, req);
if (ret)
goto out;
args.sched = chan16->sched;
args.file_priv = file_priv;
args.chan = chan;
ret = nouveau_exec(&args);
if (ret)
goto out_free_args;
out_free_args:
nouveau_exec_ufree(&args);
out:
return nouveau_abi16_put(abi16, ret);
}
|
// SPDX-License-Identifier: GPL-2.0-only
/* Unstable XFRM state BPF helpers.
*
* Note that it is allowed to break compatibility for these functions since the
* interface they are exposed through to BPF programs is explicitly unstable.
*/
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <net/xdp.h>
#include <net/xfrm.h>
/* bpf_xfrm_state_opts - Options for XFRM state lookup helpers
*
* Members:
* @error - Out parameter, set for any errors encountered
* Values:
* -EINVAL - netns_id is less than -1
* -EINVAL - opts__sz isn't BPF_XFRM_STATE_OPTS_SZ
* -ENONET - No network namespace found for netns_id
* -ENOENT - No xfrm_state found
* @netns_id - Specify the network namespace for lookup
* Values:
* BPF_F_CURRENT_NETNS (-1)
* Use namespace associated with ctx
* [0, S32_MAX]
* Network Namespace ID
* @mark - XFRM mark to match on
* @daddr - Destination address to match on
* @spi - Security parameter index to match on
* @proto - IP protocol to match on (e.g. IPPROTO_ESP)
* @family - Protocol family to match on (AF_INET/AF_INET6)
*/
struct bpf_xfrm_state_opts {
s32 error;
s32 netns_id;
u32 mark;
xfrm_address_t daddr;
__be32 spi;
u8 proto;
u16 family;
};
enum {
BPF_XFRM_STATE_OPTS_SZ = sizeof(struct bpf_xfrm_state_opts),
};
__bpf_kfunc_start_defs();
/* bpf_xdp_get_xfrm_state - Get XFRM state
*
* A `struct xfrm_state *`, if found, must be released with a corresponding
* bpf_xdp_xfrm_state_release.
*
* Parameters:
* @ctx - Pointer to ctx (xdp_md) in XDP program
* Cannot be NULL
* @opts - Options for lookup (documented above)
* Cannot be NULL
* @opts__sz - Length of the bpf_xfrm_state_opts structure
* Must be BPF_XFRM_STATE_OPTS_SZ
*/
__bpf_kfunc struct xfrm_state *
bpf_xdp_get_xfrm_state(struct xdp_md *ctx, struct bpf_xfrm_state_opts *opts, u32 opts__sz)
{
struct xdp_buff *xdp = (struct xdp_buff *)ctx;
struct net *net = dev_net(xdp->rxq->dev);
struct xfrm_state *x;
if (!opts || opts__sz < sizeof(opts->error))
return NULL;
if (opts__sz != BPF_XFRM_STATE_OPTS_SZ) {
opts->error = -EINVAL;
return NULL;
}
if (unlikely(opts->netns_id < BPF_F_CURRENT_NETNS)) {
opts->error = -EINVAL;
return NULL;
}
if (opts->netns_id >= 0) {
net = get_net_ns_by_id(net, opts->netns_id);
if (unlikely(!net)) {
opts->error = -ENONET;
return NULL;
}
}
x = xfrm_state_lookup(net, opts->mark, &opts->daddr, opts->spi,
opts->proto, opts->family);
if (opts->netns_id >= 0)
put_net(net);
if (!x)
opts->error = -ENOENT;
return x;
}
/* bpf_xdp_xfrm_state_release - Release acquired xfrm_state object
*
* This must be invoked for referenced PTR_TO_BTF_ID, and the verifier rejects
* the program if any references remain in the program in all of the explored
* states.
*
* Parameters:
* @x - Pointer to referenced xfrm_state object, obtained using
* bpf_xdp_get_xfrm_state.
*/
__bpf_kfunc void bpf_xdp_xfrm_state_release(struct xfrm_state *x)
{
xfrm_state_put(x);
}
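/*
 * Usage sketch from an XDP program (illustrative; spi and daddr are assumed
 * to have been parsed from the packet by the caller):
 *
 *	struct bpf_xfrm_state_opts opts = {
 *		.netns_id = BPF_F_CURRENT_NETNS,
 *		.daddr.a4 = daddr,
 *		.spi = spi,
 *		.proto = IPPROTO_ESP,
 *		.family = AF_INET,
 *	};
 *	struct xfrm_state *x;
 *
 *	x = bpf_xdp_get_xfrm_state(ctx, &opts, sizeof(opts));
 *	if (!x)
 *		return XDP_PASS;
 *	... inspect x ...
 *	bpf_xdp_xfrm_state_release(x);
 */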
__bpf_kfunc_end_defs();
BTF_KFUNCS_START(xfrm_state_kfunc_set)
BTF_ID_FLAGS(func, bpf_xdp_get_xfrm_state, KF_RET_NULL | KF_ACQUIRE)
BTF_ID_FLAGS(func, bpf_xdp_xfrm_state_release, KF_RELEASE)
BTF_KFUNCS_END(xfrm_state_kfunc_set)
static const struct btf_kfunc_id_set xfrm_state_xdp_kfunc_set = {
.owner = THIS_MODULE,
.set = &xfrm_state_kfunc_set,
};
int __init register_xfrm_state_bpf(void)
{
return register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP,
&xfrm_state_xdp_kfunc_set);
}
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* s2255drv.c - a driver for the Sensoray 2255 USB video capture device
*
* Copyright (C) 2007-2014 by Sensoray Company Inc.
* Dean Anderson
*
* Some video buffer code based on vivi driver:
*
* Sensoray 2255 device supports 4 simultaneous channels.
* The channels are not "crossbar" inputs, they are physically
* attached to separate video decoders.
*
* Because of USB 2.0 bandwidth limitations, there is only a
* certain amount of data which may be transferred at one time.
*
* Example maximum bandwidth utilization:
*
* -full size, color mode YUYV or YUV422P: 2 channels at once
* -full or half size Grey scale: all 4 channels at once
* -half size, color mode YUYV or YUV422P: all 4 channels at once
* -full size, color mode YUYV or YUV422P 1/2 frame rate: all 4 channels
* at once.
*/
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/usb.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-event.h>
#define S2255_VERSION "1.25.1"
#define FIRMWARE_FILE_NAME "f2255usb.bin"
/* default JPEG quality */
#define S2255_DEF_JPEG_QUAL 50
/* vendor request in */
#define S2255_VR_IN 0
/* vendor request out */
#define S2255_VR_OUT 1
/* firmware query */
#define S2255_VR_FW 0x30
/* USB endpoint number for configuring the device */
#define S2255_CONFIG_EP 2
/* maximum time for DSP to start responding after last FW word loaded (ms) */
#define S2255_DSP_BOOTTIME 800
/* maximum time to wait for firmware to load (ms) */
#define S2255_LOAD_TIMEOUT (5000 + S2255_DSP_BOOTTIME)
#define S2255_MIN_BUFS 2
#define S2255_SETMODE_TIMEOUT 500
#define S2255_VIDSTATUS_TIMEOUT 350
#define S2255_MARKER_FRAME cpu_to_le32(0x2255DA4AL)
#define S2255_MARKER_RESPONSE cpu_to_le32(0x2255ACACL)
#define S2255_RESPONSE_SETMODE cpu_to_le32(0x01)
#define S2255_RESPONSE_FW cpu_to_le32(0x10)
#define S2255_RESPONSE_STATUS cpu_to_le32(0x20)
#define S2255_USB_XFER_SIZE (16 * 1024)
#define MAX_CHANNELS 4
#define SYS_FRAMES 4
/* maximum size is PAL full size plus room for the marker header(s) */
#define SYS_FRAMES_MAXSIZE (720*288*2*2 + 4096)
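/*
 * I.e. 720 pixels/line * 288 lines/field * 2 fields * 2 bytes/pixel
 * == 829440 bytes for a full PAL frame, plus 4096 bytes of headroom for
 * the marker header(s) == 833536 bytes per frame buffer.
 */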
#define DEF_USB_BLOCK S2255_USB_XFER_SIZE
#define LINE_SZ_4CIFS_NTSC 640
#define LINE_SZ_2CIFS_NTSC 640
#define LINE_SZ_1CIFS_NTSC 320
#define LINE_SZ_4CIFS_PAL 704
#define LINE_SZ_2CIFS_PAL 704
#define LINE_SZ_1CIFS_PAL 352
#define NUM_LINES_4CIFS_NTSC 240
#define NUM_LINES_2CIFS_NTSC 240
#define NUM_LINES_1CIFS_NTSC 240
#define NUM_LINES_4CIFS_PAL 288
#define NUM_LINES_2CIFS_PAL 288
#define NUM_LINES_1CIFS_PAL 288
#define LINE_SZ_DEF 640
#define NUM_LINES_DEF 240
/* predefined settings */
#define FORMAT_NTSC 1
#define FORMAT_PAL 2
#define SCALE_4CIFS 1 /* 640x480(NTSC) or 704x576(PAL) */
#define SCALE_2CIFS 2 /* 640x240(NTSC) or 704x288(PAL) */
#define SCALE_1CIFS 3 /* 320x240(NTSC) or 352x288(PAL) */
/* SCALE_4CIFSI is the 2 fields interpolated into one */
#define SCALE_4CIFSI 4 /* 640x480(NTSC) or 704x576(PAL) high quality */
#define COLOR_YUVPL 1 /* YUV planar */
#define COLOR_YUVPK 2 /* YUV packed */
#define COLOR_Y8 4 /* monochrome */
#define COLOR_JPG 5 /* JPEG */
#define MASK_COLOR 0x000000ff
#define MASK_JPG_QUALITY 0x0000ff00
#define MASK_INPUT_TYPE 0x000f0000
/* frame decimation. */
#define FDEC_1 1 /* capture every frame. default */
#define FDEC_2 2 /* capture every 2nd frame */
#define FDEC_3 3 /* capture every 3rd frame */
#define FDEC_5 5 /* capture every 5th frame */
/*-------------------------------------------------------
* Default mode parameters.
*-------------------------------------------------------*/
#define DEF_SCALE SCALE_4CIFS
#define DEF_COLOR COLOR_YUVPL
#define DEF_FDEC FDEC_1
#define DEF_BRIGHT 0
#define DEF_CONTRAST 0x5c
#define DEF_SATURATION 0x80
#define DEF_HUE 0
/* usb config commands */
#define IN_DATA_TOKEN cpu_to_le32(0x2255c0de)
#define CMD_2255 0xc2255000
#define CMD_SET_MODE cpu_to_le32((CMD_2255 | 0x10))
#define CMD_START cpu_to_le32((CMD_2255 | 0x20))
#define CMD_STOP cpu_to_le32((CMD_2255 | 0x30))
#define CMD_STATUS cpu_to_le32((CMD_2255 | 0x40))
struct s2255_mode {
u32 format; /* input video format (NTSC, PAL) */
u32 scale; /* output video scale */
u32 color; /* output video color format */
u32 fdec; /* frame decimation */
u32 bright; /* brightness */
u32 contrast; /* contrast */
u32 saturation; /* saturation */
u32 hue; /* hue (NTSC only)*/
u32 single; /* capture 1 frame at a time (!=0), continuously (==0)*/
u32 usb_block; /* USB transfer block size; normally DEF_USB_BLOCK */
u32 restart; /* if DSP requires restart */
};
#define S2255_READ_IDLE 0
#define S2255_READ_FRAME 1
/* frame structure */
struct s2255_framei {
unsigned long size;
unsigned long ulState; /* ulState: S2255_READ_IDLE or S2255_READ_FRAME */
void *lpvbits; /* image data */
unsigned long cur_size; /* current data copied to it */
};
/* image buffer structure */
struct s2255_bufferi {
unsigned long dwFrames; /* number of frames in buffer */
struct s2255_framei frame[SYS_FRAMES]; /* array of FRAME structures */
};
#define DEF_MODEI_NTSC_CONT {FORMAT_NTSC, DEF_SCALE, DEF_COLOR, \
DEF_FDEC, DEF_BRIGHT, DEF_CONTRAST, DEF_SATURATION, \
DEF_HUE, 0, DEF_USB_BLOCK, 0}
/* for firmware loading, fw_state */
#define S2255_FW_NOTLOADED 0
#define S2255_FW_LOADED_DSPWAIT 1
#define S2255_FW_SUCCESS 2
#define S2255_FW_FAILED 3
#define S2255_FW_DISCONNECTING 4
#define S2255_FW_MARKER cpu_to_le32(0x22552f2f)
struct s2255_fw {
int fw_loaded;
int fw_size;
struct urb *fw_urb;
atomic_t fw_state;
void *pfw_data;
wait_queue_head_t wait_fw;
const struct firmware *fw;
};
struct s2255_pipeinfo {
u32 max_transfer_size;
u32 cur_transfer_size;
u8 *transfer_buffer;
u32 state;
void *stream_urb;
void *dev; /* back pointer to s2255_dev struct*/
u32 err_count;
u32 idx;
};
struct s2255_fmt; /* forward declaration */
struct s2255_dev;
/* 2255 video channel */
struct s2255_vc {
struct s2255_dev *dev;
struct video_device vdev;
struct v4l2_ctrl_handler hdl;
struct v4l2_ctrl *jpegqual_ctrl;
int resources;
struct list_head buf_list;
struct s2255_bufferi buffer;
struct s2255_mode mode;
v4l2_std_id std;
/* jpeg compression */
unsigned jpegqual;
/* capture parameters (for high quality mode full size) */
struct v4l2_captureparm cap_parm;
int cur_frame;
int last_frame;
/* allocated image size */
unsigned long req_image_size;
/* received packet size */
unsigned long pkt_size;
int bad_payload;
unsigned long frame_count;
/* if JPEG image */
int jpg_size;
/* if channel configured to default state */
int configured;
wait_queue_head_t wait_setmode;
int setmode_ready;
/* video status items */
int vidstatus;
wait_queue_head_t wait_vidstatus;
int vidstatus_ready;
unsigned int width;
unsigned int height;
enum v4l2_field field;
const struct s2255_fmt *fmt;
int idx; /* channel number on device, 0-3 */
struct vb2_queue vb_vidq;
struct mutex vb_lock; /* streaming lock */
spinlock_t qlock;
};
struct s2255_dev {
struct s2255_vc vc[MAX_CHANNELS];
struct v4l2_device v4l2_dev;
refcount_t num_channels;
int frames;
struct mutex lock; /* channels[].vdev.lock */
struct mutex cmdlock; /* protects cmdbuf */
struct usb_device *udev;
struct usb_interface *interface;
u8 read_endpoint;
struct timer_list timer;
struct s2255_fw *fw_data;
struct s2255_pipeinfo pipe;
u32 cc; /* current channel */
int frame_ready;
int chn_ready;
/* dsp firmware version (f2255usb.bin) */
int dsp_fw_ver;
u16 pid; /* product id */
#define S2255_CMDBUF_SIZE 512
__le32 *cmdbuf;
};
static inline struct s2255_dev *to_s2255_dev(struct v4l2_device *v4l2_dev)
{
return container_of(v4l2_dev, struct s2255_dev, v4l2_dev);
}
struct s2255_fmt {
u32 fourcc;
int depth;
};
/* buffer for one video frame */
struct s2255_buffer {
/* common v4l buffer stuff -- must be first */
struct vb2_v4l2_buffer vb;
struct list_head list;
};
/* current cypress EEPROM firmware version */
#define S2255_CUR_USB_FWVER ((3 << 8) | 12)
/* current DSP FW version */
#define S2255_CUR_DSP_FWVER 10104
/* Need DSP version 5+ for video status feature */
#define S2255_MIN_DSP_STATUS 5
#define S2255_MIN_DSP_COLORFILTER 8
#define S2255_NORMS (V4L2_STD_ALL)
/* private V4L2 controls */
/*
* The following chart displays how COLORFILTER should be set
* =========================================================
* = fourcc = COLORFILTER =
* = ===============================
* = = 0 = 1 =
* =========================================================
* = V4L2_PIX_FMT_GREY(Y8) = monochrome from = monochrome=
* = = s-video or = composite =
* = = B/W camera = input =
* =========================================================
* = other = color, svideo = color, =
* = = = composite =
* =========================================================
*
* Notes:
* channels 0-3 on 2255 are composite
* channels 0-1 on 2257 are composite, 2-3 are s-video
* If COLORFILTER is 0 with a composite color camera connected,
* the output will appear monochrome but hatching
* will occur.
* COLORFILTER is different from "color killer" and "color effects"
* for reasons above.
*/
#define S2255_V4L2_YC_ON 1
#define S2255_V4L2_YC_OFF 0
#define V4L2_CID_S2255_COLORFILTER (V4L2_CID_USER_S2255_BASE + 0)
/* frame prefix size (sent once every frame) */
#define PREFIX_SIZE 512
/* Channels on box are in reverse order */
static unsigned long G_chnmap[MAX_CHANNELS] = {3, 2, 1, 0};
static int debug;
static int s2255_start_readpipe(struct s2255_dev *dev);
static void s2255_stop_readpipe(struct s2255_dev *dev);
static int s2255_start_acquire(struct s2255_vc *vc);
static int s2255_stop_acquire(struct s2255_vc *vc);
static void s2255_fillbuff(struct s2255_vc *vc, struct s2255_buffer *buf,
int jpgsize);
static int s2255_set_mode(struct s2255_vc *vc, struct s2255_mode *mode);
static int s2255_board_shutdown(struct s2255_dev *dev);
static void s2255_fwload_start(struct s2255_dev *dev);
static void s2255_destroy(struct s2255_dev *dev);
static long s2255_vendor_req(struct s2255_dev *dev, unsigned char req,
u16 index, u16 value, void *buf,
s32 buf_len, int bOut);
/* dev_err macro with driver name */
#define S2255_DRIVER_NAME "s2255"
#define s2255_dev_err(dev, fmt, arg...) \
dev_err(dev, S2255_DRIVER_NAME " - " fmt, ##arg)
#define dprintk(dev, level, fmt, arg...) \
v4l2_dbg(level, debug, &dev->v4l2_dev, fmt, ## arg)
static struct usb_driver s2255_driver;
/* start video number */
static int video_nr = -1; /* /dev/videoN, -1 for autodetect */
/* Enable jpeg capture. */
static int jpeg_enable = 1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Debug level(0-100) default 0");
module_param(video_nr, int, 0644);
MODULE_PARM_DESC(video_nr, "start video minor(-1 default autodetect)");
module_param(jpeg_enable, int, 0644);
MODULE_PARM_DESC(jpeg_enable, "Jpeg enable(1-on 0-off) default 1");
/* USB device table */
#define USB_SENSORAY_VID 0x1943
static const struct usb_device_id s2255_table[] = {
{USB_DEVICE(USB_SENSORAY_VID, 0x2255)},
{USB_DEVICE(USB_SENSORAY_VID, 0x2257)}, /*same family as 2255*/
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, s2255_table);
#define BUFFER_TIMEOUT msecs_to_jiffies(400)
/* image formats. */
/* JPEG formats must be defined last to support jpeg_enable parameter */
static const struct s2255_fmt formats[] = {
{
.fourcc = V4L2_PIX_FMT_YUYV,
.depth = 16
}, {
.fourcc = V4L2_PIX_FMT_UYVY,
.depth = 16
}, {
.fourcc = V4L2_PIX_FMT_YUV422P,
.depth = 16
}, {
.fourcc = V4L2_PIX_FMT_GREY,
.depth = 8
}, {
.fourcc = V4L2_PIX_FMT_JPEG,
.depth = 24
}, {
.fourcc = V4L2_PIX_FMT_MJPEG,
.depth = 24
}
};
static int norm_maxw(struct s2255_vc *vc)
{
return (vc->std & V4L2_STD_525_60) ?
LINE_SZ_4CIFS_NTSC : LINE_SZ_4CIFS_PAL;
}
static int norm_maxh(struct s2255_vc *vc)
{
return (vc->std & V4L2_STD_525_60) ?
(NUM_LINES_1CIFS_NTSC * 2) : (NUM_LINES_1CIFS_PAL * 2);
}
static int norm_minw(struct s2255_vc *vc)
{
return (vc->std & V4L2_STD_525_60) ?
LINE_SZ_1CIFS_NTSC : LINE_SZ_1CIFS_PAL;
}
static int norm_minh(struct s2255_vc *vc)
{
return (vc->std & V4L2_STD_525_60) ?
(NUM_LINES_1CIFS_NTSC) : (NUM_LINES_1CIFS_PAL);
}
/*
 * TODO: move YUV reordering to hardware.
 * Converts 2255 planar format to YUYV or UYVY.
 */
static void planar422p_to_yuv_packed(const unsigned char *in,
unsigned char *out,
int width, int height,
int fmt)
{
unsigned char *pY;
unsigned char *pCb;
unsigned char *pCr;
unsigned long size = height * width;
unsigned int i;
pY = (unsigned char *)in;
pCr = (unsigned char *)in + height * width;
pCb = (unsigned char *)in + height * width + (height * width / 2);
for (i = 0; i < size * 2; i += 4) {
out[i] = (fmt == V4L2_PIX_FMT_YUYV) ? *pY++ : *pCr++;
out[i + 1] = (fmt == V4L2_PIX_FMT_YUYV) ? *pCr++ : *pY++;
out[i + 2] = (fmt == V4L2_PIX_FMT_YUYV) ? *pY++ : *pCb++;
out[i + 3] = (fmt == V4L2_PIX_FMT_YUYV) ? *pCb++ : *pY++;
}
return;
}
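/*
 * Worked example of the plane layout consumed above (illustrative): for a
 * 640x480 frame, the device buffer holds the Y plane at offset 0 (307200
 * bytes), followed by 153600 bytes of the first chroma plane and 153600
 * bytes of the second, matching the pY/pCr/pCb pointers set up above.
 */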
static void s2255_reset_dsppower(struct s2255_dev *dev)
{
s2255_vendor_req(dev, 0x40, 0x0000, 0x0001, NULL, 0, 1);
msleep(50);
s2255_vendor_req(dev, 0x50, 0x0000, 0x0000, NULL, 0, 1);
msleep(600);
s2255_vendor_req(dev, 0x10, 0x0000, 0x0000, NULL, 0, 1);
return;
}
/* Kickstarts the firmware loading; triggered by the timer set up in probe. */
static void s2255_timer(struct timer_list *t)
{
struct s2255_dev *dev = from_timer(dev, t, timer);
struct s2255_fw *data = dev->fw_data;
if (usb_submit_urb(data->fw_urb, GFP_ATOMIC) < 0) {
pr_err("s2255: can't submit urb\n");
atomic_set(&data->fw_state, S2255_FW_FAILED);
/* wake up anything waiting for the firmware */
wake_up(&data->wait_fw);
return;
}
}
/*
 * This loads the firmware asynchronously. Originally this was done
 * synchronously in probe, but it is better to load it asynchronously here
 * than to block inside the probe function, since blocking inside probe
 * affects boot time. FW loading is triggered by the timer in probe.
 */
static void s2255_fwchunk_complete(struct urb *urb)
{
struct s2255_fw *data = urb->context;
struct usb_device *udev = urb->dev;
int len;
if (urb->status) {
dev_err(&udev->dev, "URB failed with status %d\n", urb->status);
atomic_set(&data->fw_state, S2255_FW_FAILED);
/* wake up anything waiting for the firmware */
wake_up(&data->wait_fw);
return;
}
if (data->fw_urb == NULL) {
s2255_dev_err(&udev->dev, "disconnected\n");
atomic_set(&data->fw_state, S2255_FW_FAILED);
/* wake up anything waiting for the firmware */
wake_up(&data->wait_fw);
return;
}
#define CHUNK_SIZE 512
/*
 * All USB transfers must be done with contiguous kernel memory.
 * We can't allocate more than 128k contiguously in the current Linux
 * kernel, so upload the firmware in chunks.
 */
if (data->fw_loaded < data->fw_size) {
len = (data->fw_loaded + CHUNK_SIZE) > data->fw_size ?
data->fw_size % CHUNK_SIZE : CHUNK_SIZE;
if (len < CHUNK_SIZE)
memset(data->pfw_data, 0, CHUNK_SIZE);
memcpy(data->pfw_data,
(char *) data->fw->data + data->fw_loaded, len);
usb_fill_bulk_urb(data->fw_urb, udev, usb_sndbulkpipe(udev, 2),
data->pfw_data, CHUNK_SIZE,
s2255_fwchunk_complete, data);
if (usb_submit_urb(data->fw_urb, GFP_ATOMIC) < 0) {
dev_err(&udev->dev, "failed submit URB\n");
atomic_set(&data->fw_state, S2255_FW_FAILED);
/* wake up anything waiting for the firmware */
wake_up(&data->wait_fw);
return;
}
data->fw_loaded += len;
} else
atomic_set(&data->fw_state, S2255_FW_LOADED_DSPWAIT);
return;
}
static void s2255_got_frame(struct s2255_vc *vc, int jpgsize)
{
struct s2255_buffer *buf;
struct s2255_dev *dev = to_s2255_dev(vc->vdev.v4l2_dev);
unsigned long flags = 0;
spin_lock_irqsave(&vc->qlock, flags);
if (list_empty(&vc->buf_list)) {
dprintk(dev, 1, "No active queue to serve\n");
spin_unlock_irqrestore(&vc->qlock, flags);
return;
}
buf = list_entry(vc->buf_list.next,
struct s2255_buffer, list);
list_del(&buf->list);
buf->vb.vb2_buf.timestamp = ktime_get_ns();
buf->vb.field = vc->field;
buf->vb.sequence = vc->frame_count;
spin_unlock_irqrestore(&vc->qlock, flags);
s2255_fillbuff(vc, buf, jpgsize);
/* tell v4l buffer was filled */
vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
dprintk(dev, 2, "%s: [buf] [%p]\n", __func__, buf);
}
static const struct s2255_fmt *format_by_fourcc(int fourcc)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(formats); i++) {
if (-1 == formats[i].fourcc)
continue;
if (!jpeg_enable && ((formats[i].fourcc == V4L2_PIX_FMT_JPEG) ||
(formats[i].fourcc == V4L2_PIX_FMT_MJPEG)))
continue;
if (formats[i].fourcc == fourcc)
return formats + i;
}
return NULL;
}
/* video buffer vmalloc implementation based partly on VIVI driver which is
* Copyright (c) 2006 by
* Mauro Carvalho Chehab <mchehab--a.t--infradead.org>
* Ted Walther <ted--a.t--enumera.com>
* John Sokol <sokol--a.t--videotechnology.com>
* http://v4l.videotechnology.com/
*
*/
static void s2255_fillbuff(struct s2255_vc *vc,
struct s2255_buffer *buf, int jpgsize)
{
int pos = 0;
const char *tmpbuf;
char *vbuf = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
unsigned long last_frame;
struct s2255_dev *dev = vc->dev;
if (!vbuf)
return;
last_frame = vc->last_frame;
if (last_frame != -1) {
tmpbuf =
(const char *)vc->buffer.frame[last_frame].lpvbits;
switch (vc->fmt->fourcc) {
case V4L2_PIX_FMT_YUYV:
case V4L2_PIX_FMT_UYVY:
planar422p_to_yuv_packed((const unsigned char *)tmpbuf,
vbuf, vc->width,
vc->height,
vc->fmt->fourcc);
break;
case V4L2_PIX_FMT_GREY:
memcpy(vbuf, tmpbuf, vc->width * vc->height);
break;
case V4L2_PIX_FMT_JPEG:
case V4L2_PIX_FMT_MJPEG:
vb2_set_plane_payload(&buf->vb.vb2_buf, 0, jpgsize);
memcpy(vbuf, tmpbuf, jpgsize);
break;
case V4L2_PIX_FMT_YUV422P:
memcpy(vbuf, tmpbuf,
vc->width * vc->height * 2);
break;
default:
pr_info("s2255: unknown format?\n");
}
vc->last_frame = -1;
} else {
pr_err("s2255: =======no frame\n");
return;
}
dprintk(dev, 2, "s2255fill at : Buffer %p size= %d\n",
vbuf, pos);
}
/* ------------------------------------------------------------------
Videobuf operations
------------------------------------------------------------------*/
static int queue_setup(struct vb2_queue *vq,
unsigned int *nbuffers, unsigned int *nplanes,
unsigned int sizes[], struct device *alloc_devs[])
{
struct s2255_vc *vc = vb2_get_drv_priv(vq);
if (*nbuffers < S2255_MIN_BUFS)
*nbuffers = S2255_MIN_BUFS;
*nplanes = 1;
sizes[0] = vc->width * vc->height * (vc->fmt->depth >> 3);
return 0;
}
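/*
 * Worked example (illustrative): for 640x480 YUYV (depth == 16),
 * sizes[0] = 640 * 480 * (16 >> 3) == 614400 bytes per buffer, and at
 * least S2255_MIN_BUFS buffers are requested.
 */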
static int buffer_prepare(struct vb2_buffer *vb)
{
struct s2255_vc *vc = vb2_get_drv_priv(vb->vb2_queue);
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct s2255_buffer *buf = container_of(vbuf, struct s2255_buffer, vb);
int w = vc->width;
int h = vc->height;
unsigned long size;
dprintk(vc->dev, 4, "%s\n", __func__);
if (vc->fmt == NULL)
return -EINVAL;
if ((w < norm_minw(vc)) ||
(w > norm_maxw(vc)) ||
(h < norm_minh(vc)) ||
(h > norm_maxh(vc))) {
dprintk(vc->dev, 4, "invalid buffer prepare\n");
return -EINVAL;
}
size = w * h * (vc->fmt->depth >> 3);
if (vb2_plane_size(vb, 0) < size) {
dprintk(vc->dev, 4, "invalid buffer prepare\n");
return -EINVAL;
}
vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size);
return 0;
}
static void buffer_queue(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct s2255_buffer *buf = container_of(vbuf, struct s2255_buffer, vb);
struct s2255_vc *vc = vb2_get_drv_priv(vb->vb2_queue);
unsigned long flags = 0;
dprintk(vc->dev, 1, "%s\n", __func__);
spin_lock_irqsave(&vc->qlock, flags);
list_add_tail(&buf->list, &vc->buf_list);
spin_unlock_irqrestore(&vc->qlock, flags);
}
static int start_streaming(struct vb2_queue *vq, unsigned int count);
static void stop_streaming(struct vb2_queue *vq);
static const struct vb2_ops s2255_video_qops = {
.queue_setup = queue_setup,
.buf_prepare = buffer_prepare,
.buf_queue = buffer_queue,
.start_streaming = start_streaming,
.stop_streaming = stop_streaming,
};
static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
struct s2255_vc *vc = video_drvdata(file);
struct s2255_dev *dev = vc->dev;
strscpy(cap->driver, "s2255", sizeof(cap->driver));
strscpy(cap->card, "s2255", sizeof(cap->card));
usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info));
return 0;
}
static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
int index = f->index;
if (index >= ARRAY_SIZE(formats))
return -EINVAL;
if (!jpeg_enable && ((formats[index].fourcc == V4L2_PIX_FMT_JPEG) ||
(formats[index].fourcc == V4L2_PIX_FMT_MJPEG)))
return -EINVAL;
f->pixelformat = formats[index].fourcc;
return 0;
}
static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct s2255_vc *vc = video_drvdata(file);
int is_ntsc = vc->std & V4L2_STD_525_60;
f->fmt.pix.width = vc->width;
f->fmt.pix.height = vc->height;
if (f->fmt.pix.height >=
(is_ntsc ? NUM_LINES_1CIFS_NTSC : NUM_LINES_1CIFS_PAL) * 2)
f->fmt.pix.field = V4L2_FIELD_INTERLACED;
else
f->fmt.pix.field = V4L2_FIELD_TOP;
f->fmt.pix.pixelformat = vc->fmt->fourcc;
f->fmt.pix.bytesperline = f->fmt.pix.width * (vc->fmt->depth >> 3);
f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
return 0;
}
static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
const struct s2255_fmt *fmt;
enum v4l2_field field;
struct s2255_vc *vc = video_drvdata(file);
int is_ntsc = vc->std & V4L2_STD_525_60;
fmt = format_by_fourcc(f->fmt.pix.pixelformat);
if (fmt == NULL)
return -EINVAL;
dprintk(vc->dev, 50, "%s NTSC: %d suggested width: %d, height: %d\n",
__func__, is_ntsc, f->fmt.pix.width, f->fmt.pix.height);
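	/*
	 * Snap the requested geometry to a supported capture size:
	 * at least full height selects interlaced 4CIF lines, otherwise
	 * a single field; width is rounded to the 4CIF or 1CIF line size.
	 */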
if (is_ntsc) {
/* NTSC */
if (f->fmt.pix.height >= NUM_LINES_1CIFS_NTSC * 2) {
f->fmt.pix.height = NUM_LINES_1CIFS_NTSC * 2;
field = V4L2_FIELD_INTERLACED;
} else {
f->fmt.pix.height = NUM_LINES_1CIFS_NTSC;
field = V4L2_FIELD_TOP;
}
if (f->fmt.pix.width >= LINE_SZ_4CIFS_NTSC)
f->fmt.pix.width = LINE_SZ_4CIFS_NTSC;
else
f->fmt.pix.width = LINE_SZ_1CIFS_NTSC;
} else {
/* PAL */
if (f->fmt.pix.height >= NUM_LINES_1CIFS_PAL * 2) {
f->fmt.pix.height = NUM_LINES_1CIFS_PAL * 2;
field = V4L2_FIELD_INTERLACED;
} else {
f->fmt.pix.height = NUM_LINES_1CIFS_PAL;
field = V4L2_FIELD_TOP;
}
if (f->fmt.pix.width >= LINE_SZ_4CIFS_PAL)
f->fmt.pix.width = LINE_SZ_4CIFS_PAL;
else
f->fmt.pix.width = LINE_SZ_1CIFS_PAL;
}
f->fmt.pix.field = field;
f->fmt.pix.bytesperline = (f->fmt.pix.width * fmt->depth) >> 3;
f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
dprintk(vc->dev, 50, "%s: set width %d height %d field %d\n", __func__,
f->fmt.pix.width, f->fmt.pix.height, f->fmt.pix.field);
return 0;
}
static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct s2255_vc *vc = video_drvdata(file);
const struct s2255_fmt *fmt;
struct vb2_queue *q = &vc->vb_vidq;
struct s2255_mode mode;
int ret;
ret = vidioc_try_fmt_vid_cap(file, vc, f);
if (ret < 0)
return ret;
fmt = format_by_fourcc(f->fmt.pix.pixelformat);
if (fmt == NULL)
return -EINVAL;
if (vb2_is_busy(q)) {
dprintk(vc->dev, 1, "queue busy\n");
return -EBUSY;
}
mode = vc->mode;
vc->fmt = fmt;
vc->width = f->fmt.pix.width;
vc->height = f->fmt.pix.height;
vc->field = f->fmt.pix.field;
if (vc->width > norm_minw(vc)) {
if (vc->height > norm_minh(vc)) {
if (vc->cap_parm.capturemode &
V4L2_MODE_HIGHQUALITY)
mode.scale = SCALE_4CIFSI;
else
mode.scale = SCALE_4CIFS;
} else
mode.scale = SCALE_2CIFS;
} else {
mode.scale = SCALE_1CIFS;
}
/* color mode */
switch (vc->fmt->fourcc) {
case V4L2_PIX_FMT_GREY:
mode.color &= ~MASK_COLOR;
mode.color |= COLOR_Y8;
break;
case V4L2_PIX_FMT_JPEG:
case V4L2_PIX_FMT_MJPEG:
mode.color &= ~MASK_COLOR;
mode.color |= COLOR_JPG;
mode.color |= (vc->jpegqual << 8);
break;
case V4L2_PIX_FMT_YUV422P:
mode.color &= ~MASK_COLOR;
mode.color |= COLOR_YUVPL;
break;
case V4L2_PIX_FMT_YUYV:
case V4L2_PIX_FMT_UYVY:
default:
mode.color &= ~MASK_COLOR;
mode.color |= COLOR_YUVPK;
break;
}
if ((mode.color & MASK_COLOR) != (vc->mode.color & MASK_COLOR))
mode.restart = 1;
else if (mode.scale != vc->mode.scale)
mode.restart = 1;
else if (mode.format != vc->mode.format)
mode.restart = 1;
vc->mode = mode;
(void) s2255_set_mode(vc, &mode);
return 0;
}
/* write to the configuration pipe, synchronously */
static int s2255_write_config(struct usb_device *udev, unsigned char *pbuf,
int size)
{
int pipe;
int done;
	int retval = -1;
if (udev) {
pipe = usb_sndbulkpipe(udev, S2255_CONFIG_EP);
retval = usb_bulk_msg(udev, pipe, pbuf, size, &done, 500);
}
return retval;
}
static u32 get_transfer_size(struct s2255_mode *mode)
{
	int linesPerFrame = NUM_LINES_DEF;
	int pixelsPerLine = LINE_SZ_DEF;
u32 outImageSize;
u32 usbInSize;
unsigned int mask_mult;
if (mode == NULL)
return 0;
if (mode->format == FORMAT_NTSC) {
switch (mode->scale) {
case SCALE_4CIFS:
case SCALE_4CIFSI:
linesPerFrame = NUM_LINES_4CIFS_NTSC * 2;
pixelsPerLine = LINE_SZ_4CIFS_NTSC;
break;
case SCALE_2CIFS:
linesPerFrame = NUM_LINES_2CIFS_NTSC;
pixelsPerLine = LINE_SZ_2CIFS_NTSC;
break;
case SCALE_1CIFS:
linesPerFrame = NUM_LINES_1CIFS_NTSC;
pixelsPerLine = LINE_SZ_1CIFS_NTSC;
break;
default:
break;
}
} else if (mode->format == FORMAT_PAL) {
switch (mode->scale) {
case SCALE_4CIFS:
case SCALE_4CIFSI:
linesPerFrame = NUM_LINES_4CIFS_PAL * 2;
pixelsPerLine = LINE_SZ_4CIFS_PAL;
break;
case SCALE_2CIFS:
linesPerFrame = NUM_LINES_2CIFS_PAL;
pixelsPerLine = LINE_SZ_2CIFS_PAL;
break;
case SCALE_1CIFS:
linesPerFrame = NUM_LINES_1CIFS_PAL;
pixelsPerLine = LINE_SZ_1CIFS_PAL;
break;
default:
break;
}
}
outImageSize = linesPerFrame * pixelsPerLine;
if ((mode->color & MASK_COLOR) != COLOR_Y8) {
/* 2 bytes/pixel if not monochrome */
outImageSize *= 2;
}
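	/*
	 * Worked example (assuming, e.g., a 512-byte prefix and a
	 * 512-byte USB read block): 4CIFS NTSC color is 480 lines x
	 * 640 pixels x 2 bytes/pixel = 614400 bytes; adding the prefix
	 * gives 614912, already a multiple of the block size, so no
	 * padding would be added below.
	 */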
/* total bytes to send including prefix and 4K padding;
must be a multiple of USB_READ_SIZE */
usbInSize = outImageSize + PREFIX_SIZE; /* always send prefix */
mask_mult = 0xffffffffUL - DEF_USB_BLOCK + 1;
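	/* truncated to 32 bits this is ~(DEF_USB_BLOCK - 1), the mask
	   that rounds down to a block boundary */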
/* if size not a multiple of USB_READ_SIZE */
if (usbInSize & ~mask_mult)
usbInSize = (usbInSize & mask_mult) + (DEF_USB_BLOCK);
return usbInSize;
}
static void s2255_print_cfg(struct s2255_dev *sdev, struct s2255_mode *mode)
{
struct device *dev = &sdev->udev->dev;
dev_info(dev, "------------------------------------------------\n");
dev_info(dev, "format: %d\nscale %d\n", mode->format, mode->scale);
dev_info(dev, "fdec: %d\ncolor %d\n", mode->fdec, mode->color);
dev_info(dev, "bright: 0x%x\n", mode->bright);
dev_info(dev, "------------------------------------------------\n");
}
/*
 * s2255_set_mode is the function which controls the DSP.
 * The restart parameter in struct s2255_mode should be set whenever
 * the image size could change as a result of a new color format,
 * video system or scaling.
 * When the restart parameter is set, wait for the DSP to acknowledge
 * the new mode (or time out) before continuing.
 */
static int s2255_set_mode(struct s2255_vc *vc,
struct s2255_mode *mode)
{
int res;
unsigned long chn_rev;
struct s2255_dev *dev = to_s2255_dev(vc->vdev.v4l2_dev);
int i;
__le32 *buffer = dev->cmdbuf;
mutex_lock(&dev->cmdlock);
chn_rev = G_chnmap[vc->idx];
dprintk(dev, 3, "%s channel: %d\n", __func__, vc->idx);
/* if JPEG, set the quality */
if ((mode->color & MASK_COLOR) == COLOR_JPG) {
mode->color &= ~MASK_COLOR;
mode->color |= COLOR_JPG;
mode->color &= ~MASK_JPG_QUALITY;
mode->color |= (vc->jpegqual << 8);
}
/* save the mode */
vc->mode = *mode;
vc->req_image_size = get_transfer_size(mode);
dprintk(dev, 1, "%s: reqsize %ld\n", __func__, vc->req_image_size);
/* set the mode */
buffer[0] = IN_DATA_TOKEN;
buffer[1] = (__le32) cpu_to_le32(chn_rev);
buffer[2] = CMD_SET_MODE;
for (i = 0; i < sizeof(struct s2255_mode) / sizeof(u32); i++)
buffer[3 + i] = cpu_to_le32(((u32 *)&vc->mode)[i]);
vc->setmode_ready = 0;
res = s2255_write_config(dev->udev, (unsigned char *)buffer, 512);
if (debug)
s2255_print_cfg(dev, mode);
	/* wait for the DSP to acknowledge the mode change before continuing */
if (mode->restart) {
wait_event_timeout(vc->wait_setmode,
(vc->setmode_ready != 0),
msecs_to_jiffies(S2255_SETMODE_TIMEOUT));
if (vc->setmode_ready != 1) {
dprintk(dev, 0, "s2255: no set mode response\n");
res = -EFAULT;
}
}
/* clear the restart flag */
vc->mode.restart = 0;
dprintk(dev, 1, "%s chn %d, result: %d\n", __func__, vc->idx, res);
mutex_unlock(&dev->cmdlock);
return res;
}
static int s2255_cmd_status(struct s2255_vc *vc, u32 *pstatus)
{
int res;
u32 chn_rev;
struct s2255_dev *dev = to_s2255_dev(vc->vdev.v4l2_dev);
__le32 *buffer = dev->cmdbuf;
mutex_lock(&dev->cmdlock);
chn_rev = G_chnmap[vc->idx];
dprintk(dev, 4, "%s chan %d\n", __func__, vc->idx);
/* form the get vid status command */
buffer[0] = IN_DATA_TOKEN;
buffer[1] = (__le32) cpu_to_le32(chn_rev);
buffer[2] = CMD_STATUS;
*pstatus = 0;
vc->vidstatus_ready = 0;
res = s2255_write_config(dev->udev, (unsigned char *)buffer, 512);
wait_event_timeout(vc->wait_vidstatus,
(vc->vidstatus_ready != 0),
msecs_to_jiffies(S2255_VIDSTATUS_TIMEOUT));
if (vc->vidstatus_ready != 1) {
dprintk(dev, 0, "s2255: no vidstatus response\n");
res = -EFAULT;
}
*pstatus = vc->vidstatus;
dprintk(dev, 4, "%s, vid status %d\n", __func__, *pstatus);
mutex_unlock(&dev->cmdlock);
return res;
}
static int start_streaming(struct vb2_queue *vq, unsigned int count)
{
struct s2255_vc *vc = vb2_get_drv_priv(vq);
int j;
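	/* reset the system frame ring buffer before asking the DSP to start */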
vc->last_frame = -1;
vc->bad_payload = 0;
vc->cur_frame = 0;
vc->frame_count = 0;
for (j = 0; j < SYS_FRAMES; j++) {
vc->buffer.frame[j].ulState = S2255_READ_IDLE;
vc->buffer.frame[j].cur_size = 0;
}
return s2255_start_acquire(vc);
}
/* abort streaming and wait for last buffer */
static void stop_streaming(struct vb2_queue *vq)
{
struct s2255_vc *vc = vb2_get_drv_priv(vq);
struct s2255_buffer *buf, *node;
unsigned long flags;
(void) s2255_stop_acquire(vc);
spin_lock_irqsave(&vc->qlock, flags);
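	/* hand every still-queued buffer back to vb2 in the error state */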
list_for_each_entry_safe(buf, node, &vc->buf_list, list) {
list_del(&buf->list);
vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
dprintk(vc->dev, 2, "[%p/%d] done\n",
buf, buf->vb.vb2_buf.index);
}
spin_unlock_irqrestore(&vc->qlock, flags);
}
static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id i)
{
struct s2255_vc *vc = video_drvdata(file);
struct s2255_mode mode;
struct vb2_queue *q = &vc->vb_vidq;
/*
* Changing the standard implies a format change, which is not allowed
* while buffers for use with streaming have already been allocated.
*/
if (vb2_is_busy(q))
return -EBUSY;
mode = vc->mode;
if (i & V4L2_STD_525_60) {
dprintk(vc->dev, 4, "%s 60 Hz\n", __func__);
/* if changing format, reset frame decimation/intervals */
if (mode.format != FORMAT_NTSC) {
mode.restart = 1;
mode.format = FORMAT_NTSC;
mode.fdec = FDEC_1;
vc->width = LINE_SZ_4CIFS_NTSC;
vc->height = NUM_LINES_4CIFS_NTSC * 2;
}
} else if (i & V4L2_STD_625_50) {
dprintk(vc->dev, 4, "%s 50 Hz\n", __func__);
if (mode.format != FORMAT_PAL) {
mode.restart = 1;
mode.format = FORMAT_PAL;
mode.fdec = FDEC_1;
vc->width = LINE_SZ_4CIFS_PAL;
vc->height = NUM_LINES_4CIFS_PAL * 2;
}
} else
return -EINVAL;
vc->std = i;
if (mode.restart)
s2255_set_mode(vc, &mode);
return 0;
}
static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *i)
{
struct s2255_vc *vc = video_drvdata(file);
*i = vc->std;
return 0;
}
/* The Sensoray 2255 is a multiple-channel capture device.
   It does not have a "crossbar" of inputs.
   We use one V4L device per channel. The user must be aware that
   certain combinations are not allowed. For instance, you cannot do
   full frame rate on more than two channels (two video devices) at
   once in color (you can do full frame rate on all four channels in
   greyscale).
 */
static int vidioc_enum_input(struct file *file, void *priv,
struct v4l2_input *inp)
{
struct s2255_vc *vc = video_drvdata(file);
struct s2255_dev *dev = vc->dev;
u32 status = 0;
if (inp->index != 0)
return -EINVAL;
inp->type = V4L2_INPUT_TYPE_CAMERA;
inp->std = S2255_NORMS;
inp->status = 0;
if (dev->dsp_fw_ver >= S2255_MIN_DSP_STATUS) {
int rc;
rc = s2255_cmd_status(vc, &status);
dprintk(dev, 4, "s2255_cmd_status rc: %d status %x\n",
rc, status);
if (rc == 0)
inp->status = (status & 0x01) ? 0
: V4L2_IN_ST_NO_SIGNAL;
}
switch (dev->pid) {
case 0x2255:
default:
strscpy(inp->name, "Composite", sizeof(inp->name));
break;
case 0x2257:
strscpy(inp->name, (vc->idx < 2) ? "Composite" : "S-Video",
sizeof(inp->name));
break;
}
return 0;
}
static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
{
*i = 0;
return 0;
}
static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
{
if (i > 0)
return -EINVAL;
return 0;
}
static int s2255_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct s2255_vc *vc =
container_of(ctrl->handler, struct s2255_vc, hdl);
struct s2255_mode mode;
mode = vc->mode;
/* update the mode to the corresponding value */
switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
mode.bright = ctrl->val;
break;
case V4L2_CID_CONTRAST:
mode.contrast = ctrl->val;
break;
case V4L2_CID_HUE:
mode.hue = ctrl->val;
break;
case V4L2_CID_SATURATION:
mode.saturation = ctrl->val;
break;
case V4L2_CID_S2255_COLORFILTER:
mode.color &= ~MASK_INPUT_TYPE;
		mode.color |= (!ctrl->val) << 16;
break;
case V4L2_CID_JPEG_COMPRESSION_QUALITY:
vc->jpegqual = ctrl->val;
return 0;
default:
return -EINVAL;
}
mode.restart = 0;
	/* Set the mode here. Note: the stream does not need to be
	   restarted; some V4L programs restart the stream unnecessarily
	   after an s_ctrl.
	 */
s2255_set_mode(vc, &mode);
return 0;
}
static int vidioc_g_jpegcomp(struct file *file, void *priv,
struct v4l2_jpegcompression *jc)
{
struct s2255_vc *vc = video_drvdata(file);
memset(jc, 0, sizeof(*jc));
jc->quality = vc->jpegqual;
dprintk(vc->dev, 2, "%s: quality %d\n", __func__, jc->quality);
return 0;
}
static int vidioc_s_jpegcomp(struct file *file, void *priv,
const struct v4l2_jpegcompression *jc)
{
struct s2255_vc *vc = video_drvdata(file);
if (jc->quality < 0 || jc->quality > 100)
return -EINVAL;
v4l2_ctrl_s_ctrl(vc->jpegqual_ctrl, jc->quality);
dprintk(vc->dev, 2, "%s: quality %d\n", __func__, jc->quality);
return 0;
}
static int vidioc_g_parm(struct file *file, void *priv,
struct v4l2_streamparm *sp)
{
__u32 def_num, def_dem;
struct s2255_vc *vc = video_drvdata(file);
if (sp->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
sp->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
sp->parm.capture.capturemode = vc->cap_parm.capturemode;
sp->parm.capture.readbuffers = S2255_MIN_BUFS;
def_num = (vc->mode.format == FORMAT_NTSC) ? 1001 : 1000;
def_dem = (vc->mode.format == FORMAT_NTSC) ? 30000 : 25000;
sp->parm.capture.timeperframe.denominator = def_dem;
switch (vc->mode.fdec) {
default:
case FDEC_1:
sp->parm.capture.timeperframe.numerator = def_num;
break;
case FDEC_2:
sp->parm.capture.timeperframe.numerator = def_num * 2;
break;
case FDEC_3:
sp->parm.capture.timeperframe.numerator = def_num * 3;
break;
case FDEC_5:
sp->parm.capture.timeperframe.numerator = def_num * 5;
break;
}
dprintk(vc->dev, 4, "%s capture mode, %d timeperframe %d/%d\n",
__func__,
sp->parm.capture.capturemode,
sp->parm.capture.timeperframe.numerator,
sp->parm.capture.timeperframe.denominator);
return 0;
}
static int vidioc_s_parm(struct file *file, void *priv,
struct v4l2_streamparm *sp)
{
struct s2255_vc *vc = video_drvdata(file);
struct s2255_mode mode;
int fdec = FDEC_1;
__u32 def_num, def_dem;
if (sp->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
mode = vc->mode;
/* high quality capture mode requires a stream restart */
if ((vc->cap_parm.capturemode != sp->parm.capture.capturemode)
&& vb2_is_streaming(&vc->vb_vidq))
return -EBUSY;
def_num = (mode.format == FORMAT_NTSC) ? 1001 : 1000;
def_dem = (mode.format == FORMAT_NTSC) ? 30000 : 25000;
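	/*
	 * Map the requested time-per-frame to the nearest supported
	 * frame decimation: 1x, 2x, 3x or 5x the base frame interval.
	 */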
if (def_dem != sp->parm.capture.timeperframe.denominator)
sp->parm.capture.timeperframe.numerator = def_num;
else if (sp->parm.capture.timeperframe.numerator <= def_num)
sp->parm.capture.timeperframe.numerator = def_num;
else if (sp->parm.capture.timeperframe.numerator <= (def_num * 2)) {
sp->parm.capture.timeperframe.numerator = def_num * 2;
fdec = FDEC_2;
} else if (sp->parm.capture.timeperframe.numerator <= (def_num * 3)) {
sp->parm.capture.timeperframe.numerator = def_num * 3;
fdec = FDEC_3;
} else {
sp->parm.capture.timeperframe.numerator = def_num * 5;
fdec = FDEC_5;
}
mode.fdec = fdec;
sp->parm.capture.timeperframe.denominator = def_dem;
sp->parm.capture.readbuffers = S2255_MIN_BUFS;
s2255_set_mode(vc, &mode);
dprintk(vc->dev, 4, "%s capture mode, %d timeperframe %d/%d, fdec %d\n",
__func__,
sp->parm.capture.capturemode,
sp->parm.capture.timeperframe.numerator,
sp->parm.capture.timeperframe.denominator, fdec);
return 0;
}
#define NUM_SIZE_ENUMS 3
static const struct v4l2_frmsize_discrete ntsc_sizes[] = {
{ 640, 480 },
{ 640, 240 },
{ 320, 240 },
};
static const struct v4l2_frmsize_discrete pal_sizes[] = {
{ 704, 576 },
{ 704, 288 },
{ 352, 288 },
};
static int vidioc_enum_framesizes(struct file *file, void *priv,
struct v4l2_frmsizeenum *fe)
{
struct s2255_vc *vc = video_drvdata(file);
int is_ntsc = vc->std & V4L2_STD_525_60;
const struct s2255_fmt *fmt;
if (fe->index >= NUM_SIZE_ENUMS)
return -EINVAL;
fmt = format_by_fourcc(fe->pixel_format);
if (fmt == NULL)
return -EINVAL;
fe->type = V4L2_FRMSIZE_TYPE_DISCRETE;
fe->discrete = is_ntsc ? ntsc_sizes[fe->index] : pal_sizes[fe->index];
return 0;
}
static int vidioc_enum_frameintervals(struct file *file, void *priv,
struct v4l2_frmivalenum *fe)
{
struct s2255_vc *vc = video_drvdata(file);
const struct s2255_fmt *fmt;
const struct v4l2_frmsize_discrete *sizes;
int is_ntsc = vc->std & V4L2_STD_525_60;
#define NUM_FRAME_ENUMS 4
int frm_dec[NUM_FRAME_ENUMS] = {1, 2, 3, 5};
int i;
if (fe->index >= NUM_FRAME_ENUMS)
return -EINVAL;
fmt = format_by_fourcc(fe->pixel_format);
if (fmt == NULL)
return -EINVAL;
sizes = is_ntsc ? ntsc_sizes : pal_sizes;
for (i = 0; i < NUM_SIZE_ENUMS; i++, sizes++)
if (fe->width == sizes->width &&
fe->height == sizes->height)
break;
if (i == NUM_SIZE_ENUMS)
return -EINVAL;
fe->type = V4L2_FRMIVAL_TYPE_DISCRETE;
fe->discrete.denominator = is_ntsc ? 30000 : 25000;
fe->discrete.numerator = (is_ntsc ? 1001 : 1000) * frm_dec[fe->index];
dprintk(vc->dev, 4, "%s discrete %d/%d\n", __func__,
fe->discrete.numerator,
fe->discrete.denominator);
return 0;
}
static int s2255_open(struct file *file)
{
struct s2255_vc *vc = video_drvdata(file);
struct s2255_dev *dev = vc->dev;
int state;
int rc = 0;
rc = v4l2_fh_open(file);
if (rc != 0)
return rc;
dprintk(dev, 1, "s2255: %s\n", __func__);
state = atomic_read(&dev->fw_data->fw_state);
switch (state) {
case S2255_FW_DISCONNECTING:
return -ENODEV;
case S2255_FW_FAILED:
s2255_dev_err(&dev->udev->dev,
"firmware load failed. retrying.\n");
s2255_fwload_start(dev);
wait_event_timeout(dev->fw_data->wait_fw,
((atomic_read(&dev->fw_data->fw_state)
== S2255_FW_SUCCESS) ||
(atomic_read(&dev->fw_data->fw_state)
== S2255_FW_DISCONNECTING)),
msecs_to_jiffies(S2255_LOAD_TIMEOUT));
/* state may have changed, re-read */
state = atomic_read(&dev->fw_data->fw_state);
break;
case S2255_FW_NOTLOADED:
case S2255_FW_LOADED_DSPWAIT:
/* give S2255_LOAD_TIMEOUT time for firmware to load in case
driver loaded and then device immediately opened */
pr_info("%s waiting for firmware load\n", __func__);
wait_event_timeout(dev->fw_data->wait_fw,
((atomic_read(&dev->fw_data->fw_state)
== S2255_FW_SUCCESS) ||
(atomic_read(&dev->fw_data->fw_state)
== S2255_FW_DISCONNECTING)),
msecs_to_jiffies(S2255_LOAD_TIMEOUT));
/* state may have changed, re-read */
state = atomic_read(&dev->fw_data->fw_state);
break;
case S2255_FW_SUCCESS:
default:
break;
}
/* state may have changed in above switch statement */
switch (state) {
case S2255_FW_SUCCESS:
break;
case S2255_FW_FAILED:
pr_info("2255 firmware load failed.\n");
return -ENODEV;
case S2255_FW_DISCONNECTING:
pr_info("%s: disconnecting\n", __func__);
return -ENODEV;
case S2255_FW_LOADED_DSPWAIT:
case S2255_FW_NOTLOADED:
pr_info("%s: firmware not loaded, please retry\n",
__func__);
/*
* Timeout on firmware load means device unusable.
* Set firmware failure state.
* On next s2255_open the firmware will be reloaded.
*/
atomic_set(&dev->fw_data->fw_state,
S2255_FW_FAILED);
return -EAGAIN;
default:
pr_info("%s: unknown state\n", __func__);
return -EFAULT;
}
if (!vc->configured) {
/* configure channel to default state */
vc->fmt = &formats[0];
s2255_set_mode(vc, &vc->mode);
vc->configured = 1;
}
return 0;
}
static void s2255_destroy(struct s2255_dev *dev)
{
dprintk(dev, 1, "%s", __func__);
/* board shutdown stops the read pipe if it is running */
s2255_board_shutdown(dev);
/* make sure firmware still not trying to load */
timer_shutdown_sync(&dev->timer); /* only started in .probe and .open */
if (dev->fw_data->fw_urb) {
usb_kill_urb(dev->fw_data->fw_urb);
usb_free_urb(dev->fw_data->fw_urb);
dev->fw_data->fw_urb = NULL;
}
release_firmware(dev->fw_data->fw);
kfree(dev->fw_data->pfw_data);
kfree(dev->fw_data);
/* reset the DSP so firmware can be reloaded next time */
s2255_reset_dsppower(dev);
mutex_destroy(&dev->lock);
usb_put_dev(dev->udev);
v4l2_device_unregister(&dev->v4l2_dev);
kfree(dev->cmdbuf);
kfree(dev);
}
static const struct v4l2_file_operations s2255_fops_v4l = {
.owner = THIS_MODULE,
.open = s2255_open,
.release = vb2_fop_release,
.poll = vb2_fop_poll,
.unlocked_ioctl = video_ioctl2, /* V4L2 ioctl handler */
.mmap = vb2_fop_mmap,
.read = vb2_fop_read,
};
static const struct v4l2_ioctl_ops s2255_ioctl_ops = {
.vidioc_querycap = vidioc_querycap,
.vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
.vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
.vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
.vidioc_reqbufs = vb2_ioctl_reqbufs,
.vidioc_querybuf = vb2_ioctl_querybuf,
.vidioc_qbuf = vb2_ioctl_qbuf,
.vidioc_dqbuf = vb2_ioctl_dqbuf,
.vidioc_s_std = vidioc_s_std,
.vidioc_g_std = vidioc_g_std,
.vidioc_enum_input = vidioc_enum_input,
.vidioc_g_input = vidioc_g_input,
.vidioc_s_input = vidioc_s_input,
.vidioc_streamon = vb2_ioctl_streamon,
.vidioc_streamoff = vb2_ioctl_streamoff,
.vidioc_s_jpegcomp = vidioc_s_jpegcomp,
.vidioc_g_jpegcomp = vidioc_g_jpegcomp,
.vidioc_s_parm = vidioc_s_parm,
.vidioc_g_parm = vidioc_g_parm,
.vidioc_enum_framesizes = vidioc_enum_framesizes,
.vidioc_enum_frameintervals = vidioc_enum_frameintervals,
.vidioc_log_status = v4l2_ctrl_log_status,
.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
static void s2255_video_device_release(struct video_device *vdev)
{
struct s2255_dev *dev = to_s2255_dev(vdev->v4l2_dev);
struct s2255_vc *vc =
container_of(vdev, struct s2255_vc, vdev);
dprintk(dev, 4, "%s, chnls: %d\n", __func__,
refcount_read(&dev->num_channels));
v4l2_ctrl_handler_free(&vc->hdl);
if (refcount_dec_and_test(&dev->num_channels))
s2255_destroy(dev);
return;
}
static const struct video_device template = {
.name = "s2255v",
.fops = &s2255_fops_v4l,
.ioctl_ops = &s2255_ioctl_ops,
.release = s2255_video_device_release,
.tvnorms = S2255_NORMS,
};
static const struct v4l2_ctrl_ops s2255_ctrl_ops = {
.s_ctrl = s2255_s_ctrl,
};
static const struct v4l2_ctrl_config color_filter_ctrl = {
.ops = &s2255_ctrl_ops,
.name = "Color Filter",
.id = V4L2_CID_S2255_COLORFILTER,
.type = V4L2_CTRL_TYPE_BOOLEAN,
.max = 1,
.step = 1,
.def = 1,
};
static int s2255_probe_v4l(struct s2255_dev *dev)
{
int ret;
int i;
int cur_nr = video_nr;
struct s2255_vc *vc;
struct vb2_queue *q;
ret = v4l2_device_register(&dev->interface->dev, &dev->v4l2_dev);
if (ret)
return ret;
/* initialize all video 4 linux */
/* register 4 video devices */
for (i = 0; i < MAX_CHANNELS; i++) {
vc = &dev->vc[i];
INIT_LIST_HEAD(&vc->buf_list);
v4l2_ctrl_handler_init(&vc->hdl, 6);
v4l2_ctrl_new_std(&vc->hdl, &s2255_ctrl_ops,
V4L2_CID_BRIGHTNESS, -127, 127, 1, DEF_BRIGHT);
v4l2_ctrl_new_std(&vc->hdl, &s2255_ctrl_ops,
V4L2_CID_CONTRAST, 0, 255, 1, DEF_CONTRAST);
v4l2_ctrl_new_std(&vc->hdl, &s2255_ctrl_ops,
V4L2_CID_SATURATION, 0, 255, 1, DEF_SATURATION);
v4l2_ctrl_new_std(&vc->hdl, &s2255_ctrl_ops,
V4L2_CID_HUE, 0, 255, 1, DEF_HUE);
vc->jpegqual_ctrl = v4l2_ctrl_new_std(&vc->hdl,
&s2255_ctrl_ops,
V4L2_CID_JPEG_COMPRESSION_QUALITY,
0, 100, 1, S2255_DEF_JPEG_QUAL);
if (dev->dsp_fw_ver >= S2255_MIN_DSP_COLORFILTER &&
(dev->pid != 0x2257 || vc->idx <= 1))
v4l2_ctrl_new_custom(&vc->hdl, &color_filter_ctrl,
NULL);
if (vc->hdl.error) {
ret = vc->hdl.error;
v4l2_ctrl_handler_free(&vc->hdl);
dev_err(&dev->udev->dev, "couldn't register control\n");
break;
}
q = &vc->vb_vidq;
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
q->io_modes = VB2_MMAP | VB2_READ | VB2_USERPTR;
q->drv_priv = vc;
q->lock = &vc->vb_lock;
q->buf_struct_size = sizeof(struct s2255_buffer);
q->mem_ops = &vb2_vmalloc_memops;
q->ops = &s2255_video_qops;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
ret = vb2_queue_init(q);
if (ret != 0) {
dev_err(&dev->udev->dev,
"%s vb2_queue_init 0x%x\n", __func__, ret);
break;
}
/* register video devices */
vc->vdev = template;
vc->vdev.queue = q;
vc->vdev.ctrl_handler = &vc->hdl;
vc->vdev.lock = &dev->lock;
vc->vdev.v4l2_dev = &dev->v4l2_dev;
vc->vdev.device_caps = V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_STREAMING | V4L2_CAP_READWRITE;
video_set_drvdata(&vc->vdev, vc);
if (video_nr == -1)
ret = video_register_device(&vc->vdev,
VFL_TYPE_VIDEO,
video_nr);
else
ret = video_register_device(&vc->vdev,
VFL_TYPE_VIDEO,
cur_nr + i);
if (ret) {
dev_err(&dev->udev->dev,
"failed to register video device!\n");
break;
}
refcount_inc(&dev->num_channels);
v4l2_info(&dev->v4l2_dev, "V4L2 device registered as %s\n",
video_device_node_name(&vc->vdev));
}
pr_info("Sensoray 2255 V4L driver Revision: %s\n",
S2255_VERSION);
	/* if no channels registered, return error and probe will fail */
if (refcount_read(&dev->num_channels) == 0) {
v4l2_device_unregister(&dev->v4l2_dev);
return ret;
}
if (refcount_read(&dev->num_channels) != MAX_CHANNELS)
pr_warn("s2255: Not all channels available.\n");
return 0;
}
/* this function moves the usb stream read pipe data
* into the system buffers.
 * returns 0 on success, a negative error code if the data could not
 * be processed.
*
* Received frame structure:
* bytes 0-3: marker : 0x2255DA4AL (S2255_MARKER_FRAME)
* bytes 4-7: channel: 0-3
* bytes 8-11: payload size: size of the frame
* bytes 12-payloadsize+12: frame data
*/
static int save_frame(struct s2255_dev *dev, struct s2255_pipeinfo *pipe_info)
{
char *pdest;
u32 offset = 0;
int bframe = 0;
char *psrc;
unsigned long copy_size;
unsigned long size;
s32 idx = -1;
struct s2255_framei *frm;
unsigned char *pdata;
struct s2255_vc *vc;
dprintk(dev, 100, "buffer to user\n");
vc = &dev->vc[dev->cc];
idx = vc->cur_frame;
frm = &vc->buffer.frame[idx];
if (frm->ulState == S2255_READ_IDLE) {
int jj;
unsigned int cc;
		__le32 *pdword; /* data from dsp is little endian */
int payload;
/* search for marker codes */
pdata = (unsigned char *)pipe_info->transfer_buffer;
pdword = (__le32 *)pdata;
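		/*
		 * Scan the transfer one byte at a time for a marker
		 * dword; the USB stream can lose sync, so a marker is
		 * not guaranteed to start at offset zero.
		 */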
		for (jj = 0; jj < (pipe_info->cur_transfer_size - 12); jj++) {
			pdword = (__le32 *)pdata; /* keep the dword view in step with pdata */
			switch (*pdword) {
case S2255_MARKER_FRAME:
dprintk(dev, 4, "marker @ offset: %d [%x %x]\n",
jj, pdata[0], pdata[1]);
offset = jj + PREFIX_SIZE;
bframe = 1;
cc = le32_to_cpu(pdword[1]);
if (cc >= MAX_CHANNELS) {
dprintk(dev, 0,
"bad channel\n");
return -EINVAL;
}
				/* map the DSP channel number back to the logical channel index */
dev->cc = G_chnmap[cc];
vc = &dev->vc[dev->cc];
payload = le32_to_cpu(pdword[3]);
if (payload > vc->req_image_size) {
vc->bad_payload++;
/* discard the bad frame */
return -EINVAL;
}
vc->pkt_size = payload;
vc->jpg_size = le32_to_cpu(pdword[4]);
break;
case S2255_MARKER_RESPONSE:
pdata += DEF_USB_BLOCK;
jj += DEF_USB_BLOCK;
if (le32_to_cpu(pdword[1]) >= MAX_CHANNELS)
break;
cc = G_chnmap[le32_to_cpu(pdword[1])];
if (cc >= MAX_CHANNELS)
break;
vc = &dev->vc[cc];
switch (pdword[2]) {
case S2255_RESPONSE_SETMODE:
					/* setmode response: flag ready and wake any waiter */
vc->setmode_ready = 1;
wake_up(&vc->wait_setmode);
dprintk(dev, 5, "setmode rdy %d\n", cc);
break;
case S2255_RESPONSE_FW:
dev->chn_ready |= (1 << cc);
if ((dev->chn_ready & 0x0f) != 0x0f)
break;
/* all channels ready */
pr_info("s2255: fw loaded\n");
atomic_set(&dev->fw_data->fw_state,
S2255_FW_SUCCESS);
wake_up(&dev->fw_data->wait_fw);
break;
case S2255_RESPONSE_STATUS:
vc->vidstatus = le32_to_cpu(pdword[3]);
vc->vidstatus_ready = 1;
wake_up(&vc->wait_vidstatus);
dprintk(dev, 5, "vstat %x chan %d\n",
le32_to_cpu(pdword[3]), cc);
break;
default:
pr_info("s2255 unknown resp\n");
}
pdata++;
break;
default:
pdata++;
break;
}
if (bframe)
break;
} /* for */
if (!bframe)
return -EINVAL;
}
vc = &dev->vc[dev->cc];
idx = vc->cur_frame;
frm = &vc->buffer.frame[idx];
	/* search done. now find out if we should be acquiring on this channel */
if (!vb2_is_streaming(&vc->vb_vidq)) {
/* we found a frame, but this channel is turned off */
frm->ulState = S2255_READ_IDLE;
return -EINVAL;
}
if (frm->ulState == S2255_READ_IDLE) {
frm->ulState = S2255_READ_FRAME;
frm->cur_size = 0;
}
/* skip the marker 512 bytes (and offset if out of sync) */
psrc = (u8 *)pipe_info->transfer_buffer + offset;
if (frm->lpvbits == NULL) {
dprintk(dev, 1, "s2255 frame buffer == NULL.%p %p %d %d",
frm, dev, dev->cc, idx);
return -ENOMEM;
}
pdest = frm->lpvbits + frm->cur_size;
copy_size = (pipe_info->cur_transfer_size - offset);
size = vc->pkt_size - PREFIX_SIZE;
/* sanity check on pdest */
if ((copy_size + frm->cur_size) < vc->req_image_size)
memcpy(pdest, psrc, copy_size);
frm->cur_size += copy_size;
dprintk(dev, 4, "cur_size: %lu, size: %lu\n", frm->cur_size, size);
if (frm->cur_size >= size) {
dprintk(dev, 2, "******[%d]Buffer[%d]full*******\n",
dev->cc, idx);
vc->last_frame = vc->cur_frame;
vc->cur_frame++;
/* end of system frame ring buffer, start at zero */
if ((vc->cur_frame == SYS_FRAMES) ||
(vc->cur_frame == vc->buffer.dwFrames))
vc->cur_frame = 0;
/* frame ready */
if (vb2_is_streaming(&vc->vb_vidq))
s2255_got_frame(vc, vc->jpg_size);
vc->frame_count++;
frm->ulState = S2255_READ_IDLE;
frm->cur_size = 0;
}
/* done successfully */
return 0;
}
static void s2255_read_video_callback(struct s2255_dev *dev,
struct s2255_pipeinfo *pipe_info)
{
int res;
dprintk(dev, 50, "callback read video\n");
if (dev->cc >= MAX_CHANNELS) {
dev->cc = 0;
dev_err(&dev->udev->dev, "invalid channel\n");
return;
}
/* otherwise copy to the system buffers */
res = save_frame(dev, pipe_info);
if (res != 0)
dprintk(dev, 4, "s2255: read callback failed\n");
dprintk(dev, 50, "callback read video done\n");
return;
}
static long s2255_vendor_req(struct s2255_dev *dev, unsigned char Request,
u16 Index, u16 Value, void *TransferBuffer,
s32 TransferBufferLength, int bOut)
{
int r;
unsigned char *buf;
buf = kmalloc(TransferBufferLength, GFP_KERNEL);
if (!buf)
return -ENOMEM;
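	/*
	 * Bounce the caller's data through a kmalloc'd buffer:
	 * usb_control_msg() needs DMA-able memory, which the caller's
	 * buffer (e.g. on the stack) may not be.
	 */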
if (!bOut) {
r = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
Request,
USB_TYPE_VENDOR | USB_RECIP_DEVICE |
USB_DIR_IN,
Value, Index, buf,
TransferBufferLength, USB_CTRL_SET_TIMEOUT);
if (r >= 0)
memcpy(TransferBuffer, buf, TransferBufferLength);
} else {
memcpy(buf, TransferBuffer, TransferBufferLength);
r = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
Request, USB_TYPE_VENDOR | USB_RECIP_DEVICE,
Value, Index, buf,
TransferBufferLength, USB_CTRL_SET_TIMEOUT);
}
kfree(buf);
return r;
}
/*
 * Retrieve the FX2 (USB controller) firmware version; for future use.
 * @dev: pointer to the device extension
 * Returns the firmware version as a 16-bit value in an int.
 */
static int s2255_get_fx2fw(struct s2255_dev *dev)
{
int fw;
int ret;
u8 transBuffer[2] = {};
ret = s2255_vendor_req(dev, S2255_VR_FW, 0, 0, transBuffer,
sizeof(transBuffer), S2255_VR_IN);
if (ret < 0)
dprintk(dev, 2, "get fw error: %x\n", ret);
fw = transBuffer[0] + (transBuffer[1] << 8);
dprintk(dev, 2, "Get FW %x %x\n", transBuffer[0], transBuffer[1]);
return fw;
}
/*
* Create the system ring buffer to copy frames into from the
* usb read pipe.
*/
static int s2255_create_sys_buffers(struct s2255_vc *vc)
{
unsigned long i;
unsigned long reqsize;
vc->buffer.dwFrames = SYS_FRAMES;
	/* always allocate the maximum size (PAL) for system buffers */
	reqsize = SYS_FRAMES_MAXSIZE;
for (i = 0; i < SYS_FRAMES; i++) {
/* allocate the frames */
vc->buffer.frame[i].lpvbits = vmalloc(reqsize);
vc->buffer.frame[i].size = reqsize;
if (vc->buffer.frame[i].lpvbits == NULL) {
pr_info("out of memory. using less frames\n");
vc->buffer.dwFrames = i;
break;
}
}
/* make sure internal states are set */
for (i = 0; i < SYS_FRAMES; i++) {
vc->buffer.frame[i].ulState = 0;
vc->buffer.frame[i].cur_size = 0;
}
vc->cur_frame = 0;
vc->last_frame = -1;
return 0;
}
static int s2255_release_sys_buffers(struct s2255_vc *vc)
{
unsigned long i;
for (i = 0; i < SYS_FRAMES; i++) {
vfree(vc->buffer.frame[i].lpvbits);
vc->buffer.frame[i].lpvbits = NULL;
}
return 0;
}
static int s2255_board_init(struct s2255_dev *dev)
{
struct s2255_mode mode_def = DEF_MODEI_NTSC_CONT;
int fw_ver;
int j;
struct s2255_pipeinfo *pipe = &dev->pipe;
dprintk(dev, 4, "board init: %p", dev);
memset(pipe, 0, sizeof(*pipe));
pipe->dev = dev;
pipe->cur_transfer_size = S2255_USB_XFER_SIZE;
pipe->max_transfer_size = S2255_USB_XFER_SIZE;
pipe->transfer_buffer = kzalloc(pipe->max_transfer_size,
GFP_KERNEL);
if (pipe->transfer_buffer == NULL) {
dprintk(dev, 1, "out of memory!\n");
return -ENOMEM;
}
/* query the firmware */
fw_ver = s2255_get_fx2fw(dev);
pr_info("s2255: usb firmware version %d.%d\n",
(fw_ver >> 8) & 0xff,
fw_ver & 0xff);
if (fw_ver < S2255_CUR_USB_FWVER)
pr_info("s2255: newer USB firmware available\n");
for (j = 0; j < MAX_CHANNELS; j++) {
struct s2255_vc *vc = &dev->vc[j];
vc->mode = mode_def;
if (dev->pid == 0x2257 && j > 1)
vc->mode.color |= (1 << 16);
vc->jpegqual = S2255_DEF_JPEG_QUAL;
vc->width = LINE_SZ_4CIFS_NTSC;
vc->height = NUM_LINES_4CIFS_NTSC * 2;
vc->std = V4L2_STD_NTSC_M;
vc->fmt = &formats[0];
vc->mode.restart = 1;
vc->req_image_size = get_transfer_size(&mode_def);
vc->frame_count = 0;
/* create the system buffers */
s2255_create_sys_buffers(vc);
}
/* start read pipe */
s2255_start_readpipe(dev);
dprintk(dev, 1, "%s: success\n", __func__);
return 0;
}
static int s2255_board_shutdown(struct s2255_dev *dev)
{
u32 i;
dprintk(dev, 1, "%s: dev: %p", __func__, dev);
for (i = 0; i < MAX_CHANNELS; i++) {
if (vb2_is_streaming(&dev->vc[i].vb_vidq))
s2255_stop_acquire(&dev->vc[i]);
}
s2255_stop_readpipe(dev);
for (i = 0; i < MAX_CHANNELS; i++)
s2255_release_sys_buffers(&dev->vc[i]);
/* release transfer buffer */
kfree(dev->pipe.transfer_buffer);
return 0;
}
static void read_pipe_completion(struct urb *purb)
{
struct s2255_pipeinfo *pipe_info;
struct s2255_dev *dev;
int status;
int pipe;
pipe_info = purb->context;
if (pipe_info == NULL) {
dev_err(&purb->dev->dev, "no context!\n");
return;
}
dev = pipe_info->dev;
if (dev == NULL) {
dev_err(&purb->dev->dev, "no context!\n");
return;
}
status = purb->status;
/* if shutting down, do not resubmit, exit immediately */
if (status == -ESHUTDOWN) {
dprintk(dev, 2, "%s: err shutdown\n", __func__);
pipe_info->err_count++;
return;
}
if (pipe_info->state == 0) {
dprintk(dev, 2, "%s: exiting USB pipe", __func__);
return;
}
if (status == 0)
s2255_read_video_callback(dev, pipe_info);
else {
pipe_info->err_count++;
dprintk(dev, 1, "%s: failed URB %d\n", __func__, status);
}
pipe = usb_rcvbulkpipe(dev->udev, dev->read_endpoint);
/* reuse urb */
usb_fill_bulk_urb(pipe_info->stream_urb, dev->udev,
pipe,
pipe_info->transfer_buffer,
pipe_info->cur_transfer_size,
read_pipe_completion, pipe_info);
if (pipe_info->state != 0) {
if (usb_submit_urb(pipe_info->stream_urb, GFP_ATOMIC))
dev_err(&dev->udev->dev, "error submitting urb\n");
} else {
dprintk(dev, 2, "%s :complete state 0\n", __func__);
}
return;
}
static int s2255_start_readpipe(struct s2255_dev *dev)
{
int pipe;
int retval;
struct s2255_pipeinfo *pipe_info = &dev->pipe;
pipe = usb_rcvbulkpipe(dev->udev, dev->read_endpoint);
dprintk(dev, 2, "%s: IN %d\n", __func__, dev->read_endpoint);
pipe_info->state = 1;
pipe_info->err_count = 0;
pipe_info->stream_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!pipe_info->stream_urb)
return -ENOMEM;
/* transfer buffer allocated in board_init */
usb_fill_bulk_urb(pipe_info->stream_urb, dev->udev,
pipe,
pipe_info->transfer_buffer,
pipe_info->cur_transfer_size,
read_pipe_completion, pipe_info);
retval = usb_submit_urb(pipe_info->stream_urb, GFP_KERNEL);
if (retval) {
pr_err("s2255: start read pipe failed\n");
return retval;
}
return 0;
}
/* starts acquisition process */
static int s2255_start_acquire(struct s2255_vc *vc)
{
int res;
unsigned long chn_rev;
int j;
struct s2255_dev *dev = to_s2255_dev(vc->vdev.v4l2_dev);
__le32 *buffer = dev->cmdbuf;
mutex_lock(&dev->cmdlock);
chn_rev = G_chnmap[vc->idx];
vc->last_frame = -1;
vc->bad_payload = 0;
vc->cur_frame = 0;
for (j = 0; j < SYS_FRAMES; j++) {
vc->buffer.frame[j].ulState = 0;
vc->buffer.frame[j].cur_size = 0;
}
/* send the start command */
buffer[0] = IN_DATA_TOKEN;
buffer[1] = (__le32) cpu_to_le32(chn_rev);
buffer[2] = CMD_START;
res = s2255_write_config(dev->udev, (unsigned char *)buffer, 512);
if (res != 0)
dev_err(&dev->udev->dev, "CMD_START error\n");
dprintk(dev, 2, "start acquire exit[%d] %d\n", vc->idx, res);
mutex_unlock(&dev->cmdlock);
return res;
}
static int s2255_stop_acquire(struct s2255_vc *vc)
{
int res;
unsigned long chn_rev;
struct s2255_dev *dev = to_s2255_dev(vc->vdev.v4l2_dev);
__le32 *buffer = dev->cmdbuf;
mutex_lock(&dev->cmdlock);
chn_rev = G_chnmap[vc->idx];
/* send the stop command */
buffer[0] = IN_DATA_TOKEN;
buffer[1] = (__le32) cpu_to_le32(chn_rev);
buffer[2] = CMD_STOP;
res = s2255_write_config(dev->udev, (unsigned char *)buffer, 512);
if (res != 0)
dev_err(&dev->udev->dev, "CMD_STOP error\n");
dprintk(dev, 4, "%s: chn %d, res %d\n", __func__, vc->idx, res);
mutex_unlock(&dev->cmdlock);
return res;
}
static void s2255_stop_readpipe(struct s2255_dev *dev)
{
struct s2255_pipeinfo *pipe = &dev->pipe;
pipe->state = 0;
if (pipe->stream_urb) {
/* cancel urb */
usb_kill_urb(pipe->stream_urb);
usb_free_urb(pipe->stream_urb);
pipe->stream_urb = NULL;
}
dprintk(dev, 4, "%s", __func__);
return;
}
static void s2255_fwload_start(struct s2255_dev *dev)
{
s2255_reset_dsppower(dev);
dev->fw_data->fw_size = dev->fw_data->fw->size;
atomic_set(&dev->fw_data->fw_state, S2255_FW_NOTLOADED);
memcpy(dev->fw_data->pfw_data,
dev->fw_data->fw->data, CHUNK_SIZE);
dev->fw_data->fw_loaded = CHUNK_SIZE;
usb_fill_bulk_urb(dev->fw_data->fw_urb, dev->udev,
usb_sndbulkpipe(dev->udev, 2),
dev->fw_data->pfw_data,
CHUNK_SIZE, s2255_fwchunk_complete,
dev->fw_data);
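	/*
	 * The timer callback submits this first URB; the completion
	 * handler (s2255_fwchunk_complete) then streams the remaining
	 * firmware chunks.
	 */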
mod_timer(&dev->timer, jiffies + HZ);
}
/* standard usb probe function */
static int s2255_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
struct s2255_dev *dev = NULL;
struct usb_host_interface *iface_desc;
struct usb_endpoint_descriptor *endpoint;
int i;
int retval = -ENOMEM;
__le32 *pdata;
int fw_size;
/* allocate memory for our device state and initialize it to zero */
dev = kzalloc(sizeof(struct s2255_dev), GFP_KERNEL);
if (dev == NULL) {
s2255_dev_err(&interface->dev, "out of memory\n");
return -ENOMEM;
}
dev->cmdbuf = kzalloc(S2255_CMDBUF_SIZE, GFP_KERNEL);
if (dev->cmdbuf == NULL) {
s2255_dev_err(&interface->dev, "out of memory\n");
goto errorFWDATA1;
}
refcount_set(&dev->num_channels, 0);
dev->pid = id->idProduct;
dev->fw_data = kzalloc(sizeof(struct s2255_fw), GFP_KERNEL);
if (!dev->fw_data)
goto errorFWDATA1;
mutex_init(&dev->lock);
mutex_init(&dev->cmdlock);
/* grab usb_device and save it */
dev->udev = usb_get_dev(interface_to_usbdev(interface));
if (dev->udev == NULL) {
dev_err(&interface->dev, "null usb device\n");
retval = -ENODEV;
goto errorUDEV;
}
dev_dbg(&interface->dev, "dev: %p, udev %p interface %p\n",
dev, dev->udev, interface);
dev->interface = interface;
/* set up the endpoint information */
iface_desc = interface->cur_altsetting;
dev_dbg(&interface->dev, "num EP: %d\n",
iface_desc->desc.bNumEndpoints);
for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
endpoint = &iface_desc->endpoint[i].desc;
if (!dev->read_endpoint && usb_endpoint_is_bulk_in(endpoint)) {
/* we found the bulk in endpoint */
dev->read_endpoint = endpoint->bEndpointAddress;
}
}
if (!dev->read_endpoint) {
dev_err(&interface->dev, "Could not find bulk-in endpoint\n");
goto errorEP;
}
timer_setup(&dev->timer, s2255_timer, 0);
init_waitqueue_head(&dev->fw_data->wait_fw);
for (i = 0; i < MAX_CHANNELS; i++) {
struct s2255_vc *vc = &dev->vc[i];
vc->idx = i;
vc->dev = dev;
init_waitqueue_head(&vc->wait_setmode);
init_waitqueue_head(&vc->wait_vidstatus);
spin_lock_init(&vc->qlock);
mutex_init(&vc->vb_lock);
}
dev->fw_data->fw_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!dev->fw_data->fw_urb)
goto errorFWURB;
dev->fw_data->pfw_data = kzalloc(CHUNK_SIZE, GFP_KERNEL);
if (!dev->fw_data->pfw_data) {
dev_err(&interface->dev, "out of memory!\n");
goto errorFWDATA2;
}
/* load the first chunk */
if (request_firmware(&dev->fw_data->fw,
FIRMWARE_FILE_NAME, &dev->udev->dev)) {
dev_err(&interface->dev, "sensoray 2255 failed to get firmware\n");
goto errorREQFW;
}
/* check the firmware is valid */
fw_size = dev->fw_data->fw->size;
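	/* the last 8 bytes of the image are a marker dword followed by
	   the DSP firmware release version */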
pdata = (__le32 *) &dev->fw_data->fw->data[fw_size - 8];
if (*pdata != S2255_FW_MARKER) {
dev_err(&interface->dev, "Firmware invalid.\n");
retval = -ENODEV;
goto errorFWMARKER;
} else {
/* make sure firmware is the latest */
__le32 *pRel;
pRel = (__le32 *) &dev->fw_data->fw->data[fw_size - 4];
pr_info("s2255 dsp fw version %x\n", le32_to_cpu(*pRel));
dev->dsp_fw_ver = le32_to_cpu(*pRel);
if (dev->dsp_fw_ver < S2255_CUR_DSP_FWVER)
pr_info("s2255: f2255usb.bin out of date.\n");
if (dev->pid == 0x2257 &&
dev->dsp_fw_ver < S2255_MIN_DSP_COLORFILTER)
pr_warn("2257 needs firmware %d or above.\n",
S2255_MIN_DSP_COLORFILTER);
}
usb_reset_device(dev->udev);
/* load 2255 board specific */
retval = s2255_board_init(dev);
if (retval)
goto errorBOARDINIT;
s2255_fwload_start(dev);
/* loads v4l specific */
retval = s2255_probe_v4l(dev);
if (retval)
goto errorBOARDINIT;
dev_info(&interface->dev, "Sensoray 2255 detected\n");
return 0;
errorBOARDINIT:
s2255_board_shutdown(dev);
errorFWMARKER:
release_firmware(dev->fw_data->fw);
errorREQFW:
kfree(dev->fw_data->pfw_data);
errorFWDATA2:
usb_free_urb(dev->fw_data->fw_urb);
errorFWURB:
timer_shutdown_sync(&dev->timer);
errorEP:
usb_put_dev(dev->udev);
errorUDEV:
kfree(dev->fw_data);
mutex_destroy(&dev->lock);
errorFWDATA1:
kfree(dev->cmdbuf);
kfree(dev);
pr_warn("Sensoray 2255 driver load failed: 0x%x\n", retval);
return retval;
}
/* disconnect routine. when board is removed physically or with rmmod */
static void s2255_disconnect(struct usb_interface *interface)
{
struct s2255_dev *dev = to_s2255_dev(usb_get_intfdata(interface));
int i;
int channels = refcount_read(&dev->num_channels);
mutex_lock(&dev->lock);
v4l2_device_disconnect(&dev->v4l2_dev);
mutex_unlock(&dev->lock);
	/* see comments in the uvc_driver.c usb disconnect function */
refcount_inc(&dev->num_channels);
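	/* the extra reference keeps s2255_destroy() from running until
	   every channel below has been unregistered */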
/* unregister each video device. */
for (i = 0; i < channels; i++)
video_unregister_device(&dev->vc[i].vdev);
/* wake up any of our timers */
atomic_set(&dev->fw_data->fw_state, S2255_FW_DISCONNECTING);
wake_up(&dev->fw_data->wait_fw);
for (i = 0; i < MAX_CHANNELS; i++) {
dev->vc[i].setmode_ready = 1;
wake_up(&dev->vc[i].wait_setmode);
dev->vc[i].vidstatus_ready = 1;
wake_up(&dev->vc[i].wait_vidstatus);
}
if (refcount_dec_and_test(&dev->num_channels))
s2255_destroy(dev);
dev_info(&interface->dev, "%s\n", __func__);
}
static struct usb_driver s2255_driver = {
.name = S2255_DRIVER_NAME,
.probe = s2255_probe,
.disconnect = s2255_disconnect,
.id_table = s2255_table,
};
module_usb_driver(s2255_driver);
MODULE_DESCRIPTION("Sensoray 2255 Video for Linux driver");
MODULE_AUTHOR("Dean Anderson (Sensoray Company Inc.)");
MODULE_LICENSE("GPL");
MODULE_VERSION(S2255_VERSION);
MODULE_FIRMWARE(FIRMWARE_FILE_NAME);
|
/* SPDX-License-Identifier: GPL-2.0 */
/*---------------------------------------------------------------------------+
| reg_constant.h |
| |
| Copyright (C) 1992 W. Metzenthen, 22 Parker St, Ormond, Vic 3163, |
| Australia. E-mail [email protected] |
| |
+---------------------------------------------------------------------------*/
#ifndef _REG_CONSTANT_H_
#define _REG_CONSTANT_H_
#include "fpu_emu.h"
extern FPU_REG const CONST_1;
extern FPU_REG const CONST_PI;
extern FPU_REG const CONST_PI2;
extern FPU_REG const CONST_PI2extra;
extern FPU_REG const CONST_PI4;
extern FPU_REG const CONST_Z;
extern FPU_REG const CONST_PINF;
extern FPU_REG const CONST_INF;
extern FPU_REG const CONST_MINF;
extern FPU_REG const CONST_QNaN;
#endif /* _REG_CONSTANT_H_ */
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2017 IBM Corp.
*
* Driver for the Nuvoton W83773G SMBus temperature sensor IC.
* Supported models: W83773G
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/regmap.h>
/* W83773 has 3 channels */
#define W83773_CHANNELS 3
/* The W83773 registers */
#define W83773_CONVERSION_RATE_REG_READ 0x04
#define W83773_CONVERSION_RATE_REG_WRITE 0x0A
#define W83773_MANUFACTURER_ID_REG 0xFE
#define W83773_LOCAL_TEMP 0x00
static const u8 W83773_STATUS[2] = { 0x02, 0x17 };
static const u8 W83773_TEMP_LSB[2] = { 0x10, 0x25 };
static const u8 W83773_TEMP_MSB[2] = { 0x01, 0x24 };
static const u8 W83773_OFFSET_LSB[2] = { 0x12, 0x16 };
static const u8 W83773_OFFSET_MSB[2] = { 0x11, 0x15 };
static const struct i2c_device_id w83773_id[] = {
{ "w83773g" },
{ }
};
MODULE_DEVICE_TABLE(i2c, w83773_id);
static const struct of_device_id __maybe_unused w83773_of_match[] = {
{
.compatible = "nuvoton,w83773g"
},
{ },
};
MODULE_DEVICE_TABLE(of, w83773_of_match);
static inline long temp_of_local(s8 reg)
{
return reg * 1000;
}
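/*
 * Remote temperatures are 11-bit values: 8 bits from the MSB register
 * plus the top 3 bits of the LSB register, in 0.125 degree C steps
 * (hence the multiply by 125 to yield millidegrees).
 */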
static inline long temp_of_remote(s8 hb, u8 lb)
{
return (hb << 3 | lb >> 5) * 125;
}
static int get_local_temp(struct regmap *regmap, long *val)
{
unsigned int regval;
int ret;
ret = regmap_read(regmap, W83773_LOCAL_TEMP, ®val);
if (ret < 0)
return ret;
*val = temp_of_local(regval);
return 0;
}
static int get_remote_temp(struct regmap *regmap, int index, long *val)
{
unsigned int regval_high;
unsigned int regval_low;
int ret;
ret = regmap_read(regmap, W83773_TEMP_MSB[index], ®val_high);
if (ret < 0)
return ret;
ret = regmap_read(regmap, W83773_TEMP_LSB[index], ®val_low);
if (ret < 0)
return ret;
*val = temp_of_remote(regval_high, regval_low);
return 0;
}
static int get_fault(struct regmap *regmap, int index, long *val)
{
unsigned int regval;
int ret;
ret = regmap_read(regmap, W83773_STATUS[index], ®val);
if (ret < 0)
return ret;
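	/* bit 2 of the status register reports a diode fault (e.g. open circuit) */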
*val = (regval & 0x04) >> 2;
return 0;
}
static int get_offset(struct regmap *regmap, int index, long *val)
{
unsigned int regval_high;
unsigned int regval_low;
int ret;
ret = regmap_read(regmap, W83773_OFFSET_MSB[index], ®val_high);
if (ret < 0)
return ret;
ret = regmap_read(regmap, W83773_OFFSET_LSB[index], ®val_low);
if (ret < 0)
return ret;
*val = temp_of_remote(regval_high, regval_low);
return 0;
}
static int set_offset(struct regmap *regmap, int index, long val)
{
int ret;
u8 high_byte;
u8 low_byte;
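	/* clamp the requested offset to the range the 11-bit register can encode */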
val = clamp_val(val, -127825, 127825);
	/* the offset value equals (high_byte << 3 | low_byte >> 5) * 125 */
val /= 125;
high_byte = val >> 3;
low_byte = (val & 0x07) << 5;
ret = regmap_write(regmap, W83773_OFFSET_MSB[index], high_byte);
if (ret < 0)
return ret;
return regmap_write(regmap, W83773_OFFSET_LSB[index], low_byte);
}
static int get_update_interval(struct regmap *regmap, long *val)
{
unsigned int regval;
int ret;
ret = regmap_read(regmap, W83773_CONVERSION_RATE_REG_READ, ®val);
if (ret < 0)
return ret;
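	/* interval in ms: 16 s at rate 0, halved for each rate step */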
*val = 16000 >> regval;
return 0;
}
static int set_update_interval(struct regmap *regmap, long val)
{
int rate;
/*
* For valid rates, interval can be calculated as
* interval = (1 << (8 - rate)) * 62.5;
* Rounded rate is therefore
* rate = 8 - __fls(interval * 8 / (62.5 * 7));
* Use clamp_val() to avoid overflows, and to ensure valid input
* for __fls.
*/
val = clamp_val(val, 62, 16000) * 10;
rate = 8 - __fls((val * 8 / (625 * 7)));
return regmap_write(regmap, W83773_CONVERSION_RATE_REG_WRITE, rate);
}
static int w83773_read(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long *val)
{
struct regmap *regmap = dev_get_drvdata(dev);
if (type == hwmon_chip) {
if (attr == hwmon_chip_update_interval)
return get_update_interval(regmap, val);
return -EOPNOTSUPP;
}
switch (attr) {
case hwmon_temp_input:
if (channel == 0)
return get_local_temp(regmap, val);
return get_remote_temp(regmap, channel - 1, val);
case hwmon_temp_fault:
return get_fault(regmap, channel - 1, val);
case hwmon_temp_offset:
return get_offset(regmap, channel - 1, val);
default:
return -EOPNOTSUPP;
}
}
static int w83773_write(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long val)
{
struct regmap *regmap = dev_get_drvdata(dev);
if (type == hwmon_chip && attr == hwmon_chip_update_interval)
return set_update_interval(regmap, val);
if (type == hwmon_temp && attr == hwmon_temp_offset)
return set_offset(regmap, channel - 1, val);
return -EOPNOTSUPP;
}
static umode_t w83773_is_visible(const void *data, enum hwmon_sensor_types type,
u32 attr, int channel)
{
switch (type) {
case hwmon_chip:
switch (attr) {
case hwmon_chip_update_interval:
return 0644;
}
break;
case hwmon_temp:
switch (attr) {
case hwmon_temp_input:
case hwmon_temp_fault:
return 0444;
case hwmon_temp_offset:
return 0644;
}
break;
default:
break;
}
return 0;
}
static const struct hwmon_channel_info * const w83773_info[] = {
HWMON_CHANNEL_INFO(chip,
HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL),
HWMON_CHANNEL_INFO(temp,
HWMON_T_INPUT,
HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_OFFSET,
HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_OFFSET),
NULL
};
static const struct hwmon_ops w83773_ops = {
.is_visible = w83773_is_visible,
.read = w83773_read,
.write = w83773_write,
};
static const struct hwmon_chip_info w83773_chip_info = {
.ops = &w83773_ops,
.info = w83773_info,
};
static const struct regmap_config w83773_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
};
static int w83773_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct device *hwmon_dev;
struct regmap *regmap;
int ret;
regmap = devm_regmap_init_i2c(client, &w83773_regmap_config);
if (IS_ERR(regmap)) {
dev_err(dev, "failed to allocate register map\n");
return PTR_ERR(regmap);
}
/* Set the conversion rate to 2 Hz */
ret = regmap_write(regmap, W83773_CONVERSION_RATE_REG_WRITE, 0x05);
if (ret < 0) {
dev_err(&client->dev, "error writing config rate register\n");
return ret;
}
i2c_set_clientdata(client, regmap);
hwmon_dev = devm_hwmon_device_register_with_info(dev,
client->name,
regmap,
&w83773_chip_info,
NULL);
return PTR_ERR_OR_ZERO(hwmon_dev);
}
static struct i2c_driver w83773_driver = {
.driver = {
.name = "w83773g",
.of_match_table = of_match_ptr(w83773_of_match),
},
.probe = w83773_probe,
.id_table = w83773_id,
};
module_i2c_driver(w83773_driver);
MODULE_AUTHOR("Lei YU <[email protected]>");
MODULE_DESCRIPTION("W83773G temperature sensor driver");
MODULE_LICENSE("GPL");
|
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 1995, 1996 Gero Kuhlmann <[email protected]>
*
* Allow an NFS filesystem to be mounted as root. The way this works is:
* (1) Use the IP autoconfig mechanism to set local IP addresses and routes.
* (2) Construct the device string and the options string using DHCP
* option 17 and/or kernel command line options.
* (3) When mount_root() sets up the root file system, pass these strings
* to the NFS client's regular mount interface via sys_mount().
*
*
* Changes:
*
* Alan Cox : Removed get_address name clash with FPU.
* Alan Cox : Reformatted a bit.
* Gero Kuhlmann : Code cleanup
* Michael Rausch : Fixed recognition of an incoming RARP answer.
* Martin Mares : (2.0) Auto-configuration via BOOTP supported.
* Martin Mares : Manual selection of interface & BOOTP/RARP.
* Martin Mares : Using network routes instead of host routes,
* allowing the default configuration to be used
* for normal operation of the host.
* Martin Mares : Randomized timer with exponential backoff
* installed to minimize network congestion.
* Martin Mares : Code cleanup.
* Martin Mares : (2.1) BOOTP and RARP made configuration options.
* Martin Mares : Server hostname generation fixed.
 *	Gerd Knorr	:	Fixed weird inode handling
* Martin Mares : (2.2) "0.0.0.0" addresses from command line ignored.
* Martin Mares : RARP replies not tested for server address.
* Gero Kuhlmann : (2.3) Some bug fixes and code cleanup again (please
* send me your new patches _before_ bothering
 *				Linus so that I don't always have to cleanup
* _afterwards_ - thanks)
* Gero Kuhlmann : Last changes of Martin Mares undone.
* Gero Kuhlmann : RARP replies are tested for specified server
* again. However, it's now possible to have
* different RARP and NFS servers.
* Gero Kuhlmann : "0.0.0.0" addresses from command line are
* now mapped to INADDR_NONE.
* Gero Kuhlmann : Fixed a bug which prevented BOOTP path name
* from being used (thanks to Leo Spiekman)
* Andy Walker : Allow to specify the NFS server in nfs_root
* without giving a path name
* Swen Thümmler : Allow to specify the NFS options in nfs_root
* without giving a path name. Fix BOOTP request
* for domainname (domainname is NIS domain, not
* DNS domain!). Skip dummy devices for BOOTP.
* Jacek Zapala : Fixed a bug which prevented server-ip address
* from nfsroot parameter from being used.
* Olaf Kirch : Adapted to new NFS code.
* Jakub Jelinek : Free used code segment.
* Marko Kohtala : Fixed some bugs.
* Martin Mares : Debug message cleanup
* Martin Mares : Changed to use the new generic IP layer autoconfig
* code. BOOTP and RARP moved there.
* Martin Mares : Default path now contains host name instead of
* host IP address (but host name defaults to IP
* address anyway).
* Martin Mares : Use root_server_addr appropriately during setup.
* Martin Mares : Rewrote parameter parsing, now hopefully giving
* correct overriding.
* Trond Myklebust : Add in preliminary support for NFSv3 and TCP.
* Fix bug in root_nfs_addr(). nfs_data.namlen
* is NOT for the length of the hostname.
* Hua Qin : Support for mounting root file system via
* NFS over TCP.
* Fabian Frederick: Option parser rebuilt (using parser lib)
* Chuck Lever : Use super.c's text-based mount option parsing
* Chuck Lever : Add "nfsrootdebug".
*/
#include <linux/types.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/nfs.h>
#include <linux/nfs_fs.h>
#include <linux/utsname.h>
#include <linux/root_dev.h>
#include <net/ipconfig.h>
#include "internal.h"
#define NFSDBG_FACILITY NFSDBG_ROOT
/* Default path we try to mount. "%s" gets replaced by our IP address */
#define NFS_ROOT "/tftpboot/%s"
/* Default NFSROOT mount options. */
#if defined(CONFIG_NFS_V2)
#define NFS_DEF_OPTIONS "vers=2,tcp,rsize=4096,wsize=4096"
#elif defined(CONFIG_NFS_V3)
#define NFS_DEF_OPTIONS "vers=3,tcp,rsize=4096,wsize=4096"
#else
#define NFS_DEF_OPTIONS "vers=4,tcp,rsize=4096,wsize=4096"
#endif
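/* The default prefers the lowest NFS version compiled in, falling
 * back to v4 when neither v2 nor v3 is configured. */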
/* Parameters passed from the kernel command line */
static char nfs_root_parms[NFS_MAXPATHLEN + 1] __initdata = "";
/* Text-based mount options passed to super.c */
static char nfs_root_options[256] __initdata = NFS_DEF_OPTIONS;
/* Address of NFS server */
static __be32 servaddr __initdata = htonl(INADDR_NONE);
/* Name of directory to mount */
static char nfs_export_path[NFS_MAXPATHLEN + 1] __initdata = "";
/* server:export path string passed to super.c */
static char nfs_root_device[NFS_MAXPATHLEN + 1] __initdata = "";
#ifdef NFS_DEBUG
/*
* When the "nfsrootdebug" kernel command line option is specified,
* enable debugging messages for NFSROOT.
*/
static int __init nfs_root_debug(char *__unused)
{
nfs_debug |= NFSDBG_ROOT | NFSDBG_MOUNT;
return 1;
}
__setup("nfsrootdebug", nfs_root_debug);
#endif
/*
* Parse NFS server and directory information passed on the kernel
* command line.
*
* nfsroot=[<server-ip>:]<root-dir>[,<nfs-options>]
*
* If there is a "%s" token in the <root-dir> string, it is replaced
* by the ASCII-representation of the client's IP address.
*/
static int __init nfs_root_setup(char *line)
{
ROOT_DEV = Root_NFS;
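	/*
	 * A leading '/', ',' or digit means the parameter already
	 * carries a path (and possibly a server IP or options);
	 * otherwise treat it as a name and build the default path by
	 * substituting it into NFS_ROOT.
	 */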
if (line[0] == '/' || line[0] == ',' || (line[0] >= '0' && line[0] <= '9')) {
strscpy(nfs_root_parms, line, sizeof(nfs_root_parms));
} else {
size_t n = strlen(line) + sizeof(NFS_ROOT) - 1;
if (n >= sizeof(nfs_root_parms))
line[sizeof(nfs_root_parms) - sizeof(NFS_ROOT) - 2] = '\0';
sprintf(nfs_root_parms, NFS_ROOT, line);
}
/*
* Extract the IP address of the NFS server containing our
* root file system, if one was specified.
*
* Note: root_nfs_parse_addr() removes the server-ip from
* nfs_root_parms, if it exists.
*/
root_server_addr = root_nfs_parse_addr(nfs_root_parms);
return 1;
}
__setup("nfsroot=", nfs_root_setup);
static int __init root_nfs_copy(char *dest, const char *src,
const size_t destlen)
{
if (strscpy(dest, src, destlen) == -E2BIG)
return -1;
return 0;
}
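/*
 * Append @src to the comma-separated option list in @dest, adding a
 * separating comma if needed. Returns -1 if @dest would overflow.
 */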
static int __init root_nfs_cat(char *dest, const char *src,
const size_t destlen)
{
size_t len = strlen(dest);
if (len && dest[len - 1] != ',')
if (strlcat(dest, ",", destlen) >= destlen)
return -1;
if (strlcat(dest, src, destlen) >= destlen)
return -1;
return 0;
}
/*
* Parse out root export path and mount options from
* passed-in string @incoming.
*
* Copy the export path into @exppath.
*/
static int __init root_nfs_parse_options(char *incoming, char *exppath,
const size_t exppathlen)
{
char *p;
/*
* Set the NFS remote path
*/
p = strsep(&incoming, ",");
if (*p != '\0' && strcmp(p, "default") != 0)
if (root_nfs_copy(exppath, p, exppathlen))
return -1;
/*
* @incoming now points to the rest of the string; if it
* contains something, append it to our root options buffer
*/
if (incoming != NULL && *incoming != '\0')
if (root_nfs_cat(nfs_root_options, incoming,
sizeof(nfs_root_options)))
return -1;
return 0;
}
/*
* Decode the export directory path name and NFS options from
* the kernel command line. This has to be done late in order to
* use a dynamically acquired client IP address for the remote
* root directory path.
*
* Returns zero if successful; otherwise -1 is returned.
*/
static int __init root_nfs_data(char *cmdline)
{
char mand_options[sizeof("nolock,addr=") + INET_ADDRSTRLEN + 1];
int len, retval = -1;
char *tmp = NULL;
const size_t tmplen = sizeof(nfs_export_path);
tmp = kzalloc(tmplen, GFP_KERNEL);
if (tmp == NULL)
goto out_nomem;
strcpy(tmp, NFS_ROOT);
if (root_server_path[0] != '\0') {
dprintk("Root-NFS: DHCPv4 option 17: %s\n",
root_server_path);
if (root_nfs_parse_options(root_server_path, tmp, tmplen))
goto out_optionstoolong;
}
if (cmdline[0] != '\0') {
dprintk("Root-NFS: nfsroot=%s\n", cmdline);
if (root_nfs_parse_options(cmdline, tmp, tmplen))
goto out_optionstoolong;
}
/*
* Append mandatory options for nfsroot so they override
* what has come before
*/
snprintf(mand_options, sizeof(mand_options), "nolock,addr=%pI4",
&servaddr);
if (root_nfs_cat(nfs_root_options, mand_options,
sizeof(nfs_root_options)))
goto out_optionstoolong;
/*
* Set up nfs_root_device. For NFS mounts, this looks like
*
* server:/path
*
* At this point, utsname()->nodename contains our local
* IP address or hostname, set by ipconfig. If "%s" exists
* in tmp, substitute the nodename, then shovel the whole
* mess into nfs_root_device.
*/
len = snprintf(nfs_export_path, sizeof(nfs_export_path),
tmp, utsname()->nodename);
if (len >= (int)sizeof(nfs_export_path))
goto out_devnametoolong;
len = snprintf(nfs_root_device, sizeof(nfs_root_device),
"%pI4:%s", &servaddr, nfs_export_path);
if (len >= (int)sizeof(nfs_root_device))
goto out_devnametoolong;
retval = 0;
out:
kfree(tmp);
return retval;
out_nomem:
printk(KERN_ERR "Root-NFS: could not allocate memory\n");
goto out;
out_optionstoolong:
printk(KERN_ERR "Root-NFS: mount options string too long\n");
goto out;
out_devnametoolong:
printk(KERN_ERR "Root-NFS: root device name too long.\n");
goto out;
}
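/*
 * On success the module-level buffers end up looking like (hypothetical
 * addresses):
 *
 *	nfs_root_device : "10.0.0.1:/tftpboot/192.168.0.2"
 *	nfs_root_options: "vers=2,tcp,rsize=4096,wsize=4096,nolock,addr=10.0.0.1"
 */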
/**
* nfs_root_data - Return prepared 'data' for NFSROOT mount
* @root_device: OUT: address of string containing NFSROOT device
* @root_data: OUT: address of string containing NFSROOT mount options
*
* Returns zero and sets @root_device and @root_data if successful,
* otherwise -1 is returned.
*/
int __init nfs_root_data(char **root_device, char **root_data)
{
servaddr = root_server_addr;
if (servaddr == htonl(INADDR_NONE)) {
printk(KERN_ERR "Root-NFS: no NFS server address\n");
return -1;
}
if (root_nfs_data(nfs_root_parms) < 0)
return -1;
*root_device = nfs_root_device;
*root_data = nfs_root_options;
return 0;
}
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* PRU-ICSS INTC IRQChip driver for various TI SoCs
*
* Copyright (C) 2016-2020 Texas Instruments Incorporated - http://www.ti.com/
*
* Author(s):
* Andrew F. Davis <[email protected]>
* Suman Anna <[email protected]>
* Grzegorz Jaszczyk <[email protected]> for Texas Instruments
*
* Copyright (C) 2019 David Lechner <[email protected]>
*/
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
/*
* Number of host interrupts reaching the main MPU sub-system. Note that this
* is not the same as the total number of host interrupts supported by the PRUSS
* INTC instance
*/
#define MAX_NUM_HOST_IRQS 8
/* minimum starting host interrupt number for MPU */
#define FIRST_PRU_HOST_INT 2
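/*
 * Host interrupts 0 and 1 are routed to the PRU cores themselves, which
 * is why the interrupts reaching the MPU start at host interrupt 2.
 */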
/* PRU_ICSS_INTC registers */
#define PRU_INTC_REVID 0x0000
#define PRU_INTC_CR 0x0004
#define PRU_INTC_GER 0x0010
#define PRU_INTC_GNLR 0x001c
#define PRU_INTC_SISR 0x0020
#define PRU_INTC_SICR 0x0024
#define PRU_INTC_EISR 0x0028
#define PRU_INTC_EICR 0x002c
#define PRU_INTC_HIEISR 0x0034
#define PRU_INTC_HIDISR 0x0038
#define PRU_INTC_GPIR 0x0080
#define PRU_INTC_SRSR(x) (0x0200 + (x) * 4)
#define PRU_INTC_SECR(x) (0x0280 + (x) * 4)
#define PRU_INTC_ESR(x) (0x0300 + (x) * 4)
#define PRU_INTC_ECR(x) (0x0380 + (x) * 4)
#define PRU_INTC_CMR(x) (0x0400 + (x) * 4)
#define PRU_INTC_HMR(x) (0x0800 + (x) * 4)
#define PRU_INTC_HIPIR(x) (0x0900 + (x) * 4)
#define PRU_INTC_SIPR(x) (0x0d00 + (x) * 4)
#define PRU_INTC_SITR(x) (0x0d80 + (x) * 4)
#define PRU_INTC_HINLR(x) (0x1100 + (x) * 4)
#define PRU_INTC_HIER 0x1500
/* CMR register bit-field macros */
#define CMR_EVT_MAP_MASK 0xf
#define CMR_EVT_MAP_BITS 8
#define CMR_EVT_PER_REG 4
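/*
 * Worked example: system event 10 is configured via CMR2 (10 / 4), at bit
 * offset (10 % 4) * 8 = 16; only the low 4 bits of that byte carry the
 * channel number.
 */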
/* HMR register bit-field macros */
#define HMR_CH_MAP_MASK 0xf
#define HMR_CH_MAP_BITS 8
#define HMR_CH_PER_REG 4
/* HIPIR register bit-fields */
#define INTC_HIPIR_NONE_HINT 0x80000000
#define MAX_PRU_SYS_EVENTS 160
#define MAX_PRU_CHANNELS 20
/**
* struct pruss_intc_map_record - keeps track of actual mapping state
* @value: The currently mapped value (channel or host)
* @ref_count: Keeps track of number of current users of this resource
*/
struct pruss_intc_map_record {
u8 value;
u8 ref_count;
};
/**
* struct pruss_intc_match_data - match data to handle SoC variations
* @num_system_events: number of input system events handled by the PRUSS INTC
* @num_host_events: number of host events (which is equal to number of
* channels) supported by the PRUSS INTC
*/
struct pruss_intc_match_data {
u8 num_system_events;
u8 num_host_events;
};
/**
* struct pruss_intc - PRUSS interrupt controller structure
* @event_channel: current state of system event to channel mappings
* @channel_host: current state of channel to host mappings
* @irqs: kernel irq numbers corresponding to PRUSS host interrupts
* @base: base virtual address of INTC register space
* @domain: irq domain for this interrupt controller
* @soc_config: cached PRUSS INTC IP configuration data
* @dev: PRUSS INTC device pointer
* @lock: mutex to serialize interrupts mapping
*/
struct pruss_intc {
struct pruss_intc_map_record event_channel[MAX_PRU_SYS_EVENTS];
struct pruss_intc_map_record channel_host[MAX_PRU_CHANNELS];
unsigned int irqs[MAX_NUM_HOST_IRQS];
void __iomem *base;
struct irq_domain *domain;
const struct pruss_intc_match_data *soc_config;
struct device *dev;
struct mutex lock; /* PRUSS INTC lock */
};
/**
* struct pruss_host_irq_data - PRUSS host irq data structure
* @intc: PRUSS interrupt controller pointer
* @host_irq: host irq number
*/
struct pruss_host_irq_data {
struct pruss_intc *intc;
u8 host_irq;
};
static inline u32 pruss_intc_read_reg(struct pruss_intc *intc, unsigned int reg)
{
return readl_relaxed(intc->base + reg);
}
static inline void pruss_intc_write_reg(struct pruss_intc *intc,
unsigned int reg, u32 val)
{
writel_relaxed(val, intc->base + reg);
}
static void pruss_intc_update_cmr(struct pruss_intc *intc, unsigned int evt,
u8 ch)
{
u32 idx, offset, val;
idx = evt / CMR_EVT_PER_REG;
offset = (evt % CMR_EVT_PER_REG) * CMR_EVT_MAP_BITS;
val = pruss_intc_read_reg(intc, PRU_INTC_CMR(idx));
val &= ~(CMR_EVT_MAP_MASK << offset);
val |= ch << offset;
pruss_intc_write_reg(intc, PRU_INTC_CMR(idx), val);
dev_dbg(intc->dev, "SYSEV%u -> CH%d (CMR%d 0x%08x)\n", evt, ch,
idx, pruss_intc_read_reg(intc, PRU_INTC_CMR(idx)));
}
static void pruss_intc_update_hmr(struct pruss_intc *intc, u8 ch, u8 host)
{
u32 idx, offset, val;
idx = ch / HMR_CH_PER_REG;
offset = (ch % HMR_CH_PER_REG) * HMR_CH_MAP_BITS;
val = pruss_intc_read_reg(intc, PRU_INTC_HMR(idx));
val &= ~(HMR_CH_MAP_MASK << offset);
val |= host << offset;
pruss_intc_write_reg(intc, PRU_INTC_HMR(idx), val);
dev_dbg(intc->dev, "CH%d -> HOST%d (HMR%d 0x%08x)\n", ch, host, idx,
pruss_intc_read_reg(intc, PRU_INTC_HMR(idx)));
}
/**
* pruss_intc_map() - configure the PRUSS INTC
* @intc: PRUSS interrupt controller pointer
* @hwirq: the system event number
*
 * Configures the PRUSS INTC with the event/channel/host mapping parsed
 * earlier in the xlate function.
*/
static void pruss_intc_map(struct pruss_intc *intc, unsigned long hwirq)
{
struct device *dev = intc->dev;
u8 ch, host, reg_idx;
u32 val;
mutex_lock(&intc->lock);
intc->event_channel[hwirq].ref_count++;
ch = intc->event_channel[hwirq].value;
host = intc->channel_host[ch].value;
pruss_intc_update_cmr(intc, hwirq, ch);
reg_idx = hwirq / 32;
val = BIT(hwirq % 32);
/* clear and enable system event */
pruss_intc_write_reg(intc, PRU_INTC_ESR(reg_idx), val);
pruss_intc_write_reg(intc, PRU_INTC_SECR(reg_idx), val);
if (++intc->channel_host[ch].ref_count == 1) {
pruss_intc_update_hmr(intc, ch, host);
/* enable host interrupts */
pruss_intc_write_reg(intc, PRU_INTC_HIEISR, host);
}
dev_dbg(dev, "mapped system_event = %lu channel = %d host = %d",
hwirq, ch, host);
mutex_unlock(&intc->lock);
}
/**
* pruss_intc_unmap() - unconfigure the PRUSS INTC
* @intc: PRUSS interrupt controller pointer
* @hwirq: the system event number
*
* Undo whatever was done in pruss_intc_map() for a PRU core.
* Mappings are reference counted, so resources are only disabled when there
* are no longer any users.
*/
static void pruss_intc_unmap(struct pruss_intc *intc, unsigned long hwirq)
{
u8 ch, host, reg_idx;
u32 val;
mutex_lock(&intc->lock);
ch = intc->event_channel[hwirq].value;
host = intc->channel_host[ch].value;
if (--intc->channel_host[ch].ref_count == 0) {
/* disable host interrupts */
pruss_intc_write_reg(intc, PRU_INTC_HIDISR, host);
/* clear the map using reset value 0 */
pruss_intc_update_hmr(intc, ch, 0);
}
intc->event_channel[hwirq].ref_count--;
reg_idx = hwirq / 32;
val = BIT(hwirq % 32);
/* disable system events */
pruss_intc_write_reg(intc, PRU_INTC_ECR(reg_idx), val);
/* clear any pending status */
pruss_intc_write_reg(intc, PRU_INTC_SECR(reg_idx), val);
/* clear the map using reset value 0 */
pruss_intc_update_cmr(intc, hwirq, 0);
dev_dbg(intc->dev, "unmapped system_event = %lu channel = %d host = %d\n",
hwirq, ch, host);
mutex_unlock(&intc->lock);
}
static void pruss_intc_init(struct pruss_intc *intc)
{
const struct pruss_intc_match_data *soc_config = intc->soc_config;
int num_chnl_map_regs, num_host_intr_regs, num_event_type_regs, i;
num_chnl_map_regs = DIV_ROUND_UP(soc_config->num_system_events,
CMR_EVT_PER_REG);
num_host_intr_regs = DIV_ROUND_UP(soc_config->num_host_events,
HMR_CH_PER_REG);
num_event_type_regs = DIV_ROUND_UP(soc_config->num_system_events, 32);
/*
* configure polarity (SIPR register) to active high and
* type (SITR register) to level interrupt for all system events
*/
for (i = 0; i < num_event_type_regs; i++) {
pruss_intc_write_reg(intc, PRU_INTC_SIPR(i), 0xffffffff);
pruss_intc_write_reg(intc, PRU_INTC_SITR(i), 0);
}
/* clear all interrupt channel map registers, 4 events per register */
for (i = 0; i < num_chnl_map_regs; i++)
pruss_intc_write_reg(intc, PRU_INTC_CMR(i), 0);
/* clear all host interrupt map registers, 4 channels per register */
for (i = 0; i < num_host_intr_regs; i++)
pruss_intc_write_reg(intc, PRU_INTC_HMR(i), 0);
/* global interrupt enable */
pruss_intc_write_reg(intc, PRU_INTC_GER, 1);
}
static void pruss_intc_irq_ack(struct irq_data *data)
{
struct pruss_intc *intc = irq_data_get_irq_chip_data(data);
unsigned int hwirq = data->hwirq;
pruss_intc_write_reg(intc, PRU_INTC_SICR, hwirq);
}
static void pruss_intc_irq_mask(struct irq_data *data)
{
struct pruss_intc *intc = irq_data_get_irq_chip_data(data);
unsigned int hwirq = data->hwirq;
pruss_intc_write_reg(intc, PRU_INTC_EICR, hwirq);
}
static void pruss_intc_irq_unmask(struct irq_data *data)
{
struct pruss_intc *intc = irq_data_get_irq_chip_data(data);
unsigned int hwirq = data->hwirq;
pruss_intc_write_reg(intc, PRU_INTC_EISR, hwirq);
}
static int pruss_intc_irq_reqres(struct irq_data *data)
{
if (!try_module_get(THIS_MODULE))
return -ENODEV;
return 0;
}
static void pruss_intc_irq_relres(struct irq_data *data)
{
module_put(THIS_MODULE);
}
static int pruss_intc_irq_get_irqchip_state(struct irq_data *data,
enum irqchip_irq_state which,
bool *state)
{
struct pruss_intc *intc = irq_data_get_irq_chip_data(data);
u32 reg, mask, srsr;
if (which != IRQCHIP_STATE_PENDING)
return -EINVAL;
reg = PRU_INTC_SRSR(data->hwirq / 32);
mask = BIT(data->hwirq % 32);
srsr = pruss_intc_read_reg(intc, reg);
*state = !!(srsr & mask);
return 0;
}
static int pruss_intc_irq_set_irqchip_state(struct irq_data *data,
enum irqchip_irq_state which,
bool state)
{
struct pruss_intc *intc = irq_data_get_irq_chip_data(data);
if (which != IRQCHIP_STATE_PENDING)
return -EINVAL;
if (state)
pruss_intc_write_reg(intc, PRU_INTC_SISR, data->hwirq);
else
pruss_intc_write_reg(intc, PRU_INTC_SICR, data->hwirq);
return 0;
}
static struct irq_chip pruss_irqchip = {
.name = "pruss-intc",
.irq_ack = pruss_intc_irq_ack,
.irq_mask = pruss_intc_irq_mask,
.irq_unmask = pruss_intc_irq_unmask,
.irq_request_resources = pruss_intc_irq_reqres,
.irq_release_resources = pruss_intc_irq_relres,
.irq_get_irqchip_state = pruss_intc_irq_get_irqchip_state,
.irq_set_irqchip_state = pruss_intc_irq_set_irqchip_state,
};
static int pruss_intc_validate_mapping(struct pruss_intc *intc, int event,
int channel, int host)
{
struct device *dev = intc->dev;
int ret = 0;
mutex_lock(&intc->lock);
/* check if sysevent already assigned */
if (intc->event_channel[event].ref_count > 0 &&
intc->event_channel[event].value != channel) {
dev_err(dev, "event %d (req. ch %d) already assigned to channel %d\n",
event, channel, intc->event_channel[event].value);
ret = -EBUSY;
goto unlock;
}
/* check if channel already assigned */
if (intc->channel_host[channel].ref_count > 0 &&
intc->channel_host[channel].value != host) {
dev_err(dev, "channel %d (req. host %d) already assigned to host %d\n",
channel, host, intc->channel_host[channel].value);
ret = -EBUSY;
goto unlock;
}
intc->event_channel[event].value = channel;
intc->channel_host[channel].value = host;
unlock:
mutex_unlock(&intc->lock);
return ret;
}
static int
pruss_intc_irq_domain_xlate(struct irq_domain *d, struct device_node *node,
const u32 *intspec, unsigned int intsize,
unsigned long *out_hwirq, unsigned int *out_type)
{
struct pruss_intc *intc = d->host_data;
struct device *dev = intc->dev;
int ret, sys_event, channel, host;
if (intsize < 3)
return -EINVAL;
sys_event = intspec[0];
if (sys_event < 0 || sys_event >= intc->soc_config->num_system_events) {
dev_err(dev, "%d is not valid event number\n", sys_event);
return -EINVAL;
}
channel = intspec[1];
if (channel < 0 || channel >= intc->soc_config->num_host_events) {
dev_err(dev, "%d is not valid channel number", channel);
return -EINVAL;
}
host = intspec[2];
if (host < 0 || host >= intc->soc_config->num_host_events) {
dev_err(dev, "%d is not valid host irq number\n", host);
return -EINVAL;
}
/* check if requested sys_event was already mapped, if so validate it */
ret = pruss_intc_validate_mapping(intc, sys_event, channel, host);
if (ret)
return ret;
*out_hwirq = sys_event;
*out_type = IRQ_TYPE_LEVEL_HIGH;
return 0;
}
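/*
 * Illustrative (hypothetical) device tree consumer of the 3-cell specifier
 * validated above, <system-event channel host>:
 *
 *	interrupts-extended = <&pruss_intc 16 2 2>;
 *
 * maps system event 16 to channel 2 and channel 2 to host interrupt 2.
 */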
static int pruss_intc_irq_domain_map(struct irq_domain *d, unsigned int virq,
irq_hw_number_t hw)
{
struct pruss_intc *intc = d->host_data;
pruss_intc_map(intc, hw);
irq_set_chip_data(virq, intc);
irq_set_chip_and_handler(virq, &pruss_irqchip, handle_level_irq);
return 0;
}
static void pruss_intc_irq_domain_unmap(struct irq_domain *d, unsigned int virq)
{
struct pruss_intc *intc = d->host_data;
unsigned long hwirq = irqd_to_hwirq(irq_get_irq_data(virq));
irq_set_chip_and_handler(virq, NULL, NULL);
irq_set_chip_data(virq, NULL);
pruss_intc_unmap(intc, hwirq);
}
static const struct irq_domain_ops pruss_intc_irq_domain_ops = {
.xlate = pruss_intc_irq_domain_xlate,
.map = pruss_intc_irq_domain_map,
.unmap = pruss_intc_irq_domain_unmap,
};
static void pruss_intc_irq_handler(struct irq_desc *desc)
{
unsigned int irq = irq_desc_get_irq(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
struct pruss_host_irq_data *host_irq_data = irq_get_handler_data(irq);
struct pruss_intc *intc = host_irq_data->intc;
u8 host_irq = host_irq_data->host_irq + FIRST_PRU_HOST_INT;
chained_irq_enter(chip, desc);
while (true) {
u32 hipir;
int hwirq, err;
/* get highest priority pending PRUSS system event */
hipir = pruss_intc_read_reg(intc, PRU_INTC_HIPIR(host_irq));
if (hipir & INTC_HIPIR_NONE_HINT)
break;
hwirq = hipir & GENMASK(9, 0);
err = generic_handle_domain_irq(intc->domain, hwirq);
/*
* NOTE: manually ACK any system events that do not have a
* handler mapped yet
*/
if (WARN_ON_ONCE(err))
pruss_intc_write_reg(intc, PRU_INTC_SICR, hwirq);
}
chained_irq_exit(chip, desc);
}
static const char * const irq_names[MAX_NUM_HOST_IRQS] = {
"host_intr0", "host_intr1", "host_intr2", "host_intr3",
"host_intr4", "host_intr5", "host_intr6", "host_intr7",
};
static int pruss_intc_probe(struct platform_device *pdev)
{
const struct pruss_intc_match_data *data;
struct device *dev = &pdev->dev;
struct pruss_intc *intc;
struct pruss_host_irq_data *host_data;
int i, irq, ret;
u8 max_system_events, irqs_reserved = 0;
data = of_device_get_match_data(dev);
if (!data)
return -ENODEV;
max_system_events = data->num_system_events;
intc = devm_kzalloc(dev, sizeof(*intc), GFP_KERNEL);
if (!intc)
return -ENOMEM;
intc->soc_config = data;
intc->dev = dev;
platform_set_drvdata(pdev, intc);
intc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(intc->base))
return PTR_ERR(intc->base);
ret = of_property_read_u8(dev->of_node, "ti,irqs-reserved",
&irqs_reserved);
/*
	 * The "ti,irqs-reserved" property is used only on some SoCs, so
	 * not having it is still valid
*/
if (ret < 0 && ret != -EINVAL)
return ret;
pruss_intc_init(intc);
mutex_init(&intc->lock);
intc->domain = irq_domain_add_linear(dev->of_node, max_system_events,
&pruss_intc_irq_domain_ops, intc);
if (!intc->domain)
return -ENOMEM;
for (i = 0; i < MAX_NUM_HOST_IRQS; i++) {
if (irqs_reserved & BIT(i))
continue;
irq = platform_get_irq_byname(pdev, irq_names[i]);
if (irq < 0) {
ret = irq;
goto fail_irq;
}
intc->irqs[i] = irq;
host_data = devm_kzalloc(dev, sizeof(*host_data), GFP_KERNEL);
if (!host_data) {
ret = -ENOMEM;
goto fail_irq;
}
host_data->intc = intc;
host_data->host_irq = i;
irq_set_handler_data(irq, host_data);
irq_set_chained_handler(irq, pruss_intc_irq_handler);
}
return 0;
fail_irq:
while (--i >= 0) {
if (intc->irqs[i])
irq_set_chained_handler_and_data(intc->irqs[i], NULL,
NULL);
}
irq_domain_remove(intc->domain);
return ret;
}
static void pruss_intc_remove(struct platform_device *pdev)
{
struct pruss_intc *intc = platform_get_drvdata(pdev);
u8 max_system_events = intc->soc_config->num_system_events;
unsigned int hwirq;
int i;
for (i = 0; i < MAX_NUM_HOST_IRQS; i++) {
if (intc->irqs[i])
irq_set_chained_handler_and_data(intc->irqs[i], NULL,
NULL);
}
for (hwirq = 0; hwirq < max_system_events; hwirq++)
irq_dispose_mapping(irq_find_mapping(intc->domain, hwirq));
irq_domain_remove(intc->domain);
}
static const struct pruss_intc_match_data pruss_intc_data = {
.num_system_events = 64,
.num_host_events = 10,
};
static const struct pruss_intc_match_data icssg_intc_data = {
.num_system_events = 160,
.num_host_events = 20,
};
static const struct of_device_id pruss_intc_of_match[] = {
{
.compatible = "ti,pruss-intc",
.data = &pruss_intc_data,
},
{
.compatible = "ti,icssg-intc",
.data = &icssg_intc_data,
},
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, pruss_intc_of_match);
static struct platform_driver pruss_intc_driver = {
.driver = {
.name = "pruss-intc",
.of_match_table = pruss_intc_of_match,
.suppress_bind_attrs = true,
},
.probe = pruss_intc_probe,
.remove = pruss_intc_remove,
};
module_platform_driver(pruss_intc_driver);
MODULE_AUTHOR("Andrew F. Davis <[email protected]>");
MODULE_AUTHOR("Suman Anna <[email protected]>");
MODULE_AUTHOR("Grzegorz Jaszczyk <[email protected]>");
MODULE_DESCRIPTION("TI PRU-ICSS INTC Driver");
MODULE_LICENSE("GPL v2");
|
// SPDX-License-Identifier: GPL-2.0-or-later
/* Hisilicon Hibmc SoC drm driver
*
* Based on the bochs drm driver.
*
* Copyright (c) 2016 Huawei Limited.
*
* Author:
* Rongrong Zou <[email protected]>
* Rongrong Zou <[email protected]>
* Jianhua Li <[email protected]>
*/
#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_ttm.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_module.h>
#include <drm/drm_vblank.h>
#include "hibmc_drm_drv.h"
#include "hibmc_drm_regs.h"
DEFINE_DRM_GEM_FOPS(hibmc_fops);
static irqreturn_t hibmc_interrupt(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *)arg;
struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
u32 status;
status = readl(priv->mmio + HIBMC_RAW_INTERRUPT);
if (status & HIBMC_RAW_INTERRUPT_VBLANK(1)) {
writel(HIBMC_RAW_INTERRUPT_VBLANK(1),
priv->mmio + HIBMC_RAW_INTERRUPT);
drm_handle_vblank(dev, 0);
}
return IRQ_HANDLED;
}
static int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
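	/*
	 * pg_align = 0 keeps the default buffer alignment; the scanline
	 * pitch is aligned to 128 bytes.
	 */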
return drm_gem_vram_fill_create_dumb(file, dev, 0, 128, args);
}
static const struct drm_driver hibmc_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &hibmc_fops,
.name = "hibmc",
.date = "20160828",
.desc = "hibmc drm driver",
.major = 1,
.minor = 0,
.debugfs_init = drm_vram_mm_debugfs_init,
.dumb_create = hibmc_dumb_create,
.dumb_map_offset = drm_gem_ttm_dumb_map_offset,
DRM_FBDEV_TTM_DRIVER_OPS,
};
static int __maybe_unused hibmc_pm_suspend(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
return drm_mode_config_helper_suspend(drm_dev);
}
static int __maybe_unused hibmc_pm_resume(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
return drm_mode_config_helper_resume(drm_dev);
}
static const struct dev_pm_ops hibmc_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(hibmc_pm_suspend,
hibmc_pm_resume)
};
static const struct drm_mode_config_funcs hibmc_mode_funcs = {
.mode_valid = drm_vram_helper_mode_valid,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
.fb_create = drm_gem_fb_create,
};
static int hibmc_kms_init(struct hibmc_drm_private *priv)
{
struct drm_device *dev = &priv->dev;
int ret;
ret = drmm_mode_config_init(dev);
if (ret)
return ret;
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
dev->mode_config.max_width = 1920;
dev->mode_config.max_height = 1200;
dev->mode_config.preferred_depth = 24;
dev->mode_config.prefer_shadow = 1;
dev->mode_config.funcs = (void *)&hibmc_mode_funcs;
ret = hibmc_de_init(priv);
if (ret) {
drm_err(dev, "failed to init de: %d\n", ret);
return ret;
}
ret = hibmc_vdac_init(priv);
if (ret) {
drm_err(dev, "failed to init vdac: %d\n", ret);
return ret;
}
return 0;
}
/*
 * The display controller can operate in one of three power modes:
 * 0, 1 or Sleep.
*/
void hibmc_set_power_mode(struct hibmc_drm_private *priv, u32 power_mode)
{
u32 control_value = 0;
void __iomem *mmio = priv->mmio;
u32 input = 1;
if (power_mode > HIBMC_PW_MODE_CTL_MODE_SLEEP)
return;
if (power_mode == HIBMC_PW_MODE_CTL_MODE_SLEEP)
input = 0;
control_value = readl(mmio + HIBMC_POWER_MODE_CTRL);
control_value &= ~(HIBMC_PW_MODE_CTL_MODE_MASK |
HIBMC_PW_MODE_CTL_OSC_INPUT_MASK);
control_value |= HIBMC_FIELD(HIBMC_PW_MODE_CTL_MODE, power_mode);
control_value |= HIBMC_FIELD(HIBMC_PW_MODE_CTL_OSC_INPUT, input);
writel(control_value, mmio + HIBMC_POWER_MODE_CTRL);
}
void hibmc_set_current_gate(struct hibmc_drm_private *priv, unsigned int gate)
{
u32 gate_reg;
u32 mode;
void __iomem *mmio = priv->mmio;
/* Get current power mode. */
mode = (readl(mmio + HIBMC_POWER_MODE_CTRL) &
HIBMC_PW_MODE_CTL_MODE_MASK) >> HIBMC_PW_MODE_CTL_MODE_SHIFT;
switch (mode) {
case HIBMC_PW_MODE_CTL_MODE_MODE0:
gate_reg = HIBMC_MODE0_GATE;
break;
case HIBMC_PW_MODE_CTL_MODE_MODE1:
gate_reg = HIBMC_MODE1_GATE;
break;
default:
gate_reg = HIBMC_MODE0_GATE;
break;
}
writel(gate, mmio + gate_reg);
}
static void hibmc_hw_config(struct hibmc_drm_private *priv)
{
u32 reg;
/* On hardware reset, power mode 0 is default. */
hibmc_set_power_mode(priv, HIBMC_PW_MODE_CTL_MODE_MODE0);
	/* Enable display power gate & LOCALMEM power gate */
reg = readl(priv->mmio + HIBMC_CURRENT_GATE);
reg &= ~HIBMC_CURR_GATE_DISPLAY_MASK;
reg &= ~HIBMC_CURR_GATE_LOCALMEM_MASK;
reg |= HIBMC_CURR_GATE_DISPLAY(1);
reg |= HIBMC_CURR_GATE_LOCALMEM(1);
hibmc_set_current_gate(priv, reg);
/*
	 * Reset the memory controller. If the memory controller
	 * is not reset in the chip, the system might hang when
	 * software accesses the memory. The memory must be reset
	 * after changing the MXCLK.
*/
reg = readl(priv->mmio + HIBMC_MISC_CTRL);
reg &= ~HIBMC_MSCCTL_LOCALMEM_RESET_MASK;
reg |= HIBMC_MSCCTL_LOCALMEM_RESET(0);
writel(reg, priv->mmio + HIBMC_MISC_CTRL);
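	/* then release the memory controller from reset */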
reg &= ~HIBMC_MSCCTL_LOCALMEM_RESET_MASK;
reg |= HIBMC_MSCCTL_LOCALMEM_RESET(1);
writel(reg, priv->mmio + HIBMC_MISC_CTRL);
}
static int hibmc_hw_map(struct hibmc_drm_private *priv)
{
struct drm_device *dev = &priv->dev;
struct pci_dev *pdev = to_pci_dev(dev->dev);
resource_size_t ioaddr, iosize;
ioaddr = pci_resource_start(pdev, 1);
iosize = pci_resource_len(pdev, 1);
priv->mmio = devm_ioremap(dev->dev, ioaddr, iosize);
if (!priv->mmio) {
drm_err(dev, "Cannot map mmio region\n");
return -ENOMEM;
}
return 0;
}
static int hibmc_hw_init(struct hibmc_drm_private *priv)
{
int ret;
ret = hibmc_hw_map(priv);
if (ret)
return ret;
hibmc_hw_config(priv);
return 0;
}
static int hibmc_unload(struct drm_device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev->dev);
drm_atomic_helper_shutdown(dev);
free_irq(pdev->irq, dev);
pci_disable_msi(to_pci_dev(dev->dev));
return 0;
}
static int hibmc_load(struct drm_device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev->dev);
struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
int ret;
ret = hibmc_hw_init(priv);
if (ret)
goto err;
ret = drmm_vram_helper_init(dev, pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0));
if (ret) {
drm_err(dev, "Error initializing VRAM MM; %d\n", ret);
goto err;
}
ret = hibmc_kms_init(priv);
if (ret)
goto err;
ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
if (ret) {
drm_err(dev, "failed to initialize vblank: %d\n", ret);
goto err;
}
ret = pci_enable_msi(pdev);
if (ret) {
drm_warn(dev, "enabling MSI failed: %d\n", ret);
} else {
/* PCI devices require shared interrupts. */
ret = request_irq(pdev->irq, hibmc_interrupt, IRQF_SHARED,
dev->driver->name, dev);
if (ret)
drm_warn(dev, "install irq failed: %d\n", ret);
}
/* reset all the states of crtc/plane/encoder/connector */
drm_mode_config_reset(dev);
return 0;
err:
hibmc_unload(dev);
drm_err(dev, "failed to initialize drm driver: %d\n", ret);
return ret;
}
static int hibmc_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct hibmc_drm_private *priv;
struct drm_device *dev;
int ret;
ret = aperture_remove_conflicting_pci_devices(pdev, hibmc_driver.name);
if (ret)
return ret;
priv = devm_drm_dev_alloc(&pdev->dev, &hibmc_driver,
struct hibmc_drm_private, dev);
if (IS_ERR(priv)) {
DRM_ERROR("failed to allocate drm_device\n");
return PTR_ERR(priv);
}
dev = &priv->dev;
pci_set_drvdata(pdev, dev);
ret = pcim_enable_device(pdev);
if (ret) {
drm_err(dev, "failed to enable pci device: %d\n", ret);
goto err_return;
}
ret = hibmc_load(dev);
if (ret) {
drm_err(dev, "failed to load hibmc: %d\n", ret);
goto err_return;
}
ret = drm_dev_register(dev, 0);
if (ret) {
drm_err(dev, "failed to register drv for userspace access: %d\n",
ret);
goto err_unload;
}
drm_client_setup(dev, NULL);
return 0;
err_unload:
hibmc_unload(dev);
err_return:
return ret;
}
static void hibmc_pci_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
drm_dev_unregister(dev);
hibmc_unload(dev);
}
static void hibmc_pci_shutdown(struct pci_dev *pdev)
{
drm_atomic_helper_shutdown(pci_get_drvdata(pdev));
}
static const struct pci_device_id hibmc_pci_table[] = {
{ PCI_VDEVICE(HUAWEI, 0x1711) },
{0,}
};
static struct pci_driver hibmc_pci_driver = {
.name = "hibmc-drm",
.id_table = hibmc_pci_table,
.probe = hibmc_pci_probe,
.remove = hibmc_pci_remove,
.shutdown = hibmc_pci_shutdown,
.driver.pm = &hibmc_pm_ops,
};
drm_module_pci_driver(hibmc_pci_driver);
MODULE_DEVICE_TABLE(pci, hibmc_pci_table);
MODULE_AUTHOR("RongrongZou <[email protected]>");
MODULE_DESCRIPTION("DRM Driver for Hisilicon Hibmc");
MODULE_LICENSE("GPL v2");
|
/*
* Copyright 2014 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Alex Deucher
*/
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"
#include "vi.h"
#include "vid.h"
#include "oss/oss_2_4_d.h"
#include "oss/oss_2_4_sh_mask.h"
#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"
#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_enum.h"
#include "gca/gfx_8_0_sh_mask.h"
#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"
#include "iceland_sdma_pkt_open.h"
#include "ivsrcid/ivsrcid_vislands30.h"
static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev);
MODULE_FIRMWARE("amdgpu/topaz_sdma.bin");
MODULE_FIRMWARE("amdgpu/topaz_sdma1.bin");
static const u32 sdma_offsets[SDMA_MAX_INSTANCE] = {
SDMA0_REGISTER_OFFSET,
SDMA1_REGISTER_OFFSET
};
static const u32 golden_settings_iceland_a11[] = {
mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
};
static const u32 iceland_mgcg_cgcg_init[] = {
mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
};
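/*
 * The tables above are {register, and_mask, or_value} triples consumed by
 * amdgpu_device_program_register_sequence(): each register is read, the
 * and_mask bits are cleared, the or_value bits are OR'd in, and the result
 * is written back.
 */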
/*
* sDMA - System DMA
* Starting with CIK, the GPU has new asynchronous
* DMA engines. These engines are used for compute
* and gfx. There are two DMA engines (SDMA0, SDMA1)
* and each one supports 1 ring buffer used for gfx
* and 2 queues used for compute.
*
* The programming model is very similar to the CP
 * (ring buffer, IBs, etc.), but sDMA has its own
* packet format that is different from the PM4 format
* used by the CP. sDMA supports copying data, writing
* embedded data, solid fills, and a number of other
* things. It also has support for tiling/detiling of
* buffers.
*/
static void sdma_v2_4_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_TOPAZ:
amdgpu_device_program_register_sequence(adev,
iceland_mgcg_cgcg_init,
ARRAY_SIZE(iceland_mgcg_cgcg_init));
amdgpu_device_program_register_sequence(adev,
golden_settings_iceland_a11,
ARRAY_SIZE(golden_settings_iceland_a11));
break;
default:
break;
}
}
static void sdma_v2_4_free_microcode(struct amdgpu_device *adev)
{
int i;
for (i = 0; i < adev->sdma.num_instances; i++)
amdgpu_ucode_release(&adev->sdma.instance[i].fw);
}
/**
* sdma_v2_4_init_microcode - load ucode images from disk
*
* @adev: amdgpu_device pointer
*
* Use the firmware interface to load the ucode images into
* the driver (not loaded into hw).
* Returns 0 on success, error on failure.
*/
static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
{
const char *chip_name;
int err = 0, i;
struct amdgpu_firmware_info *info = NULL;
const struct common_firmware_header *header = NULL;
const struct sdma_firmware_header_v1_0 *hdr;
DRM_DEBUG("\n");
switch (adev->asic_type) {
case CHIP_TOPAZ:
chip_name = "topaz";
break;
default:
BUG();
}
for (i = 0; i < adev->sdma.num_instances; i++) {
if (i == 0)
err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw,
"amdgpu/%s_sdma.bin", chip_name);
else
err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw,
"amdgpu/%s_sdma1.bin", chip_name);
if (err)
goto out;
hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
if (adev->sdma.instance[i].feature_version >= 20)
adev->sdma.instance[i].burst_nop = true;
if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU) {
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
info->fw = adev->sdma.instance[i].fw;
header = (const struct common_firmware_header *)info->fw->data;
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
}
}
out:
if (err) {
pr_err("sdma_v2_4: Failed to load firmware \"%s_sdma%s.bin\"\n",
chip_name, i == 0 ? "" : "1");
for (i = 0; i < adev->sdma.num_instances; i++)
amdgpu_ucode_release(&adev->sdma.instance[i].fw);
}
return err;
}
/**
* sdma_v2_4_ring_get_rptr - get the current read pointer
*
* @ring: amdgpu ring pointer
*
* Get the current rptr from the hardware (VI+).
*/
static uint64_t sdma_v2_4_ring_get_rptr(struct amdgpu_ring *ring)
{
/* XXX check if swapping is necessary on BE */
return *ring->rptr_cpu_addr >> 2;
}
/**
* sdma_v2_4_ring_get_wptr - get the current write pointer
*
* @ring: amdgpu ring pointer
*
* Get the current wptr from the hardware (VI+).
*/
static uint64_t sdma_v2_4_ring_get_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me]) >> 2;
return wptr;
}
/**
* sdma_v2_4_ring_set_wptr - commit the write pointer
*
* @ring: amdgpu ring pointer
*
* Write the wptr back to the hardware (VI+).
*/
static void sdma_v2_4_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me], ring->wptr << 2);
}
static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
int i;
for (i = 0; i < count; i++)
if (sdma && sdma->burst_nop && (i == 0))
amdgpu_ring_write(ring, ring->funcs->nop |
SDMA_PKT_NOP_HEADER_COUNT(count - 1));
else
amdgpu_ring_write(ring, ring->funcs->nop);
}
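/*
 * Example: insert_nop(ring, 4) on a ring with burst_nop support emits one
 * NOP header carrying COUNT = 3 followed by three plain NOP dwords, so the
 * ring still advances by exactly 4 dwords.
 */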
/**
* sdma_v2_4_ring_emit_ib - Schedule an IB on the DMA engine
*
* @ring: amdgpu ring pointer
* @job: job to retrieve vmid from
* @ib: IB object to schedule
* @flags: unused
*
* Schedule an IB in the DMA ring (VI).
*/
static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_job *job,
struct amdgpu_ib *ib,
uint32_t flags)
{
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
	/* IB packet must end on an 8 DW boundary */
sdma_v2_4_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
/* base must be 32 byte aligned */
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
amdgpu_ring_write(ring, ib->length_dw);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, 0);
}
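/*
 * Note on the padding in sdma_v2_4_ring_emit_ib(): the INDIRECT packet is
 * 6 dwords, so padding wptr up to 2 (mod 8) makes the packet end exactly
 * on an 8-dword boundary.
 */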
/**
* sdma_v2_4_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
*
* @ring: amdgpu ring pointer
*
* Emit an hdp flush packet on the requested DMA ring.
*/
static void sdma_v2_4_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
u32 ref_and_mask = 0;
if (ring->me == 0)
ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1);
else
ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1);
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE << 2);
amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ << 2);
amdgpu_ring_write(ring, ref_and_mask); /* reference */
amdgpu_ring_write(ring, ref_and_mask); /* mask */
amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}
/**
* sdma_v2_4_ring_emit_fence - emit a fence on the DMA ring
*
* @ring: amdgpu ring pointer
* @addr: address
* @seq: sequence number
* @flags: fence related flags
*
* Add a DMA fence packet to the ring to write
* the fence seq number and DMA trap packet to generate
* an interrupt if needed (VI).
*/
static void sdma_v2_4_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
unsigned flags)
{
bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
/* write the fence */
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
amdgpu_ring_write(ring, lower_32_bits(addr));
amdgpu_ring_write(ring, upper_32_bits(addr));
amdgpu_ring_write(ring, lower_32_bits(seq));
/* optionally write high bits as well */
if (write64bit) {
addr += 4;
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
amdgpu_ring_write(ring, lower_32_bits(addr));
amdgpu_ring_write(ring, upper_32_bits(addr));
amdgpu_ring_write(ring, upper_32_bits(seq));
}
/* generate an interrupt */
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
}
/**
* sdma_v2_4_gfx_stop - stop the gfx async dma engines
*
* @adev: amdgpu_device pointer
*
* Stop the gfx async dma ring buffers (VI).
*/
static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)
{
u32 rb_cntl, ib_cntl;
int i;
for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]);
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
}
}
/**
* sdma_v2_4_rlc_stop - stop the compute async dma engines
*
* @adev: amdgpu_device pointer
*
* Stop the compute async dma queues (VI).
*/
static void sdma_v2_4_rlc_stop(struct amdgpu_device *adev)
{
/* XXX todo */
}
/**
* sdma_v2_4_enable - stop the async dma engines
*
* @adev: amdgpu_device pointer
* @enable: enable/disable the DMA MEs.
*
* Halt or unhalt the async dma engines (VI).
*/
static void sdma_v2_4_enable(struct amdgpu_device *adev, bool enable)
{
u32 f32_cntl;
int i;
if (!enable) {
sdma_v2_4_gfx_stop(adev);
sdma_v2_4_rlc_stop(adev);
}
for (i = 0; i < adev->sdma.num_instances; i++) {
f32_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
if (enable)
f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 0);
else
f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 1);
WREG32(mmSDMA0_F32_CNTL + sdma_offsets[i], f32_cntl);
}
}
/**
* sdma_v2_4_gfx_resume - setup and start the async dma engines
*
* @adev: amdgpu_device pointer
*
* Set up the gfx DMA ring buffers and enable them (VI).
* Returns 0 for success, error for failure.
*/
static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
u32 rb_cntl, ib_cntl;
u32 rb_bufsz;
int i, j, r;
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
mutex_lock(&adev->srbm_mutex);
for (j = 0; j < 16; j++) {
vi_srbm_select(adev, 0, 0, 0, j);
/* SDMA GFX */
WREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i], 0);
WREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i], 0);
}
vi_srbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
WREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i],
adev->gfx.config.gb_addr_config & 0x70);
WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);
/* Set ring buffer size in dwords */
rb_bufsz = order_base_2(ring->ring_size / 4);
rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
#ifdef __BIG_ENDIAN
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
RPTR_WRITEBACK_SWAP_ENABLE, 1);
#endif
WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
/* Initialize the ring buffer's read and write pointers */
WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);
/* set the wb address whether it's enabled or not */
WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF);
WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i],
lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
WREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
WREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i], ring->gpu_addr >> 40);
ring->wptr = 0;
WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], ring->wptr << 2);
/* enable DMA RB */
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]);
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
#ifdef __BIG_ENDIAN
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
#endif
/* enable DMA IBs */
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
}
sdma_v2_4_enable(adev, true);
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
r = amdgpu_ring_test_helper(ring);
if (r)
return r;
}
return 0;
}
/**
* sdma_v2_4_rlc_resume - setup and start the async dma engines
*
* @adev: amdgpu_device pointer
*
* Set up the compute DMA queues and enable them (VI).
* Returns 0 for success, error for failure.
*/
static int sdma_v2_4_rlc_resume(struct amdgpu_device *adev)
{
/* XXX todo */
return 0;
}
/**
* sdma_v2_4_start - setup and start the async dma engines
*
* @adev: amdgpu_device pointer
*
* Set up the DMA engines and enable them (VI).
* Returns 0 for success, error for failure.
*/
static int sdma_v2_4_start(struct amdgpu_device *adev)
{
int r;
	/* halt the engine before programming */
sdma_v2_4_enable(adev, false);
/* start the gfx rings and rlc compute queues */
r = sdma_v2_4_gfx_resume(adev);
if (r)
return r;
r = sdma_v2_4_rlc_resume(adev);
if (r)
return r;
return 0;
}
/**
* sdma_v2_4_ring_test_ring - simple async dma engine test
*
* @ring: amdgpu_ring structure holding ring information
*
 * Test the DMA engine by using it to write a value to memory (VI).
* Returns 0 for success, error for failure.
*/
static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
unsigned i;
unsigned index;
int r;
u32 tmp;
u64 gpu_addr;
r = amdgpu_device_wb_get(adev, &index);
if (r)
return r;
gpu_addr = adev->wb.gpu_addr + (index * 4);
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
r = amdgpu_ring_alloc(ring, 5);
if (r)
goto error_free_wb;
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
amdgpu_ring_write(ring, 0xDEADBEEF);
amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF)
break;
udelay(1);
}
if (i >= adev->usec_timeout)
r = -ETIMEDOUT;
error_free_wb:
amdgpu_device_wb_free(adev, index);
return r;
}
/**
* sdma_v2_4_ring_test_ib - test an IB on the DMA engine
*
* @ring: amdgpu_ring structure holding ring information
* @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
*
* Test a simple IB in the DMA ring (VI).
* Returns 0 on success, error on failure.
*/
static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
struct dma_fence *f = NULL;
unsigned index;
u32 tmp = 0;
u64 gpu_addr;
long r;
r = amdgpu_device_wb_get(adev, &index);
if (r)
return r;
gpu_addr = adev->wb.gpu_addr + (index * 4);
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, 256,
AMDGPU_IB_POOL_DIRECT, &ib);
if (r)
goto err0;
ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
ib.ptr[1] = lower_32_bits(gpu_addr);
ib.ptr[2] = upper_32_bits(gpu_addr);
ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1);
ib.ptr[4] = 0xDEADBEEF;
ib.ptr[5] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
ib.ptr[6] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
ib.ptr[7] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
ib.length_dw = 8;
r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
if (r)
goto err1;
r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
r = -ETIMEDOUT;
goto err1;
} else if (r < 0) {
goto err1;
}
tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF)
r = 0;
else
r = -EINVAL;
err1:
amdgpu_ib_free(adev, &ib, NULL);
dma_fence_put(f);
err0:
amdgpu_device_wb_free(adev, index);
return r;
}
/**
* sdma_v2_4_vm_copy_pte - update PTEs by copying them from the GART
*
* @ib: indirect buffer to fill with commands
* @pe: addr of the page entry
* @src: src addr to copy from
* @count: number of page entries to update
*
 * Update PTEs by copying them from the GART using sDMA (VI).
*/
static void sdma_v2_4_vm_copy_pte(struct amdgpu_ib *ib,
uint64_t pe, uint64_t src,
unsigned count)
{
unsigned bytes = count * 8;
ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
ib->ptr[ib->length_dw++] = bytes;
ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
ib->ptr[ib->length_dw++] = lower_32_bits(src);
ib->ptr[ib->length_dw++] = upper_32_bits(src);
ib->ptr[ib->length_dw++] = lower_32_bits(pe);
ib->ptr[ib->length_dw++] = upper_32_bits(pe);
}
/**
* sdma_v2_4_vm_write_pte - update PTEs by writing them manually
*
* @ib: indirect buffer to fill with commands
* @pe: addr of the page entry
* @value: dst addr to write into pe
* @count: number of page entries to update
* @incr: increase next addr by incr bytes
*
 * Update PTEs by writing them manually using sDMA (VI).
*/
static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
uint64_t value, unsigned count,
uint32_t incr)
{
unsigned ndw = count * 2;
ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
ib->ptr[ib->length_dw++] = upper_32_bits(pe);
ib->ptr[ib->length_dw++] = ndw;
for (; ndw > 0; ndw -= 2) {
ib->ptr[ib->length_dw++] = lower_32_bits(value);
ib->ptr[ib->length_dw++] = upper_32_bits(value);
value += incr;
}
}
/**
* sdma_v2_4_vm_set_pte_pde - update the page tables using sDMA
*
* @ib: indirect buffer to fill with commands
* @pe: addr of the page entry
* @addr: dst addr to write into pe
* @count: number of page entries to update
* @incr: increase next addr by incr bytes
* @flags: access flags
*
 * Update the page tables using sDMA (VI).
*/
static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint64_t flags)
{
/* for physically contiguous pages (vram) */
ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE);
ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
ib->ptr[ib->length_dw++] = upper_32_bits(pe);
ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
ib->ptr[ib->length_dw++] = upper_32_bits(flags);
ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
ib->ptr[ib->length_dw++] = upper_32_bits(addr);
ib->ptr[ib->length_dw++] = incr; /* increment size */
ib->ptr[ib->length_dw++] = 0;
ib->ptr[ib->length_dw++] = count; /* number of entries */
}
/**
* sdma_v2_4_ring_pad_ib - pad the IB to the required number of dw
*
* @ring: amdgpu_ring structure holding ring information
* @ib: indirect buffer to fill with padding
*
*/
static void sdma_v2_4_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
u32 pad_count;
int i;
pad_count = (-ib->length_dw) & 7;
for (i = 0; i < pad_count; i++)
if (sdma && sdma->burst_nop && (i == 0))
ib->ptr[ib->length_dw++] =
SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
else
ib->ptr[ib->length_dw++] =
SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
}
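/*
 * Example: an IB with length_dw = 5 gets pad_count = (-5) & 7 = 3 NOP
 * dwords, bringing the IB up to the required multiple of 8 dwords.
 */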
/**
* sdma_v2_4_ring_emit_pipeline_sync - sync the pipeline
*
* @ring: amdgpu_ring pointer
*
 * Make sure all previous operations are completed (VI).
*/
static void sdma_v2_4_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
uint32_t seq = ring->fence_drv.sync_seq;
uint64_t addr = ring->fence_drv.gpu_addr;
/* wait for idle */
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1));
amdgpu_ring_write(ring, addr & 0xfffffffc);
amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
amdgpu_ring_write(ring, seq); /* reference */
amdgpu_ring_write(ring, 0xffffffff); /* mask */
amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
}
/**
 * sdma_v2_4_ring_emit_vm_flush - vm flush using sDMA
*
* @ring: amdgpu_ring pointer
* @vmid: vmid number to use
* @pd_addr: address
*
* Update the page table base and flush the VM TLB
* using sDMA (VI).
*/
static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vmid, uint64_t pd_addr)
{
amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
/* wait for flush */
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
SDMA_PKT_POLL_REGMEM_HEADER_FUNC(0)); /* always */
amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, 0); /* reference */
amdgpu_ring_write(ring, 0); /* mask */
amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}
static void sdma_v2_4_ring_emit_wreg(struct amdgpu_ring *ring,
uint32_t reg, uint32_t val)
{
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
amdgpu_ring_write(ring, reg);
amdgpu_ring_write(ring, val);
}
static int sdma_v2_4_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
int r;
adev->sdma.num_instances = SDMA_MAX_INSTANCE;
r = sdma_v2_4_init_microcode(adev);
if (r)
return r;
sdma_v2_4_set_ring_funcs(adev);
sdma_v2_4_set_buffer_funcs(adev);
sdma_v2_4_set_vm_pte_funcs(adev);
sdma_v2_4_set_irq_funcs(adev);
return 0;
}
static int sdma_v2_4_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
int r, i;
struct amdgpu_device *adev = ip_block->adev;
/* SDMA trap event */
r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_TRAP,
&adev->sdma.trap_irq);
if (r)
return r;
/* SDMA Privileged inst */
r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 241,
&adev->sdma.illegal_inst_irq);
if (r)
return r;
/* SDMA Privileged inst */
r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_SRBM_WRITE,
&adev->sdma.illegal_inst_irq);
if (r)
return r;
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
ring->ring_obj = NULL;
ring->use_doorbell = false;
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
(i == 0) ? AMDGPU_SDMA_IRQ_INSTANCE0 :
AMDGPU_SDMA_IRQ_INSTANCE1,
AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
}
return r;
}
static int sdma_v2_4_sw_fini(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
int i;
for (i = 0; i < adev->sdma.num_instances; i++)
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
sdma_v2_4_free_microcode(adev);
return 0;
}
static int sdma_v2_4_hw_init(struct amdgpu_ip_block *ip_block)
{
int r;
struct amdgpu_device *adev = ip_block->adev;
sdma_v2_4_init_golden_registers(adev);
r = sdma_v2_4_start(adev);
if (r)
return r;
return r;
}
static int sdma_v2_4_hw_fini(struct amdgpu_ip_block *ip_block)
{
sdma_v2_4_enable(ip_block->adev, false);
return 0;
}
static int sdma_v2_4_suspend(struct amdgpu_ip_block *ip_block)
{
return sdma_v2_4_hw_fini(ip_block);
}
static int sdma_v2_4_resume(struct amdgpu_ip_block *ip_block)
{
return sdma_v2_4_hw_init(ip_block);
}
static bool sdma_v2_4_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 tmp = RREG32(mmSRBM_STATUS2);
if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK |
SRBM_STATUS2__SDMA1_BUSY_MASK))
return false;
return true;
}
static int sdma_v2_4_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned i;
u32 tmp;
struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK |
SRBM_STATUS2__SDMA1_BUSY_MASK);
if (!tmp)
return 0;
udelay(1);
}
return -ETIMEDOUT;
}
static int sdma_v2_4_soft_reset(struct amdgpu_ip_block *ip_block)
{
u32 srbm_soft_reset = 0;
struct amdgpu_device *adev = ip_block->adev;
u32 tmp = RREG32(mmSRBM_STATUS2);
if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) {
/* sdma0 */
tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0);
WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
}
if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) {
/* sdma1 */
tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0);
WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
}
if (srbm_soft_reset) {
tmp = RREG32(mmSRBM_SOFT_RESET);
tmp |= srbm_soft_reset;
dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
WREG32(mmSRBM_SOFT_RESET, tmp);
tmp = RREG32(mmSRBM_SOFT_RESET);
udelay(50);
tmp &= ~srbm_soft_reset;
WREG32(mmSRBM_SOFT_RESET, tmp);
tmp = RREG32(mmSRBM_SOFT_RESET);
/* Wait a little for things to settle down */
udelay(50);
}
return 0;
}
static int sdma_v2_4_set_trap_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
enum amdgpu_interrupt_state state)
{
u32 sdma_cntl;
switch (type) {
case AMDGPU_SDMA_IRQ_INSTANCE0:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0);
WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
break;
case AMDGPU_IRQ_STATE_ENABLE:
sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1);
WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
break;
default:
break;
}
break;
case AMDGPU_SDMA_IRQ_INSTANCE1:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0);
WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
break;
case AMDGPU_IRQ_STATE_ENABLE:
sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1);
WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
break;
default:
break;
}
break;
default:
break;
}
return 0;
}
static int sdma_v2_4_process_trap_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
u8 instance_id, queue_id;
instance_id = (entry->ring_id & 0x3) >> 0;
queue_id = (entry->ring_id & 0xc) >> 2;
DRM_DEBUG("IH: SDMA trap\n");
switch (instance_id) {
case 0:
switch (queue_id) {
case 0:
amdgpu_fence_process(&adev->sdma.instance[0].ring);
break;
case 1:
/* XXX compute */
break;
case 2:
/* XXX compute */
break;
}
break;
case 1:
switch (queue_id) {
case 0:
amdgpu_fence_process(&adev->sdma.instance[1].ring);
break;
case 1:
/* XXX compute */
break;
case 2:
/* XXX compute */
break;
}
break;
}
return 0;
}
static int sdma_v2_4_process_illegal_inst_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
u8 instance_id, queue_id;
DRM_ERROR("Illegal instruction in SDMA command stream\n");
instance_id = (entry->ring_id & 0x3) >> 0;
queue_id = (entry->ring_id & 0xc) >> 2;
if (instance_id <= 1 && queue_id == 0)
drm_sched_fault(&adev->sdma.instance[instance_id].ring.sched);
return 0;
}
static int sdma_v2_4_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
/* XXX handled via the smc on VI */
return 0;
}
static int sdma_v2_4_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
return 0;
}
static const struct amd_ip_funcs sdma_v2_4_ip_funcs = {
.name = "sdma_v2_4",
.early_init = sdma_v2_4_early_init,
.sw_init = sdma_v2_4_sw_init,
.sw_fini = sdma_v2_4_sw_fini,
.hw_init = sdma_v2_4_hw_init,
.hw_fini = sdma_v2_4_hw_fini,
.suspend = sdma_v2_4_suspend,
.resume = sdma_v2_4_resume,
.is_idle = sdma_v2_4_is_idle,
.wait_for_idle = sdma_v2_4_wait_for_idle,
.soft_reset = sdma_v2_4_soft_reset,
.set_clockgating_state = sdma_v2_4_set_clockgating_state,
.set_powergating_state = sdma_v2_4_set_powergating_state,
};
static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
.type = AMDGPU_RING_TYPE_SDMA,
.align_mask = 0xf,
.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
.support_64bit_ptrs = false,
.secure_submission_supported = true,
.get_rptr = sdma_v2_4_ring_get_rptr,
.get_wptr = sdma_v2_4_ring_get_wptr,
.set_wptr = sdma_v2_4_ring_set_wptr,
.emit_frame_size =
6 + /* sdma_v2_4_ring_emit_hdp_flush */
3 + /* hdp invalidate */
6 + /* sdma_v2_4_ring_emit_pipeline_sync */
VI_FLUSH_GPU_TLB_NUM_WREG * 3 + 6 + /* sdma_v2_4_ring_emit_vm_flush */
10 + 10 + 10, /* sdma_v2_4_ring_emit_fence x3 for user fence, vm fence */
.emit_ib_size = 7 + 6, /* sdma_v2_4_ring_emit_ib */
.emit_ib = sdma_v2_4_ring_emit_ib,
.emit_fence = sdma_v2_4_ring_emit_fence,
.emit_pipeline_sync = sdma_v2_4_ring_emit_pipeline_sync,
.emit_vm_flush = sdma_v2_4_ring_emit_vm_flush,
.emit_hdp_flush = sdma_v2_4_ring_emit_hdp_flush,
.test_ring = sdma_v2_4_ring_test_ring,
.test_ib = sdma_v2_4_ring_test_ib,
.insert_nop = sdma_v2_4_ring_insert_nop,
.pad_ib = sdma_v2_4_ring_pad_ib,
.emit_wreg = sdma_v2_4_ring_emit_wreg,
};
static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev)
{
int i;
for (i = 0; i < adev->sdma.num_instances; i++) {
adev->sdma.instance[i].ring.funcs = &sdma_v2_4_ring_funcs;
adev->sdma.instance[i].ring.me = i;
}
}
static const struct amdgpu_irq_src_funcs sdma_v2_4_trap_irq_funcs = {
.set = sdma_v2_4_set_trap_irq_state,
.process = sdma_v2_4_process_trap_irq,
};
static const struct amdgpu_irq_src_funcs sdma_v2_4_illegal_inst_irq_funcs = {
.process = sdma_v2_4_process_illegal_inst_irq,
};
static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev)
{
adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
adev->sdma.trap_irq.funcs = &sdma_v2_4_trap_irq_funcs;
adev->sdma.illegal_inst_irq.funcs = &sdma_v2_4_illegal_inst_irq_funcs;
}
/**
* sdma_v2_4_emit_copy_buffer - copy buffer using the sDMA engine
*
* @ib: indirect buffer to copy to
* @src_offset: src GPU address
* @dst_offset: dst GPU address
* @byte_count: number of bytes to xfer
* @copy_flags: unused
*
* Copy GPU buffers using the DMA engine (VI).
* Used by the amdgpu ttm implementation to move pages if
* registered as the asic copy callback.
*/
static void sdma_v2_4_emit_copy_buffer(struct amdgpu_ib *ib,
uint64_t src_offset,
uint64_t dst_offset,
uint32_t byte_count,
uint32_t copy_flags)
{
ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
ib->ptr[ib->length_dw++] = byte_count;
ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
}
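/*
 * For reference, the linear-copy packet emitted above is seven dwords:
 *
 *	dw0: header (op = SDMA_OP_COPY, sub-op = SDMA_SUBOP_COPY_LINEAR)
 *	dw1: byte count
 *	dw2: src/dst endian swap (0)
 *	dw3: src address, low 32 bits
 *	dw4: src address, high 32 bits
 *	dw5: dst address, low 32 bits
 *	dw6: dst address, high 32 bits
 *
 * This matches .copy_num_dw = 7 in sdma_v2_4_buffer_funcs below.
 */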
/**
* sdma_v2_4_emit_fill_buffer - fill buffer using the sDMA engine
*
 * @ib: indirect buffer to emit the fill packet into
* @src_data: value to write to buffer
* @dst_offset: dst GPU address
* @byte_count: number of bytes to xfer
*
* Fill GPU buffers using the DMA engine (VI).
*/
static void sdma_v2_4_emit_fill_buffer(struct amdgpu_ib *ib,
uint32_t src_data,
uint64_t dst_offset,
uint32_t byte_count)
{
ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
ib->ptr[ib->length_dw++] = src_data;
ib->ptr[ib->length_dw++] = byte_count;
}
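/*
 * The constant-fill packet above is five dwords (header, dst low/high,
 * fill value, byte count); the .fill_num_dw = 7 reservation in
 * sdma_v2_4_buffer_funcs below is therefore a conservative upper bound.
 */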
static const struct amdgpu_buffer_funcs sdma_v2_4_buffer_funcs = {
.copy_max_bytes = 0x1fffff,
.copy_num_dw = 7,
.emit_copy_buffer = sdma_v2_4_emit_copy_buffer,
.fill_max_bytes = 0x1fffff,
.fill_num_dw = 7,
.emit_fill_buffer = sdma_v2_4_emit_fill_buffer,
};
static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev)
{
adev->mman.buffer_funcs = &sdma_v2_4_buffer_funcs;
adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
}
static const struct amdgpu_vm_pte_funcs sdma_v2_4_vm_pte_funcs = {
.copy_pte_num_dw = 7,
.copy_pte = sdma_v2_4_vm_copy_pte,
.write_pte = sdma_v2_4_vm_write_pte,
.set_pte_pde = sdma_v2_4_vm_set_pte_pde,
};
static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev)
{
unsigned i;
adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs;
for (i = 0; i < adev->sdma.num_instances; i++) {
adev->vm_manager.vm_pte_scheds[i] =
&adev->sdma.instance[i].ring.sched;
}
adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
}
const struct amdgpu_ip_block_version sdma_v2_4_ip_block = {
.type = AMD_IP_BLOCK_TYPE_SDMA,
.major = 2,
.minor = 4,
.rev = 0,
.funcs = &sdma_v2_4_ip_funcs,
};
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/******************************************************************************
*
* Name: hwxfsleep.c - ACPI Hardware Sleep/Wake External Interfaces
*
* Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
#define EXPORT_ACPI_INTERFACES
#include <acpi/acpi.h>
#include "accommon.h"
#define _COMPONENT ACPI_HARDWARE
ACPI_MODULE_NAME("hwxfsleep")
/* Local prototypes */
static acpi_status
acpi_hw_set_firmware_waking_vector(struct acpi_table_facs *facs,
acpi_physical_address physical_address,
acpi_physical_address physical_address64);
/*******************************************************************************
*
* FUNCTION: acpi_hw_set_firmware_waking_vector
*
* PARAMETERS: facs - Pointer to FACS table
* physical_address - 32-bit physical address of ACPI real mode
* entry point
* physical_address64 - 64-bit physical address of ACPI protected
* mode entry point
*
* RETURN: Status
*
* DESCRIPTION: Sets the firmware_waking_vector fields of the FACS
*
******************************************************************************/
static acpi_status
acpi_hw_set_firmware_waking_vector(struct acpi_table_facs *facs,
acpi_physical_address physical_address,
acpi_physical_address physical_address64)
{
ACPI_FUNCTION_TRACE(acpi_hw_set_firmware_waking_vector);
/*
* According to the ACPI specification 2.0c and later, the 64-bit
* waking vector should be cleared and the 32-bit waking vector should
* be used, unless we want the wake-up code to be called by the BIOS in
* Protected Mode. Some systems (for example HP dv5-1004nr) are known
* to fail to resume if the 64-bit vector is used.
*/
/* Set the 32-bit vector */
facs->firmware_waking_vector = (u32)physical_address;
if (facs->length > 32) {
if (facs->version >= 1) {
/* Set the 64-bit vector */
facs->xfirmware_waking_vector = physical_address64;
} else {
/* Clear the 64-bit vector if it exists */
facs->xfirmware_waking_vector = 0;
}
}
return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_set_firmware_waking_vector
*
* PARAMETERS: physical_address - 32-bit physical address of ACPI real mode
* entry point
* physical_address64 - 64-bit physical address of ACPI protected
* mode entry point
*
* RETURN: Status
*
* DESCRIPTION: Sets the firmware_waking_vector fields of the FACS
*
******************************************************************************/
acpi_status
acpi_set_firmware_waking_vector(acpi_physical_address physical_address,
acpi_physical_address physical_address64)
{
ACPI_FUNCTION_TRACE(acpi_set_firmware_waking_vector);
if (acpi_gbl_FACS) {
(void)acpi_hw_set_firmware_waking_vector(acpi_gbl_FACS,
physical_address,
physical_address64);
}
return_ACPI_STATUS(AE_OK);
}
ACPI_EXPORT_SYMBOL(acpi_set_firmware_waking_vector)
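/*
 * Illustrative usage (a sketch, not part of ACPICA): before entering S3,
 * an OSPM suspend path could publish a hypothetical real-mode wakeup
 * trampoline and keep firmware on the real-mode resume path by passing
 * zero as the 64-bit vector:
 *
 *	status = acpi_set_firmware_waking_vector(trampoline_phys, 0);
 *
 * Here trampoline_phys stands for the physical address of the OS resume
 * stub; it is a placeholder name, not an ACPICA symbol.
 */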
/*
* These functions are removed for the ACPI_REDUCED_HARDWARE case:
* acpi_enter_sleep_state_s4bios
*/
#if (!ACPI_REDUCED_HARDWARE)
/*******************************************************************************
*
* FUNCTION: acpi_enter_sleep_state_s4bios
*
* PARAMETERS: None
*
* RETURN: Status
*
* DESCRIPTION: Perform a S4 bios request.
* THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
*
******************************************************************************/
acpi_status acpi_enter_sleep_state_s4bios(void)
{
u32 in_value;
acpi_status status;
ACPI_FUNCTION_TRACE(acpi_enter_sleep_state_s4bios);
/* Clear the wake status bit (PM1) */
status =
acpi_write_bit_register(ACPI_BITREG_WAKE_STATUS, ACPI_CLEAR_STATUS);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
status = acpi_hw_clear_acpi_status();
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
/*
* 1) Disable all GPEs
* 2) Enable all wakeup GPEs
*/
status = acpi_hw_disable_all_gpes();
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
acpi_gbl_system_awake_and_running = FALSE;
status = acpi_hw_enable_all_wakeup_gpes();
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
status = acpi_hw_write_port(acpi_gbl_FADT.smi_command,
(u32)acpi_gbl_FADT.s4_bios_request, 8);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
do {
acpi_os_stall(ACPI_USEC_PER_MSEC);
status =
acpi_read_bit_register(ACPI_BITREG_WAKE_STATUS, &in_value);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
} while (!in_value);
return_ACPI_STATUS(AE_OK);
}
ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_s4bios)
#endif /* !ACPI_REDUCED_HARDWARE */
/*******************************************************************************
*
* FUNCTION: acpi_enter_sleep_state_prep
*
* PARAMETERS: sleep_state - Which sleep state to enter
*
* RETURN: Status
*
* DESCRIPTION: Prepare to enter a system sleep state.
* This function must execute with interrupts enabled.
* We break sleeping into 2 stages so that OSPM can handle
* various OS-specific tasks between the two steps.
*
******************************************************************************/
acpi_status acpi_enter_sleep_state_prep(u8 sleep_state)
{
acpi_status status;
struct acpi_object_list arg_list;
union acpi_object arg;
u32 sst_value;
ACPI_FUNCTION_TRACE(acpi_enter_sleep_state_prep);
status = acpi_get_sleep_type_data(sleep_state,
&acpi_gbl_sleep_type_a,
&acpi_gbl_sleep_type_b);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
status = acpi_get_sleep_type_data(ACPI_STATE_S0,
&acpi_gbl_sleep_type_a_s0,
&acpi_gbl_sleep_type_b_s0);
if (ACPI_FAILURE(status)) {
acpi_gbl_sleep_type_a_s0 = ACPI_SLEEP_TYPE_INVALID;
}
/* Execute the _PTS method (Prepare To Sleep) */
arg_list.count = 1;
arg_list.pointer = &arg;
arg.type = ACPI_TYPE_INTEGER;
arg.integer.value = sleep_state;
status =
acpi_evaluate_object(NULL, METHOD_PATHNAME__PTS, &arg_list, NULL);
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
return_ACPI_STATUS(status);
}
/* Setup the argument to the _SST method (System STatus) */
switch (sleep_state) {
case ACPI_STATE_S0:
sst_value = ACPI_SST_WORKING;
break;
case ACPI_STATE_S1:
case ACPI_STATE_S2:
case ACPI_STATE_S3:
sst_value = ACPI_SST_SLEEPING;
break;
case ACPI_STATE_S4:
sst_value = ACPI_SST_SLEEP_CONTEXT;
break;
default:
sst_value = ACPI_SST_INDICATOR_OFF; /* Default is off */
break;
}
/*
* Set the system indicators to show the desired sleep state.
* _SST is an optional method (return no error if not found)
*/
acpi_hw_execute_sleep_method(METHOD_PATHNAME__SST, sst_value);
return_ACPI_STATUS(AE_OK);
}
ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_prep)
/*******************************************************************************
*
* FUNCTION: acpi_enter_sleep_state
*
* PARAMETERS: sleep_state - Which sleep state to enter
*
* RETURN: Status
*
* DESCRIPTION: Enter a system sleep state
* THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
*
******************************************************************************/
acpi_status acpi_enter_sleep_state(u8 sleep_state)
{
acpi_status status;
ACPI_FUNCTION_TRACE(acpi_enter_sleep_state);
if ((acpi_gbl_sleep_type_a > ACPI_SLEEP_TYPE_MAX) ||
(acpi_gbl_sleep_type_b > ACPI_SLEEP_TYPE_MAX)) {
ACPI_ERROR((AE_INFO, "Sleep values out of range: A=0x%X B=0x%X",
acpi_gbl_sleep_type_a, acpi_gbl_sleep_type_b));
return_ACPI_STATUS(AE_AML_OPERAND_VALUE);
}
#if !ACPI_REDUCED_HARDWARE
if (!acpi_gbl_reduced_hardware)
status = acpi_hw_legacy_sleep(sleep_state);
else
#endif
status = acpi_hw_extended_sleep(sleep_state);
return_ACPI_STATUS(status);
}
ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state)
/*******************************************************************************
*
* FUNCTION: acpi_leave_sleep_state_prep
*
* PARAMETERS: sleep_state - Which sleep state we are exiting
*
* RETURN: Status
*
* DESCRIPTION: Perform the first state of OS-independent ACPI cleanup after a
* sleep. Called with interrupts DISABLED.
* We break wake/resume into 2 stages so that OSPM can handle
* various OS-specific tasks between the two steps.
*
******************************************************************************/
acpi_status acpi_leave_sleep_state_prep(u8 sleep_state)
{
acpi_status status;
ACPI_FUNCTION_TRACE(acpi_leave_sleep_state_prep);
#if !ACPI_REDUCED_HARDWARE
if (!acpi_gbl_reduced_hardware)
status = acpi_hw_legacy_wake_prep(sleep_state);
else
#endif
status = acpi_hw_extended_wake_prep(sleep_state);
return_ACPI_STATUS(status);
}
ACPI_EXPORT_SYMBOL(acpi_leave_sleep_state_prep)
/*******************************************************************************
*
* FUNCTION: acpi_leave_sleep_state
*
* PARAMETERS: sleep_state - Which sleep state we are exiting
*
* RETURN: Status
*
* DESCRIPTION: Perform OS-independent ACPI cleanup after a sleep
* Called with interrupts ENABLED.
*
******************************************************************************/
acpi_status acpi_leave_sleep_state(u8 sleep_state)
{
acpi_status status;
ACPI_FUNCTION_TRACE(acpi_leave_sleep_state);
#if !ACPI_REDUCED_HARDWARE
if (!acpi_gbl_reduced_hardware)
status = acpi_hw_legacy_wake(sleep_state);
else
#endif
status = acpi_hw_extended_wake(sleep_state);
return_ACPI_STATUS(status);
}
ACPI_EXPORT_SYMBOL(acpi_leave_sleep_state)
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _UAPI_VMCORE_H
#define _UAPI_VMCORE_H
#include <linux/types.h>
#define VMCOREDD_NOTE_NAME "LINUX"
#define VMCOREDD_MAX_NAME_BYTES 44
struct vmcoredd_header {
__u32 n_namesz; /* Name size */
__u32 n_descsz; /* Content size */
__u32 n_type; /* NT_VMCOREDD */
__u8 name[8]; /* LINUX\0\0\0 */
__u8 dump_name[VMCOREDD_MAX_NAME_BYTES]; /* Device dump's name */
};
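/*
 * Illustrative fill (a sketch; this header does not mandate it): a dump
 * collector emitting one device-dump note might populate the header as
 *
 *	struct vmcoredd_header h = {
 *		.n_namesz = sizeof(h.name),
 *		.n_descsz = VMCOREDD_MAX_NAME_BYTES + data_size,
 *		.n_type = NT_VMCOREDD,
 *		.name = VMCOREDD_NOTE_NAME,
 *	};
 *
 * NT_VMCOREDD comes from <linux/elf.h>; data_size is a hypothetical
 * payload length that follows dump_name in the note descriptor.
 */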
#endif /* _UAPI_VMCORE_H */
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Intel Corporation
#include <linux/device.h>
#include <linux/iopoll.h>
#include <linux/slab.h>
#include "ipu3.h"
#include "ipu3-css.h"
#include "ipu3-css-fw.h"
#include "ipu3-css-params.h"
#include "ipu3-dmamap.h"
#include "ipu3-tables.h"
/* IRQ configuration */
#define IMGU_IRQCTRL_IRQ_MASK (IMGU_IRQCTRL_IRQ_SP1 | \
IMGU_IRQCTRL_IRQ_SP2 | \
IMGU_IRQCTRL_IRQ_SW_PIN(0) | \
IMGU_IRQCTRL_IRQ_SW_PIN(1))
#define IPU3_CSS_FORMAT_BPP_DEN 50 /* Denominator */
/* Some sane limits for resolutions */
#define IPU3_CSS_MIN_RES 32
#define IPU3_CSS_MAX_H 3136
#define IPU3_CSS_MAX_W 4224
/* minimal envelope size (GDC in - out) should be 4 */
#define MIN_ENVELOPE 4
/*
 * Pre-allocated buffer sizes for the CSS ABI and for the auxiliary
 * frames after BDS and before GDC. These values should be tuned to be
 * big enough to avoid buffer re-allocation while streaming, in order
 * to lower streaming latency.
 */
#define CSS_ABI_SIZE 136
#define CSS_BDS_SIZE (4480 * 3200 * 3)
#define CSS_GDC_SIZE (4224 * 3200 * 12 / 8)
#define IPU3_CSS_QUEUE_TO_FLAGS(q) (1 << (q))
#define IPU3_CSS_FORMAT_FL_IN \
IPU3_CSS_QUEUE_TO_FLAGS(IPU3_CSS_QUEUE_IN)
#define IPU3_CSS_FORMAT_FL_OUT \
IPU3_CSS_QUEUE_TO_FLAGS(IPU3_CSS_QUEUE_OUT)
#define IPU3_CSS_FORMAT_FL_VF \
IPU3_CSS_QUEUE_TO_FLAGS(IPU3_CSS_QUEUE_VF)
/* Formats supported by IPU3 Camera Sub System */
static const struct imgu_css_format imgu_css_formats[] = {
{
.pixelformat = V4L2_PIX_FMT_NV12,
.colorspace = V4L2_COLORSPACE_SRGB,
.frame_format = IMGU_ABI_FRAME_FORMAT_NV12,
.osys_format = IMGU_ABI_OSYS_FORMAT_NV12,
.osys_tiling = IMGU_ABI_OSYS_TILING_NONE,
.chroma_decim = 4,
.width_align = IPU3_UAPI_ISP_VEC_ELEMS,
.flags = IPU3_CSS_FORMAT_FL_OUT | IPU3_CSS_FORMAT_FL_VF,
}, {
/* Each 32 bytes contains 25 10-bit pixels */
.pixelformat = V4L2_PIX_FMT_IPU3_SBGGR10,
.colorspace = V4L2_COLORSPACE_RAW,
.frame_format = IMGU_ABI_FRAME_FORMAT_RAW_PACKED,
.bayer_order = IMGU_ABI_BAYER_ORDER_BGGR,
.bit_depth = 10,
.width_align = 2 * IPU3_UAPI_ISP_VEC_ELEMS,
.flags = IPU3_CSS_FORMAT_FL_IN,
}, {
.pixelformat = V4L2_PIX_FMT_IPU3_SGBRG10,
.colorspace = V4L2_COLORSPACE_RAW,
.frame_format = IMGU_ABI_FRAME_FORMAT_RAW_PACKED,
.bayer_order = IMGU_ABI_BAYER_ORDER_GBRG,
.bit_depth = 10,
.width_align = 2 * IPU3_UAPI_ISP_VEC_ELEMS,
.flags = IPU3_CSS_FORMAT_FL_IN,
}, {
.pixelformat = V4L2_PIX_FMT_IPU3_SGRBG10,
.colorspace = V4L2_COLORSPACE_RAW,
.frame_format = IMGU_ABI_FRAME_FORMAT_RAW_PACKED,
.bayer_order = IMGU_ABI_BAYER_ORDER_GRBG,
.bit_depth = 10,
.width_align = 2 * IPU3_UAPI_ISP_VEC_ELEMS,
.flags = IPU3_CSS_FORMAT_FL_IN,
}, {
.pixelformat = V4L2_PIX_FMT_IPU3_SRGGB10,
.colorspace = V4L2_COLORSPACE_RAW,
.frame_format = IMGU_ABI_FRAME_FORMAT_RAW_PACKED,
.bayer_order = IMGU_ABI_BAYER_ORDER_RGGB,
.bit_depth = 10,
.width_align = 2 * IPU3_UAPI_ISP_VEC_ELEMS,
.flags = IPU3_CSS_FORMAT_FL_IN,
},
};
static const struct {
enum imgu_abi_queue_id qid;
size_t ptr_ofs;
} imgu_css_queues[IPU3_CSS_QUEUES] = {
[IPU3_CSS_QUEUE_IN] = {
IMGU_ABI_QUEUE_C_ID,
offsetof(struct imgu_abi_buffer, payload.frame.frame_data)
},
[IPU3_CSS_QUEUE_OUT] = {
IMGU_ABI_QUEUE_D_ID,
offsetof(struct imgu_abi_buffer, payload.frame.frame_data)
},
[IPU3_CSS_QUEUE_VF] = {
IMGU_ABI_QUEUE_E_ID,
offsetof(struct imgu_abi_buffer, payload.frame.frame_data)
},
[IPU3_CSS_QUEUE_STAT_3A] = {
IMGU_ABI_QUEUE_F_ID,
offsetof(struct imgu_abi_buffer, payload.s3a.data_ptr)
},
};
/* Initialize queue based on given format, adjust format as needed */
static int imgu_css_queue_init(struct imgu_css_queue *queue,
struct v4l2_pix_format_mplane *fmt, u32 flags)
{
struct v4l2_pix_format_mplane *const f = &queue->fmt.mpix;
unsigned int i;
u32 sizeimage;
INIT_LIST_HEAD(&queue->bufs);
queue->css_fmt = NULL; /* Disable */
if (!fmt)
return 0;
for (i = 0; i < ARRAY_SIZE(imgu_css_formats); i++) {
if (!(imgu_css_formats[i].flags & flags))
continue;
queue->css_fmt = &imgu_css_formats[i];
if (imgu_css_formats[i].pixelformat == fmt->pixelformat)
break;
}
if (!queue->css_fmt)
return -EINVAL; /* Could not find any suitable format */
queue->fmt.mpix = *fmt;
f->width = ALIGN(clamp_t(u32, f->width,
IPU3_CSS_MIN_RES, IPU3_CSS_MAX_W), 2);
f->height = ALIGN(clamp_t(u32, f->height,
IPU3_CSS_MIN_RES, IPU3_CSS_MAX_H), 2);
queue->width_pad = ALIGN(f->width, queue->css_fmt->width_align);
f->plane_fmt[0].bytesperline =
imgu_bytesperline(f->width, queue->css_fmt->frame_format);
sizeimage = f->height * f->plane_fmt[0].bytesperline;
if (queue->css_fmt->chroma_decim)
sizeimage += 2 * sizeimage / queue->css_fmt->chroma_decim;
f->plane_fmt[0].sizeimage = sizeimage;
f->field = V4L2_FIELD_NONE;
f->num_planes = 1;
f->colorspace = queue->css_fmt->colorspace;
f->flags = 0;
f->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
f->quantization = V4L2_QUANTIZATION_DEFAULT;
f->xfer_func = V4L2_XFER_FUNC_DEFAULT;
memset(f->reserved, 0, sizeof(f->reserved));
return 0;
}
static bool imgu_css_queue_enabled(struct imgu_css_queue *q)
{
return q->css_fmt;
}
/******************* css hw *******************/
/* In the style of writesl() defined in include/asm-generic/io.h */
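/*
 * Note: any trailing remainder of fewer than four bytes is not written;
 * callers are expected to pass dword-multiple sizes.
 */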
static inline void writes(const void *mem, ssize_t count, void __iomem *addr)
{
if (count >= 4) {
const u32 *buf = mem;
count /= 4;
do {
writel(*buf++, addr);
addr += 4;
} while (--count);
}
}
/* Wait until register `reg', masked with `mask', becomes `cmp' */
static int imgu_hw_wait(void __iomem *base, int reg, u32 mask, u32 cmp)
{
u32 val;
return readl_poll_timeout(base + reg, val, (val & mask) == cmp,
1000, 100 * 1000);
}
/* Initialize the IPU3 CSS hardware and associated h/w blocks */
int imgu_css_set_powerup(struct device *dev, void __iomem *base,
unsigned int freq)
{
u32 pm_ctrl, state, val;
dev_dbg(dev, "%s with freq %u\n", __func__, freq);
/* Clear the CSS busy signal */
readl(base + IMGU_REG_GP_BUSY);
writel(0, base + IMGU_REG_GP_BUSY);
/* Wait for idle signal */
if (imgu_hw_wait(base, IMGU_REG_STATE, IMGU_STATE_IDLE_STS,
IMGU_STATE_IDLE_STS)) {
dev_err(dev, "failed to set CSS idle\n");
goto fail;
}
/* Reset the css */
writel(readl(base + IMGU_REG_PM_CTRL) | IMGU_PM_CTRL_FORCE_RESET,
base + IMGU_REG_PM_CTRL);
usleep_range(200, 300);
	/* Prepare CSS */
pm_ctrl = readl(base + IMGU_REG_PM_CTRL);
state = readl(base + IMGU_REG_STATE);
dev_dbg(dev, "CSS pm_ctrl 0x%x state 0x%x (power %s)\n",
pm_ctrl, state, state & IMGU_STATE_POWER_DOWN ? "down" : "up");
/* Power up CSS using wrapper */
if (state & IMGU_STATE_POWER_DOWN) {
writel(IMGU_PM_CTRL_RACE_TO_HALT | IMGU_PM_CTRL_START,
base + IMGU_REG_PM_CTRL);
if (imgu_hw_wait(base, IMGU_REG_PM_CTRL,
IMGU_PM_CTRL_START, 0)) {
dev_err(dev, "failed to power up CSS\n");
goto fail;
}
usleep_range(2000, 3000);
} else {
writel(IMGU_PM_CTRL_RACE_TO_HALT, base + IMGU_REG_PM_CTRL);
}
/* Set the busy bit */
writel(readl(base + IMGU_REG_GP_BUSY) | 1, base + IMGU_REG_GP_BUSY);
/* Set CSS clock frequency */
pm_ctrl = readl(base + IMGU_REG_PM_CTRL);
val = pm_ctrl & ~(IMGU_PM_CTRL_CSS_PWRDN | IMGU_PM_CTRL_RST_AT_EOF);
writel(val, base + IMGU_REG_PM_CTRL);
writel(0, base + IMGU_REG_GP_BUSY);
if (imgu_hw_wait(base, IMGU_REG_STATE,
IMGU_STATE_PWRDNM_FSM_MASK, 0)) {
dev_err(dev, "failed to pwrdn CSS\n");
goto fail;
}
val = (freq / IMGU_SYSTEM_REQ_FREQ_DIVIDER) & IMGU_SYSTEM_REQ_FREQ_MASK;
writel(val, base + IMGU_REG_SYSTEM_REQ);
writel(1, base + IMGU_REG_GP_BUSY);
writel(readl(base + IMGU_REG_PM_CTRL) | IMGU_PM_CTRL_FORCE_HALT,
base + IMGU_REG_PM_CTRL);
if (imgu_hw_wait(base, IMGU_REG_STATE, IMGU_STATE_HALT_STS,
IMGU_STATE_HALT_STS)) {
dev_err(dev, "failed to halt CSS\n");
goto fail;
}
writel(readl(base + IMGU_REG_PM_CTRL) | IMGU_PM_CTRL_START,
base + IMGU_REG_PM_CTRL);
if (imgu_hw_wait(base, IMGU_REG_PM_CTRL, IMGU_PM_CTRL_START, 0)) {
dev_err(dev, "failed to start CSS\n");
goto fail;
}
writel(readl(base + IMGU_REG_PM_CTRL) | IMGU_PM_CTRL_FORCE_UNHALT,
base + IMGU_REG_PM_CTRL);
val = readl(base + IMGU_REG_PM_CTRL); /* get pm_ctrl */
val &= ~(IMGU_PM_CTRL_CSS_PWRDN | IMGU_PM_CTRL_RST_AT_EOF);
val |= pm_ctrl & (IMGU_PM_CTRL_CSS_PWRDN | IMGU_PM_CTRL_RST_AT_EOF);
writel(val, base + IMGU_REG_PM_CTRL);
return 0;
fail:
imgu_css_set_powerdown(dev, base);
return -EIO;
}
void imgu_css_set_powerdown(struct device *dev, void __iomem *base)
{
dev_dbg(dev, "%s\n", __func__);
/* wait for cio idle signal */
if (imgu_hw_wait(base, IMGU_REG_CIO_GATE_BURST_STATE,
IMGU_CIO_GATE_BURST_MASK, 0))
dev_warn(dev, "wait cio gate idle timeout");
/* wait for css idle signal */
if (imgu_hw_wait(base, IMGU_REG_STATE, IMGU_STATE_IDLE_STS,
IMGU_STATE_IDLE_STS))
dev_warn(dev, "wait css idle timeout\n");
/* do halt-halted handshake with css */
writel(1, base + IMGU_REG_GP_HALT);
if (imgu_hw_wait(base, IMGU_REG_STATE, IMGU_STATE_HALT_STS,
IMGU_STATE_HALT_STS))
dev_warn(dev, "failed to halt css");
/* de-assert the busy bit */
writel(0, base + IMGU_REG_GP_BUSY);
}
static void imgu_css_hw_enable_irq(struct imgu_css *css)
{
void __iomem *const base = css->base;
u32 val, i;
/* Set up interrupts */
/*
* Enable IRQ on the SP which signals that SP goes to idle
* (aka ready state) and set trigger to pulse
*/
val = readl(base + IMGU_REG_SP_CTRL(0)) | IMGU_CTRL_IRQ_READY;
writel(val, base + IMGU_REG_SP_CTRL(0));
writel(val | IMGU_CTRL_IRQ_CLEAR, base + IMGU_REG_SP_CTRL(0));
/* Enable IRQs from the IMGU wrapper */
writel(IMGU_REG_INT_CSS_IRQ, base + IMGU_REG_INT_ENABLE);
/* Clear */
writel(IMGU_REG_INT_CSS_IRQ, base + IMGU_REG_INT_STATUS);
/* Enable IRQs from main IRQ controller */
writel(~0, base + IMGU_REG_IRQCTRL_EDGE_NOT_PULSE(IMGU_IRQCTRL_MAIN));
writel(0, base + IMGU_REG_IRQCTRL_MASK(IMGU_IRQCTRL_MAIN));
writel(IMGU_IRQCTRL_IRQ_MASK,
base + IMGU_REG_IRQCTRL_EDGE(IMGU_IRQCTRL_MAIN));
writel(IMGU_IRQCTRL_IRQ_MASK,
base + IMGU_REG_IRQCTRL_ENABLE(IMGU_IRQCTRL_MAIN));
writel(IMGU_IRQCTRL_IRQ_MASK,
base + IMGU_REG_IRQCTRL_CLEAR(IMGU_IRQCTRL_MAIN));
writel(IMGU_IRQCTRL_IRQ_MASK,
base + IMGU_REG_IRQCTRL_MASK(IMGU_IRQCTRL_MAIN));
/* Wait for write complete */
readl(base + IMGU_REG_IRQCTRL_ENABLE(IMGU_IRQCTRL_MAIN));
/* Enable IRQs from SP0 and SP1 controllers */
for (i = IMGU_IRQCTRL_SP0; i <= IMGU_IRQCTRL_SP1; i++) {
writel(~0, base + IMGU_REG_IRQCTRL_EDGE_NOT_PULSE(i));
writel(0, base + IMGU_REG_IRQCTRL_MASK(i));
writel(IMGU_IRQCTRL_IRQ_MASK, base + IMGU_REG_IRQCTRL_EDGE(i));
writel(IMGU_IRQCTRL_IRQ_MASK,
base + IMGU_REG_IRQCTRL_ENABLE(i));
writel(IMGU_IRQCTRL_IRQ_MASK, base + IMGU_REG_IRQCTRL_CLEAR(i));
writel(IMGU_IRQCTRL_IRQ_MASK, base + IMGU_REG_IRQCTRL_MASK(i));
/* Wait for write complete */
readl(base + IMGU_REG_IRQCTRL_ENABLE(i));
}
}
static int imgu_css_hw_init(struct imgu_css *css)
{
	/* Stream monitors that must not report a valid (pending) status at init */
static const struct {
u32 reg;
u32 mask;
const char *name;
} stream_monitors[] = {
{
IMGU_REG_GP_SP1_STRMON_STAT,
IMGU_GP_STRMON_STAT_ISP_PORT_SP12ISP,
"ISP0 to SP0"
}, {
IMGU_REG_GP_ISP_STRMON_STAT,
IMGU_GP_STRMON_STAT_SP1_PORT_ISP2SP1,
"SP0 to ISP0"
}, {
IMGU_REG_GP_MOD_STRMON_STAT,
IMGU_GP_STRMON_STAT_MOD_PORT_ISP2DMA,
"ISP0 to DMA0"
}, {
IMGU_REG_GP_ISP_STRMON_STAT,
IMGU_GP_STRMON_STAT_ISP_PORT_DMA2ISP,
"DMA0 to ISP0"
}, {
IMGU_REG_GP_MOD_STRMON_STAT,
IMGU_GP_STRMON_STAT_MOD_PORT_CELLS2GDC,
"ISP0 to GDC0"
}, {
IMGU_REG_GP_MOD_STRMON_STAT,
IMGU_GP_STRMON_STAT_MOD_PORT_GDC2CELLS,
"GDC0 to ISP0"
}, {
IMGU_REG_GP_MOD_STRMON_STAT,
IMGU_GP_STRMON_STAT_MOD_PORT_SP12DMA,
"SP0 to DMA0"
}, {
IMGU_REG_GP_SP1_STRMON_STAT,
IMGU_GP_STRMON_STAT_SP1_PORT_DMA2SP1,
"DMA0 to SP0"
}, {
IMGU_REG_GP_MOD_STRMON_STAT,
IMGU_GP_STRMON_STAT_MOD_PORT_CELLS2GDC,
"SP0 to GDC0"
}, {
IMGU_REG_GP_MOD_STRMON_STAT,
IMGU_GP_STRMON_STAT_MOD_PORT_GDC2CELLS,
"GDC0 to SP0"
},
};
struct device *dev = css->dev;
void __iomem *const base = css->base;
u32 val, i;
/* Set instruction cache address and inv bit for ISP, SP, and SP1 */
for (i = 0; i < IMGU_NUM_SP; i++) {
struct imgu_fw_info *bi =
&css->fwp->binary_header[css->fw_sp[i]];
writel(css->binary[css->fw_sp[i]].daddr,
base + IMGU_REG_SP_ICACHE_ADDR(bi->type));
writel(readl(base + IMGU_REG_SP_CTRL(bi->type)) |
IMGU_CTRL_ICACHE_INV,
base + IMGU_REG_SP_CTRL(bi->type));
}
writel(css->binary[css->fw_bl].daddr, base + IMGU_REG_ISP_ICACHE_ADDR);
writel(readl(base + IMGU_REG_ISP_CTRL) | IMGU_CTRL_ICACHE_INV,
base + IMGU_REG_ISP_CTRL);
/* Check that IMGU hardware is ready */
if (!(readl(base + IMGU_REG_SP_CTRL(0)) & IMGU_CTRL_IDLE)) {
dev_err(dev, "SP is not idle\n");
return -EIO;
}
if (!(readl(base + IMGU_REG_ISP_CTRL) & IMGU_CTRL_IDLE)) {
dev_err(dev, "ISP is not idle\n");
return -EIO;
}
for (i = 0; i < ARRAY_SIZE(stream_monitors); i++) {
val = readl(base + stream_monitors[i].reg);
if (val & stream_monitors[i].mask) {
dev_err(dev, "error: Stream monitor %s is valid\n",
stream_monitors[i].name);
return -EIO;
}
}
/* Initialize GDC with default values */
for (i = 0; i < ARRAY_SIZE(imgu_css_gdc_lut[0]); i++) {
u32 val0 = imgu_css_gdc_lut[0][i] & IMGU_GDC_LUT_MASK;
u32 val1 = imgu_css_gdc_lut[1][i] & IMGU_GDC_LUT_MASK;
u32 val2 = imgu_css_gdc_lut[2][i] & IMGU_GDC_LUT_MASK;
u32 val3 = imgu_css_gdc_lut[3][i] & IMGU_GDC_LUT_MASK;
writel(val0 | (val1 << 16),
base + IMGU_REG_GDC_LUT_BASE + i * 8);
writel(val2 | (val3 << 16),
base + IMGU_REG_GDC_LUT_BASE + i * 8 + 4);
}
return 0;
}
/* Boot the given IPU3 CSS SP */
static int imgu_css_hw_start_sp(struct imgu_css *css, int sp)
{
void __iomem *const base = css->base;
struct imgu_fw_info *bi = &css->fwp->binary_header[css->fw_sp[sp]];
struct imgu_abi_sp_init_dmem_cfg dmem_cfg = {
.ddr_data_addr = css->binary[css->fw_sp[sp]].daddr
+ bi->blob.data_source,
.dmem_data_addr = bi->blob.data_target,
.dmem_bss_addr = bi->blob.bss_target,
.data_size = bi->blob.data_size,
.bss_size = bi->blob.bss_size,
.sp_id = sp,
};
writes(&dmem_cfg, sizeof(dmem_cfg), base +
IMGU_REG_SP_DMEM_BASE(sp) + bi->info.sp.init_dmem_data);
writel(bi->info.sp.sp_entry, base + IMGU_REG_SP_START_ADDR(sp));
writel(readl(base + IMGU_REG_SP_CTRL(sp))
| IMGU_CTRL_START | IMGU_CTRL_RUN, base + IMGU_REG_SP_CTRL(sp));
if (imgu_hw_wait(css->base, IMGU_REG_SP_DMEM_BASE(sp)
+ bi->info.sp.sw_state,
~0, IMGU_ABI_SP_SWSTATE_INITIALIZED))
return -EIO;
return 0;
}
/* Start the IPU3 CSS ImgU (Imaging Unit) and all the SPs */
static int imgu_css_hw_start(struct imgu_css *css)
{
static const u32 event_mask =
((1 << IMGU_ABI_EVTTYPE_OUT_FRAME_DONE) |
(1 << IMGU_ABI_EVTTYPE_2ND_OUT_FRAME_DONE) |
(1 << IMGU_ABI_EVTTYPE_VF_OUT_FRAME_DONE) |
(1 << IMGU_ABI_EVTTYPE_2ND_VF_OUT_FRAME_DONE) |
(1 << IMGU_ABI_EVTTYPE_3A_STATS_DONE) |
(1 << IMGU_ABI_EVTTYPE_DIS_STATS_DONE) |
(1 << IMGU_ABI_EVTTYPE_PIPELINE_DONE) |
(1 << IMGU_ABI_EVTTYPE_FRAME_TAGGED) |
(1 << IMGU_ABI_EVTTYPE_INPUT_FRAME_DONE) |
(1 << IMGU_ABI_EVTTYPE_METADATA_DONE) |
(1 << IMGU_ABI_EVTTYPE_ACC_STAGE_COMPLETE))
<< IMGU_ABI_SP_COMM_EVENT_IRQ_MASK_OR_SHIFT;
void __iomem *const base = css->base;
struct imgu_fw_info *bi, *bl = &css->fwp->binary_header[css->fw_bl];
unsigned int i;
writel(IMGU_TLB_INVALIDATE, base + IMGU_REG_TLB_INVALIDATE);
/* Start bootloader */
writel(IMGU_ABI_BL_SWSTATE_BUSY,
base + IMGU_REG_ISP_DMEM_BASE + bl->info.bl.sw_state);
writel(IMGU_NUM_SP,
base + IMGU_REG_ISP_DMEM_BASE + bl->info.bl.num_dma_cmds);
for (i = 0; i < IMGU_NUM_SP; i++) {
int j = IMGU_NUM_SP - i - 1; /* load sp1 first, then sp0 */
struct imgu_fw_info *sp =
&css->fwp->binary_header[css->fw_sp[j]];
struct imgu_abi_bl_dma_cmd_entry dma_cmd = {
.src_addr = css->binary[css->fw_sp[j]].daddr
+ sp->blob.text_source,
.size = sp->blob.text_size,
.dst_type = IMGU_ABI_BL_DMACMD_TYPE_SP_PMEM,
.dst_addr = IMGU_SP_PMEM_BASE(j),
};
writes(&dma_cmd, sizeof(dma_cmd),
base + IMGU_REG_ISP_DMEM_BASE + i * sizeof(dma_cmd) +
bl->info.bl.dma_cmd_list);
}
writel(bl->info.bl.bl_entry, base + IMGU_REG_ISP_START_ADDR);
writel(readl(base + IMGU_REG_ISP_CTRL)
| IMGU_CTRL_START | IMGU_CTRL_RUN, base + IMGU_REG_ISP_CTRL);
if (imgu_hw_wait(css->base, IMGU_REG_ISP_DMEM_BASE
+ bl->info.bl.sw_state, ~0,
IMGU_ABI_BL_SWSTATE_OK)) {
dev_err(css->dev, "failed to start bootloader\n");
return -EIO;
}
/* Start ISP */
memset(css->xmem_sp_group_ptrs.vaddr, 0,
sizeof(struct imgu_abi_sp_group));
bi = &css->fwp->binary_header[css->fw_sp[0]];
writel(css->xmem_sp_group_ptrs.daddr,
base + IMGU_REG_SP_DMEM_BASE(0) + bi->info.sp.per_frame_data);
writel(IMGU_ABI_SP_SWSTATE_TERMINATED,
base + IMGU_REG_SP_DMEM_BASE(0) + bi->info.sp.sw_state);
writel(1, base + IMGU_REG_SP_DMEM_BASE(0) + bi->info.sp.invalidate_tlb);
if (imgu_css_hw_start_sp(css, 0))
return -EIO;
writel(0, base + IMGU_REG_SP_DMEM_BASE(0) + bi->info.sp.isp_started);
writel(0, base + IMGU_REG_SP_DMEM_BASE(0) +
bi->info.sp.host_sp_queues_initialized);
writel(0, base + IMGU_REG_SP_DMEM_BASE(0) + bi->info.sp.sleep_mode);
writel(0, base + IMGU_REG_SP_DMEM_BASE(0) + bi->info.sp.invalidate_tlb);
writel(IMGU_ABI_SP_COMM_COMMAND_READY, base + IMGU_REG_SP_DMEM_BASE(0)
+ bi->info.sp.host_sp_com + IMGU_ABI_SP_COMM_COMMAND);
/* Enable all events for all queues */
for (i = 0; i < IPU3_CSS_PIPE_ID_NUM; i++)
writel(event_mask, base + IMGU_REG_SP_DMEM_BASE(0)
+ bi->info.sp.host_sp_com
+ IMGU_ABI_SP_COMM_EVENT_IRQ_MASK(i));
writel(1, base + IMGU_REG_SP_DMEM_BASE(0) +
bi->info.sp.host_sp_queues_initialized);
/* Start SP1 */
bi = &css->fwp->binary_header[css->fw_sp[1]];
writel(IMGU_ABI_SP_SWSTATE_TERMINATED,
base + IMGU_REG_SP_DMEM_BASE(1) + bi->info.sp.sw_state);
if (imgu_css_hw_start_sp(css, 1))
return -EIO;
writel(IMGU_ABI_SP_COMM_COMMAND_READY, base + IMGU_REG_SP_DMEM_BASE(1)
+ bi->info.sp.host_sp_com + IMGU_ABI_SP_COMM_COMMAND);
return 0;
}
static void imgu_css_hw_stop(struct imgu_css *css)
{
void __iomem *const base = css->base;
struct imgu_fw_info *bi = &css->fwp->binary_header[css->fw_sp[0]];
/* Stop fw */
writel(IMGU_ABI_SP_COMM_COMMAND_TERMINATE,
base + IMGU_REG_SP_DMEM_BASE(0) +
bi->info.sp.host_sp_com + IMGU_ABI_SP_COMM_COMMAND);
if (imgu_hw_wait(css->base, IMGU_REG_SP_CTRL(0),
IMGU_CTRL_IDLE, IMGU_CTRL_IDLE))
dev_err(css->dev, "wait sp0 idle timeout.\n");
if (readl(base + IMGU_REG_SP_DMEM_BASE(0) + bi->info.sp.sw_state) !=
IMGU_ABI_SP_SWSTATE_TERMINATED)
dev_err(css->dev, "sp0 is not terminated.\n");
if (imgu_hw_wait(css->base, IMGU_REG_ISP_CTRL,
IMGU_CTRL_IDLE, IMGU_CTRL_IDLE))
dev_err(css->dev, "wait isp idle timeout\n");
}
static void imgu_css_hw_cleanup(struct imgu_css *css)
{
void __iomem *const base = css->base;
	/* Reset CSS */
/* Clear the CSS busy signal */
readl(base + IMGU_REG_GP_BUSY);
writel(0, base + IMGU_REG_GP_BUSY);
/* Wait for idle signal */
if (imgu_hw_wait(css->base, IMGU_REG_STATE, IMGU_STATE_IDLE_STS,
IMGU_STATE_IDLE_STS))
dev_err(css->dev, "failed to shut down hw cleanly\n");
/* Reset the css */
writel(readl(base + IMGU_REG_PM_CTRL) | IMGU_PM_CTRL_FORCE_RESET,
base + IMGU_REG_PM_CTRL);
usleep_range(200, 300);
}
static void imgu_css_pipeline_cleanup(struct imgu_css *css, unsigned int pipe)
{
struct imgu_device *imgu = dev_get_drvdata(css->dev);
struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
unsigned int i;
imgu_css_pool_cleanup(imgu, &css_pipe->pool.parameter_set_info);
imgu_css_pool_cleanup(imgu, &css_pipe->pool.acc);
imgu_css_pool_cleanup(imgu, &css_pipe->pool.gdc);
imgu_css_pool_cleanup(imgu, &css_pipe->pool.obgrid);
for (i = 0; i < IMGU_ABI_NUM_MEMORIES; i++)
imgu_css_pool_cleanup(imgu, &css_pipe->pool.binary_params_p[i]);
}
/*
* This function initializes various stages of the
* IPU3 CSS ISP pipeline
*/
static int imgu_css_pipeline_init(struct imgu_css *css, unsigned int pipe)
{
static const int BYPC = 2; /* Bytes per component */
static const struct imgu_abi_buffer_sp buffer_sp_init = {
.buf_src = {.queue_id = IMGU_ABI_QUEUE_EVENT_ID},
.buf_type = IMGU_ABI_BUFFER_TYPE_INVALID,
};
struct imgu_abi_isp_iterator_config *cfg_iter;
struct imgu_abi_isp_ref_config *cfg_ref;
struct imgu_abi_isp_dvs_config *cfg_dvs;
struct imgu_abi_isp_tnr3_config *cfg_tnr;
struct imgu_abi_isp_ref_dmem_state *cfg_ref_state;
struct imgu_abi_isp_tnr3_dmem_state *cfg_tnr_state;
const int stage = 0;
unsigned int i, j;
struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
struct imgu_css_queue *css_queue_in =
&css_pipe->queue[IPU3_CSS_QUEUE_IN];
struct imgu_css_queue *css_queue_out =
&css_pipe->queue[IPU3_CSS_QUEUE_OUT];
struct imgu_css_queue *css_queue_vf =
&css_pipe->queue[IPU3_CSS_QUEUE_VF];
const struct imgu_fw_info *bi =
&css->fwp->binary_header[css_pipe->bindex];
const unsigned int stripes = bi->info.isp.sp.iterator.num_stripes;
struct imgu_fw_config_memory_offsets *cofs = (void *)css->fwp +
bi->blob.memory_offsets.offsets[IMGU_ABI_PARAM_CLASS_CONFIG];
struct imgu_fw_state_memory_offsets *sofs = (void *)css->fwp +
bi->blob.memory_offsets.offsets[IMGU_ABI_PARAM_CLASS_STATE];
struct imgu_abi_isp_stage *isp_stage;
struct imgu_abi_sp_stage *sp_stage;
struct imgu_abi_sp_group *sp_group;
struct imgu_abi_frames_sp *frames_sp;
struct imgu_abi_frame_sp *frame_sp;
struct imgu_abi_frame_sp_info *frame_sp_info;
const unsigned int bds_width_pad =
ALIGN(css_pipe->rect[IPU3_CSS_RECT_BDS].width,
2 * IPU3_UAPI_ISP_VEC_ELEMS);
const enum imgu_abi_memories m0 = IMGU_ABI_MEM_ISP_DMEM0;
enum imgu_abi_param_class cfg = IMGU_ABI_PARAM_CLASS_CONFIG;
void *vaddr = css_pipe->binary_params_cs[cfg - 1][m0].vaddr;
struct imgu_device *imgu = dev_get_drvdata(css->dev);
dev_dbg(css->dev, "%s for pipe %d", __func__, pipe);
/* Configure iterator */
cfg_iter = imgu_css_fw_pipeline_params(css, pipe, cfg, m0,
&cofs->dmem.iterator,
sizeof(*cfg_iter), vaddr);
if (!cfg_iter)
goto bad_firmware;
frame_sp_info = &cfg_iter->input_info;
frame_sp_info->res.width = css_queue_in->fmt.mpix.width;
frame_sp_info->res.height = css_queue_in->fmt.mpix.height;
frame_sp_info->padded_width = css_queue_in->width_pad;
frame_sp_info->format = css_queue_in->css_fmt->frame_format;
frame_sp_info->raw_bit_depth = css_queue_in->css_fmt->bit_depth;
frame_sp_info->raw_bayer_order = css_queue_in->css_fmt->bayer_order;
frame_sp_info->raw_type = IMGU_ABI_RAW_TYPE_BAYER;
frame_sp_info = &cfg_iter->internal_info;
frame_sp_info->res.width = css_pipe->rect[IPU3_CSS_RECT_BDS].width;
frame_sp_info->res.height = css_pipe->rect[IPU3_CSS_RECT_BDS].height;
frame_sp_info->padded_width = bds_width_pad;
frame_sp_info->format = css_queue_out->css_fmt->frame_format;
frame_sp_info->raw_bit_depth = css_queue_out->css_fmt->bit_depth;
frame_sp_info->raw_bayer_order = css_queue_out->css_fmt->bayer_order;
frame_sp_info->raw_type = IMGU_ABI_RAW_TYPE_BAYER;
frame_sp_info = &cfg_iter->output_info;
frame_sp_info->res.width = css_queue_out->fmt.mpix.width;
frame_sp_info->res.height = css_queue_out->fmt.mpix.height;
frame_sp_info->padded_width = css_queue_out->width_pad;
frame_sp_info->format = css_queue_out->css_fmt->frame_format;
frame_sp_info->raw_bit_depth = css_queue_out->css_fmt->bit_depth;
frame_sp_info->raw_bayer_order = css_queue_out->css_fmt->bayer_order;
frame_sp_info->raw_type = IMGU_ABI_RAW_TYPE_BAYER;
frame_sp_info = &cfg_iter->vf_info;
frame_sp_info->res.width = css_queue_vf->fmt.mpix.width;
frame_sp_info->res.height = css_queue_vf->fmt.mpix.height;
frame_sp_info->padded_width = css_queue_vf->width_pad;
frame_sp_info->format = css_queue_vf->css_fmt->frame_format;
frame_sp_info->raw_bit_depth = css_queue_vf->css_fmt->bit_depth;
frame_sp_info->raw_bayer_order = css_queue_vf->css_fmt->bayer_order;
frame_sp_info->raw_type = IMGU_ABI_RAW_TYPE_BAYER;
cfg_iter->dvs_envelope.width =
css_pipe->rect[IPU3_CSS_RECT_ENVELOPE].width;
cfg_iter->dvs_envelope.height =
css_pipe->rect[IPU3_CSS_RECT_ENVELOPE].height;
/* Configure reference (delay) frames */
cfg_ref = imgu_css_fw_pipeline_params(css, pipe, cfg, m0,
&cofs->dmem.ref,
sizeof(*cfg_ref), vaddr);
if (!cfg_ref)
goto bad_firmware;
cfg_ref->port_b.crop = 0;
cfg_ref->port_b.elems = IMGU_ABI_ISP_DDR_WORD_BYTES / BYPC;
cfg_ref->port_b.width =
css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].width;
cfg_ref->port_b.stride =
css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].bytesperline;
cfg_ref->width_a_over_b =
IPU3_UAPI_ISP_VEC_ELEMS / cfg_ref->port_b.elems;
cfg_ref->dvs_frame_delay = IPU3_CSS_AUX_FRAMES - 1;
for (i = 0; i < IPU3_CSS_AUX_FRAMES; i++) {
cfg_ref->ref_frame_addr_y[i] =
css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].mem[i].daddr;
cfg_ref->ref_frame_addr_c[i] =
css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].mem[i].daddr +
css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].bytesperline *
css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].height;
}
for (; i < IMGU_ABI_FRAMES_REF; i++) {
cfg_ref->ref_frame_addr_y[i] = 0;
cfg_ref->ref_frame_addr_c[i] = 0;
}
/* Configure DVS (digital video stabilization) */
cfg_dvs = imgu_css_fw_pipeline_params(css, pipe, cfg, m0,
&cofs->dmem.dvs, sizeof(*cfg_dvs),
vaddr);
if (!cfg_dvs)
goto bad_firmware;
cfg_dvs->num_horizontal_blocks =
ALIGN(DIV_ROUND_UP(css_pipe->rect[IPU3_CSS_RECT_GDC].width,
IMGU_DVS_BLOCK_W), 2);
cfg_dvs->num_vertical_blocks =
DIV_ROUND_UP(css_pipe->rect[IPU3_CSS_RECT_GDC].height,
IMGU_DVS_BLOCK_H);
/* Configure TNR (temporal noise reduction) */
if (css_pipe->pipe_id == IPU3_CSS_PIPE_ID_VIDEO) {
cfg_tnr = imgu_css_fw_pipeline_params(css, pipe, cfg, m0,
&cofs->dmem.tnr3,
sizeof(*cfg_tnr),
vaddr);
if (!cfg_tnr)
goto bad_firmware;
cfg_tnr->port_b.crop = 0;
cfg_tnr->port_b.elems = IMGU_ABI_ISP_DDR_WORD_BYTES;
cfg_tnr->port_b.width =
css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].width;
cfg_tnr->port_b.stride =
css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].bytesperline;
cfg_tnr->width_a_over_b =
IPU3_UAPI_ISP_VEC_ELEMS / cfg_tnr->port_b.elems;
cfg_tnr->frame_height =
css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].height;
cfg_tnr->delay_frame = IPU3_CSS_AUX_FRAMES - 1;
for (i = 0; i < IPU3_CSS_AUX_FRAMES; i++)
cfg_tnr->frame_addr[i] =
css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR]
.mem[i].daddr;
for (; i < IMGU_ABI_FRAMES_TNR; i++)
cfg_tnr->frame_addr[i] = 0;
}
/* Configure ref dmem state parameters */
cfg = IMGU_ABI_PARAM_CLASS_STATE;
vaddr = css_pipe->binary_params_cs[cfg - 1][m0].vaddr;
cfg_ref_state = imgu_css_fw_pipeline_params(css, pipe, cfg, m0,
&sofs->dmem.ref,
sizeof(*cfg_ref_state),
vaddr);
if (!cfg_ref_state)
goto bad_firmware;
cfg_ref_state->ref_in_buf_idx = 0;
cfg_ref_state->ref_out_buf_idx = 1;
/* Configure tnr dmem state parameters */
if (css_pipe->pipe_id == IPU3_CSS_PIPE_ID_VIDEO) {
cfg_tnr_state =
imgu_css_fw_pipeline_params(css, pipe, cfg, m0,
&sofs->dmem.tnr3,
sizeof(*cfg_tnr_state),
vaddr);
if (!cfg_tnr_state)
goto bad_firmware;
cfg_tnr_state->in_bufidx = 0;
cfg_tnr_state->out_bufidx = 1;
cfg_tnr_state->bypass_filter = 0;
cfg_tnr_state->total_frame_counter = 0;
for (i = 0; i < IMGU_ABI_BUF_SETS_TNR; i++)
cfg_tnr_state->buffer_frame_counter[i] = 0;
}
/* Configure ISP stage */
isp_stage = css_pipe->xmem_isp_stage_ptrs[pipe][stage].vaddr;
memset(isp_stage, 0, sizeof(*isp_stage));
isp_stage->blob_info = bi->blob;
isp_stage->binary_info = bi->info.isp.sp;
strscpy(isp_stage->binary_name,
(char *)css->fwp + bi->blob.prog_name_offset,
sizeof(isp_stage->binary_name));
isp_stage->mem_initializers = bi->info.isp.sp.mem_initializers;
for (i = IMGU_ABI_PARAM_CLASS_CONFIG; i < IMGU_ABI_PARAM_CLASS_NUM; i++)
for (j = 0; j < IMGU_ABI_NUM_MEMORIES; j++)
isp_stage->mem_initializers.params[i][j].address =
css_pipe->binary_params_cs[i - 1][j].daddr;
/* Configure SP stage */
sp_stage = css_pipe->xmem_sp_stage_ptrs[pipe][stage].vaddr;
memset(sp_stage, 0, sizeof(*sp_stage));
frames_sp = &sp_stage->frames;
frames_sp->in.buf_attr = buffer_sp_init;
for (i = 0; i < IMGU_ABI_BINARY_MAX_OUTPUT_PORTS; i++)
frames_sp->out[i].buf_attr = buffer_sp_init;
frames_sp->out_vf.buf_attr = buffer_sp_init;
frames_sp->s3a_buf = buffer_sp_init;
frames_sp->dvs_buf = buffer_sp_init;
sp_stage->stage_type = IMGU_ABI_STAGE_TYPE_ISP;
sp_stage->num = stage;
sp_stage->isp_online = 0;
sp_stage->isp_copy_vf = 0;
sp_stage->isp_copy_output = 0;
sp_stage->enable.vf_output = css_pipe->vf_output_en;
frames_sp->effective_in_res.width =
css_pipe->rect[IPU3_CSS_RECT_EFFECTIVE].width;
frames_sp->effective_in_res.height =
css_pipe->rect[IPU3_CSS_RECT_EFFECTIVE].height;
frame_sp = &frames_sp->in;
frame_sp->info.res.width = css_queue_in->fmt.mpix.width;
frame_sp->info.res.height = css_queue_in->fmt.mpix.height;
frame_sp->info.padded_width = css_queue_in->width_pad;
frame_sp->info.format = css_queue_in->css_fmt->frame_format;
frame_sp->info.raw_bit_depth = css_queue_in->css_fmt->bit_depth;
frame_sp->info.raw_bayer_order = css_queue_in->css_fmt->bayer_order;
frame_sp->info.raw_type = IMGU_ABI_RAW_TYPE_BAYER;
frame_sp->buf_attr.buf_src.queue_id = IMGU_ABI_QUEUE_C_ID;
frame_sp->buf_attr.buf_type = IMGU_ABI_BUFFER_TYPE_INPUT_FRAME;
frame_sp = &frames_sp->out[0];
frame_sp->info.res.width = css_queue_out->fmt.mpix.width;
frame_sp->info.res.height = css_queue_out->fmt.mpix.height;
frame_sp->info.padded_width = css_queue_out->width_pad;
frame_sp->info.format = css_queue_out->css_fmt->frame_format;
frame_sp->info.raw_bit_depth = css_queue_out->css_fmt->bit_depth;
frame_sp->info.raw_bayer_order = css_queue_out->css_fmt->bayer_order;
frame_sp->info.raw_type = IMGU_ABI_RAW_TYPE_BAYER;
frame_sp->planes.nv.uv.offset = css_queue_out->width_pad *
css_queue_out->fmt.mpix.height;
frame_sp->buf_attr.buf_src.queue_id = IMGU_ABI_QUEUE_D_ID;
frame_sp->buf_attr.buf_type = IMGU_ABI_BUFFER_TYPE_OUTPUT_FRAME;
frame_sp = &frames_sp->out[1];
frame_sp->buf_attr.buf_src.queue_id = IMGU_ABI_QUEUE_EVENT_ID;
frame_sp_info = &frames_sp->internal_frame_info;
frame_sp_info->res.width = css_pipe->rect[IPU3_CSS_RECT_BDS].width;
frame_sp_info->res.height = css_pipe->rect[IPU3_CSS_RECT_BDS].height;
frame_sp_info->padded_width = bds_width_pad;
frame_sp_info->format = css_queue_out->css_fmt->frame_format;
frame_sp_info->raw_bit_depth = css_queue_out->css_fmt->bit_depth;
frame_sp_info->raw_bayer_order = css_queue_out->css_fmt->bayer_order;
frame_sp_info->raw_type = IMGU_ABI_RAW_TYPE_BAYER;
frame_sp = &frames_sp->out_vf;
frame_sp->info.res.width = css_queue_vf->fmt.mpix.width;
frame_sp->info.res.height = css_queue_vf->fmt.mpix.height;
frame_sp->info.padded_width = css_queue_vf->width_pad;
frame_sp->info.format = css_queue_vf->css_fmt->frame_format;
frame_sp->info.raw_bit_depth = css_queue_vf->css_fmt->bit_depth;
frame_sp->info.raw_bayer_order = css_queue_vf->css_fmt->bayer_order;
frame_sp->info.raw_type = IMGU_ABI_RAW_TYPE_BAYER;
frame_sp->planes.yuv.u.offset = css_queue_vf->width_pad *
css_queue_vf->fmt.mpix.height;
frame_sp->planes.yuv.v.offset = css_queue_vf->width_pad *
css_queue_vf->fmt.mpix.height * 5 / 4;
frame_sp->buf_attr.buf_src.queue_id = IMGU_ABI_QUEUE_E_ID;
frame_sp->buf_attr.buf_type = IMGU_ABI_BUFFER_TYPE_VF_OUTPUT_FRAME;
frames_sp->s3a_buf.buf_src.queue_id = IMGU_ABI_QUEUE_F_ID;
frames_sp->s3a_buf.buf_type = IMGU_ABI_BUFFER_TYPE_3A_STATISTICS;
frames_sp->dvs_buf.buf_src.queue_id = IMGU_ABI_QUEUE_G_ID;
frames_sp->dvs_buf.buf_type = IMGU_ABI_BUFFER_TYPE_DIS_STATISTICS;
sp_stage->dvs_envelope.width =
css_pipe->rect[IPU3_CSS_RECT_ENVELOPE].width;
sp_stage->dvs_envelope.height =
css_pipe->rect[IPU3_CSS_RECT_ENVELOPE].height;
sp_stage->isp_pipe_version =
bi->info.isp.sp.pipeline.isp_pipe_version;
sp_stage->isp_deci_log_factor =
clamp(max(fls(css_pipe->rect[IPU3_CSS_RECT_BDS].width /
IMGU_MAX_BQ_GRID_WIDTH),
fls(css_pipe->rect[IPU3_CSS_RECT_BDS].height /
IMGU_MAX_BQ_GRID_HEIGHT)) - 1, 3, 5);
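	/*
	 * Worked example with hypothetical numbers: for a 2048x1152 BDS
	 * rectangle and a 64x64 maximum BQ grid, this evaluates to
	 * clamp(max(fls(32), fls(18)) - 1, 3, 5) = clamp(6 - 1, 3, 5) = 5.
	 */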
sp_stage->isp_vf_downscale_bits = 0;
sp_stage->if_config_index = 255;
sp_stage->sp_enable_xnr = 0;
sp_stage->num_stripes = stripes;
sp_stage->enable.s3a = 1;
sp_stage->enable.dvs_stats = 0;
sp_stage->xmem_bin_addr = css->binary[css_pipe->bindex].daddr;
sp_stage->xmem_map_addr = css_pipe->sp_ddr_ptrs.daddr;
sp_stage->isp_stage_addr =
css_pipe->xmem_isp_stage_ptrs[pipe][stage].daddr;
/* Configure SP group */
sp_group = css->xmem_sp_group_ptrs.vaddr;
memset(&sp_group->pipe[pipe], 0, sizeof(struct imgu_abi_sp_pipeline));
sp_group->pipe[pipe].num_stages = 1;
sp_group->pipe[pipe].pipe_id = css_pipe->pipe_id;
sp_group->pipe[pipe].thread_id = pipe;
sp_group->pipe[pipe].pipe_num = pipe;
sp_group->pipe[pipe].num_execs = -1;
sp_group->pipe[pipe].pipe_qos_config = -1;
sp_group->pipe[pipe].required_bds_factor = 0;
sp_group->pipe[pipe].dvs_frame_delay = IPU3_CSS_AUX_FRAMES - 1;
sp_group->pipe[pipe].inout_port_config =
IMGU_ABI_PORT_CONFIG_TYPE_INPUT_HOST |
IMGU_ABI_PORT_CONFIG_TYPE_OUTPUT_HOST;
sp_group->pipe[pipe].scaler_pp_lut = 0;
sp_group->pipe[pipe].shading.internal_frame_origin_x_bqs_on_sctbl = 0;
sp_group->pipe[pipe].shading.internal_frame_origin_y_bqs_on_sctbl = 0;
sp_group->pipe[pipe].sp_stage_addr[stage] =
css_pipe->xmem_sp_stage_ptrs[pipe][stage].daddr;
sp_group->pipe[pipe].pipe_config =
bi->info.isp.sp.enable.params ? (1 << pipe) : 0;
sp_group->pipe[pipe].pipe_config |= IMGU_ABI_PIPE_CONFIG_ACQUIRE_ISP;
/* Initialize parameter pools */
if (imgu_css_pool_init(imgu, &css_pipe->pool.parameter_set_info,
sizeof(struct imgu_abi_parameter_set_info)) ||
imgu_css_pool_init(imgu, &css_pipe->pool.acc,
sizeof(struct imgu_abi_acc_param)) ||
imgu_css_pool_init(imgu, &css_pipe->pool.gdc,
sizeof(struct imgu_abi_gdc_warp_param) *
3 * cfg_dvs->num_horizontal_blocks / 2 *
cfg_dvs->num_vertical_blocks) ||
imgu_css_pool_init(imgu, &css_pipe->pool.obgrid,
imgu_css_fw_obgrid_size(
&css->fwp->binary_header[css_pipe->bindex])))
goto out_of_memory;
for (i = 0; i < IMGU_ABI_NUM_MEMORIES; i++)
if (imgu_css_pool_init(imgu,
&css_pipe->pool.binary_params_p[i],
bi->info.isp.sp.mem_initializers.params
[IMGU_ABI_PARAM_CLASS_PARAM][i].size))
goto out_of_memory;
return 0;
bad_firmware:
imgu_css_pipeline_cleanup(css, pipe);
return -EPROTO;
out_of_memory:
imgu_css_pipeline_cleanup(css, pipe);
return -ENOMEM;
}
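/*
 * The host<->SP queues always live in SP0's DMEM ("sp" below is the
 * constant zero). Return the producer ("end") index of the given
 * host-to-SP buffer queue, or of the event queue when queue < 0.
 */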
static u8 imgu_css_queue_pos(struct imgu_css *css, int queue, int thread)
{
static const unsigned int sp;
void __iomem *const base = css->base;
struct imgu_fw_info *bi = &css->fwp->binary_header[css->fw_sp[sp]];
struct imgu_abi_queues __iomem *q = base + IMGU_REG_SP_DMEM_BASE(sp) +
bi->info.sp.host_sp_queue;
return queue >= 0 ? readb(&q->host2sp_bufq_info[thread][queue].end) :
readb(&q->host2sp_evtq_info.end);
}
/* Sent data to sp using given buffer queue, or if queue < 0, event queue. */
static int imgu_css_queue_data(struct imgu_css *css,
int queue, int thread, u32 data)
{
static const unsigned int sp;
void __iomem *const base = css->base;
struct imgu_fw_info *bi = &css->fwp->binary_header[css->fw_sp[sp]];
struct imgu_abi_queues __iomem *q = base + IMGU_REG_SP_DMEM_BASE(sp) +
bi->info.sp.host_sp_queue;
u8 size, start, end, end2;
if (queue >= 0) {
size = readb(&q->host2sp_bufq_info[thread][queue].size);
start = readb(&q->host2sp_bufq_info[thread][queue].start);
end = readb(&q->host2sp_bufq_info[thread][queue].end);
} else {
size = readb(&q->host2sp_evtq_info.size);
start = readb(&q->host2sp_evtq_info.start);
end = readb(&q->host2sp_evtq_info.end);
}
if (size == 0)
return -EIO;
end2 = (end + 1) % size;
if (end2 == start)
return -EBUSY; /* Queue full */
if (queue >= 0) {
writel(data, &q->host2sp_bufq[thread][queue][end]);
writeb(end2, &q->host2sp_bufq_info[thread][queue].end);
} else {
writel(data, &q->host2sp_evtq[end]);
writeb(end2, &q->host2sp_evtq_info.end);
}
return 0;
}
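/*
 * Worked example for the ring arithmetic above: with size == 8,
 * start == 2 and end == 1, the slot after the producer is
 * (1 + 1) % 8 == 2 == start, so the queue is full and -EBUSY is
 * returned. One slot is always left unused so that "end == start"
 * can unambiguously mean "empty" (see imgu_css_dequeue_data() below).
 */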
/* Receive data using given buffer queue, or if queue < 0, event queue. */
static int imgu_css_dequeue_data(struct imgu_css *css, int queue, u32 *data)
{
static const unsigned int sp;
void __iomem *const base = css->base;
struct imgu_fw_info *bi = &css->fwp->binary_header[css->fw_sp[sp]];
struct imgu_abi_queues __iomem *q = base + IMGU_REG_SP_DMEM_BASE(sp) +
bi->info.sp.host_sp_queue;
u8 size, start, end, start2;
if (queue >= 0) {
size = readb(&q->sp2host_bufq_info[queue].size);
start = readb(&q->sp2host_bufq_info[queue].start);
end = readb(&q->sp2host_bufq_info[queue].end);
} else {
size = readb(&q->sp2host_evtq_info.size);
start = readb(&q->sp2host_evtq_info.start);
end = readb(&q->sp2host_evtq_info.end);
}
if (size == 0)
return -EIO;
if (end == start)
return -EBUSY; /* Queue empty */
start2 = (start + 1) % size;
if (queue >= 0) {
*data = readl(&q->sp2host_bufq[queue][start]);
writeb(start2, &q->sp2host_bufq_info[queue].start);
} else {
int r;
*data = readl(&q->sp2host_evtq[start]);
writeb(start2, &q->sp2host_evtq_info.start);
/* Acknowledge events dequeued from event queue */
r = imgu_css_queue_data(css, queue, 0,
IMGU_ABI_EVENT_EVENT_DEQUEUED);
if (r < 0)
return r;
}
return 0;
}
/* Free binary-specific resources */
static void imgu_css_binary_cleanup(struct imgu_css *css, unsigned int pipe)
{
struct imgu_device *imgu = dev_get_drvdata(css->dev);
unsigned int i, j;
struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
for (j = 0; j < IMGU_ABI_PARAM_CLASS_NUM - 1; j++)
for (i = 0; i < IMGU_ABI_NUM_MEMORIES; i++)
imgu_dmamap_free(imgu,
&css_pipe->binary_params_cs[j][i]);
j = IPU3_CSS_AUX_FRAME_REF;
for (i = 0; i < IPU3_CSS_AUX_FRAMES; i++)
imgu_dmamap_free(imgu,
&css_pipe->aux_frames[j].mem[i]);
j = IPU3_CSS_AUX_FRAME_TNR;
for (i = 0; i < IPU3_CSS_AUX_FRAMES; i++)
imgu_dmamap_free(imgu,
&css_pipe->aux_frames[j].mem[i]);
}
static int imgu_css_binary_preallocate(struct imgu_css *css, unsigned int pipe)
{
struct imgu_device *imgu = dev_get_drvdata(css->dev);
unsigned int i, j;
struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
for (j = IMGU_ABI_PARAM_CLASS_CONFIG;
j < IMGU_ABI_PARAM_CLASS_NUM; j++)
for (i = 0; i < IMGU_ABI_NUM_MEMORIES; i++)
if (!imgu_dmamap_alloc(imgu,
&css_pipe->binary_params_cs[j - 1][i],
CSS_ABI_SIZE))
goto out_of_memory;
for (i = 0; i < IPU3_CSS_AUX_FRAMES; i++)
if (!imgu_dmamap_alloc(imgu,
&css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].mem[i],
CSS_BDS_SIZE))
goto out_of_memory;
for (i = 0; i < IPU3_CSS_AUX_FRAMES; i++)
if (!imgu_dmamap_alloc(imgu,
&css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].mem[i],
CSS_GDC_SIZE))
goto out_of_memory;
return 0;
out_of_memory:
imgu_css_binary_cleanup(css, pipe);
return -ENOMEM;
}
/* allocate binary-specific resources */
static int imgu_css_binary_setup(struct imgu_css *css, unsigned int pipe)
{
struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
struct imgu_fw_info *bi = &css->fwp->binary_header[css_pipe->bindex];
struct imgu_device *imgu = dev_get_drvdata(css->dev);
int i, j, size;
static const int BYPC = 2; /* Bytes per component */
unsigned int w, h;
/* Allocate parameter memory blocks for this binary */
for (j = IMGU_ABI_PARAM_CLASS_CONFIG; j < IMGU_ABI_PARAM_CLASS_NUM; j++)
for (i = 0; i < IMGU_ABI_NUM_MEMORIES; i++) {
if (imgu_css_dma_buffer_resize(
imgu,
&css_pipe->binary_params_cs[j - 1][i],
bi->info.isp.sp.mem_initializers.params[j][i].size))
goto out_of_memory;
}
/* Allocate internal frame buffers */
/* Reference frames for DVS, FRAME_FORMAT_YUV420_16 */
css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].bytesperpixel = BYPC;
css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].width =
css_pipe->rect[IPU3_CSS_RECT_BDS].width;
css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].height =
ALIGN(css_pipe->rect[IPU3_CSS_RECT_BDS].height,
IMGU_DVS_BLOCK_H) + 2 * IMGU_GDC_BUF_Y;
h = css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].height;
w = ALIGN(css_pipe->rect[IPU3_CSS_RECT_BDS].width,
2 * IPU3_UAPI_ISP_VEC_ELEMS) + 2 * IMGU_GDC_BUF_X;
css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].bytesperline =
css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].bytesperpixel * w;
size = w * h * BYPC + (w / 2) * (h / 2) * BYPC * 2;
for (i = 0; i < IPU3_CSS_AUX_FRAMES; i++)
if (imgu_css_dma_buffer_resize(
imgu,
&css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].mem[i],
size))
goto out_of_memory;
/* TNR frames for temporal noise reduction, FRAME_FORMAT_YUV_LINE */
css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].bytesperpixel = 1;
css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].width =
roundup(css_pipe->rect[IPU3_CSS_RECT_GDC].width,
bi->info.isp.sp.block.block_width *
IPU3_UAPI_ISP_VEC_ELEMS);
css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].height =
roundup(css_pipe->rect[IPU3_CSS_RECT_GDC].height,
bi->info.isp.sp.block.output_block_height);
w = css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].width;
css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].bytesperline = w;
h = css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].height;
size = w * ALIGN(h * 3 / 2 + 3, 2); /* +3 for vf_pp prefetch */
for (i = 0; i < IPU3_CSS_AUX_FRAMES; i++)
if (imgu_css_dma_buffer_resize(
imgu,
&css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].mem[i],
size))
goto out_of_memory;
return 0;
out_of_memory:
imgu_css_binary_cleanup(css, pipe);
return -ENOMEM;
}
int imgu_css_start_streaming(struct imgu_css *css)
{
u32 data;
int r, pipe;
if (css->streaming)
return -EPROTO;
for_each_set_bit(pipe, css->enabled_pipes, IMGU_MAX_PIPE_NUM) {
r = imgu_css_binary_setup(css, pipe);
if (r < 0)
return r;
}
r = imgu_css_hw_init(css);
if (r < 0)
return r;
r = imgu_css_hw_start(css);
if (r < 0)
goto fail;
for_each_set_bit(pipe, css->enabled_pipes, IMGU_MAX_PIPE_NUM) {
r = imgu_css_pipeline_init(css, pipe);
if (r < 0)
goto fail;
}
css->streaming = true;
imgu_css_hw_enable_irq(css);
/* Initialize parameters to default */
for_each_set_bit(pipe, css->enabled_pipes, IMGU_MAX_PIPE_NUM) {
r = imgu_css_set_parameters(css, pipe, NULL);
if (r < 0)
goto fail;
}
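	/*
	 * Drain any stale buffers from queues A and B;
	 * imgu_css_dequeue_data() returns -EBUSY once a queue is empty,
	 * which is the expected way out of these loops.
	 */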
while (!(r = imgu_css_dequeue_data(css, IMGU_ABI_QUEUE_A_ID, &data)))
;
if (r != -EBUSY)
goto fail;
while (!(r = imgu_css_dequeue_data(css, IMGU_ABI_QUEUE_B_ID, &data)))
;
if (r != -EBUSY)
goto fail;
for_each_set_bit(pipe, css->enabled_pipes, IMGU_MAX_PIPE_NUM) {
r = imgu_css_queue_data(css, IMGU_ABI_QUEUE_EVENT_ID, pipe,
IMGU_ABI_EVENT_START_STREAM |
pipe << 16);
if (r < 0)
goto fail;
}
return 0;
fail:
css->streaming = false;
imgu_css_hw_cleanup(css);
for_each_set_bit(pipe, css->enabled_pipes, IMGU_MAX_PIPE_NUM) {
imgu_css_pipeline_cleanup(css, pipe);
imgu_css_binary_cleanup(css, pipe);
}
return r;
}
void imgu_css_stop_streaming(struct imgu_css *css)
{
struct imgu_css_buffer *b, *b0;
int q, r, pipe;
for_each_set_bit(pipe, css->enabled_pipes, IMGU_MAX_PIPE_NUM) {
r = imgu_css_queue_data(css, IMGU_ABI_QUEUE_EVENT_ID, pipe,
IMGU_ABI_EVENT_STOP_STREAM);
if (r < 0)
dev_warn(css->dev, "failed on stop stream event\n");
}
if (!css->streaming)
return;
imgu_css_hw_stop(css);
imgu_css_hw_cleanup(css);
for_each_set_bit(pipe, css->enabled_pipes, IMGU_MAX_PIPE_NUM) {
struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
imgu_css_pipeline_cleanup(css, pipe);
spin_lock(&css_pipe->qlock);
for (q = 0; q < IPU3_CSS_QUEUES; q++)
list_for_each_entry_safe(b, b0,
&css_pipe->queue[q].bufs,
list) {
b->state = IPU3_CSS_BUFFER_FAILED;
list_del(&b->list);
}
spin_unlock(&css_pipe->qlock);
}
css->streaming = false;
}
bool imgu_css_pipe_queue_empty(struct imgu_css *css, unsigned int pipe)
{
int q;
struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
spin_lock(&css_pipe->qlock);
for (q = 0; q < IPU3_CSS_QUEUES; q++)
if (!list_empty(&css_pipe->queue[q].bufs))
break;
spin_unlock(&css_pipe->qlock);
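	/* The loop above breaks early if any queue still holds buffers */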
return (q == IPU3_CSS_QUEUES);
}
bool imgu_css_queue_empty(struct imgu_css *css)
{
unsigned int pipe;
	bool ret = true;
for (pipe = 0; pipe < IMGU_MAX_PIPE_NUM; pipe++)
ret &= imgu_css_pipe_queue_empty(css, pipe);
return ret;
}
bool imgu_css_is_streaming(struct imgu_css *css)
{
return css->streaming;
}
static int imgu_css_map_init(struct imgu_css *css, unsigned int pipe)
{
struct imgu_device *imgu = dev_get_drvdata(css->dev);
struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
unsigned int p, q, i;
/* Allocate and map common structures with imgu hardware */
for (p = 0; p < IPU3_CSS_PIPE_ID_NUM; p++)
for (i = 0; i < IMGU_ABI_MAX_STAGES; i++) {
if (!imgu_dmamap_alloc(imgu,
&css_pipe->xmem_sp_stage_ptrs[p][i],
sizeof(struct imgu_abi_sp_stage)))
return -ENOMEM;
if (!imgu_dmamap_alloc(imgu,
&css_pipe->xmem_isp_stage_ptrs[p][i],
sizeof(struct imgu_abi_isp_stage)))
return -ENOMEM;
}
if (!imgu_dmamap_alloc(imgu, &css_pipe->sp_ddr_ptrs,
ALIGN(sizeof(struct imgu_abi_ddr_address_map),
IMGU_ABI_ISP_DDR_WORD_BYTES)))
return -ENOMEM;
for (q = 0; q < IPU3_CSS_QUEUES; q++) {
unsigned int abi_buf_num = ARRAY_SIZE(css_pipe->abi_buffers[q]);
for (i = 0; i < abi_buf_num; i++)
if (!imgu_dmamap_alloc(imgu,
&css_pipe->abi_buffers[q][i],
sizeof(struct imgu_abi_buffer)))
return -ENOMEM;
}
if (imgu_css_binary_preallocate(css, pipe)) {
imgu_css_binary_cleanup(css, pipe);
return -ENOMEM;
}
return 0;
}
static void imgu_css_pipe_cleanup(struct imgu_css *css, unsigned int pipe)
{
struct imgu_device *imgu = dev_get_drvdata(css->dev);
struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
unsigned int p, q, i, abi_buf_num;
imgu_css_binary_cleanup(css, pipe);
for (q = 0; q < IPU3_CSS_QUEUES; q++) {
abi_buf_num = ARRAY_SIZE(css_pipe->abi_buffers[q]);
for (i = 0; i < abi_buf_num; i++)
imgu_dmamap_free(imgu, &css_pipe->abi_buffers[q][i]);
}
for (p = 0; p < IPU3_CSS_PIPE_ID_NUM; p++)
for (i = 0; i < IMGU_ABI_MAX_STAGES; i++) {
imgu_dmamap_free(imgu,
&css_pipe->xmem_sp_stage_ptrs[p][i]);
imgu_dmamap_free(imgu,
&css_pipe->xmem_isp_stage_ptrs[p][i]);
}
imgu_dmamap_free(imgu, &css_pipe->sp_ddr_ptrs);
}
void imgu_css_cleanup(struct imgu_css *css)
{
struct imgu_device *imgu = dev_get_drvdata(css->dev);
unsigned int pipe;
imgu_css_stop_streaming(css);
for (pipe = 0; pipe < IMGU_MAX_PIPE_NUM; pipe++)
imgu_css_pipe_cleanup(css, pipe);
imgu_dmamap_free(imgu, &css->xmem_sp_group_ptrs);
imgu_css_fw_cleanup(css);
}
int imgu_css_init(struct device *dev, struct imgu_css *css,
void __iomem *base, int length)
{
struct imgu_device *imgu = dev_get_drvdata(dev);
int r, q, pipe;
/* Initialize main data structure */
css->dev = dev;
css->base = base;
css->iomem_length = length;
for (pipe = 0; pipe < IMGU_MAX_PIPE_NUM; pipe++) {
struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
css_pipe->vf_output_en = false;
spin_lock_init(&css_pipe->qlock);
css_pipe->bindex = IPU3_CSS_DEFAULT_BINARY;
css_pipe->pipe_id = IPU3_CSS_PIPE_ID_VIDEO;
for (q = 0; q < IPU3_CSS_QUEUES; q++) {
r = imgu_css_queue_init(&css_pipe->queue[q], NULL, 0);
if (r)
return r;
}
r = imgu_css_map_init(css, pipe);
if (r) {
imgu_css_cleanup(css);
return r;
}
}
if (!imgu_dmamap_alloc(imgu, &css->xmem_sp_group_ptrs,
sizeof(struct imgu_abi_sp_group)))
return -ENOMEM;
r = imgu_css_fw_init(css);
if (r)
return r;
return 0;
}
static u32 imgu_css_adjust(u32 res, u32 align)
{
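	/* Clamp to the minimum supported resolution, then round to the nearest multiple of align */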
u32 val = max_t(u32, IPU3_CSS_MIN_RES, res);
return DIV_ROUND_CLOSEST(val, align) * align;
}
/* Select a binary matching the required resolutions and formats */
static int imgu_css_find_binary(struct imgu_css *css,
unsigned int pipe,
struct imgu_css_queue queue[IPU3_CSS_QUEUES],
struct v4l2_rect rects[IPU3_CSS_RECTS])
{
const int binary_nr = css->fwp->file_header.binary_nr;
unsigned int binary_mode =
(css->pipes[pipe].pipe_id == IPU3_CSS_PIPE_ID_CAPTURE) ?
IA_CSS_BINARY_MODE_PRIMARY : IA_CSS_BINARY_MODE_VIDEO;
const struct v4l2_pix_format_mplane *in =
&queue[IPU3_CSS_QUEUE_IN].fmt.mpix;
const struct v4l2_pix_format_mplane *out =
&queue[IPU3_CSS_QUEUE_OUT].fmt.mpix;
const struct v4l2_pix_format_mplane *vf =
&queue[IPU3_CSS_QUEUE_VF].fmt.mpix;
u32 stripe_w = 0, stripe_h = 0;
const char *name;
int i, j;
if (!imgu_css_queue_enabled(&queue[IPU3_CSS_QUEUE_IN]))
return -EINVAL;
	/* Find out the stripe size boundary */
for (i = 0; i < binary_nr; i++) {
struct imgu_fw_info *bi = &css->fwp->binary_header[i];
u32 max_width = bi->info.isp.sp.output.max_width;
u32 max_height = bi->info.isp.sp.output.max_height;
if (bi->info.isp.sp.iterator.num_stripes <= 1) {
stripe_w = stripe_w ?
min(stripe_w, max_width) : max_width;
stripe_h = stripe_h ?
min(stripe_h, max_height) : max_height;
}
}
for (i = 0; i < binary_nr; i++) {
struct imgu_fw_info *bi = &css->fwp->binary_header[i];
enum imgu_abi_frame_format q_fmt;
name = (void *)css->fwp + bi->blob.prog_name_offset;
/* Check that binary supports memory-to-memory processing */
if (bi->info.isp.sp.input.source !=
IMGU_ABI_BINARY_INPUT_SOURCE_MEMORY)
continue;
/* Check that binary supports raw10 input */
if (!bi->info.isp.sp.enable.input_feeder &&
!bi->info.isp.sp.enable.input_raw)
continue;
/* Check binary mode */
if (bi->info.isp.sp.pipeline.mode != binary_mode)
continue;
		/* Since the input is RGGB Bayer, the binary must be able to process colors */
if (bi->info.isp.sp.enable.luma_only)
continue;
if (in->width < bi->info.isp.sp.input.min_width ||
in->width > bi->info.isp.sp.input.max_width ||
in->height < bi->info.isp.sp.input.min_height ||
in->height > bi->info.isp.sp.input.max_height)
continue;
if (imgu_css_queue_enabled(&queue[IPU3_CSS_QUEUE_OUT])) {
if (bi->info.isp.num_output_pins <= 0)
continue;
q_fmt = queue[IPU3_CSS_QUEUE_OUT].css_fmt->frame_format;
for (j = 0; j < bi->info.isp.num_output_formats; j++)
if (bi->info.isp.output_formats[j] == q_fmt)
break;
if (j >= bi->info.isp.num_output_formats)
continue;
if (out->width < bi->info.isp.sp.output.min_width ||
out->width > bi->info.isp.sp.output.max_width ||
out->height < bi->info.isp.sp.output.min_height ||
out->height > bi->info.isp.sp.output.max_height)
continue;
if (out->width > bi->info.isp.sp.internal.max_width ||
out->height > bi->info.isp.sp.internal.max_height)
continue;
}
if (imgu_css_queue_enabled(&queue[IPU3_CSS_QUEUE_VF])) {
if (bi->info.isp.num_output_pins <= 1)
continue;
q_fmt = queue[IPU3_CSS_QUEUE_VF].css_fmt->frame_format;
for (j = 0; j < bi->info.isp.num_output_formats; j++)
if (bi->info.isp.output_formats[j] == q_fmt)
break;
if (j >= bi->info.isp.num_output_formats)
continue;
if (vf->width < bi->info.isp.sp.output.min_width ||
vf->width > bi->info.isp.sp.output.max_width ||
vf->height < bi->info.isp.sp.output.min_height ||
vf->height > bi->info.isp.sp.output.max_height)
continue;
}
/* All checks passed, select the binary */
dev_dbg(css->dev, "using binary %s id = %u\n", name,
bi->info.isp.sp.id);
return i;
}
	/* Cannot find a suitable binary for these parameters */
return -EINVAL;
}
/*
* Check that there is a binary matching requirements. Parameters may be
* NULL indicating disabled input/output. Return negative if given
* parameters can not be supported or on error, zero or positive indicating
* found binary number. May modify the given parameters if not exact match
* is found.
*/
int imgu_css_fmt_try(struct imgu_css *css,
struct v4l2_pix_format_mplane *fmts[IPU3_CSS_QUEUES],
struct v4l2_rect *rects[IPU3_CSS_RECTS],
unsigned int pipe)
{
static const u32 EFF_ALIGN_W = 2;
static const u32 BDS_ALIGN_W = 4;
static const u32 OUT_ALIGN_W = 8;
static const u32 OUT_ALIGN_H = 4;
static const u32 VF_ALIGN_W = 2;
static const char *qnames[IPU3_CSS_QUEUES] = {
[IPU3_CSS_QUEUE_IN] = "in",
[IPU3_CSS_QUEUE_PARAMS] = "params",
[IPU3_CSS_QUEUE_OUT] = "out",
[IPU3_CSS_QUEUE_VF] = "vf",
[IPU3_CSS_QUEUE_STAT_3A] = "3a",
};
static const char *rnames[IPU3_CSS_RECTS] = {
[IPU3_CSS_RECT_EFFECTIVE] = "effective resolution",
[IPU3_CSS_RECT_BDS] = "bayer-domain scaled resolution",
[IPU3_CSS_RECT_ENVELOPE] = "DVS envelope size",
[IPU3_CSS_RECT_GDC] = "GDC output res",
};
struct v4l2_rect r[IPU3_CSS_RECTS] = { };
struct v4l2_rect *const eff = &r[IPU3_CSS_RECT_EFFECTIVE];
struct v4l2_rect *const bds = &r[IPU3_CSS_RECT_BDS];
struct v4l2_rect *const env = &r[IPU3_CSS_RECT_ENVELOPE];
struct v4l2_rect *const gdc = &r[IPU3_CSS_RECT_GDC];
struct imgu_css_queue *q;
struct v4l2_pix_format_mplane *in, *out, *vf;
int i, s, ret;
q = kcalloc(IPU3_CSS_QUEUES, sizeof(struct imgu_css_queue), GFP_KERNEL);
if (!q)
return -ENOMEM;
in = &q[IPU3_CSS_QUEUE_IN].fmt.mpix;
out = &q[IPU3_CSS_QUEUE_OUT].fmt.mpix;
vf = &q[IPU3_CSS_QUEUE_VF].fmt.mpix;
/* Adjust all formats, get statistics buffer sizes and formats */
for (i = 0; i < IPU3_CSS_QUEUES; i++) {
if (fmts[i])
dev_dbg(css->dev, "%s %s: (%i,%i) fmt 0x%x\n", __func__,
qnames[i], fmts[i]->width, fmts[i]->height,
fmts[i]->pixelformat);
else
dev_dbg(css->dev, "%s %s: (not set)\n", __func__,
qnames[i]);
if (imgu_css_queue_init(&q[i], fmts[i],
IPU3_CSS_QUEUE_TO_FLAGS(i))) {
			dev_notice(css->dev, "cannot initialize queue %s\n",
				   qnames[i]);
ret = -EINVAL;
goto out;
}
}
for (i = 0; i < IPU3_CSS_RECTS; i++) {
if (rects[i]) {
dev_dbg(css->dev, "%s %s: (%i,%i)\n", __func__,
rnames[i], rects[i]->width, rects[i]->height);
r[i].width = rects[i]->width;
r[i].height = rects[i]->height;
} else {
dev_dbg(css->dev, "%s %s: (not set)\n", __func__,
rnames[i]);
}
/* For now, force known good resolutions */
r[i].left = 0;
r[i].top = 0;
}
/* Always require one input and vf only if out is also enabled */
if (!imgu_css_queue_enabled(&q[IPU3_CSS_QUEUE_IN]) ||
!imgu_css_queue_enabled(&q[IPU3_CSS_QUEUE_OUT])) {
dev_warn(css->dev, "required queues are disabled\n");
ret = -EINVAL;
goto out;
}
if (!imgu_css_queue_enabled(&q[IPU3_CSS_QUEUE_OUT])) {
out->width = in->width;
out->height = in->height;
}
if (eff->width <= 0 || eff->height <= 0) {
eff->width = in->width;
eff->height = in->height;
}
if (bds->width <= 0 || bds->height <= 0) {
bds->width = out->width;
bds->height = out->height;
}
if (gdc->width <= 0 || gdc->height <= 0) {
gdc->width = out->width;
gdc->height = out->height;
}
in->width = imgu_css_adjust(in->width, 1);
in->height = imgu_css_adjust(in->height, 1);
eff->width = imgu_css_adjust(eff->width, EFF_ALIGN_W);
eff->height = imgu_css_adjust(eff->height, 1);
bds->width = imgu_css_adjust(bds->width, BDS_ALIGN_W);
bds->height = imgu_css_adjust(bds->height, 1);
gdc->width = imgu_css_adjust(gdc->width, OUT_ALIGN_W);
gdc->height = imgu_css_adjust(gdc->height, OUT_ALIGN_H);
out->width = imgu_css_adjust(out->width, OUT_ALIGN_W);
out->height = imgu_css_adjust(out->height, OUT_ALIGN_H);
vf->width = imgu_css_adjust(vf->width, VF_ALIGN_W);
vf->height = imgu_css_adjust(vf->height, 1);
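	/* The DVS envelope is the margin between the BDS and GDC rectangles, split evenly per side */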
s = (bds->width - gdc->width) / 2;
env->width = s < MIN_ENVELOPE ? MIN_ENVELOPE : s;
s = (bds->height - gdc->height) / 2;
env->height = s < MIN_ENVELOPE ? MIN_ENVELOPE : s;
ret = imgu_css_find_binary(css, pipe, q, r);
if (ret < 0) {
dev_err(css->dev, "failed to find suitable binary\n");
ret = -EINVAL;
goto out;
}
css->pipes[pipe].bindex = ret;
dev_dbg(css->dev, "Binary index %d for pipe %d found.",
css->pipes[pipe].bindex, pipe);
/* Final adjustment and set back the queried formats */
for (i = 0; i < IPU3_CSS_QUEUES; i++) {
if (fmts[i]) {
if (imgu_css_queue_init(&q[i], &q[i].fmt.mpix,
IPU3_CSS_QUEUE_TO_FLAGS(i))) {
dev_err(css->dev,
"final resolution adjustment failed\n");
ret = -EINVAL;
goto out;
}
*fmts[i] = q[i].fmt.mpix;
}
}
for (i = 0; i < IPU3_CSS_RECTS; i++)
if (rects[i])
*rects[i] = r[i];
dev_dbg(css->dev,
"in(%u,%u) if(%u,%u) ds(%u,%u) gdc(%u,%u) out(%u,%u) vf(%u,%u)",
in->width, in->height, eff->width, eff->height,
bds->width, bds->height, gdc->width, gdc->height,
out->width, out->height, vf->width, vf->height);
ret = 0;
out:
kfree(q);
return ret;
}
int imgu_css_fmt_set(struct imgu_css *css,
struct v4l2_pix_format_mplane *fmts[IPU3_CSS_QUEUES],
struct v4l2_rect *rects[IPU3_CSS_RECTS],
unsigned int pipe)
{
struct v4l2_rect rect_data[IPU3_CSS_RECTS];
struct v4l2_rect *all_rects[IPU3_CSS_RECTS];
int i, r;
struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
for (i = 0; i < IPU3_CSS_RECTS; i++) {
if (rects[i])
rect_data[i] = *rects[i];
else
memset(&rect_data[i], 0, sizeof(rect_data[i]));
all_rects[i] = &rect_data[i];
}
r = imgu_css_fmt_try(css, fmts, all_rects, pipe);
if (r < 0)
return r;
for (i = 0; i < IPU3_CSS_QUEUES; i++)
if (imgu_css_queue_init(&css_pipe->queue[i], fmts[i],
IPU3_CSS_QUEUE_TO_FLAGS(i)))
return -EINVAL;
for (i = 0; i < IPU3_CSS_RECTS; i++) {
css_pipe->rect[i] = rect_data[i];
if (rects[i])
*rects[i] = rect_data[i];
}
return 0;
}
int imgu_css_meta_fmt_set(struct v4l2_meta_format *fmt)
{
switch (fmt->dataformat) {
case V4L2_META_FMT_IPU3_PARAMS:
fmt->buffersize = sizeof(struct ipu3_uapi_params);
/*
* Sanity check for the parameter struct size. This must
* not change!
*/
BUILD_BUG_ON(sizeof(struct ipu3_uapi_params) != 39328);
break;
case V4L2_META_FMT_IPU3_STAT_3A:
fmt->buffersize = sizeof(struct ipu3_uapi_stats_3a);
break;
default:
return -EINVAL;
}
return 0;
}
/*
* Queue given buffer to CSS. imgu_css_buf_prepare() must have been first
* called for the buffer. May be called from interrupt context.
* Returns 0 on success, -EBUSY if the buffer queue is full, or some other
* code on error conditions.
*/
int imgu_css_buf_queue(struct imgu_css *css, unsigned int pipe,
struct imgu_css_buffer *b)
{
struct imgu_abi_buffer *abi_buf;
struct imgu_addr_t *buf_addr;
u32 data;
int r;
struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
if (!css->streaming)
return -EPROTO; /* CSS or buffer in wrong state */
if (b->queue >= IPU3_CSS_QUEUES || !imgu_css_queues[b->queue].qid)
return -EINVAL;
b->queue_pos = imgu_css_queue_pos(css, imgu_css_queues[b->queue].qid,
pipe);
if (b->queue_pos >= ARRAY_SIZE(css->pipes[pipe].abi_buffers[b->queue]))
return -EIO;
abi_buf = css->pipes[pipe].abi_buffers[b->queue][b->queue_pos].vaddr;
/* Fill struct abi_buffer for firmware */
memset(abi_buf, 0, sizeof(*abi_buf));
buf_addr = (void *)abi_buf + imgu_css_queues[b->queue].ptr_ofs;
*(imgu_addr_t *)buf_addr = b->daddr;
if (b->queue == IPU3_CSS_QUEUE_STAT_3A)
abi_buf->payload.s3a.data.dmem.s3a_tbl = b->daddr;
if (b->queue == IPU3_CSS_QUEUE_OUT)
abi_buf->payload.frame.padded_width =
css_pipe->queue[IPU3_CSS_QUEUE_OUT].width_pad;
if (b->queue == IPU3_CSS_QUEUE_VF)
abi_buf->payload.frame.padded_width =
css_pipe->queue[IPU3_CSS_QUEUE_VF].width_pad;
spin_lock(&css_pipe->qlock);
list_add_tail(&b->list, &css_pipe->queue[b->queue].bufs);
spin_unlock(&css_pipe->qlock);
b->state = IPU3_CSS_BUFFER_QUEUED;
data = css->pipes[pipe].abi_buffers[b->queue][b->queue_pos].daddr;
r = imgu_css_queue_data(css, imgu_css_queues[b->queue].qid,
pipe, data);
if (r < 0)
goto queueing_failed;
data = IMGU_ABI_EVENT_BUFFER_ENQUEUED(pipe,
imgu_css_queues[b->queue].qid);
r = imgu_css_queue_data(css, IMGU_ABI_QUEUE_EVENT_ID, pipe, data);
if (r < 0)
goto queueing_failed;
dev_dbg(css->dev, "queued buffer %p to css queue %i in pipe %d\n",
b, b->queue, pipe);
return 0;
queueing_failed:
b->state = (r == -EBUSY || r == -EAGAIN) ?
IPU3_CSS_BUFFER_NEW : IPU3_CSS_BUFFER_FAILED;
list_del(&b->list);
return r;
}
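/*
 * Illustrative calling sketch only (not part of this file), assuming the
 * imgu_css_buf_init() helper from the driver header: the caller initializes
 * the buffer handle, queues it, and keeps it around on -EBUSY:
 *
 *	imgu_css_buf_init(&b->css_buf, queue_id, daddr);
 *	r = imgu_css_buf_queue(css, pipe, &b->css_buf);
 *	if (r == -EBUSY)
 *		(keep the buffer and retry after the next dequeue event)
 */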
/*
* Get next ready CSS buffer. Returns -EAGAIN in which case the function
* should be called again, or -EBUSY which means that there are no more
* buffers available. May be called from interrupt context.
*/
struct imgu_css_buffer *imgu_css_buf_dequeue(struct imgu_css *css)
{
static const unsigned char evtype_to_queue[] = {
[IMGU_ABI_EVTTYPE_INPUT_FRAME_DONE] = IPU3_CSS_QUEUE_IN,
[IMGU_ABI_EVTTYPE_OUT_FRAME_DONE] = IPU3_CSS_QUEUE_OUT,
[IMGU_ABI_EVTTYPE_VF_OUT_FRAME_DONE] = IPU3_CSS_QUEUE_VF,
[IMGU_ABI_EVTTYPE_3A_STATS_DONE] = IPU3_CSS_QUEUE_STAT_3A,
};
struct imgu_css_buffer *b = ERR_PTR(-EAGAIN);
u32 event, daddr;
int evtype, pipe, pipeid, queue, qid, r;
struct imgu_css_pipe *css_pipe;
if (!css->streaming)
return ERR_PTR(-EPROTO);
r = imgu_css_dequeue_data(css, IMGU_ABI_QUEUE_EVENT_ID, &event);
if (r < 0)
return ERR_PTR(r);
evtype = (event & IMGU_ABI_EVTTYPE_EVENT_MASK) >>
IMGU_ABI_EVTTYPE_EVENT_SHIFT;
switch (evtype) {
case IMGU_ABI_EVTTYPE_OUT_FRAME_DONE:
case IMGU_ABI_EVTTYPE_VF_OUT_FRAME_DONE:
case IMGU_ABI_EVTTYPE_3A_STATS_DONE:
case IMGU_ABI_EVTTYPE_INPUT_FRAME_DONE:
pipe = (event & IMGU_ABI_EVTTYPE_PIPE_MASK) >>
IMGU_ABI_EVTTYPE_PIPE_SHIFT;
pipeid = (event & IMGU_ABI_EVTTYPE_PIPEID_MASK) >>
IMGU_ABI_EVTTYPE_PIPEID_SHIFT;
queue = evtype_to_queue[evtype];
qid = imgu_css_queues[queue].qid;
if (pipe >= IMGU_MAX_PIPE_NUM) {
dev_err(css->dev, "Invalid pipe: %i\n", pipe);
return ERR_PTR(-EIO);
}
if (qid >= IMGU_ABI_QUEUE_NUM) {
dev_err(css->dev, "Invalid qid: %i\n", qid);
return ERR_PTR(-EIO);
}
css_pipe = &css->pipes[pipe];
dev_dbg(css->dev,
"event: buffer done 0x%x queue %i pipe %i pipeid %i\n",
event, queue, pipe, pipeid);
r = imgu_css_dequeue_data(css, qid, &daddr);
if (r < 0) {
dev_err(css->dev, "failed to dequeue buffer\n");
/* Force real error, not -EBUSY */
return ERR_PTR(-EIO);
}
r = imgu_css_queue_data(css, IMGU_ABI_QUEUE_EVENT_ID, pipe,
IMGU_ABI_EVENT_BUFFER_DEQUEUED(qid));
if (r < 0) {
dev_err(css->dev, "failed to queue event\n");
return ERR_PTR(-EIO);
}
spin_lock(&css_pipe->qlock);
if (list_empty(&css_pipe->queue[queue].bufs)) {
spin_unlock(&css_pipe->qlock);
dev_err(css->dev, "event on empty queue\n");
return ERR_PTR(-EIO);
}
b = list_first_entry(&css_pipe->queue[queue].bufs,
struct imgu_css_buffer, list);
if (queue != b->queue ||
daddr != css_pipe->abi_buffers
[b->queue][b->queue_pos].daddr) {
spin_unlock(&css_pipe->qlock);
dev_err(css->dev, "dequeued bad buffer 0x%x\n", daddr);
return ERR_PTR(-EIO);
}
dev_dbg(css->dev, "buffer 0x%8x done from pipe %d\n", daddr, pipe);
b->pipe = pipe;
b->state = IPU3_CSS_BUFFER_DONE;
list_del(&b->list);
spin_unlock(&css_pipe->qlock);
break;
case IMGU_ABI_EVTTYPE_PIPELINE_DONE:
pipe = (event & IMGU_ABI_EVTTYPE_PIPE_MASK) >>
IMGU_ABI_EVTTYPE_PIPE_SHIFT;
if (pipe >= IMGU_MAX_PIPE_NUM) {
dev_err(css->dev, "Invalid pipe: %i\n", pipe);
return ERR_PTR(-EIO);
}
dev_dbg(css->dev, "event: pipeline done 0x%8x for pipe %d\n",
event, pipe);
break;
case IMGU_ABI_EVTTYPE_TIMER:
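		/* Timer events arrive as two consecutive queue entries; fetch and verify the second */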
r = imgu_css_dequeue_data(css, IMGU_ABI_QUEUE_EVENT_ID, &event);
if (r < 0)
return ERR_PTR(r);
if ((event & IMGU_ABI_EVTTYPE_EVENT_MASK) >>
IMGU_ABI_EVTTYPE_EVENT_SHIFT == IMGU_ABI_EVTTYPE_TIMER)
dev_dbg(css->dev, "event: timer\n");
else
dev_warn(css->dev, "half of timer event missing\n");
break;
case IMGU_ABI_EVTTYPE_FW_WARNING:
dev_warn(css->dev, "event: firmware warning 0x%x\n", event);
break;
case IMGU_ABI_EVTTYPE_FW_ASSERT:
dev_err(css->dev,
"event: firmware assert 0x%x module_id %i line_no %i\n",
event,
(event & IMGU_ABI_EVTTYPE_MODULEID_MASK) >>
IMGU_ABI_EVTTYPE_MODULEID_SHIFT,
swab16((event & IMGU_ABI_EVTTYPE_LINENO_MASK) >>
IMGU_ABI_EVTTYPE_LINENO_SHIFT));
break;
default:
dev_warn(css->dev, "received unknown event 0x%x\n", event);
}
return b;
}
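/*
 * Illustrative calling sketch only: consumers loop until -EBUSY, treating
 * -EAGAIN as "call again" (handle_buffer() is a hypothetical stand-in):
 *
 *	do {
 *		b = imgu_css_buf_dequeue(css);
 *		if (!IS_ERR(b))
 *			handle_buffer(b);
 *	} while (!IS_ERR(b) || PTR_ERR(b) == -EAGAIN);
 */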
/*
 * Get a new set of parameters from the pool and initialize them based on
 * set_params. It may be NULL, in which case the previously set parameters
 * are reused; if no parameters have been set yet, initialize from scratch.
 *
 * Return 0 on success or a negative error code on failure.
 */
int imgu_css_set_parameters(struct imgu_css *css, unsigned int pipe,
struct ipu3_uapi_params *set_params)
{
static const unsigned int queue_id = IMGU_ABI_QUEUE_A_ID;
struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
const int stage = 0;
const struct imgu_fw_info *bi;
int obgrid_size;
unsigned int stripes, i;
struct ipu3_uapi_flags *use = set_params ? &set_params->use : NULL;
/* Destination buffers which are filled here */
struct imgu_abi_parameter_set_info *param_set;
struct imgu_abi_acc_param *acc = NULL;
struct imgu_abi_gdc_warp_param *gdc = NULL;
struct ipu3_uapi_obgrid_param *obgrid = NULL;
const struct imgu_css_map *map;
void *vmem0 = NULL;
void *dmem0 = NULL;
enum imgu_abi_memories m;
int r = -EBUSY;
if (!css->streaming)
return -EPROTO;
dev_dbg(css->dev, "%s for pipe %d", __func__, pipe);
bi = &css->fwp->binary_header[css_pipe->bindex];
obgrid_size = imgu_css_fw_obgrid_size(bi);
stripes = bi->info.isp.sp.iterator.num_stripes ? : 1;
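	/*
	 * Each pool keeps the most recent buffers: after imgu_css_pool_get(),
	 * imgu_css_pool_last(pool, 0) is the new buffer and index 1 the old one.
	 */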
imgu_css_pool_get(&css_pipe->pool.parameter_set_info);
param_set = imgu_css_pool_last(&css_pipe->pool.parameter_set_info,
0)->vaddr;
/* Get a new acc only if new parameters given, or none yet */
map = imgu_css_pool_last(&css_pipe->pool.acc, 0);
if (set_params || !map->vaddr) {
imgu_css_pool_get(&css_pipe->pool.acc);
map = imgu_css_pool_last(&css_pipe->pool.acc, 0);
acc = map->vaddr;
}
/* Get new VMEM0 only if needed, or none yet */
m = IMGU_ABI_MEM_ISP_VMEM0;
map = imgu_css_pool_last(&css_pipe->pool.binary_params_p[m], 0);
if (!map->vaddr || (set_params && (set_params->use.lin_vmem_params ||
set_params->use.tnr3_vmem_params ||
set_params->use.xnr3_vmem_params))) {
imgu_css_pool_get(&css_pipe->pool.binary_params_p[m]);
map = imgu_css_pool_last(&css_pipe->pool.binary_params_p[m], 0);
vmem0 = map->vaddr;
}
/* Get new DMEM0 only if needed, or none yet */
m = IMGU_ABI_MEM_ISP_DMEM0;
map = imgu_css_pool_last(&css_pipe->pool.binary_params_p[m], 0);
if (!map->vaddr || (set_params && (set_params->use.tnr3_dmem_params ||
set_params->use.xnr3_dmem_params))) {
imgu_css_pool_get(&css_pipe->pool.binary_params_p[m]);
map = imgu_css_pool_last(&css_pipe->pool.binary_params_p[m], 0);
dmem0 = map->vaddr;
}
/* Configure acc parameter cluster */
if (acc) {
/* get acc_old */
map = imgu_css_pool_last(&css_pipe->pool.acc, 1);
/* user acc */
r = imgu_css_cfg_acc(css, pipe, use, acc, map->vaddr,
set_params ? &set_params->acc_param : NULL);
if (r < 0)
goto fail;
}
/* Configure late binding parameters */
if (vmem0) {
m = IMGU_ABI_MEM_ISP_VMEM0;
map = imgu_css_pool_last(&css_pipe->pool.binary_params_p[m], 1);
r = imgu_css_cfg_vmem0(css, pipe, use, vmem0,
map->vaddr, set_params);
if (r < 0)
goto fail;
}
if (dmem0) {
m = IMGU_ABI_MEM_ISP_DMEM0;
map = imgu_css_pool_last(&css_pipe->pool.binary_params_p[m], 1);
r = imgu_css_cfg_dmem0(css, pipe, use, dmem0,
map->vaddr, set_params);
if (r < 0)
goto fail;
}
/* Get a new gdc only if a new gdc is given, or none yet */
if (bi->info.isp.sp.enable.dvs_6axis) {
unsigned int a = IPU3_CSS_AUX_FRAME_REF;
unsigned int g = IPU3_CSS_RECT_GDC;
unsigned int e = IPU3_CSS_RECT_ENVELOPE;
map = imgu_css_pool_last(&css_pipe->pool.gdc, 0);
if (!map->vaddr) {
imgu_css_pool_get(&css_pipe->pool.gdc);
map = imgu_css_pool_last(&css_pipe->pool.gdc, 0);
gdc = map->vaddr;
imgu_css_cfg_gdc_table(map->vaddr,
css_pipe->aux_frames[a].bytesperline /
css_pipe->aux_frames[a].bytesperpixel,
css_pipe->aux_frames[a].height,
css_pipe->rect[g].width,
css_pipe->rect[g].height,
css_pipe->rect[e].width,
css_pipe->rect[e].height);
}
}
/* Get a new obgrid only if a new obgrid is given, or none yet */
map = imgu_css_pool_last(&css_pipe->pool.obgrid, 0);
if (!map->vaddr || (set_params && set_params->use.obgrid_param)) {
imgu_css_pool_get(&css_pipe->pool.obgrid);
map = imgu_css_pool_last(&css_pipe->pool.obgrid, 0);
obgrid = map->vaddr;
/* Configure optical black level grid (obgrid) */
if (set_params && set_params->use.obgrid_param)
for (i = 0; i < obgrid_size / sizeof(*obgrid); i++)
obgrid[i] = set_params->obgrid_param;
else
memset(obgrid, 0, obgrid_size);
}
/* Configure parameter set info, queued to `queue_id' */
memset(param_set, 0, sizeof(*param_set));
map = imgu_css_pool_last(&css_pipe->pool.acc, 0);
param_set->mem_map.acc_cluster_params_for_sp = map->daddr;
map = imgu_css_pool_last(&css_pipe->pool.gdc, 0);
param_set->mem_map.dvs_6axis_params_y = map->daddr;
for (i = 0; i < stripes; i++) {
map = imgu_css_pool_last(&css_pipe->pool.obgrid, 0);
param_set->mem_map.obgrid_tbl[i] =
map->daddr + (obgrid_size / stripes) * i;
}
for (m = 0; m < IMGU_ABI_NUM_MEMORIES; m++) {
map = imgu_css_pool_last(&css_pipe->pool.binary_params_p[m], 0);
param_set->mem_map.isp_mem_param[stage][m] = map->daddr;
}
/* Then queue the new parameter buffer */
map = imgu_css_pool_last(&css_pipe->pool.parameter_set_info, 0);
r = imgu_css_queue_data(css, queue_id, pipe, map->daddr);
if (r < 0)
goto fail;
r = imgu_css_queue_data(css, IMGU_ABI_QUEUE_EVENT_ID, pipe,
IMGU_ABI_EVENT_BUFFER_ENQUEUED(pipe,
queue_id));
if (r < 0)
goto fail_no_put;
/* Finally dequeue all old parameter buffers */
do {
u32 daddr;
r = imgu_css_dequeue_data(css, queue_id, &daddr);
if (r == -EBUSY)
break;
if (r)
goto fail_no_put;
r = imgu_css_queue_data(css, IMGU_ABI_QUEUE_EVENT_ID, pipe,
IMGU_ABI_EVENT_BUFFER_DEQUEUED
(queue_id));
if (r < 0) {
dev_err(css->dev, "failed to queue parameter event\n");
goto fail_no_put;
}
} while (1);
return 0;
fail:
/*
* A failure, most likely the parameter queue was full.
* Return error but continue streaming. User can try submitting new
* parameters again later.
*/
imgu_css_pool_put(&css_pipe->pool.parameter_set_info);
if (acc)
imgu_css_pool_put(&css_pipe->pool.acc);
if (gdc)
imgu_css_pool_put(&css_pipe->pool.gdc);
if (obgrid)
imgu_css_pool_put(&css_pipe->pool.obgrid);
if (vmem0)
imgu_css_pool_put(
&css_pipe->pool.binary_params_p
[IMGU_ABI_MEM_ISP_VMEM0]);
if (dmem0)
imgu_css_pool_put(
&css_pipe->pool.binary_params_p
[IMGU_ABI_MEM_ISP_DMEM0]);
fail_no_put:
return r;
}
int imgu_css_irq_ack(struct imgu_css *css)
{
static const int NUM_SWIRQS = 3;
struct imgu_fw_info *bi = &css->fwp->binary_header[css->fw_sp[0]];
void __iomem *const base = css->base;
u32 irq_status[IMGU_IRQCTRL_NUM];
int i;
u32 imgu_status = readl(base + IMGU_REG_INT_STATUS);
writel(imgu_status, base + IMGU_REG_INT_STATUS);
for (i = 0; i < IMGU_IRQCTRL_NUM; i++)
irq_status[i] = readl(base + IMGU_REG_IRQCTRL_STATUS(i));
for (i = 0; i < NUM_SWIRQS; i++) {
if (irq_status[IMGU_IRQCTRL_SP0] & IMGU_IRQCTRL_IRQ_SW_PIN(i)) {
/* SP SW interrupt */
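			/* The SP reports a counter at its output offset, then one word per software IRQ */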
u32 cnt = readl(base + IMGU_REG_SP_DMEM_BASE(0) +
bi->info.sp.output);
u32 val = readl(base + IMGU_REG_SP_DMEM_BASE(0) +
bi->info.sp.output + 4 + 4 * i);
dev_dbg(css->dev, "%s: swirq %i cnt %i val 0x%x\n",
__func__, i, cnt, val);
}
}
for (i = IMGU_IRQCTRL_NUM - 1; i >= 0; i--)
if (irq_status[i]) {
writel(irq_status[i], base + IMGU_REG_IRQCTRL_CLEAR(i));
/* Wait for write to complete */
readl(base + IMGU_REG_IRQCTRL_ENABLE(i));
}
dev_dbg(css->dev, "%s: imgu 0x%x main 0x%x sp0 0x%x sp1 0x%x\n",
__func__, imgu_status, irq_status[IMGU_IRQCTRL_MAIN],
irq_status[IMGU_IRQCTRL_SP0], irq_status[IMGU_IRQCTRL_SP1]);
if (!imgu_status && !irq_status[IMGU_IRQCTRL_MAIN])
return -ENOMSG;
return 0;
}
/* ==================== next file ==================== */
/*
* Amiga Linux interrupt handling code
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/irq.h>
#include <asm/irq.h>
#include <asm/traps.h>
#include <asm/amigahw.h>
#include <asm/amigaints.h>
#include <asm/amipcmcia.h>
/*
* Enable/disable a particular machine specific interrupt source.
* Note that this may affect other interrupts in case of a shared interrupt.
* This function should only be called for a _very_ short time to change some
* internal data, that may not be changed by the interrupt at the same time.
*/
static void amiga_irq_enable(struct irq_data *data)
{
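	/* Writes to intena with IF_SETCLR set the selected bits; without it they are cleared */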
amiga_custom.intena = IF_SETCLR | (1 << (data->irq - IRQ_USER));
}
static void amiga_irq_disable(struct irq_data *data)
{
amiga_custom.intena = 1 << (data->irq - IRQ_USER);
}
static struct irq_chip amiga_irq_chip = {
.name = "amiga",
.irq_enable = amiga_irq_enable,
.irq_disable = amiga_irq_disable,
};
/*
* The builtin Amiga hardware interrupt handlers.
*/
static void ami_int1(struct irq_desc *desc)
{
unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar;
/* if serial transmit buffer empty, interrupt */
if (ints & IF_TBE) {
amiga_custom.intreq = IF_TBE;
generic_handle_irq(IRQ_AMIGA_TBE);
}
/* if floppy disk transfer complete, interrupt */
if (ints & IF_DSKBLK) {
amiga_custom.intreq = IF_DSKBLK;
generic_handle_irq(IRQ_AMIGA_DSKBLK);
}
/* if software interrupt set, interrupt */
if (ints & IF_SOFT) {
amiga_custom.intreq = IF_SOFT;
generic_handle_irq(IRQ_AMIGA_SOFT);
}
}
static void ami_int3(struct irq_desc *desc)
{
unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar;
/* if a blitter interrupt */
if (ints & IF_BLIT) {
amiga_custom.intreq = IF_BLIT;
generic_handle_irq(IRQ_AMIGA_BLIT);
}
/* if a copper interrupt */
if (ints & IF_COPER) {
amiga_custom.intreq = IF_COPER;
generic_handle_irq(IRQ_AMIGA_COPPER);
}
/* if a vertical blank interrupt */
if (ints & IF_VERTB) {
amiga_custom.intreq = IF_VERTB;
generic_handle_irq(IRQ_AMIGA_VERTB);
}
}
static void ami_int4(struct irq_desc *desc)
{
unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar;
/* if audio 0 interrupt */
if (ints & IF_AUD0) {
amiga_custom.intreq = IF_AUD0;
generic_handle_irq(IRQ_AMIGA_AUD0);
}
/* if audio 1 interrupt */
if (ints & IF_AUD1) {
amiga_custom.intreq = IF_AUD1;
generic_handle_irq(IRQ_AMIGA_AUD1);
}
/* if audio 2 interrupt */
if (ints & IF_AUD2) {
amiga_custom.intreq = IF_AUD2;
generic_handle_irq(IRQ_AMIGA_AUD2);
}
/* if audio 3 interrupt */
if (ints & IF_AUD3) {
amiga_custom.intreq = IF_AUD3;
generic_handle_irq(IRQ_AMIGA_AUD3);
}
}
static void ami_int5(struct irq_desc *desc)
{
unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar;
/* if serial receive buffer full interrupt */
if (ints & IF_RBF) {
/* acknowledge of IF_RBF must be done by the serial interrupt */
generic_handle_irq(IRQ_AMIGA_RBF);
}
/* if a disk sync interrupt */
if (ints & IF_DSKSYN) {
amiga_custom.intreq = IF_DSKSYN;
generic_handle_irq(IRQ_AMIGA_DSKSYN);
}
}
/*
* void amiga_init_IRQ(void)
*
* Parameters: None
*
* Returns: Nothing
*
* This function should be called during kernel startup to initialize
* the amiga IRQ handling routines.
*/
void __init amiga_init_IRQ(void)
{
m68k_setup_irq_controller(&amiga_irq_chip, handle_simple_irq, IRQ_USER,
AMI_STD_IRQS);
irq_set_chained_handler(IRQ_AUTO_1, ami_int1);
irq_set_chained_handler(IRQ_AUTO_3, ami_int3);
irq_set_chained_handler(IRQ_AUTO_4, ami_int4);
irq_set_chained_handler(IRQ_AUTO_5, ami_int5);
/* turn off PCMCIA interrupts */
if (AMIGAHW_PRESENT(PCMCIA))
gayle.inten = GAYLE_IRQ_IDE;
/* turn off all interrupts and enable the master interrupt bit */
amiga_custom.intena = 0x7fff;
amiga_custom.intreq = 0x7fff;
amiga_custom.intena = IF_SETCLR | IF_INTEN;
cia_init_IRQ(&ciaa_base);
cia_init_IRQ(&ciab_base);
}
/* ==================== next file ==================== */
/*
* Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
*/
#ifndef ZSTD_DOUBLE_FAST_H
#define ZSTD_DOUBLE_FAST_H
#include "../common/mem.h" /* U32 */
#include "zstd_compress_internal.h" /* ZSTD_CCtx, size_t */
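/*
 * Double-fast block compressors: the plain variant works within the current
 * window, _dictMatchState also searches an attached dictionary's match state,
 * and _extDict handles input that references data outside the current window.
 */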
void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
void const* end, ZSTD_dictTableLoadMethod_e dtlm);
size_t ZSTD_compressBlock_doubleFast(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_doubleFast_dictMatchState(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_doubleFast_extDict(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
#endif /* ZSTD_DOUBLE_FAST_H */
/* ==================== next file ==================== */
/* SPDX-License-Identifier: GPL-2.0-only */
/******************************************************************************
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2018 - 2021 Intel Corporation
*
* Portions of this file are derived from the ipw3945 project.
*****************************************************************************/
#ifndef __iwl_debug_h__
#define __iwl_debug_h__
#include "iwl-modparams.h"
static inline bool iwl_have_debug_level(u32 level)
{
#ifdef CONFIG_IWLWIFI_DEBUG
return iwlwifi_mod_params.debug_level & level;
#else
return false;
#endif
}
enum iwl_err_mode {
IWL_ERR_MODE_REGULAR,
IWL_ERR_MODE_RFKILL,
IWL_ERR_MODE_TRACE_ONLY,
IWL_ERR_MODE_RATELIMIT,
};
struct device;
void __iwl_err(struct device *dev, enum iwl_err_mode mode, const char *fmt, ...)
__printf(3, 4);
void __iwl_warn(struct device *dev, const char *fmt, ...) __printf(2, 3);
void __iwl_info(struct device *dev, const char *fmt, ...) __printf(2, 3);
void __iwl_crit(struct device *dev, const char *fmt, ...) __printf(2, 3);
/* not all compilers can evaluate strlen() at compile time, so use sizeof() */
#define CHECK_FOR_NEWLINE(f) BUILD_BUG_ON(f[sizeof(f) - 2] != '\n')
/* No matter what m is (priv, bus, trans), this will work */
#define __IWL_ERR_DEV(d, mode, f, a...) \
do { \
CHECK_FOR_NEWLINE(f); \
__iwl_err((d), mode, f, ## a); \
} while (0)
#define IWL_ERR_DEV(d, f, a...) \
__IWL_ERR_DEV(d, IWL_ERR_MODE_REGULAR, f, ## a)
#define IWL_ERR(m, f, a...) \
IWL_ERR_DEV((m)->dev, f, ## a)
#define IWL_ERR_LIMIT(m, f, a...) \
__IWL_ERR_DEV((m)->dev, IWL_ERR_MODE_RATELIMIT, f, ## a)
#define IWL_WARN(m, f, a...) \
do { \
CHECK_FOR_NEWLINE(f); \
__iwl_warn((m)->dev, f, ## a); \
} while (0)
#define IWL_INFO(m, f, a...) \
do { \
CHECK_FOR_NEWLINE(f); \
__iwl_info((m)->dev, f, ## a); \
} while (0)
#define IWL_CRIT(m, f, a...) \
do { \
CHECK_FOR_NEWLINE(f); \
__iwl_crit((m)->dev, f, ## a); \
} while (0)
#if defined(CONFIG_IWLWIFI_DEBUG) || defined(CONFIG_IWLWIFI_DEVICE_TRACING)
void __iwl_dbg(struct device *dev,
u32 level, bool limit, const char *function,
const char *fmt, ...) __printf(5, 6);
#else
__printf(5, 6) static inline void
__iwl_dbg(struct device *dev,
u32 level, bool limit, const char *function,
const char *fmt, ...)
{}
#endif
#define iwl_print_hex_error(m, p, len) \
do { \
print_hex_dump(KERN_ERR, "iwl data: ", \
DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
} while (0)
#define __IWL_DEBUG_DEV(dev, level, limit, fmt, args...) \
do { \
CHECK_FOR_NEWLINE(fmt); \
__iwl_dbg(dev, level, limit, __func__, fmt, ##args); \
} while (0)
#define IWL_DEBUG(m, level, fmt, args...) \
__IWL_DEBUG_DEV((m)->dev, level, false, fmt, ##args)
#define IWL_DEBUG_DEV(dev, level, fmt, args...) \
__IWL_DEBUG_DEV(dev, level, false, fmt, ##args)
#define IWL_DEBUG_LIMIT(m, level, fmt, args...) \
__IWL_DEBUG_DEV((m)->dev, level, true, fmt, ##args)
#ifdef CONFIG_IWLWIFI_DEBUG
#define iwl_print_hex_dump(m, level, p, len) \
do { \
if (iwl_have_debug_level(level)) \
print_hex_dump(KERN_DEBUG, "iwl data: ", \
DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
} while (0)
#else
#define iwl_print_hex_dump(m, level, p, len)
#endif /* CONFIG_IWLWIFI_DEBUG */
/*
* To use the debug system:
*
* If you are defining a new debug classification, simply add it to the #define
* list here in the form of
*
* #define IWL_DL_xxxx VALUE
*
* where xxxx should be the name of the classification (for example, WEP).
*
* You then need to either add a IWL_xxxx_DEBUG() macro definition for your
* classification, or use IWL_DEBUG(IWL_DL_xxxx, ...) whenever you want
* to send output to that classification.
*
* The active debug levels can be accessed via files
*
* /sys/module/iwlwifi/parameters/debug
* when CONFIG_IWLWIFI_DEBUG=y.
*
* /sys/kernel/debug/phy0/iwlwifi/debug/debug_level
* when CONFIG_IWLWIFI_DEBUGFS=y.
*
*/
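/*
 * Illustrative only: a hypothetical new classification would claim an unused
 * bit and wrap IWL_DEBUG(), e.g.:
 *
 *	#define IWL_DL_FOO		0x00080000
 *	#define IWL_DEBUG_FOO(p, f, a...)	IWL_DEBUG(p, IWL_DL_FOO, f, ## a)
 */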
/* 0x0000000F - 0x00000001 */
#define IWL_DL_INFO 0x00000001
#define IWL_DL_MAC80211 0x00000002
#define IWL_DL_HCMD 0x00000004
#define IWL_DL_TDLS 0x00000008
/* 0x000000F0 - 0x00000010 */
#define IWL_DL_QUOTA 0x00000010
#define IWL_DL_TE 0x00000020
#define IWL_DL_EEPROM 0x00000040
#define IWL_DL_RADIO 0x00000080
/* 0x00000F00 - 0x00000100 */
#define IWL_DL_POWER 0x00000100
#define IWL_DL_TEMP 0x00000200
#define IWL_DL_WOWLAN 0x00000400
#define IWL_DL_SCAN 0x00000800
/* 0x0000F000 - 0x00001000 */
#define IWL_DL_ASSOC 0x00001000
#define IWL_DL_DROP 0x00002000
#define IWL_DL_LAR 0x00004000
#define IWL_DL_COEX 0x00008000
/* 0x000F0000 - 0x00010000 */
#define IWL_DL_FW 0x00010000
#define IWL_DL_RF_KILL 0x00020000
#define IWL_DL_TPT 0x00040000
/* 0x00F00000 - 0x00100000 */
#define IWL_DL_RATE 0x00100000
#define IWL_DL_CALIB 0x00200000
#define IWL_DL_WEP 0x00400000
#define IWL_DL_TX 0x00800000
/* 0x0F000000 - 0x01000000 */
#define IWL_DL_RX 0x01000000
#define IWL_DL_ISR 0x02000000
#define IWL_DL_HT 0x04000000
#define IWL_DL_EXTERNAL 0x08000000
/* 0xF0000000 - 0x10000000 */
#define IWL_DL_11H 0x10000000
#define IWL_DL_STATS 0x20000000
#define IWL_DL_TX_REPLY 0x40000000
#define IWL_DL_TX_QUEUES 0x80000000
#define IWL_DEBUG_INFO(p, f, a...) IWL_DEBUG(p, IWL_DL_INFO, f, ## a)
#define IWL_DEBUG_TDLS(p, f, a...) IWL_DEBUG(p, IWL_DL_TDLS, f, ## a)
#define IWL_DEBUG_MAC80211(p, f, a...) IWL_DEBUG(p, IWL_DL_MAC80211, f, ## a)
#define IWL_DEBUG_EXTERNAL(p, f, a...) IWL_DEBUG(p, IWL_DL_EXTERNAL, f, ## a)
#define IWL_DEBUG_TEMP(p, f, a...) IWL_DEBUG(p, IWL_DL_TEMP, f, ## a)
#define IWL_DEBUG_SCAN(p, f, a...) IWL_DEBUG(p, IWL_DL_SCAN, f, ## a)
#define IWL_DEBUG_RX(p, f, a...) IWL_DEBUG(p, IWL_DL_RX, f, ## a)
#define IWL_DEBUG_TX(p, f, a...) IWL_DEBUG(p, IWL_DL_TX, f, ## a)
#define IWL_DEBUG_ISR(p, f, a...) IWL_DEBUG(p, IWL_DL_ISR, f, ## a)
#define IWL_DEBUG_WEP(p, f, a...) IWL_DEBUG(p, IWL_DL_WEP, f, ## a)
#define IWL_DEBUG_HC(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD, f, ## a)
#define IWL_DEBUG_QUOTA(p, f, a...) IWL_DEBUG(p, IWL_DL_QUOTA, f, ## a)
#define IWL_DEBUG_TE(p, f, a...) IWL_DEBUG(p, IWL_DL_TE, f, ## a)
#define IWL_DEBUG_EEPROM(d, f, a...) IWL_DEBUG_DEV(d, IWL_DL_EEPROM, f, ## a)
#define IWL_DEBUG_CALIB(p, f, a...) IWL_DEBUG(p, IWL_DL_CALIB, f, ## a)
#define IWL_DEBUG_FW(p, f, a...) IWL_DEBUG(p, IWL_DL_FW, f, ## a)
#define IWL_DEBUG_RF_KILL(p, f, a...) IWL_DEBUG(p, IWL_DL_RF_KILL, f, ## a)
#define IWL_DEBUG_DROP(p, f, a...) IWL_DEBUG(p, IWL_DL_DROP, f, ## a)
#define IWL_DEBUG_DROP_LIMIT(p, f, a...) \
IWL_DEBUG_LIMIT(p, IWL_DL_DROP, f, ## a)
#define IWL_DEBUG_COEX(p, f, a...) IWL_DEBUG(p, IWL_DL_COEX, f, ## a)
#define IWL_DEBUG_RATE(p, f, a...) IWL_DEBUG(p, IWL_DL_RATE, f, ## a)
#define IWL_DEBUG_RATE_LIMIT(p, f, a...) \
IWL_DEBUG_LIMIT(p, IWL_DL_RATE, f, ## a)
#define IWL_DEBUG_ASSOC(p, f, a...) \
IWL_DEBUG(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a)
#define IWL_DEBUG_ASSOC_LIMIT(p, f, a...) \
IWL_DEBUG_LIMIT(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a)
#define IWL_DEBUG_HT(p, f, a...) IWL_DEBUG(p, IWL_DL_HT, f, ## a)
#define IWL_DEBUG_STATS(p, f, a...) IWL_DEBUG(p, IWL_DL_STATS, f, ## a)
#define IWL_DEBUG_STATS_LIMIT(p, f, a...) \
IWL_DEBUG_LIMIT(p, IWL_DL_STATS, f, ## a)
#define IWL_DEBUG_TX_REPLY(p, f, a...) IWL_DEBUG(p, IWL_DL_TX_REPLY, f, ## a)
#define IWL_DEBUG_TX_QUEUES(p, f, a...) IWL_DEBUG(p, IWL_DL_TX_QUEUES, f, ## a)
#define IWL_DEBUG_RADIO(p, f, a...) IWL_DEBUG(p, IWL_DL_RADIO, f, ## a)
#define IWL_DEBUG_DEV_RADIO(p, f, a...) IWL_DEBUG_DEV(p, IWL_DL_RADIO, f, ## a)
#define IWL_DEBUG_POWER(p, f, a...) IWL_DEBUG(p, IWL_DL_POWER, f, ## a)
#define IWL_DEBUG_11H(p, f, a...) IWL_DEBUG(p, IWL_DL_11H, f, ## a)
#define IWL_DEBUG_TPT(p, f, a...) IWL_DEBUG(p, IWL_DL_TPT, f, ## a)
#define IWL_DEBUG_WOWLAN(p, f, a...) IWL_DEBUG(p, IWL_DL_WOWLAN, f, ## a)
#define IWL_DEBUG_LAR(p, f, a...) IWL_DEBUG(p, IWL_DL_LAR, f, ## a)
#define IWL_DEBUG_FW_INFO(p, f, a...) \
IWL_DEBUG(p, IWL_DL_INFO | IWL_DL_FW, f, ## a)
#endif
/* ==================== next file ==================== */
// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/dts-v1/;
#include "rk3588s-nanopi-r6.dtsi"
/ {
model = "FriendlyElec NanoPi R6S";
compatible = "friendlyarm,nanopi-r6s", "rockchip,rk3588s";
};
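/* Override the lan2 LED label defined in rk3588s-nanopi-r6.dtsi */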
&lan2_led {
label = "lan2_led";
};
/* ==================== next file ==================== */
// SPDX-License-Identifier: GPL-2.0
/*
* R-Car Gen4 SYSC Power management support
*
* Copyright (C) 2021 Renesas Electronics Corp.
*/
#include <linux/bits.h>
#include <linux/clk/renesas.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/of_address.h>
#include <linux/pm_domain.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include "rcar-gen4-sysc.h"
/* SYSC Common */
#define SYSCSR 0x000 /* SYSC Status Register */
#define SYSCPONSR(x) (0x800 + ((x) * 0x4)) /* Power-ON Status Register 0 */
#define SYSCPOFFSR(x) (0x808 + ((x) * 0x4)) /* Power-OFF Status Register */
#define SYSCISCR(x) (0x810 + ((x) * 0x4)) /* Interrupt Status/Clear Register */
#define SYSCIER(x) (0x820 + ((x) * 0x4)) /* Interrupt Enable Register */
#define SYSCIMR(x) (0x830 + ((x) * 0x4)) /* Interrupt Mask Register */
/* Power Domain Registers */
#define PDRSR(n) (0x1000 + ((n) * 0x40))
#define PDRONCR(n) (0x1004 + ((n) * 0x40))
#define PDROFFCR(n) (0x1008 + ((n) * 0x40))
#define PDRESR(n) (0x100C + ((n) * 0x40))
/* PWRON/PWROFF */
#define PWRON_PWROFF BIT(0) /* Power-ON/OFF request */
/* PDRESR */
#define PDRESR_ERR BIT(0)
/* PDRSR */
#define PDRSR_OFF BIT(0) /* Power-OFF state */
#define PDRSR_ON BIT(4) /* Power-ON state */
#define PDRSR_OFF_STATE BIT(8) /* Processing Power-OFF sequence */
#define PDRSR_ON_STATE BIT(12) /* Processing Power-ON sequence */
#define SYSCSR_BUSY GENMASK(1, 0) /* All bits set means not busy */
#define SYSCSR_TIMEOUT 10000
#define SYSCSR_DELAY_US 1
#define PDRESR_RETRIES 10000
#define PDRESR_DELAY_US 1
#define SYSCISCR_TIMEOUT 10000
#define SYSCISCR_DELAY_US 1
#define RCAR_GEN4_PD_ALWAYS_ON 64
#define NUM_DOMAINS_EACH_REG BITS_PER_TYPE(u32)
static void __iomem *rcar_gen4_sysc_base;
static DEFINE_SPINLOCK(rcar_gen4_sysc_lock); /* SMP CPUs + I/O devices */
static int rcar_gen4_sysc_pwr_on_off(u8 pdr, bool on)
{
unsigned int reg_offs;
u32 val;
int ret;
if (on)
reg_offs = PDRONCR(pdr);
else
reg_offs = PDROFFCR(pdr);
/* Wait until SYSC is ready to accept a power request */
ret = readl_poll_timeout_atomic(rcar_gen4_sysc_base + SYSCSR, val,
(val & SYSCSR_BUSY) == SYSCSR_BUSY,
SYSCSR_DELAY_US, SYSCSR_TIMEOUT);
if (ret < 0)
return -EAGAIN;
/* Submit power shutoff or power resume request */
iowrite32(PWRON_PWROFF, rcar_gen4_sysc_base + reg_offs);
return 0;
}
static int clear_irq_flags(unsigned int reg_idx, unsigned int isr_mask)
{
u32 val;
int ret;
iowrite32(isr_mask, rcar_gen4_sysc_base + SYSCISCR(reg_idx));
ret = readl_poll_timeout_atomic(rcar_gen4_sysc_base + SYSCISCR(reg_idx),
val, !(val & isr_mask),
SYSCISCR_DELAY_US, SYSCISCR_TIMEOUT);
if (ret < 0) {
		pr_err("%s: cannot clear IRQ flags in SYSCISCR\n", __func__);
return -EIO;
}
return 0;
}
static int rcar_gen4_sysc_power(u8 pdr, bool on)
{
unsigned int isr_mask;
unsigned int reg_idx, bit_idx;
unsigned int status;
unsigned long flags;
int ret = 0;
u32 val;
int k;
spin_lock_irqsave(&rcar_gen4_sysc_lock, flags);
reg_idx = pdr / NUM_DOMAINS_EACH_REG;
bit_idx = pdr % NUM_DOMAINS_EACH_REG;
isr_mask = BIT(bit_idx);
/*
* The interrupt source needs to be enabled, but masked, to prevent the
* CPU from receiving it.
*/
iowrite32(ioread32(rcar_gen4_sysc_base + SYSCIER(reg_idx)) | isr_mask,
rcar_gen4_sysc_base + SYSCIER(reg_idx));
iowrite32(ioread32(rcar_gen4_sysc_base + SYSCIMR(reg_idx)) | isr_mask,
rcar_gen4_sysc_base + SYSCIMR(reg_idx));
ret = clear_irq_flags(reg_idx, isr_mask);
if (ret)
goto out;
/* Submit power shutoff or resume request until it was accepted */
for (k = 0; k < PDRESR_RETRIES; k++) {
ret = rcar_gen4_sysc_pwr_on_off(pdr, on);
if (ret)
goto out;
status = ioread32(rcar_gen4_sysc_base + PDRESR(pdr));
if (!(status & PDRESR_ERR))
break;
udelay(PDRESR_DELAY_US);
}
if (k == PDRESR_RETRIES) {
ret = -EIO;
goto out;
}
	/* Wait until the power shutoff or resume request has completed */
ret = readl_poll_timeout_atomic(rcar_gen4_sysc_base + SYSCISCR(reg_idx),
val, (val & isr_mask),
SYSCISCR_DELAY_US, SYSCISCR_TIMEOUT);
if (ret < 0) {
ret = -EIO;
goto out;
}
/* Clear interrupt flags */
ret = clear_irq_flags(reg_idx, isr_mask);
if (ret)
goto out;
out:
spin_unlock_irqrestore(&rcar_gen4_sysc_lock, flags);
pr_debug("sysc power %s domain %d: %08x -> %d\n", on ? "on" : "off",
pdr, ioread32(rcar_gen4_sysc_base + SYSCISCR(reg_idx)), ret);
return ret;
}
static bool rcar_gen4_sysc_power_is_off(u8 pdr)
{
unsigned int st;
st = ioread32(rcar_gen4_sysc_base + PDRSR(pdr));
if (st & PDRSR_OFF)
return true;
return false;
}
struct rcar_gen4_sysc_pd {
struct generic_pm_domain genpd;
u8 pdr;
unsigned int flags;
char name[];
};
static inline struct rcar_gen4_sysc_pd *to_rcar_gen4_pd(struct generic_pm_domain *d)
{
return container_of(d, struct rcar_gen4_sysc_pd, genpd);
}
static int rcar_gen4_sysc_pd_power_off(struct generic_pm_domain *genpd)
{
struct rcar_gen4_sysc_pd *pd = to_rcar_gen4_pd(genpd);
pr_debug("%s: %s\n", __func__, genpd->name);
return rcar_gen4_sysc_power(pd->pdr, false);
}
static int rcar_gen4_sysc_pd_power_on(struct generic_pm_domain *genpd)
{
struct rcar_gen4_sysc_pd *pd = to_rcar_gen4_pd(genpd);
pr_debug("%s: %s\n", __func__, genpd->name);
return rcar_gen4_sysc_power(pd->pdr, true);
}
static int __init rcar_gen4_sysc_pd_setup(struct rcar_gen4_sysc_pd *pd)
{
struct generic_pm_domain *genpd = &pd->genpd;
const char *name = pd->genpd.name;
int error;
if (pd->flags & PD_CPU) {
/*
* This domain contains a CPU core and therefore it should
* only be turned off if the CPU is not in use.
*/
pr_debug("PM domain %s contains %s\n", name, "CPU");
genpd->flags |= GENPD_FLAG_ALWAYS_ON;
} else if (pd->flags & PD_SCU) {
/*
* This domain contains an SCU and cache-controller, and
* therefore it should only be turned off if the CPU cores are
* not in use.
*/
pr_debug("PM domain %s contains %s\n", name, "SCU");
genpd->flags |= GENPD_FLAG_ALWAYS_ON;
} else if (pd->flags & PD_NO_CR) {
/*
* This domain cannot be turned off.
*/
genpd->flags |= GENPD_FLAG_ALWAYS_ON;
}
if (!(pd->flags & (PD_CPU | PD_SCU))) {
/* Enable Clock Domain for I/O devices */
genpd->flags |= GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
genpd->attach_dev = cpg_mssr_attach_dev;
genpd->detach_dev = cpg_mssr_detach_dev;
}
genpd->power_off = rcar_gen4_sysc_pd_power_off;
genpd->power_on = rcar_gen4_sysc_pd_power_on;
if (pd->flags & (PD_CPU | PD_NO_CR)) {
/* Skip CPUs (handled by SMP code) and areas without control */
pr_debug("%s: Not touching %s\n", __func__, genpd->name);
goto finalize;
}
if (!rcar_gen4_sysc_power_is_off(pd->pdr)) {
pr_debug("%s: %s is already powered\n", __func__, genpd->name);
goto finalize;
}
rcar_gen4_sysc_power(pd->pdr, true);
finalize:
error = pm_genpd_init(genpd, &simple_qos_governor, false);
if (error)
pr_err("Failed to init PM domain %s: %d\n", name, error);
return error;
}
static const struct of_device_id rcar_gen4_sysc_matches[] __initconst = {
#ifdef CONFIG_SYSC_R8A779A0
{ .compatible = "renesas,r8a779a0-sysc", .data = &r8a779a0_sysc_info },
#endif
#ifdef CONFIG_SYSC_R8A779F0
{ .compatible = "renesas,r8a779f0-sysc", .data = &r8a779f0_sysc_info },
#endif
#ifdef CONFIG_SYSC_R8A779G0
{ .compatible = "renesas,r8a779g0-sysc", .data = &r8a779g0_sysc_info },
#endif
#ifdef CONFIG_SYSC_R8A779H0
{ .compatible = "renesas,r8a779h0-sysc", .data = &r8a779h0_sysc_info },
#endif
{ /* sentinel */ }
};
struct rcar_gen4_pm_domains {
struct genpd_onecell_data onecell_data;
struct generic_pm_domain *domains[RCAR_GEN4_PD_ALWAYS_ON + 1];
};
static struct genpd_onecell_data *rcar_gen4_sysc_onecell_data;
static int __init rcar_gen4_sysc_pd_init(void)
{
const struct rcar_gen4_sysc_info *info;
const struct of_device_id *match;
struct rcar_gen4_pm_domains *domains;
struct device_node *np;
void __iomem *base;
unsigned int i;
int error;
np = of_find_matching_node_and_match(NULL, rcar_gen4_sysc_matches, &match);
if (!np)
return -ENODEV;
info = match->data;
base = of_iomap(np, 0);
if (!base) {
pr_warn("%pOF: Cannot map regs\n", np);
error = -ENOMEM;
goto out_put;
}
rcar_gen4_sysc_base = base;
domains = kzalloc(sizeof(*domains), GFP_KERNEL);
if (!domains) {
error = -ENOMEM;
goto out_put;
}
domains->onecell_data.domains = domains->domains;
domains->onecell_data.num_domains = ARRAY_SIZE(domains->domains);
rcar_gen4_sysc_onecell_data = &domains->onecell_data;
for (i = 0; i < info->num_areas; i++) {
const struct rcar_gen4_sysc_area *area = &info->areas[i];
struct rcar_gen4_sysc_pd *pd;
size_t n;
if (!area->name) {
/* Skip NULLified area */
continue;
}
n = strlen(area->name) + 1;
pd = kzalloc(sizeof(*pd) + n, GFP_KERNEL);
if (!pd) {
error = -ENOMEM;
goto out_put;
}
memcpy(pd->name, area->name, n);
pd->genpd.name = pd->name;
pd->pdr = area->pdr;
pd->flags = area->flags;
error = rcar_gen4_sysc_pd_setup(pd);
if (error)
goto out_put;
domains->domains[area->pdr] = &pd->genpd;
if (area->parent < 0)
continue;
error = pm_genpd_add_subdomain(domains->domains[area->parent],
&pd->genpd);
if (error) {
pr_warn("Failed to add PM subdomain %s to parent %u\n",
area->name, area->parent);
goto out_put;
}
}
error = of_genpd_add_provider_onecell(np, &domains->onecell_data);
out_put:
of_node_put(np);
return error;
}
early_initcall(rcar_gen4_sysc_pd_init);
/* ==================== next file ==================== */
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#include <linux/kernel.h>
#include <linux/module.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/sm3.h>
#include <crypto/internal/hash.h>
#include "cc_driver.h"
#include "cc_request_mgr.h"
#include "cc_buffer_mgr.h"
#include "cc_hash.h"
#include "cc_sram_mgr.h"
#define CC_MAX_HASH_SEQ_LEN 12
#define CC_MAX_OPAD_KEYS_SIZE CC_MAX_HASH_BLCK_SIZE
#define CC_SM3_HASH_LEN_SIZE 8
struct cc_hash_handle {
	u32 digest_len_sram_addr; /* const value in SRAM */
u32 larval_digest_sram_addr; /* const value in SRAM */
struct list_head hash_list;
};
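/*
 * Initial hashed-length values used for HMAC flows: one compression block
 * (64 bytes here, or 128 for the SHA-384/512 variant below) is already
 * consumed by the ipad.
 */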
static const u32 cc_digest_len_init[] = {
0x00000040, 0x00000000, 0x00000000, 0x00000000 };
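/* MD5 initial values match SHA-1's first four words, stored in reverse order for the HW */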
static const u32 cc_md5_init[] = {
SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
static const u32 cc_sha1_init[] = {
SHA1_H4, SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
static const u32 cc_sha224_init[] = {
SHA224_H7, SHA224_H6, SHA224_H5, SHA224_H4,
SHA224_H3, SHA224_H2, SHA224_H1, SHA224_H0 };
static const u32 cc_sha256_init[] = {
SHA256_H7, SHA256_H6, SHA256_H5, SHA256_H4,
SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 };
static const u32 cc_digest_len_sha512_init[] = {
0x00000080, 0x00000000, 0x00000000, 0x00000000 };
/*
* Due to the way the HW works, every double word in the SHA384 and SHA512
* larval hashes must be stored in hi/lo order
*/
#define hilo(x) upper_32_bits(x), lower_32_bits(x)
static const u32 cc_sha384_init[] = {
hilo(SHA384_H7), hilo(SHA384_H6), hilo(SHA384_H5), hilo(SHA384_H4),
hilo(SHA384_H3), hilo(SHA384_H2), hilo(SHA384_H1), hilo(SHA384_H0) };
static const u32 cc_sha512_init[] = {
hilo(SHA512_H7), hilo(SHA512_H6), hilo(SHA512_H5), hilo(SHA512_H4),
hilo(SHA512_H3), hilo(SHA512_H2), hilo(SHA512_H1), hilo(SHA512_H0) };
static const u32 cc_sm3_init[] = {
SM3_IVH, SM3_IVG, SM3_IVF, SM3_IVE,
SM3_IVD, SM3_IVC, SM3_IVB, SM3_IVA };
static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[],
unsigned int *seq_size);
static void cc_setup_cmac(struct ahash_request *areq, struct cc_hw_desc desc[],
unsigned int *seq_size);
static const void *cc_larval_digest(struct device *dev, u32 mode);
struct cc_hash_alg {
struct list_head entry;
int hash_mode;
int hw_mode;
int inter_digestsize;
struct cc_drvdata *drvdata;
struct ahash_alg ahash_alg;
};
struct hash_key_req_ctx {
u32 keylen;
dma_addr_t key_dma_addr;
u8 *key;
};
/* hash per-session context */
struct cc_hash_ctx {
struct cc_drvdata *drvdata;
	/*
	 * Holds the original digest: the digest after "setkey" for HMAC,
	 * or the initial digest for a plain hash.
	 */
u8 digest_buff[CC_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned;
u8 opad_tmp_keys_buff[CC_MAX_OPAD_KEYS_SIZE] ____cacheline_aligned;
dma_addr_t opad_tmp_keys_dma_addr ____cacheline_aligned;
dma_addr_t digest_buff_dma_addr;
	/* used for HMAC with a key larger than the mode's block size */
struct hash_key_req_ctx key_params;
int hash_mode;
int hw_mode;
int inter_digestsize;
unsigned int hash_len;
struct completion setkey_comp;
bool is_hmac;
};
static void cc_set_desc(struct ahash_req_ctx *areq_ctx, struct cc_hash_ctx *ctx,
unsigned int flow_mode, struct cc_hw_desc desc[],
bool is_not_last_data, unsigned int *seq_size);
static void cc_set_endianity(u32 mode, struct cc_hw_desc *desc)
{
if (mode == DRV_HASH_MD5 || mode == DRV_HASH_SHA384 ||
mode == DRV_HASH_SHA512) {
set_bytes_swap(desc, 1);
} else {
set_cipher_config0(desc, HASH_DIGEST_RESULT_LITTLE_ENDIAN);
}
}
static int cc_map_result(struct device *dev, struct ahash_req_ctx *state,
unsigned int digestsize)
{
state->digest_result_dma_addr =
dma_map_single(dev, state->digest_result_buff,
digestsize, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, state->digest_result_dma_addr)) {
dev_err(dev, "Mapping digest result buffer %u B for DMA failed\n",
digestsize);
return -ENOMEM;
}
dev_dbg(dev, "Mapped digest result buffer %u B at va=%pK to dma=%pad\n",
digestsize, state->digest_result_buff,
&state->digest_result_dma_addr);
return 0;
}
static void cc_init_req(struct device *dev, struct ahash_req_ctx *state,
struct cc_hash_ctx *ctx)
{
bool is_hmac = ctx->is_hmac;
memset(state, 0, sizeof(*state));
if (is_hmac) {
if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC &&
ctx->hw_mode != DRV_CIPHER_CMAC) {
dma_sync_single_for_cpu(dev, ctx->digest_buff_dma_addr,
ctx->inter_digestsize,
DMA_BIDIRECTIONAL);
memcpy(state->digest_buff, ctx->digest_buff,
ctx->inter_digestsize);
if (ctx->hash_mode == DRV_HASH_SHA512 ||
ctx->hash_mode == DRV_HASH_SHA384)
memcpy(state->digest_bytes_len,
cc_digest_len_sha512_init,
ctx->hash_len);
else
memcpy(state->digest_bytes_len,
cc_digest_len_init,
ctx->hash_len);
}
if (ctx->hash_mode != DRV_HASH_NULL) {
dma_sync_single_for_cpu(dev,
ctx->opad_tmp_keys_dma_addr,
ctx->inter_digestsize,
DMA_BIDIRECTIONAL);
memcpy(state->opad_digest_buff,
ctx->opad_tmp_keys_buff, ctx->inter_digestsize);
}
} else { /*hash*/
/* Copy the initial digests if hash flow. */
const void *larval = cc_larval_digest(dev, ctx->hash_mode);
memcpy(state->digest_buff, larval, ctx->inter_digestsize);
}
}
static int cc_map_req(struct device *dev, struct ahash_req_ctx *state,
struct cc_hash_ctx *ctx)
{
bool is_hmac = ctx->is_hmac;
state->digest_buff_dma_addr =
dma_map_single(dev, state->digest_buff,
ctx->inter_digestsize, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, state->digest_buff_dma_addr)) {
dev_err(dev, "Mapping digest len %d B at va=%pK for DMA failed\n",
ctx->inter_digestsize, state->digest_buff);
return -EINVAL;
}
dev_dbg(dev, "Mapped digest %d B at va=%pK to dma=%pad\n",
ctx->inter_digestsize, state->digest_buff,
&state->digest_buff_dma_addr);
if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC) {
state->digest_bytes_len_dma_addr =
dma_map_single(dev, state->digest_bytes_len,
HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, state->digest_bytes_len_dma_addr)) {
dev_err(dev, "Mapping digest len %u B at va=%pK for DMA failed\n",
HASH_MAX_LEN_SIZE, state->digest_bytes_len);
goto unmap_digest_buf;
}
dev_dbg(dev, "Mapped digest len %u B at va=%pK to dma=%pad\n",
HASH_MAX_LEN_SIZE, state->digest_bytes_len,
&state->digest_bytes_len_dma_addr);
}
if (is_hmac && ctx->hash_mode != DRV_HASH_NULL) {
state->opad_digest_dma_addr =
dma_map_single(dev, state->opad_digest_buff,
ctx->inter_digestsize,
DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, state->opad_digest_dma_addr)) {
dev_err(dev, "Mapping opad digest %d B at va=%pK for DMA failed\n",
ctx->inter_digestsize,
state->opad_digest_buff);
goto unmap_digest_len;
}
dev_dbg(dev, "Mapped opad digest %d B at va=%pK to dma=%pad\n",
ctx->inter_digestsize, state->opad_digest_buff,
&state->opad_digest_dma_addr);
}
return 0;
unmap_digest_len:
if (state->digest_bytes_len_dma_addr) {
dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
state->digest_bytes_len_dma_addr = 0;
}
unmap_digest_buf:
if (state->digest_buff_dma_addr) {
dma_unmap_single(dev, state->digest_buff_dma_addr,
ctx->inter_digestsize, DMA_BIDIRECTIONAL);
state->digest_buff_dma_addr = 0;
}
return -EINVAL;
}
static void cc_unmap_req(struct device *dev, struct ahash_req_ctx *state,
struct cc_hash_ctx *ctx)
{
if (state->digest_buff_dma_addr) {
dma_unmap_single(dev, state->digest_buff_dma_addr,
ctx->inter_digestsize, DMA_BIDIRECTIONAL);
dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
&state->digest_buff_dma_addr);
state->digest_buff_dma_addr = 0;
}
if (state->digest_bytes_len_dma_addr) {
dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
dev_dbg(dev, "Unmapped digest-bytes-len buffer: digest_bytes_len_dma_addr=%pad\n",
&state->digest_bytes_len_dma_addr);
state->digest_bytes_len_dma_addr = 0;
}
if (state->opad_digest_dma_addr) {
dma_unmap_single(dev, state->opad_digest_dma_addr,
ctx->inter_digestsize, DMA_BIDIRECTIONAL);
dev_dbg(dev, "Unmapped opad-digest: opad_digest_dma_addr=%pad\n",
&state->opad_digest_dma_addr);
state->opad_digest_dma_addr = 0;
}
}
static void cc_unmap_result(struct device *dev, struct ahash_req_ctx *state,
unsigned int digestsize, u8 *result)
{
if (state->digest_result_dma_addr) {
dma_unmap_single(dev, state->digest_result_dma_addr, digestsize,
DMA_BIDIRECTIONAL);
dev_dbg(dev, "unmpa digest result buffer va (%pK) pa (%pad) len %u\n",
state->digest_result_buff,
&state->digest_result_dma_addr, digestsize);
memcpy(result, state->digest_result_buff, digestsize);
}
state->digest_result_dma_addr = 0;
}
static void cc_update_complete(struct device *dev, void *cc_req, int err)
{
struct ahash_request *req = (struct ahash_request *)cc_req;
struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
dev_dbg(dev, "req=%pK\n", req);
if (err != -EINPROGRESS) {
/* Not a BACKLOG notification */
cc_unmap_hash_request(dev, state, req->src, false);
cc_unmap_req(dev, state, ctx);
}
ahash_request_complete(req, err);
}
static void cc_digest_complete(struct device *dev, void *cc_req, int err)
{
struct ahash_request *req = (struct ahash_request *)cc_req;
struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
u32 digestsize = crypto_ahash_digestsize(tfm);
dev_dbg(dev, "req=%pK\n", req);
if (err != -EINPROGRESS) {
/* Not a BACKLOG notification */
cc_unmap_hash_request(dev, state, req->src, false);
cc_unmap_result(dev, state, digestsize, req->result);
cc_unmap_req(dev, state, ctx);
}
ahash_request_complete(req, err);
}
static void cc_hash_complete(struct device *dev, void *cc_req, int err)
{
struct ahash_request *req = (struct ahash_request *)cc_req;
struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
u32 digestsize = crypto_ahash_digestsize(tfm);
dev_dbg(dev, "req=%pK\n", req);
if (err != -EINPROGRESS) {
/* Not a BACKLOG notification */
cc_unmap_hash_request(dev, state, req->src, false);
cc_unmap_result(dev, state, digestsize, req->result);
cc_unmap_req(dev, state, ctx);
}
ahash_request_complete(req, err);
}
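/*
 * Queue the descriptor that writes the final digest out of the hash
 * engine into the pre-mapped result DMA buffer.
 */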
static int cc_fin_result(struct cc_hw_desc *desc, struct ahash_request *req,
int idx)
{
struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
u32 digestsize = crypto_ahash_digestsize(tfm);
/* Get final MAC result */
hw_desc_init(&desc[idx]);
set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
NS_BIT, 1);
set_queue_last_ind(ctx->drvdata, &desc[idx]);
set_flow_mode(&desc[idx], S_HASH_to_DOUT);
set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
cc_set_endianity(ctx->hash_mode, &desc[idx]);
idx++;
return idx;
}
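/*
 * Finalize HMAC: write the inner hash result back to the context,
 * reload the opad-XOR key and the fixed digest length, then hash the
 * inner digest to produce H(K ^ opad || H(K ^ ipad || M)).
 */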
static int cc_fin_hmac(struct cc_hw_desc *desc, struct ahash_request *req,
int idx)
{
struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
u32 digestsize = crypto_ahash_digestsize(tfm);
/* store the hash digest result in the context */
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], ctx->hw_mode);
set_dout_dlli(&desc[idx], state->digest_buff_dma_addr, digestsize,
NS_BIT, 0);
set_flow_mode(&desc[idx], S_HASH_to_DOUT);
cc_set_endianity(ctx->hash_mode, &desc[idx]);
set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
idx++;
/* Loading hash opad xor key state */
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], ctx->hw_mode);
set_din_type(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr,
ctx->inter_digestsize, NS_BIT);
set_flow_mode(&desc[idx], S_DIN_to_HASH);
set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
idx++;
/* Load the hash current length */
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], ctx->hw_mode);
set_din_sram(&desc[idx],
cc_digest_len_addr(ctx->drvdata, ctx->hash_mode),
ctx->hash_len);
set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
set_flow_mode(&desc[idx], S_DIN_to_HASH);
set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
idx++;
/* Memory Barrier: wait for IPAD/OPAD axi write to complete */
hw_desc_init(&desc[idx]);
set_din_no_dma(&desc[idx], 0, 0xfffff0);
set_dout_no_dma(&desc[idx], 0, 0, 1);
idx++;
/* Perform HASH update */
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
digestsize, NS_BIT);
set_flow_mode(&desc[idx], DIN_HASH);
idx++;
return idx;
}
static int cc_hash_digest(struct ahash_request *req)
{
struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
u32 digestsize = crypto_ahash_digestsize(tfm);
struct scatterlist *src = req->src;
unsigned int nbytes = req->nbytes;
u8 *result = req->result;
struct device *dev = drvdata_to_dev(ctx->drvdata);
bool is_hmac = ctx->is_hmac;
struct cc_crypto_req cc_req = {};
struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
u32 larval_digest_addr;
int idx = 0;
int rc = 0;
gfp_t flags = cc_gfp_flags(&req->base);
dev_dbg(dev, "===== %s-digest (%d) ====\n", is_hmac ? "hmac" : "hash",
nbytes);
cc_init_req(dev, state, ctx);
if (cc_map_req(dev, state, ctx)) {
dev_err(dev, "map_ahash_source() failed\n");
return -ENOMEM;
}
if (cc_map_result(dev, state, digestsize)) {
dev_err(dev, "map_ahash_digest() failed\n");
cc_unmap_req(dev, state, ctx);
return -ENOMEM;
}
if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1,
flags)) {
dev_err(dev, "map_ahash_request_final() failed\n");
cc_unmap_result(dev, state, digestsize, result);
cc_unmap_req(dev, state, ctx);
return -ENOMEM;
}
/* Setup request structure */
cc_req.user_cb = cc_digest_complete;
cc_req.user_arg = req;
/* If HMAC then load hash IPAD xor key, if HASH then load initial
* digest
*/
hw_desc_init(&desc[idx]);
set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
if (is_hmac) {
set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
ctx->inter_digestsize, NS_BIT);
} else {
larval_digest_addr = cc_larval_digest_addr(ctx->drvdata,
ctx->hash_mode);
set_din_sram(&desc[idx], larval_digest_addr,
ctx->inter_digestsize);
}
set_flow_mode(&desc[idx], S_DIN_to_HASH);
set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
idx++;
/* Load the hash current length */
hw_desc_init(&desc[idx]);
set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
if (is_hmac) {
set_din_type(&desc[idx], DMA_DLLI,
state->digest_bytes_len_dma_addr,
ctx->hash_len, NS_BIT);
} else {
set_din_const(&desc[idx], 0, ctx->hash_len);
if (nbytes)
set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
else
set_cipher_do(&desc[idx], DO_PAD);
}
set_flow_mode(&desc[idx], S_DIN_to_HASH);
set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
idx++;
cc_set_desc(state, ctx, DIN_HASH, desc, false, &idx);
if (is_hmac) {
/* HW last hash block padding (aka. "DO_PAD") */
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], ctx->hw_mode);
set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
ctx->hash_len, NS_BIT, 0);
set_flow_mode(&desc[idx], S_HASH_to_DOUT);
set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
set_cipher_do(&desc[idx], DO_PAD);
idx++;
idx = cc_fin_hmac(desc, req, idx);
}
idx = cc_fin_result(desc, req, idx);
rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
if (rc != -EINPROGRESS && rc != -EBUSY) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
cc_unmap_hash_request(dev, state, src, true);
cc_unmap_result(dev, state, digestsize, result);
cc_unmap_req(dev, state, ctx);
}
return rc;
}
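/*
 * Reload the intermediate digest and the running byte count saved by a
 * previous update, then queue the descriptors that hash the new data.
 */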
static int cc_restore_hash(struct cc_hw_desc *desc, struct cc_hash_ctx *ctx,
struct ahash_req_ctx *state, unsigned int idx)
{
/* Restore hash digest */
hw_desc_init(&desc[idx]);
set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
ctx->inter_digestsize, NS_BIT);
set_flow_mode(&desc[idx], S_DIN_to_HASH);
set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
idx++;
/* Restore hash current length */
hw_desc_init(&desc[idx]);
set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
ctx->hash_len, NS_BIT);
set_flow_mode(&desc[idx], S_DIN_to_HASH);
set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
idx++;
cc_set_desc(state, ctx, DIN_HASH, desc, false, &idx);
return idx;
}
static int cc_hash_update(struct ahash_request *req)
{
struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
struct scatterlist *src = req->src;
unsigned int nbytes = req->nbytes;
struct device *dev = drvdata_to_dev(ctx->drvdata);
struct cc_crypto_req cc_req = {};
struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
u32 idx = 0;
int rc;
gfp_t flags = cc_gfp_flags(&req->base);
dev_dbg(dev, "===== %s-update (%d) ====\n", ctx->is_hmac ?
"hmac" : "hash", nbytes);
if (nbytes == 0) {
/* no real updates required */
return 0;
}
rc = cc_map_hash_request_update(ctx->drvdata, state, src, nbytes,
block_size, flags);
if (rc) {
if (rc == 1) {
dev_dbg(dev, " data size not require HW update %x\n",
nbytes);
/* No hardware updates are required */
return 0;
}
dev_err(dev, "map_ahash_request_update() failed\n");
return -ENOMEM;
}
if (cc_map_req(dev, state, ctx)) {
dev_err(dev, "map_ahash_source() failed\n");
cc_unmap_hash_request(dev, state, src, true);
return -EINVAL;
}
/* Setup request structure */
cc_req.user_cb = cc_update_complete;
cc_req.user_arg = req;
idx = cc_restore_hash(desc, ctx, state, idx);
/* store the hash digest result in context */
hw_desc_init(&desc[idx]);
set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
ctx->inter_digestsize, NS_BIT, 0);
set_flow_mode(&desc[idx], S_HASH_to_DOUT);
set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
idx++;
/* store current hash length in context */
hw_desc_init(&desc[idx]);
set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
ctx->hash_len, NS_BIT, 1);
set_queue_last_ind(ctx->drvdata, &desc[idx]);
set_flow_mode(&desc[idx], S_HASH_to_DOUT);
set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
idx++;
rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
if (rc != -EINPROGRESS && rc != -EBUSY) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
cc_unmap_hash_request(dev, state, src, true);
cc_unmap_req(dev, state, ctx);
}
return rc;
}
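/*
 * Common final/finup flow: restore the saved hash state, let the HW pad
 * the last block, run the HMAC outer hash if needed, and write out the
 * final digest.
 */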
static int cc_do_finup(struct ahash_request *req, bool update)
{
struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
u32 digestsize = crypto_ahash_digestsize(tfm);
struct scatterlist *src = req->src;
unsigned int nbytes = req->nbytes;
u8 *result = req->result;
struct device *dev = drvdata_to_dev(ctx->drvdata);
bool is_hmac = ctx->is_hmac;
struct cc_crypto_req cc_req = {};
struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
unsigned int idx = 0;
int rc;
gfp_t flags = cc_gfp_flags(&req->base);
dev_dbg(dev, "===== %s-%s (%d) ====\n", is_hmac ? "hmac" : "hash",
update ? "finup" : "final", nbytes);
if (cc_map_req(dev, state, ctx)) {
dev_err(dev, "map_ahash_source() failed\n");
return -EINVAL;
}
if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, update,
flags)) {
dev_err(dev, "map_ahash_request_final() failed\n");
cc_unmap_req(dev, state, ctx);
return -ENOMEM;
}
if (cc_map_result(dev, state, digestsize)) {
dev_err(dev, "map_ahash_digest() failed\n");
cc_unmap_hash_request(dev, state, src, true);
cc_unmap_req(dev, state, ctx);
return -ENOMEM;
}
/* Setup request structure */
cc_req.user_cb = cc_hash_complete;
cc_req.user_arg = req;
idx = cc_restore_hash(desc, ctx, state, idx);
/* Pad the hash */
hw_desc_init(&desc[idx]);
set_cipher_do(&desc[idx], DO_PAD);
set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
ctx->hash_len, NS_BIT, 0);
set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
set_flow_mode(&desc[idx], S_HASH_to_DOUT);
idx++;
if (is_hmac)
idx = cc_fin_hmac(desc, req, idx);
idx = cc_fin_result(desc, req, idx);
rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
if (rc != -EINPROGRESS && rc != -EBUSY) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
cc_unmap_hash_request(dev, state, src, true);
cc_unmap_result(dev, state, digestsize, result);
cc_unmap_req(dev, state, ctx);
}
return rc;
}
static int cc_hash_finup(struct ahash_request *req)
{
return cc_do_finup(req, true);
}
static int cc_hash_final(struct ahash_request *req)
{
return cc_do_finup(req, false);
}
static int cc_hash_init(struct ahash_request *req)
{
struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
dev_dbg(dev, "===== init (%d) ====\n", req->nbytes);
cc_init_req(dev, state, ctx);
return 0;
}
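/*
 * HMAC setkey: a key longer than the block size is first hashed; the
 * (zero-padded) key is then XORed with the ipad/opad constants and
 * pre-hashed, so that requests can start directly from the derived
 * intermediate digests.
 */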
static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key,
unsigned int keylen)
{
unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
struct cc_crypto_req cc_req = {};
struct cc_hash_ctx *ctx = NULL;
int blocksize = 0;
int digestsize = 0;
int i, idx = 0, rc = 0;
struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
u32 larval_addr;
struct device *dev;
ctx = crypto_ahash_ctx_dma(ahash);
dev = drvdata_to_dev(ctx->drvdata);
dev_dbg(dev, "start keylen: %d", keylen);
blocksize = crypto_tfm_alg_blocksize(&ahash->base);
digestsize = crypto_ahash_digestsize(ahash);
larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->hash_mode);
/* The keylen value selects the flow: a ZERO-byte key means the plain
 * HASH flow, any NON-ZERO keylen selects the HMAC flow
 */
ctx->key_params.keylen = keylen;
ctx->key_params.key_dma_addr = 0;
ctx->is_hmac = true;
ctx->key_params.key = NULL;
if (keylen) {
ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL);
if (!ctx->key_params.key)
return -ENOMEM;
ctx->key_params.key_dma_addr =
dma_map_single(dev, ctx->key_params.key, keylen,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
ctx->key_params.key, keylen);
kfree_sensitive(ctx->key_params.key);
return -ENOMEM;
}
dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
&ctx->key_params.key_dma_addr, ctx->key_params.keylen);
if (keylen > blocksize) {
/* Load hash initial state */
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], ctx->hw_mode);
set_din_sram(&desc[idx], larval_addr,
ctx->inter_digestsize);
set_flow_mode(&desc[idx], S_DIN_to_HASH);
set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
idx++;
/* Load the hash current length*/
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], ctx->hw_mode);
set_din_const(&desc[idx], 0, ctx->hash_len);
set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
set_flow_mode(&desc[idx], S_DIN_to_HASH);
set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
idx++;
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_DLLI,
ctx->key_params.key_dma_addr, keylen,
NS_BIT);
set_flow_mode(&desc[idx], DIN_HASH);
idx++;
/* Get hashed key */
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], ctx->hw_mode);
set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
digestsize, NS_BIT, 0);
set_flow_mode(&desc[idx], S_HASH_to_DOUT);
set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
cc_set_endianity(ctx->hash_mode, &desc[idx]);
idx++;
hw_desc_init(&desc[idx]);
set_din_const(&desc[idx], 0, (blocksize - digestsize));
set_flow_mode(&desc[idx], BYPASS);
set_dout_dlli(&desc[idx],
(ctx->opad_tmp_keys_dma_addr +
digestsize),
(blocksize - digestsize), NS_BIT, 0);
idx++;
} else {
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_DLLI,
ctx->key_params.key_dma_addr, keylen,
NS_BIT);
set_flow_mode(&desc[idx], BYPASS);
set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
keylen, NS_BIT, 0);
idx++;
if ((blocksize - keylen)) {
hw_desc_init(&desc[idx]);
set_din_const(&desc[idx], 0,
(blocksize - keylen));
set_flow_mode(&desc[idx], BYPASS);
set_dout_dlli(&desc[idx],
(ctx->opad_tmp_keys_dma_addr +
keylen), (blocksize - keylen),
NS_BIT, 0);
idx++;
}
}
} else {
hw_desc_init(&desc[idx]);
set_din_const(&desc[idx], 0, blocksize);
set_flow_mode(&desc[idx], BYPASS);
set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr),
blocksize, NS_BIT, 0);
idx++;
}
rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
if (rc) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
goto out;
}
/* calc derived HMAC key */
for (idx = 0, i = 0; i < 2; i++) {
/* Load hash initial state */
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], ctx->hw_mode);
set_din_sram(&desc[idx], larval_addr, ctx->inter_digestsize);
set_flow_mode(&desc[idx], S_DIN_to_HASH);
set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
idx++;
/* Load the hash current length*/
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], ctx->hw_mode);
set_din_const(&desc[idx], 0, ctx->hash_len);
set_flow_mode(&desc[idx], S_DIN_to_HASH);
set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
idx++;
/* Prepare ipad key */
hw_desc_init(&desc[idx]);
set_xor_val(&desc[idx], hmac_pad_const[i]);
set_cipher_mode(&desc[idx], ctx->hw_mode);
set_flow_mode(&desc[idx], S_DIN_to_HASH);
set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
idx++;
/* Perform HASH update */
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
blocksize, NS_BIT);
set_cipher_mode(&desc[idx], ctx->hw_mode);
set_xor_active(&desc[idx]);
set_flow_mode(&desc[idx], DIN_HASH);
idx++;
/* Get the IPAD/OPAD xor key (Note, IPAD is the initial digest
* of the first HASH "update" state)
*/
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], ctx->hw_mode);
if (i > 0) /* Not first iteration */
set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
ctx->inter_digestsize, NS_BIT, 0);
else /* First iteration */
set_dout_dlli(&desc[idx], ctx->digest_buff_dma_addr,
ctx->inter_digestsize, NS_BIT, 0);
set_flow_mode(&desc[idx], S_HASH_to_DOUT);
set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
idx++;
}
rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
out:
if (ctx->key_params.key_dma_addr) {
dma_unmap_single(dev, ctx->key_params.key_dma_addr,
ctx->key_params.keylen, DMA_TO_DEVICE);
dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
&ctx->key_params.key_dma_addr, ctx->key_params.keylen);
}
kfree_sensitive(ctx->key_params.key);
return rc;
}
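/*
 * Derive the AES-XCBC-MAC subkeys K1..K3 (RFC 3566) by ECB-encrypting
 * the constants 0x01..01, 0x02..02 and 0x03..03 under the user key.
 */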
static int cc_xcbc_setkey(struct crypto_ahash *ahash,
const u8 *key, unsigned int keylen)
{
struct cc_crypto_req cc_req = {};
struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
struct device *dev = drvdata_to_dev(ctx->drvdata);
int rc = 0;
unsigned int idx = 0;
struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
dev_dbg(dev, "===== setkey (%d) ====\n", keylen);
switch (keylen) {
case AES_KEYSIZE_128:
case AES_KEYSIZE_192:
case AES_KEYSIZE_256:
break;
default:
return -EINVAL;
}
ctx->key_params.keylen = keylen;
ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL);
if (!ctx->key_params.key)
return -ENOMEM;
ctx->key_params.key_dma_addr =
dma_map_single(dev, ctx->key_params.key, keylen, DMA_TO_DEVICE);
if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
key, keylen);
kfree_sensitive(ctx->key_params.key);
return -ENOMEM;
}
dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
&ctx->key_params.key_dma_addr, ctx->key_params.keylen);
ctx->is_hmac = true;
/* 1. Load the AES key */
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_DLLI, ctx->key_params.key_dma_addr,
keylen, NS_BIT);
set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
set_key_size_aes(&desc[idx], keylen);
set_flow_mode(&desc[idx], S_DIN_to_AES);
set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
idx++;
hw_desc_init(&desc[idx]);
set_din_const(&desc[idx], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
set_flow_mode(&desc[idx], DIN_AES_DOUT);
set_dout_dlli(&desc[idx],
(ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K1_OFFSET),
CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
idx++;
hw_desc_init(&desc[idx]);
set_din_const(&desc[idx], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
set_flow_mode(&desc[idx], DIN_AES_DOUT);
set_dout_dlli(&desc[idx],
(ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K2_OFFSET),
CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
idx++;
hw_desc_init(&desc[idx]);
set_din_const(&desc[idx], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
set_flow_mode(&desc[idx], DIN_AES_DOUT);
set_dout_dlli(&desc[idx],
(ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K3_OFFSET),
CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
idx++;
rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
dma_unmap_single(dev, ctx->key_params.key_dma_addr,
ctx->key_params.keylen, DMA_TO_DEVICE);
dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
&ctx->key_params.key_dma_addr, ctx->key_params.keylen);
kfree_sensitive(ctx->key_params.key);
return rc;
}
static int cc_cmac_setkey(struct crypto_ahash *ahash,
const u8 *key, unsigned int keylen)
{
struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
struct device *dev = drvdata_to_dev(ctx->drvdata);
dev_dbg(dev, "===== setkey (%d) ====\n", keylen);
ctx->is_hmac = true;
switch (keylen) {
case AES_KEYSIZE_128:
case AES_KEYSIZE_192:
case AES_KEYSIZE_256:
break;
default:
return -EINVAL;
}
ctx->key_params.keylen = keylen;
/* STAT_PHASE_1: Copy key to ctx */
dma_sync_single_for_cpu(dev, ctx->opad_tmp_keys_dma_addr,
keylen, DMA_TO_DEVICE);
memcpy(ctx->opad_tmp_keys_buff, key, keylen);
/* Zero-pad 192-bit keys: the HW treats them as full-size AES keys */
if (keylen == AES_KEYSIZE_192) {
memset(ctx->opad_tmp_keys_buff + AES_KEYSIZE_192, 0,
CC_AES_KEY_SIZE_MAX - AES_KEYSIZE_192);
}
dma_sync_single_for_device(dev, ctx->opad_tmp_keys_dma_addr,
keylen, DMA_TO_DEVICE);
return 0;
}
static void cc_free_ctx(struct cc_hash_ctx *ctx)
{
struct device *dev = drvdata_to_dev(ctx->drvdata);
if (ctx->digest_buff_dma_addr) {
dma_unmap_single(dev, ctx->digest_buff_dma_addr,
sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
&ctx->digest_buff_dma_addr);
ctx->digest_buff_dma_addr = 0;
}
if (ctx->opad_tmp_keys_dma_addr) {
dma_unmap_single(dev, ctx->opad_tmp_keys_dma_addr,
sizeof(ctx->opad_tmp_keys_buff),
DMA_BIDIRECTIONAL);
dev_dbg(dev, "Unmapped opad-digest: opad_tmp_keys_dma_addr=%pad\n",
&ctx->opad_tmp_keys_dma_addr);
ctx->opad_tmp_keys_dma_addr = 0;
}
ctx->key_params.keylen = 0;
}
static int cc_alloc_ctx(struct cc_hash_ctx *ctx)
{
struct device *dev = drvdata_to_dev(ctx->drvdata);
ctx->key_params.keylen = 0;
ctx->digest_buff_dma_addr =
dma_map_single(dev, ctx->digest_buff, sizeof(ctx->digest_buff),
DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, ctx->digest_buff_dma_addr)) {
dev_err(dev, "Mapping digest len %zu B at va=%pK for DMA failed\n",
sizeof(ctx->digest_buff), ctx->digest_buff);
goto fail;
}
dev_dbg(dev, "Mapped digest %zu B at va=%pK to dma=%pad\n",
sizeof(ctx->digest_buff), ctx->digest_buff,
&ctx->digest_buff_dma_addr);
ctx->opad_tmp_keys_dma_addr =
dma_map_single(dev, ctx->opad_tmp_keys_buff,
sizeof(ctx->opad_tmp_keys_buff),
DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, ctx->opad_tmp_keys_dma_addr)) {
dev_err(dev, "Mapping opad digest %zu B at va=%pK for DMA failed\n",
sizeof(ctx->opad_tmp_keys_buff),
ctx->opad_tmp_keys_buff);
goto fail;
}
dev_dbg(dev, "Mapped opad_tmp_keys %zu B at va=%pK to dma=%pad\n",
sizeof(ctx->opad_tmp_keys_buff), ctx->opad_tmp_keys_buff,
&ctx->opad_tmp_keys_dma_addr);
ctx->is_hmac = false;
return 0;
fail:
cc_free_ctx(ctx);
return -ENOMEM;
}
static int cc_get_hash_len(struct crypto_tfm *tfm)
{
struct cc_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm);
if (ctx->hash_mode == DRV_HASH_SM3)
return CC_SM3_HASH_LEN_SIZE;
else
return cc_get_default_hash_len(ctx->drvdata);
}
static int cc_cra_init(struct crypto_tfm *tfm)
{
struct cc_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm);
struct hash_alg_common *hash_alg_common =
container_of(tfm->__crt_alg, struct hash_alg_common, base);
struct ahash_alg *ahash_alg =
container_of(hash_alg_common, struct ahash_alg, halg);
struct cc_hash_alg *cc_alg =
container_of(ahash_alg, struct cc_hash_alg, ahash_alg);
crypto_ahash_set_reqsize_dma(__crypto_ahash_cast(tfm),
sizeof(struct ahash_req_ctx));
ctx->hash_mode = cc_alg->hash_mode;
ctx->hw_mode = cc_alg->hw_mode;
ctx->inter_digestsize = cc_alg->inter_digestsize;
ctx->drvdata = cc_alg->drvdata;
ctx->hash_len = cc_get_hash_len(tfm);
return cc_alloc_ctx(ctx);
}
static void cc_cra_exit(struct crypto_tfm *tfm)
{
struct cc_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
dev_dbg(dev, "cc_cra_exit");
cc_free_ctx(ctx);
}
static int cc_mac_update(struct ahash_request *req)
{
struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
struct cc_crypto_req cc_req = {};
struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
int rc;
u32 idx = 0;
gfp_t flags = cc_gfp_flags(&req->base);
if (req->nbytes == 0) {
/* no real updates required */
return 0;
}
state->xcbc_count++;
rc = cc_map_hash_request_update(ctx->drvdata, state, req->src,
req->nbytes, block_size, flags);
if (rc) {
if (rc == 1) {
dev_dbg(dev, " data size not require HW update %x\n",
req->nbytes);
/* No hardware updates are required */
return 0;
}
dev_err(dev, "map_ahash_request_update() failed\n");
return -ENOMEM;
}
if (cc_map_req(dev, state, ctx)) {
dev_err(dev, "map_ahash_source() failed\n");
cc_unmap_hash_request(dev, state, req->src, true);
return -EINVAL;
}
if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
cc_setup_xcbc(req, desc, &idx);
else
cc_setup_cmac(req, desc, &idx);
cc_set_desc(state, ctx, DIN_AES_DOUT, desc, true, &idx);
/* store the hash digest result in context */
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], ctx->hw_mode);
set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
ctx->inter_digestsize, NS_BIT, 1);
set_queue_last_ind(ctx->drvdata, &desc[idx]);
set_flow_mode(&desc[idx], S_AES_to_DOUT);
set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
idx++;
/* Setup request structure */
cc_req.user_cb = cc_update_complete;
cc_req.user_arg = req;
rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
if (rc != -EINPROGRESS && rc != -EBUSY) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
cc_unmap_hash_request(dev, state, req->src, true);
cc_unmap_req(dev, state, ctx);
}
return rc;
}
static int cc_mac_final(struct ahash_request *req)
{
struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
struct cc_crypto_req cc_req = {};
struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
int idx = 0;
int rc = 0;
u32 key_size, key_len;
u32 digestsize = crypto_ahash_digestsize(tfm);
gfp_t flags = cc_gfp_flags(&req->base);
u32 rem_cnt = *cc_hash_buf_cnt(state);
if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
key_size = CC_AES_128_BIT_KEY_SIZE;
key_len = CC_AES_128_BIT_KEY_SIZE;
} else {
key_size = (ctx->key_params.keylen == AES_KEYSIZE_192) ?
AES_MAX_KEY_SIZE : ctx->key_params.keylen;
key_len = ctx->key_params.keylen;
}
dev_dbg(dev, "===== final xcbc reminder (%d) ====\n", rem_cnt);
if (cc_map_req(dev, state, ctx)) {
dev_err(dev, "map_ahash_source() failed\n");
return -EINVAL;
}
if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
req->nbytes, 0, flags)) {
dev_err(dev, "map_ahash_request_final() failed\n");
cc_unmap_req(dev, state, ctx);
return -ENOMEM;
}
if (cc_map_result(dev, state, digestsize)) {
dev_err(dev, "map_ahash_digest() failed\n");
cc_unmap_hash_request(dev, state, req->src, true);
cc_unmap_req(dev, state, ctx);
return -ENOMEM;
}
/* Setup request structure */
cc_req.user_cb = cc_hash_complete;
cc_req.user_arg = req;
if (state->xcbc_count && rem_cnt == 0) {
/* Load key for ECB decryption */
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_DECRYPT);
set_din_type(&desc[idx], DMA_DLLI,
(ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K1_OFFSET),
key_size, NS_BIT);
set_key_size_aes(&desc[idx], key_len);
set_flow_mode(&desc[idx], S_DIN_to_AES);
set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
idx++;
/* Initiate decryption of block state to previous
* block_state-XOR-M[n]
*/
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
CC_AES_BLOCK_SIZE, NS_BIT);
set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
CC_AES_BLOCK_SIZE, NS_BIT, 0);
set_flow_mode(&desc[idx], DIN_AES_DOUT);
idx++;
/* Memory Barrier: wait for axi write to complete */
hw_desc_init(&desc[idx]);
set_din_no_dma(&desc[idx], 0, 0xfffff0);
set_dout_no_dma(&desc[idx], 0, 0, 1);
idx++;
}
if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
cc_setup_xcbc(req, desc, &idx);
else
cc_setup_cmac(req, desc, &idx);
if (state->xcbc_count == 0) {
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], ctx->hw_mode);
set_key_size_aes(&desc[idx], key_len);
set_cmac_size0_mode(&desc[idx]);
set_flow_mode(&desc[idx], S_DIN_to_AES);
idx++;
} else if (rem_cnt > 0) {
cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
} else {
hw_desc_init(&desc[idx]);
set_din_const(&desc[idx], 0x00, CC_AES_BLOCK_SIZE);
set_flow_mode(&desc[idx], DIN_AES_DOUT);
idx++;
}
/* Get final MAC result */
hw_desc_init(&desc[idx]);
set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
digestsize, NS_BIT, 1);
set_queue_last_ind(ctx->drvdata, &desc[idx]);
set_flow_mode(&desc[idx], S_AES_to_DOUT);
set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
set_cipher_mode(&desc[idx], ctx->hw_mode);
idx++;
rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
if (rc != -EINPROGRESS && rc != -EBUSY) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
cc_unmap_hash_request(dev, state, req->src, true);
cc_unmap_result(dev, state, digestsize, req->result);
cc_unmap_req(dev, state, ctx);
}
return rc;
}
static int cc_mac_finup(struct ahash_request *req)
{
struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
struct cc_crypto_req cc_req = {};
struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
int idx = 0;
int rc = 0;
u32 key_len = 0;
u32 digestsize = crypto_ahash_digestsize(tfm);
gfp_t flags = cc_gfp_flags(&req->base);
dev_dbg(dev, "===== finup xcbc(%d) ====\n", req->nbytes);
if (state->xcbc_count > 0 && req->nbytes == 0) {
dev_dbg(dev, "No data to update. Call to fdx_mac_final\n");
return cc_mac_final(req);
}
if (cc_map_req(dev, state, ctx)) {
dev_err(dev, "map_ahash_source() failed\n");
return -EINVAL;
}
if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
req->nbytes, 1, flags)) {
dev_err(dev, "map_ahash_request_final() failed\n");
cc_unmap_req(dev, state, ctx);
return -ENOMEM;
}
if (cc_map_result(dev, state, digestsize)) {
dev_err(dev, "map_ahash_digest() failed\n");
cc_unmap_hash_request(dev, state, req->src, true);
cc_unmap_req(dev, state, ctx);
return -ENOMEM;
}
/* Setup request structure */
cc_req.user_cb = cc_hash_complete;
cc_req.user_arg = req;
if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
key_len = CC_AES_128_BIT_KEY_SIZE;
cc_setup_xcbc(req, desc, &idx);
} else {
key_len = ctx->key_params.keylen;
cc_setup_cmac(req, desc, &idx);
}
if (req->nbytes == 0) {
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], ctx->hw_mode);
set_key_size_aes(&desc[idx], key_len);
set_cmac_size0_mode(&desc[idx]);
set_flow_mode(&desc[idx], S_DIN_to_AES);
idx++;
} else {
cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
}
/* Get final MAC result */
hw_desc_init(&desc[idx]);
set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
digestsize, NS_BIT, 1);
set_queue_last_ind(ctx->drvdata, &desc[idx]);
set_flow_mode(&desc[idx], S_AES_to_DOUT);
set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
set_cipher_mode(&desc[idx], ctx->hw_mode);
idx++;
rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
if (rc != -EINPROGRESS && rc != -EBUSY) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
cc_unmap_hash_request(dev, state, req->src, true);
cc_unmap_result(dev, state, digestsize, req->result);
cc_unmap_req(dev, state, ctx);
}
return rc;
}
static int cc_mac_digest(struct ahash_request *req)
{
struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
u32 digestsize = crypto_ahash_digestsize(tfm);
struct cc_crypto_req cc_req = {};
struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
u32 key_len;
unsigned int idx = 0;
int rc;
gfp_t flags = cc_gfp_flags(&req->base);
dev_dbg(dev, "===== -digest mac (%d) ====\n", req->nbytes);
cc_init_req(dev, state, ctx);
if (cc_map_req(dev, state, ctx)) {
dev_err(dev, "map_ahash_source() failed\n");
return -ENOMEM;
}
if (cc_map_result(dev, state, digestsize)) {
dev_err(dev, "map_ahash_digest() failed\n");
cc_unmap_req(dev, state, ctx);
return -ENOMEM;
}
if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
req->nbytes, 1, flags)) {
dev_err(dev, "map_ahash_request_final() failed\n");
cc_unmap_result(dev, state, digestsize, req->result);
cc_unmap_req(dev, state, ctx);
return -ENOMEM;
}
/* Setup request structure */
cc_req.user_cb = cc_digest_complete;
cc_req.user_arg = req;
if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
key_len = CC_AES_128_BIT_KEY_SIZE;
cc_setup_xcbc(req, desc, &idx);
} else {
key_len = ctx->key_params.keylen;
cc_setup_cmac(req, desc, &idx);
}
if (req->nbytes == 0) {
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], ctx->hw_mode);
set_key_size_aes(&desc[idx], key_len);
set_cmac_size0_mode(&desc[idx]);
set_flow_mode(&desc[idx], S_DIN_to_AES);
idx++;
} else {
cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
}
/* Get final MAC result */
hw_desc_init(&desc[idx]);
set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
CC_AES_BLOCK_SIZE, NS_BIT, 1);
set_queue_last_ind(ctx->drvdata, &desc[idx]);
set_flow_mode(&desc[idx], S_AES_to_DOUT);
set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
set_cipher_mode(&desc[idx], ctx->hw_mode);
idx++;
rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
if (rc != -EINPROGRESS && rc != -EBUSY) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
cc_unmap_hash_request(dev, state, req->src, true);
cc_unmap_result(dev, state, digestsize, req->result);
cc_unmap_req(dev, state, ctx);
}
return rc;
}
static int cc_hash_export(struct ahash_request *req, void *out)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
u8 *curr_buff = cc_hash_buf(state);
u32 curr_buff_cnt = *cc_hash_buf_cnt(state);
const u32 tmp = CC_EXPORT_MAGIC;
memcpy(out, &tmp, sizeof(u32));
out += sizeof(u32);
memcpy(out, state->digest_buff, ctx->inter_digestsize);
out += ctx->inter_digestsize;
memcpy(out, state->digest_bytes_len, ctx->hash_len);
out += ctx->hash_len;
memcpy(out, &curr_buff_cnt, sizeof(u32));
out += sizeof(u32);
memcpy(out, curr_buff, curr_buff_cnt);
return 0;
}
static int cc_hash_import(struct ahash_request *req, const void *in)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
struct device *dev = drvdata_to_dev(ctx->drvdata);
struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
u32 tmp;
memcpy(&tmp, in, sizeof(u32));
if (tmp != CC_EXPORT_MAGIC)
return -EINVAL;
in += sizeof(u32);
cc_init_req(dev, state, ctx);
memcpy(state->digest_buff, in, ctx->inter_digestsize);
in += ctx->inter_digestsize;
memcpy(state->digest_bytes_len, in, ctx->hash_len);
in += ctx->hash_len;
/* Sanity check the data as much as possible */
memcpy(&tmp, in, sizeof(u32));
if (tmp > CC_MAX_HASH_BLCK_SIZE)
return -EINVAL;
in += sizeof(u32);
state->buf_cnt[0] = tmp;
memcpy(state->buffers[0], in, tmp);
return 0;
}
struct cc_hash_template {
char name[CRYPTO_MAX_ALG_NAME];
char driver_name[CRYPTO_MAX_ALG_NAME];
char mac_name[CRYPTO_MAX_ALG_NAME];
char mac_driver_name[CRYPTO_MAX_ALG_NAME];
unsigned int blocksize;
bool is_mac;
bool synchronize;
struct ahash_alg template_ahash;
int hash_mode;
int hw_mode;
int inter_digestsize;
struct cc_drvdata *drvdata;
u32 min_hw_rev;
enum cc_std_body std_body;
};
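/*
 * Size of the exported ahash state (see cc_hash_export()): the export
 * magic, the intermediate digest, the running length, a u32 buffer byte
 * count and the buffered partial block.
 */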
#define CC_STATE_SIZE(_x) \
((_x) + HASH_MAX_LEN_SIZE + CC_MAX_HASH_BLCK_SIZE + (2 * sizeof(u32)))
/* hash descriptors */
static struct cc_hash_template driver_hash[] = {
/* Asynchronous hash templates */
{
.name = "sha1",
.driver_name = "sha1-ccree",
.mac_name = "hmac(sha1)",
.mac_driver_name = "hmac-sha1-ccree",
.blocksize = SHA1_BLOCK_SIZE,
.is_mac = true,
.synchronize = false,
.template_ahash = {
.init = cc_hash_init,
.update = cc_hash_update,
.final = cc_hash_final,
.finup = cc_hash_finup,
.digest = cc_hash_digest,
.export = cc_hash_export,
.import = cc_hash_import,
.setkey = cc_hash_setkey,
.halg = {
.digestsize = SHA1_DIGEST_SIZE,
.statesize = CC_STATE_SIZE(SHA1_DIGEST_SIZE),
},
},
.hash_mode = DRV_HASH_SHA1,
.hw_mode = DRV_HASH_HW_SHA1,
.inter_digestsize = SHA1_DIGEST_SIZE,
.min_hw_rev = CC_HW_REV_630,
.std_body = CC_STD_NIST,
},
{
.name = "sha256",
.driver_name = "sha256-ccree",
.mac_name = "hmac(sha256)",
.mac_driver_name = "hmac-sha256-ccree",
.blocksize = SHA256_BLOCK_SIZE,
.is_mac = true,
.template_ahash = {
.init = cc_hash_init,
.update = cc_hash_update,
.final = cc_hash_final,
.finup = cc_hash_finup,
.digest = cc_hash_digest,
.export = cc_hash_export,
.import = cc_hash_import,
.setkey = cc_hash_setkey,
.halg = {
.digestsize = SHA256_DIGEST_SIZE,
.statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE)
},
},
.hash_mode = DRV_HASH_SHA256,
.hw_mode = DRV_HASH_HW_SHA256,
.inter_digestsize = SHA256_DIGEST_SIZE,
.min_hw_rev = CC_HW_REV_630,
.std_body = CC_STD_NIST,
},
{
.name = "sha224",
.driver_name = "sha224-ccree",
.mac_name = "hmac(sha224)",
.mac_driver_name = "hmac-sha224-ccree",
.blocksize = SHA224_BLOCK_SIZE,
.is_mac = true,
.template_ahash = {
.init = cc_hash_init,
.update = cc_hash_update,
.final = cc_hash_final,
.finup = cc_hash_finup,
.digest = cc_hash_digest,
.export = cc_hash_export,
.import = cc_hash_import,
.setkey = cc_hash_setkey,
.halg = {
.digestsize = SHA224_DIGEST_SIZE,
.statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE),
},
},
.hash_mode = DRV_HASH_SHA224,
.hw_mode = DRV_HASH_HW_SHA256,
.inter_digestsize = SHA256_DIGEST_SIZE,
.min_hw_rev = CC_HW_REV_630,
.std_body = CC_STD_NIST,
},
{
.name = "sha384",
.driver_name = "sha384-ccree",
.mac_name = "hmac(sha384)",
.mac_driver_name = "hmac-sha384-ccree",
.blocksize = SHA384_BLOCK_SIZE,
.is_mac = true,
.template_ahash = {
.init = cc_hash_init,
.update = cc_hash_update,
.final = cc_hash_final,
.finup = cc_hash_finup,
.digest = cc_hash_digest,
.export = cc_hash_export,
.import = cc_hash_import,
.setkey = cc_hash_setkey,
.halg = {
.digestsize = SHA384_DIGEST_SIZE,
.statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
},
},
.hash_mode = DRV_HASH_SHA384,
.hw_mode = DRV_HASH_HW_SHA512,
.inter_digestsize = SHA512_DIGEST_SIZE,
.min_hw_rev = CC_HW_REV_712,
.std_body = CC_STD_NIST,
},
{
.name = "sha512",
.driver_name = "sha512-ccree",
.mac_name = "hmac(sha512)",
.mac_driver_name = "hmac-sha512-ccree",
.blocksize = SHA512_BLOCK_SIZE,
.is_mac = true,
.template_ahash = {
.init = cc_hash_init,
.update = cc_hash_update,
.final = cc_hash_final,
.finup = cc_hash_finup,
.digest = cc_hash_digest,
.export = cc_hash_export,
.import = cc_hash_import,
.setkey = cc_hash_setkey,
.halg = {
.digestsize = SHA512_DIGEST_SIZE,
.statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
},
},
.hash_mode = DRV_HASH_SHA512,
.hw_mode = DRV_HASH_HW_SHA512,
.inter_digestsize = SHA512_DIGEST_SIZE,
.min_hw_rev = CC_HW_REV_712,
.std_body = CC_STD_NIST,
},
{
.name = "md5",
.driver_name = "md5-ccree",
.mac_name = "hmac(md5)",
.mac_driver_name = "hmac-md5-ccree",
.blocksize = MD5_HMAC_BLOCK_SIZE,
.is_mac = true,
.template_ahash = {
.init = cc_hash_init,
.update = cc_hash_update,
.final = cc_hash_final,
.finup = cc_hash_finup,
.digest = cc_hash_digest,
.export = cc_hash_export,
.import = cc_hash_import,
.setkey = cc_hash_setkey,
.halg = {
.digestsize = MD5_DIGEST_SIZE,
.statesize = CC_STATE_SIZE(MD5_DIGEST_SIZE),
},
},
.hash_mode = DRV_HASH_MD5,
.hw_mode = DRV_HASH_HW_MD5,
.inter_digestsize = MD5_DIGEST_SIZE,
.min_hw_rev = CC_HW_REV_630,
.std_body = CC_STD_NIST,
},
{
.name = "sm3",
.driver_name = "sm3-ccree",
.blocksize = SM3_BLOCK_SIZE,
.is_mac = false,
.template_ahash = {
.init = cc_hash_init,
.update = cc_hash_update,
.final = cc_hash_final,
.finup = cc_hash_finup,
.digest = cc_hash_digest,
.export = cc_hash_export,
.import = cc_hash_import,
.setkey = cc_hash_setkey,
.halg = {
.digestsize = SM3_DIGEST_SIZE,
.statesize = CC_STATE_SIZE(SM3_DIGEST_SIZE),
},
},
.hash_mode = DRV_HASH_SM3,
.hw_mode = DRV_HASH_HW_SM3,
.inter_digestsize = SM3_DIGEST_SIZE,
.min_hw_rev = CC_HW_REV_713,
.std_body = CC_STD_OSCCA,
},
{
.mac_name = "xcbc(aes)",
.mac_driver_name = "xcbc-aes-ccree",
.blocksize = AES_BLOCK_SIZE,
.is_mac = true,
.template_ahash = {
.init = cc_hash_init,
.update = cc_mac_update,
.final = cc_mac_final,
.finup = cc_mac_finup,
.digest = cc_mac_digest,
.setkey = cc_xcbc_setkey,
.export = cc_hash_export,
.import = cc_hash_import,
.halg = {
.digestsize = AES_BLOCK_SIZE,
.statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
},
},
.hash_mode = DRV_HASH_NULL,
.hw_mode = DRV_CIPHER_XCBC_MAC,
.inter_digestsize = AES_BLOCK_SIZE,
.min_hw_rev = CC_HW_REV_630,
.std_body = CC_STD_NIST,
},
{
.mac_name = "cmac(aes)",
.mac_driver_name = "cmac-aes-ccree",
.blocksize = AES_BLOCK_SIZE,
.is_mac = true,
.template_ahash = {
.init = cc_hash_init,
.update = cc_mac_update,
.final = cc_mac_final,
.finup = cc_mac_finup,
.digest = cc_mac_digest,
.setkey = cc_cmac_setkey,
.export = cc_hash_export,
.import = cc_hash_import,
.halg = {
.digestsize = AES_BLOCK_SIZE,
.statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
},
},
.hash_mode = DRV_HASH_NULL,
.hw_mode = DRV_CIPHER_CMAC,
.inter_digestsize = AES_BLOCK_SIZE,
.min_hw_rev = CC_HW_REV_630,
.std_body = CC_STD_NIST,
},
};
static struct cc_hash_alg *cc_alloc_hash_alg(struct cc_hash_template *template,
struct device *dev, bool keyed)
{
struct cc_hash_alg *t_crypto_alg;
struct crypto_alg *alg;
struct ahash_alg *halg;
t_crypto_alg = devm_kzalloc(dev, sizeof(*t_crypto_alg), GFP_KERNEL);
if (!t_crypto_alg)
return ERR_PTR(-ENOMEM);
t_crypto_alg->ahash_alg = template->template_ahash;
halg = &t_crypto_alg->ahash_alg;
alg = &halg->halg.base;
if (keyed) {
snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
template->mac_name);
snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
template->mac_driver_name);
} else {
halg->setkey = NULL;
snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
template->name);
snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
template->driver_name);
}
alg->cra_module = THIS_MODULE;
alg->cra_ctxsize = sizeof(struct cc_hash_ctx) + crypto_dma_padding();
alg->cra_priority = CC_CRA_PRIO;
alg->cra_blocksize = template->blocksize;
alg->cra_alignmask = 0;
alg->cra_exit = cc_cra_exit;
alg->cra_init = cc_cra_init;
alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
t_crypto_alg->hash_mode = template->hash_mode;
t_crypto_alg->hw_mode = template->hw_mode;
t_crypto_alg->inter_digestsize = template->inter_digestsize;
return t_crypto_alg;
}
static int cc_init_copy_sram(struct cc_drvdata *drvdata, const u32 *data,
unsigned int size, u32 *sram_buff_ofs)
{
struct cc_hw_desc larval_seq[CC_DIGEST_SIZE_MAX / sizeof(u32)];
unsigned int larval_seq_len = 0;
int rc;
cc_set_sram_desc(data, *sram_buff_ofs, size / sizeof(*data),
larval_seq, &larval_seq_len);
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
if (rc)
return rc;
*sram_buff_ofs += size;
return 0;
}
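/*
 * Populate device SRAM with the digest-length constants followed by the
 * initial (larval) digests, in the fixed order assumed by
 * cc_larval_digest_addr() and cc_digest_len_addr().
 */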
int cc_init_hash_sram(struct cc_drvdata *drvdata)
{
struct cc_hash_handle *hash_handle = drvdata->hash_handle;
u32 sram_buff_ofs = hash_handle->digest_len_sram_addr;
bool large_sha_supported = (drvdata->hw_rev >= CC_HW_REV_712);
bool sm3_supported = (drvdata->hw_rev >= CC_HW_REV_713);
int rc = 0;
/* Copy-to-sram digest-len */
rc = cc_init_copy_sram(drvdata, cc_digest_len_init,
sizeof(cc_digest_len_init), &sram_buff_ofs);
if (rc)
goto init_digest_const_err;
if (large_sha_supported) {
/* Copy-to-sram digest-len for sha384/512 */
rc = cc_init_copy_sram(drvdata, cc_digest_len_sha512_init,
sizeof(cc_digest_len_sha512_init),
&sram_buff_ofs);
if (rc)
goto init_digest_const_err;
}
/* The initial digests offset */
hash_handle->larval_digest_sram_addr = sram_buff_ofs;
/* Copy-to-sram initial SHA* digests */
rc = cc_init_copy_sram(drvdata, cc_md5_init, sizeof(cc_md5_init),
&sram_buff_ofs);
if (rc)
goto init_digest_const_err;
rc = cc_init_copy_sram(drvdata, cc_sha1_init, sizeof(cc_sha1_init),
&sram_buff_ofs);
if (rc)
goto init_digest_const_err;
rc = cc_init_copy_sram(drvdata, cc_sha224_init, sizeof(cc_sha224_init),
&sram_buff_ofs);
if (rc)
goto init_digest_const_err;
rc = cc_init_copy_sram(drvdata, cc_sha256_init, sizeof(cc_sha256_init),
&sram_buff_ofs);
if (rc)
goto init_digest_const_err;
if (sm3_supported) {
rc = cc_init_copy_sram(drvdata, cc_sm3_init,
sizeof(cc_sm3_init), &sram_buff_ofs);
if (rc)
goto init_digest_const_err;
}
if (large_sha_supported) {
rc = cc_init_copy_sram(drvdata, cc_sha384_init,
sizeof(cc_sha384_init), &sram_buff_ofs);
if (rc)
goto init_digest_const_err;
rc = cc_init_copy_sram(drvdata, cc_sha512_init,
sizeof(cc_sha512_init), &sram_buff_ofs);
if (rc)
goto init_digest_const_err;
}
init_digest_const_err:
return rc;
}
int cc_hash_alloc(struct cc_drvdata *drvdata)
{
struct cc_hash_handle *hash_handle;
u32 sram_buff;
u32 sram_size_to_alloc;
struct device *dev = drvdata_to_dev(drvdata);
int rc = 0;
int alg;
hash_handle = devm_kzalloc(dev, sizeof(*hash_handle), GFP_KERNEL);
if (!hash_handle)
return -ENOMEM;
INIT_LIST_HEAD(&hash_handle->hash_list);
drvdata->hash_handle = hash_handle;
sram_size_to_alloc = sizeof(cc_digest_len_init) +
sizeof(cc_md5_init) +
sizeof(cc_sha1_init) +
sizeof(cc_sha224_init) +
sizeof(cc_sha256_init);
if (drvdata->hw_rev >= CC_HW_REV_713)
sram_size_to_alloc += sizeof(cc_sm3_init);
if (drvdata->hw_rev >= CC_HW_REV_712)
sram_size_to_alloc += sizeof(cc_digest_len_sha512_init) +
sizeof(cc_sha384_init) + sizeof(cc_sha512_init);
sram_buff = cc_sram_alloc(drvdata, sram_size_to_alloc);
if (sram_buff == NULL_SRAM_ADDR) {
rc = -ENOMEM;
goto fail;
}
/* The initial digest-len offset */
hash_handle->digest_len_sram_addr = sram_buff;
/* Must be set before algorithm registration, since it is used there */
rc = cc_init_hash_sram(drvdata);
if (rc) {
dev_err(dev, "Init digest CONST failed (rc=%d)\n", rc);
goto fail;
}
/* ahash registration */
for (alg = 0; alg < ARRAY_SIZE(driver_hash); alg++) {
struct cc_hash_alg *t_alg;
int hw_mode = driver_hash[alg].hw_mode;
/* Check that the HW revision and variants are suitable */
if ((driver_hash[alg].min_hw_rev > drvdata->hw_rev) ||
!(drvdata->std_bodies & driver_hash[alg].std_body))
continue;
if (driver_hash[alg].is_mac) {
/* register hmac version */
t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, true);
if (IS_ERR(t_alg)) {
rc = PTR_ERR(t_alg);
dev_err(dev, "%s alg allocation failed\n",
driver_hash[alg].driver_name);
goto fail;
}
t_alg->drvdata = drvdata;
rc = crypto_register_ahash(&t_alg->ahash_alg);
if (rc) {
dev_err(dev, "%s alg registration failed\n",
driver_hash[alg].driver_name);
goto fail;
}
list_add_tail(&t_alg->entry, &hash_handle->hash_list);
}
if (hw_mode == DRV_CIPHER_XCBC_MAC ||
hw_mode == DRV_CIPHER_CMAC)
continue;
/* register hash version */
t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, false);
if (IS_ERR(t_alg)) {
rc = PTR_ERR(t_alg);
dev_err(dev, "%s alg allocation failed\n",
driver_hash[alg].driver_name);
goto fail;
}
t_alg->drvdata = drvdata;
rc = crypto_register_ahash(&t_alg->ahash_alg);
if (rc) {
dev_err(dev, "%s alg registration failed\n",
driver_hash[alg].driver_name);
goto fail;
}
list_add_tail(&t_alg->entry, &hash_handle->hash_list);
}
return 0;
fail:
cc_hash_free(drvdata);
return rc;
}
int cc_hash_free(struct cc_drvdata *drvdata)
{
struct cc_hash_alg *t_hash_alg, *hash_n;
struct cc_hash_handle *hash_handle = drvdata->hash_handle;
list_for_each_entry_safe(t_hash_alg, hash_n, &hash_handle->hash_list,
entry) {
crypto_unregister_ahash(&t_hash_alg->ahash_alg);
list_del(&t_hash_alg->entry);
}
return 0;
}
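/*
 * Load the three derived XCBC-MAC subkeys (K1 as the AES key, K2/K3 as
 * state) and the current MAC state into the AES engine.
 */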
static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[],
unsigned int *seq_size)
{
unsigned int idx = *seq_size;
struct ahash_req_ctx *state = ahash_request_ctx_dma(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
/* Setup XCBC MAC K1 */
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr +
XCBC_MAC_K1_OFFSET),
CC_AES_128_BIT_KEY_SIZE, NS_BIT);
set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
set_hash_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC, ctx->hash_mode);
set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
set_flow_mode(&desc[idx], S_DIN_to_AES);
idx++;
/* Setup XCBC MAC K2 */
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_DLLI,
(ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K2_OFFSET),
CC_AES_128_BIT_KEY_SIZE, NS_BIT);
set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
set_flow_mode(&desc[idx], S_DIN_to_AES);
idx++;
/* Setup XCBC MAC K3 */
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_DLLI,
(ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K3_OFFSET),
CC_AES_128_BIT_KEY_SIZE, NS_BIT);
set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
set_flow_mode(&desc[idx], S_DIN_to_AES);
idx++;
/* Loading MAC state */
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
CC_AES_BLOCK_SIZE, NS_BIT);
set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
set_flow_mode(&desc[idx], S_DIN_to_AES);
idx++;
*seq_size = idx;
}
static void cc_setup_cmac(struct ahash_request *areq, struct cc_hw_desc desc[],
unsigned int *seq_size)
{
unsigned int idx = *seq_size;
struct ahash_req_ctx *state = ahash_request_ctx_dma(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
/* Setup CMAC Key */
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
((ctx->key_params.keylen == AES_KEYSIZE_192) ?
AES_MAX_KEY_SIZE : ctx->key_params.keylen), NS_BIT);
set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
set_key_size_aes(&desc[idx], ctx->key_params.keylen);
set_flow_mode(&desc[idx], S_DIN_to_AES);
idx++;
/* Load MAC state */
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
CC_AES_BLOCK_SIZE, NS_BIT);
set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
set_key_size_aes(&desc[idx], ctx->key_params.keylen);
set_flow_mode(&desc[idx], S_DIN_to_AES);
idx++;
*seq_size = idx;
}
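/*
 * Queue the data-path descriptors: a single DLLI descriptor for
 * contiguous data, or a BYPASS copy of the MLLI table into SRAM
 * followed by an MLLI processing descriptor for scattered data.
 */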
static void cc_set_desc(struct ahash_req_ctx *areq_ctx,
struct cc_hash_ctx *ctx, unsigned int flow_mode,
struct cc_hw_desc desc[], bool is_not_last_data,
unsigned int *seq_size)
{
unsigned int idx = *seq_size;
struct device *dev = drvdata_to_dev(ctx->drvdata);
if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_DLLI) {
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_DLLI,
sg_dma_address(areq_ctx->curr_sg),
areq_ctx->curr_sg->length, NS_BIT);
set_flow_mode(&desc[idx], flow_mode);
idx++;
} else {
if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
dev_dbg(dev, " NULL mode\n");
/* nothing to build */
return;
}
/* bypass */
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_DLLI,
areq_ctx->mlli_params.mlli_dma_addr,
areq_ctx->mlli_params.mlli_len, NS_BIT);
set_dout_sram(&desc[idx], ctx->drvdata->mlli_sram_addr,
areq_ctx->mlli_params.mlli_len);
set_flow_mode(&desc[idx], BYPASS);
idx++;
/* process */
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_MLLI,
ctx->drvdata->mlli_sram_addr,
areq_ctx->mlli_nents, NS_BIT);
set_flow_mode(&desc[idx], flow_mode);
idx++;
}
if (is_not_last_data)
set_din_not_last_indication(&desc[(idx - 1)]);
/* return updated desc sequence size */
*seq_size = idx;
}
static const void *cc_larval_digest(struct device *dev, u32 mode)
{
switch (mode) {
case DRV_HASH_MD5:
return cc_md5_init;
case DRV_HASH_SHA1:
return cc_sha1_init;
case DRV_HASH_SHA224:
return cc_sha224_init;
case DRV_HASH_SHA256:
return cc_sha256_init;
case DRV_HASH_SHA384:
return cc_sha384_init;
case DRV_HASH_SHA512:
return cc_sha512_init;
case DRV_HASH_SM3:
return cc_sm3_init;
default:
dev_err(dev, "Invalid hash mode (%d)\n", mode);
return cc_md5_init;
}
}
/**
* cc_larval_digest_addr() - Get the address of the initial digest in SRAM
* according to the given hash mode
*
* @drvdata: Associated device driver context
* @mode: The Hash mode. Supported modes: MD5/SHA1/SHA224/SHA256/SHA384/SHA512/SM3
*
* Return:
* The address of the initial digest in SRAM
*/
u32 cc_larval_digest_addr(void *drvdata, u32 mode)
{
struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
struct device *dev = drvdata_to_dev(_drvdata);
bool sm3_supported = (_drvdata->hw_rev >= CC_HW_REV_713);
u32 addr;
switch (mode) {
case DRV_HASH_NULL:
break; /*Ignore*/
case DRV_HASH_MD5:
return (hash_handle->larval_digest_sram_addr);
case DRV_HASH_SHA1:
return (hash_handle->larval_digest_sram_addr +
sizeof(cc_md5_init));
case DRV_HASH_SHA224:
return (hash_handle->larval_digest_sram_addr +
sizeof(cc_md5_init) +
sizeof(cc_sha1_init));
case DRV_HASH_SHA256:
return (hash_handle->larval_digest_sram_addr +
sizeof(cc_md5_init) +
sizeof(cc_sha1_init) +
sizeof(cc_sha224_init));
case DRV_HASH_SM3:
return (hash_handle->larval_digest_sram_addr +
sizeof(cc_md5_init) +
sizeof(cc_sha1_init) +
sizeof(cc_sha224_init) +
sizeof(cc_sha256_init));
case DRV_HASH_SHA384:
addr = (hash_handle->larval_digest_sram_addr +
sizeof(cc_md5_init) +
sizeof(cc_sha1_init) +
sizeof(cc_sha224_init) +
sizeof(cc_sha256_init));
if (sm3_supported)
addr += sizeof(cc_sm3_init);
return addr;
case DRV_HASH_SHA512:
addr = (hash_handle->larval_digest_sram_addr +
sizeof(cc_md5_init) +
sizeof(cc_sha1_init) +
sizeof(cc_sha224_init) +
sizeof(cc_sha256_init) +
sizeof(cc_sha384_init));
if (sm3_supported)
addr += sizeof(cc_sm3_init);
return addr;
default:
dev_err(dev, "Invalid hash mode (%d)\n", mode);
}
/* Fall back to a valid (though incorrect) address to avoid a kernel crash */
return hash_handle->larval_digest_sram_addr;
}
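/*
 * The digest-length constants live in SRAM as two blobs: one shared by
 * MD5/SHA-1/SHA-224/SHA-256 and, on HW rev >= 712, one for
 * SHA-384/SHA-512.
 */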
u32 cc_digest_len_addr(void *drvdata, u32 mode)
{
struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
u32 digest_len_addr = hash_handle->digest_len_sram_addr;
switch (mode) {
case DRV_HASH_SHA1:
case DRV_HASH_SHA224:
case DRV_HASH_SHA256:
case DRV_HASH_MD5:
return digest_len_addr;
case DRV_HASH_SHA384:
case DRV_HASH_SHA512:
return digest_len_addr + sizeof(cc_digest_len_init);
default:
return digest_len_addr; /* fall back to a valid address to avoid a crash */
}
}
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2023 Intel Corporation
*/
#include <linux/dmi.h>
#include "iwl-drv.h"
#include "iwl-debug.h"
#include "regulatory.h"
#include "fw/runtime.h"
#include "fw/uefi.h"
#define GET_BIOS_TABLE(__name, ...) \
do { \
int ret = -ENOENT; \
if (fwrt->uefi_tables_lock_status > UEFI_WIFI_GUID_UNLOCKED) \
ret = iwl_uefi_get_ ## __name(__VA_ARGS__); \
if (ret < 0) \
ret = iwl_acpi_get_ ## __name(__VA_ARGS__); \
return ret; \
} while (0)
#define IWL_BIOS_TABLE_LOADER(__name) \
int iwl_bios_get_ ## __name(struct iwl_fw_runtime *fwrt) \
{GET_BIOS_TABLE(__name, fwrt); } \
IWL_EXPORT_SYMBOL(iwl_bios_get_ ## __name)
#define IWL_BIOS_TABLE_LOADER_DATA(__name, data_type) \
int iwl_bios_get_ ## __name(struct iwl_fw_runtime *fwrt, \
data_type * data) \
{GET_BIOS_TABLE(__name, fwrt, data); } \
IWL_EXPORT_SYMBOL(iwl_bios_get_ ## __name)
IWL_BIOS_TABLE_LOADER(wrds_table);
IWL_BIOS_TABLE_LOADER(ewrd_table);
IWL_BIOS_TABLE_LOADER(wgds_table);
IWL_BIOS_TABLE_LOADER(ppag_table);
IWL_BIOS_TABLE_LOADER_DATA(tas_table, struct iwl_tas_data);
IWL_BIOS_TABLE_LOADER_DATA(pwr_limit, u64);
IWL_BIOS_TABLE_LOADER_DATA(mcc, char);
IWL_BIOS_TABLE_LOADER_DATA(eckv, u32);
IWL_BIOS_TABLE_LOADER_DATA(wbem, u32);
static const struct dmi_system_id dmi_ppag_approved_list[] = {
{ .ident = "HP",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
},
},
{ .ident = "SAMSUNG",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"),
},
},
{ .ident = "MSFT",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
},
},
{ .ident = "ASUS",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
},
},
{ .ident = "GOOGLE-HP",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Google"),
DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
},
},
{ .ident = "GOOGLE-ASUS",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Google"),
DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek COMPUTER INC."),
},
},
{ .ident = "GOOGLE-SAMSUNG",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Google"),
DMI_MATCH(DMI_BOARD_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"),
},
},
{ .ident = "DELL",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
},
},
{ .ident = "DELL",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
},
},
{ .ident = "RAZER",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Razer"),
},
},
{ .ident = "Honor",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HONOR"),
},
},
{}
};
static const struct dmi_system_id dmi_tas_approved_list[] = {
{ .ident = "HP",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
},
},
{ .ident = "SAMSUNG",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"),
},
},
{ .ident = "LENOVO",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
},
},
{ .ident = "DELL",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
},
},
{ .ident = "MSFT",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
},
},
{ .ident = "Acer",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
},
},
{ .ident = "ASUS",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
},
},
{ .ident = "GOOGLE-HP",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Google"),
DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
},
},
{ .ident = "MSI",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star International Co., Ltd."),
},
},
{ .ident = "Honor",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HONOR"),
},
},
/* keep last */
{}
};
bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt)
{
/*
* The PER_CHAIN_LIMIT_OFFSET_CMD command is not supported on
* earlier firmware versions. Unfortunately, we don't have a
* TLV API flag to rely on, so rely on the major version which
* is in the first byte of ucode_ver. This was implemented
* initially on version 38 and then backported to 17. It was
* also backported to 29, but only for 7265D devices. The
* intention was to have it in 36 as well, but not all 8000
* family got this feature enabled. The 8000 family is the
* only one using version 36, so skip this version entirely.
*/
return IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) >= 38 ||
(IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 17 &&
fwrt->trans->hw_rev != CSR_HW_REV_TYPE_3160) ||
(IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 29 &&
((fwrt->trans->hw_rev & CSR_HW_REV_TYPE_MSK) ==
CSR_HW_REV_TYPE_7265D));
}
IWL_EXPORT_SYMBOL(iwl_sar_geo_support);
int iwl_sar_geo_fill_table(struct iwl_fw_runtime *fwrt,
struct iwl_per_chain_offset *table,
u32 n_bands, u32 n_profiles)
{
int i, j;
if (!fwrt->geo_enabled)
return -ENODATA;
if (!iwl_sar_geo_support(fwrt))
return -EOPNOTSUPP;
for (i = 0; i < n_profiles; i++) {
for (j = 0; j < n_bands; j++) {
struct iwl_per_chain_offset *chain =
&table[i * n_bands + j];
chain->max_tx_power =
cpu_to_le16(fwrt->geo_profiles[i].bands[j].max);
chain->chain_a =
fwrt->geo_profiles[i].bands[j].chains[0];
chain->chain_b =
fwrt->geo_profiles[i].bands[j].chains[1];
IWL_DEBUG_RADIO(fwrt,
"SAR geographic profile[%d] Band[%d]: chain A = %d chain B = %d max_tx_power = %d\n",
i, j,
fwrt->geo_profiles[i].bands[j].chains[0],
fwrt->geo_profiles[i].bands[j].chains[1],
fwrt->geo_profiles[i].bands[j].max);
}
}
return 0;
}
IWL_EXPORT_SYMBOL(iwl_sar_geo_fill_table);
static int iwl_sar_fill_table(struct iwl_fw_runtime *fwrt,
__le16 *per_chain, u32 n_subbands,
int prof_a, int prof_b)
{
int profs[BIOS_SAR_NUM_CHAINS] = { prof_a, prof_b };
int i, j;
for (i = 0; i < BIOS_SAR_NUM_CHAINS; i++) {
struct iwl_sar_profile *prof;
/* don't allow SAR to be disabled (profile 0 means disable) */
if (profs[i] == 0)
return -EPERM;
/* we are off by one, so allow up to BIOS_SAR_MAX_PROFILE_NUM */
if (profs[i] > BIOS_SAR_MAX_PROFILE_NUM)
return -EINVAL;
/* profiles go from 1 to 4, so decrement to access the array */
prof = &fwrt->sar_profiles[profs[i] - 1];
/* if the profile is disabled, do nothing */
if (!prof->enabled) {
IWL_DEBUG_RADIO(fwrt, "SAR profile %d is disabled.\n",
profs[i]);
/*
* if one of the profiles is disabled, we
* ignore all of them and return 1 to
* differentiate disabled from other failures.
*/
return 1;
}
IWL_DEBUG_INFO(fwrt,
"SAR EWRD: chain %d profile index %d\n",
i, profs[i]);
IWL_DEBUG_RADIO(fwrt, " Chain[%d]:\n", i);
for (j = 0; j < n_subbands; j++) {
per_chain[i * n_subbands + j] =
cpu_to_le16(prof->chains[i].subbands[j]);
IWL_DEBUG_RADIO(fwrt, " Band[%d] = %d * .125dBm\n",
j, prof->chains[i].subbands[j]);
}
}
return 0;
}
int iwl_sar_fill_profile(struct iwl_fw_runtime *fwrt,
__le16 *per_chain, u32 n_tables, u32 n_subbands,
int prof_a, int prof_b)
{
int i, ret = 0;
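	/* per_chain is laid out as [table][chain][subband] */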
for (i = 0; i < n_tables; i++) {
ret = iwl_sar_fill_table(fwrt,
&per_chain[i * n_subbands * BIOS_SAR_NUM_CHAINS],
n_subbands, prof_a, prof_b);
if (ret)
break;
}
return ret;
}
IWL_EXPORT_SYMBOL(iwl_sar_fill_profile);
static bool iwl_ppag_value_valid(struct iwl_fw_runtime *fwrt, int chain,
int subband)
{
s8 ppag_val = fwrt->ppag_chains[chain].subbands[subband];
if ((subband == 0 &&
(ppag_val > IWL_PPAG_MAX_LB || ppag_val < IWL_PPAG_MIN_LB)) ||
(subband != 0 &&
(ppag_val > IWL_PPAG_MAX_HB || ppag_val < IWL_PPAG_MIN_HB))) {
IWL_DEBUG_RADIO(fwrt, "Invalid PPAG value: %d\n", ppag_val);
return false;
}
return true;
}
int iwl_fill_ppag_table(struct iwl_fw_runtime *fwrt,
union iwl_ppag_table_cmd *cmd, int *cmd_size)
{
u8 cmd_ver;
int i, j, num_sub_bands;
s8 *gain;
bool send_ppag_always;
/* many firmware images for JF lie about this */
if (CSR_HW_RFID_TYPE(fwrt->trans->hw_rf_id) ==
CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_JF))
return -EOPNOTSUPP;
if (!fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_PPAG)) {
IWL_DEBUG_RADIO(fwrt,
"PPAG capability not supported by FW, command not sent.\n");
return -EINVAL;
}
cmd_ver = iwl_fw_lookup_cmd_ver(fwrt->fw,
WIDE_ID(PHY_OPS_GROUP,
PER_PLATFORM_ANT_GAIN_CMD), 1);
/*
 * Starting from version 4, the driver needs to send the PPAG CMD
 * regardless of whether PPAG is enabled/disabled or valid/invalid.
*/
send_ppag_always = cmd_ver > 3;
/* Don't send PPAG if it is disabled */
if (!send_ppag_always && !fwrt->ppag_flags) {
IWL_DEBUG_RADIO(fwrt, "PPAG not enabled, command not sent.\n");
return -EINVAL;
}
/* The 'flags' field is the same in v1 and in v2 so we can just
* use v1 to access it.
*/
cmd->v1.flags = cpu_to_le32(fwrt->ppag_flags);
IWL_DEBUG_RADIO(fwrt, "PPAG cmd ver is %d\n", cmd_ver);
if (cmd_ver == 1) {
num_sub_bands = IWL_NUM_SUB_BANDS_V1;
gain = cmd->v1.gain[0];
*cmd_size = sizeof(cmd->v1);
if (fwrt->ppag_ver >= 1) {
/* in this case FW supports revision 0 */
IWL_DEBUG_RADIO(fwrt,
"PPAG table rev is %d, send truncated table\n",
fwrt->ppag_ver);
}
} else if (cmd_ver >= 2 && cmd_ver <= 6) {
num_sub_bands = IWL_NUM_SUB_BANDS_V2;
gain = cmd->v2.gain[0];
*cmd_size = sizeof(cmd->v2);
if (fwrt->ppag_ver == 0) {
/* in this case FW supports revisions 1,2 or 3 */
IWL_DEBUG_RADIO(fwrt,
"PPAG table rev is 0, send padded table\n");
}
} else {
IWL_DEBUG_RADIO(fwrt, "Unsupported PPAG command version\n");
return -EINVAL;
}
/* ppag mode */
IWL_DEBUG_RADIO(fwrt,
"PPAG MODE bits were read from bios: %d\n",
le32_to_cpu(cmd->v1.flags));
if (cmd_ver == 5)
cmd->v1.flags &= cpu_to_le32(IWL_PPAG_CMD_V5_MASK);
else if (cmd_ver < 5)
cmd->v1.flags &= cpu_to_le32(IWL_PPAG_CMD_V4_MASK);
if ((cmd_ver == 1 &&
!fw_has_capa(&fwrt->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_PPAG_CHINA_BIOS_SUPPORT)) ||
(cmd_ver == 2 && fwrt->ppag_ver >= 2)) {
cmd->v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK);
IWL_DEBUG_RADIO(fwrt, "masking ppag China bit\n");
} else {
IWL_DEBUG_RADIO(fwrt, "isn't masking ppag China bit\n");
}
IWL_DEBUG_RADIO(fwrt,
"PPAG MODE bits going to be sent: %d\n",
le32_to_cpu(cmd->v1.flags));
for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
for (j = 0; j < num_sub_bands; j++) {
if (!send_ppag_always &&
!iwl_ppag_value_valid(fwrt, i, j))
return -EINVAL;
gain[i * num_sub_bands + j] =
fwrt->ppag_chains[i].subbands[j];
IWL_DEBUG_RADIO(fwrt,
"PPAG table: chain[%d] band[%d]: gain = %d\n",
i, j, gain[i * num_sub_bands + j]);
}
}
return 0;
}
IWL_EXPORT_SYMBOL(iwl_fill_ppag_table);
bool iwl_is_ppag_approved(struct iwl_fw_runtime *fwrt)
{
if (!dmi_check_system(dmi_ppag_approved_list)) {
IWL_DEBUG_RADIO(fwrt,
"System vendor '%s' is not in the approved list, disabling PPAG.\n",
dmi_get_system_info(DMI_SYS_VENDOR) ?: "<unknown>");
fwrt->ppag_flags = 0;
return false;
}
return true;
}
IWL_EXPORT_SYMBOL(iwl_is_ppag_approved);
bool iwl_is_tas_approved(void)
{
return dmi_check_system(dmi_tas_approved_list);
}
IWL_EXPORT_SYMBOL(iwl_is_tas_approved);
int iwl_parse_tas_selection(struct iwl_fw_runtime *fwrt,
struct iwl_tas_data *tas_data,
const u32 tas_selection)
{
u8 override_iec = u32_get_bits(tas_selection,
IWL_WTAS_OVERRIDE_IEC_MSK);
u8 enabled_iec = u32_get_bits(tas_selection, IWL_WTAS_ENABLE_IEC_MSK);
u8 usa_tas_uhb = u32_get_bits(tas_selection, IWL_WTAS_USA_UHB_MSK);
int enabled = tas_selection & IWL_WTAS_ENABLED_MSK;
IWL_DEBUG_RADIO(fwrt, "TAS selection as read from BIOS: 0x%x\n",
tas_selection);
tas_data->usa_tas_uhb_allowed = usa_tas_uhb;
tas_data->override_tas_iec = override_iec;
tas_data->enable_tas_iec = enabled_iec;
return enabled;
}
static __le32 iwl_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt)
{
int ret;
u32 val;
__le32 config_bitmap = 0;
switch (CSR_HW_RFID_TYPE(fwrt->trans->hw_rf_id)) {
case IWL_CFG_RF_TYPE_HR1:
case IWL_CFG_RF_TYPE_HR2:
case IWL_CFG_RF_TYPE_JF1:
case IWL_CFG_RF_TYPE_JF2:
ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_INDONESIA_5G2,
&val);
if (!ret && val == DSM_VALUE_INDONESIA_ENABLE)
config_bitmap |=
cpu_to_le32(LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK);
break;
default:
break;
}
ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_DISABLE_SRD, &val);
if (!ret) {
if (val == DSM_VALUE_SRD_PASSIVE)
config_bitmap |=
cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK);
else if (val == DSM_VALUE_SRD_DISABLE)
config_bitmap |=
cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK);
}
if (fw_has_capa(&fwrt->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_CHINA_22_REG_SUPPORT)) {
ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_REGULATORY_CONFIG,
&val);
/*
 * Enable China 2022 regulatory support if the BIOS object does not
 * exist or if it is enabled in BIOS.
*/
if (ret < 0 || val & DSM_MASK_CHINA_22_REG)
config_bitmap |=
cpu_to_le32(LARI_CONFIG_ENABLE_CHINA_22_REG_SUPPORT_MSK);
}
return config_bitmap;
}
static size_t iwl_get_lari_config_cmd_size(u8 cmd_ver)
{
size_t cmd_size;
switch (cmd_ver) {
case 12:
case 11:
cmd_size = sizeof(struct iwl_lari_config_change_cmd);
break;
case 10:
cmd_size = sizeof(struct iwl_lari_config_change_cmd_v10);
break;
case 9:
case 8:
case 7:
cmd_size = sizeof(struct iwl_lari_config_change_cmd_v7);
break;
case 6:
cmd_size = sizeof(struct iwl_lari_config_change_cmd_v6);
break;
case 5:
cmd_size = sizeof(struct iwl_lari_config_change_cmd_v5);
break;
case 4:
cmd_size = sizeof(struct iwl_lari_config_change_cmd_v4);
break;
case 3:
cmd_size = sizeof(struct iwl_lari_config_change_cmd_v3);
break;
case 2:
cmd_size = sizeof(struct iwl_lari_config_change_cmd_v2);
break;
default:
cmd_size = sizeof(struct iwl_lari_config_change_cmd_v1);
break;
}
return cmd_size;
}
int iwl_fill_lari_config(struct iwl_fw_runtime *fwrt,
struct iwl_lari_config_change_cmd *cmd,
size_t *cmd_size)
{
int ret;
u32 value;
u8 cmd_ver = iwl_fw_lookup_cmd_ver(fwrt->fw,
WIDE_ID(REGULATORY_AND_NVM_GROUP,
LARI_CONFIG_CHANGE), 1);
memset(cmd, 0, sizeof(*cmd));
*cmd_size = iwl_get_lari_config_cmd_size(cmd_ver);
cmd->config_bitmap = iwl_get_lari_config_bitmap(fwrt);
ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_11AX_ENABLEMENT, &value);
if (!ret)
cmd->oem_11ax_allow_bitmap = cpu_to_le32(value);
ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_UNII4_CHAN, &value);
if (!ret) {
if (cmd_ver < 9)
value &= DSM_UNII4_ALLOW_BITMAP_CMD_V8;
else
value &= DSM_UNII4_ALLOW_BITMAP;
cmd->oem_unii4_allow_bitmap = cpu_to_le32(value);
}
ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ACTIVATE_CHANNEL, &value);
if (!ret) {
if (cmd_ver < 8)
value &= ~ACTIVATE_5G2_IN_WW_MASK;
if (cmd_ver < 12)
value &= CHAN_STATE_ACTIVE_BITMAP_CMD_V11;
cmd->chan_state_active_bitmap = cpu_to_le32(value);
}
ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_6E, &value);
if (!ret)
cmd->oem_uhb_allow_bitmap = cpu_to_le32(value);
ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_FORCE_DISABLE_CHANNELS, &value);
if (!ret)
cmd->force_disable_channels_bitmap = cpu_to_le32(value);
ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENERGY_DETECTION_THRESHOLD,
&value);
if (!ret)
cmd->edt_bitmap = cpu_to_le32(value);
ret = iwl_bios_get_wbem(fwrt, &value);
if (!ret)
cmd->oem_320mhz_allow_bitmap = cpu_to_le32(value);
ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_11BE, &value);
if (!ret)
cmd->oem_11be_allow_bitmap = cpu_to_le32(value);
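	/*
	 * Only send LARI_CONFIG_CHANGE if at least one bitmap was set by
	 * BIOS; otherwise return 1 so the caller can skip the command.
	 */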
if (cmd->config_bitmap ||
cmd->oem_uhb_allow_bitmap ||
cmd->oem_11ax_allow_bitmap ||
cmd->oem_unii4_allow_bitmap ||
cmd->chan_state_active_bitmap ||
cmd->force_disable_channels_bitmap ||
cmd->edt_bitmap ||
cmd->oem_320mhz_allow_bitmap ||
cmd->oem_11be_allow_bitmap) {
IWL_DEBUG_RADIO(fwrt,
"sending LARI_CONFIG_CHANGE, config_bitmap=0x%x, oem_11ax_allow_bitmap=0x%x\n",
le32_to_cpu(cmd->config_bitmap),
le32_to_cpu(cmd->oem_11ax_allow_bitmap));
IWL_DEBUG_RADIO(fwrt,
"sending LARI_CONFIG_CHANGE, oem_unii4_allow_bitmap=0x%x, chan_state_active_bitmap=0x%x, cmd_ver=%d\n",
le32_to_cpu(cmd->oem_unii4_allow_bitmap),
le32_to_cpu(cmd->chan_state_active_bitmap),
cmd_ver);
IWL_DEBUG_RADIO(fwrt,
"sending LARI_CONFIG_CHANGE, oem_uhb_allow_bitmap=0x%x, force_disable_channels_bitmap=0x%x\n",
le32_to_cpu(cmd->oem_uhb_allow_bitmap),
le32_to_cpu(cmd->force_disable_channels_bitmap));
IWL_DEBUG_RADIO(fwrt,
"sending LARI_CONFIG_CHANGE, edt_bitmap=0x%x, oem_320mhz_allow_bitmap=0x%x\n",
le32_to_cpu(cmd->edt_bitmap),
le32_to_cpu(cmd->oem_320mhz_allow_bitmap));
IWL_DEBUG_RADIO(fwrt,
"sending LARI_CONFIG_CHANGE, oem_11be_allow_bitmap=0x%x\n",
le32_to_cpu(cmd->oem_11be_allow_bitmap));
} else {
return 1;
}
return 0;
}
IWL_EXPORT_SYMBOL(iwl_fill_lari_config);
int iwl_bios_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func,
u32 *value)
{
GET_BIOS_TABLE(dsm, fwrt, func, value);
}
IWL_EXPORT_SYMBOL(iwl_bios_get_dsm);
bool iwl_puncturing_is_allowed_in_bios(u32 puncturing, u16 mcc)
{
	/* Some kind of regulatory mess means we currently need to disallow
* puncturing in the US and Canada unless enabled in BIOS.
*/
switch (mcc) {
case IWL_MCC_US:
return puncturing & IWL_UEFI_CNV_PUNCTURING_USA_EN_MSK;
case IWL_MCC_CANADA:
return puncturing & IWL_UEFI_CNV_PUNCTURING_CANADA_EN_MSK;
default:
return true;
}
}
IWL_EXPORT_SYMBOL(iwl_puncturing_is_allowed_in_bios);
|
// SPDX-License-Identifier: GPL-2.0
/*
* Device Tree file for Seagate NAS 4-Bay (Armada 370 SoC).
*
* Copyright (C) 2015 Seagate
*
* Author: Vincent Donnefort <[email protected]>
*/
/*
 * Here is some information to help identify the device:
*
* Product name : Seagate NAS 4-Bay
* Code name (board/PCB) : Dart 4-Bay
* Model name (case sticker) : SRPD40
* Material desc (product spec) : STCUxxxxxxx
*/
/dts-v1/;
#include "armada-370-seagate-nas-xbay.dtsi"
#include <dt-bindings/leds/leds-ns2.h>
/ {
model = "Seagate NAS 4-Bay (Dart, SRPD40)";
compatible = "seagate,dart-4", "marvell,armada370", "marvell,armada-370-xp";
soc {
internal-regs {
ethernet@74000 {
status = "okay";
pinctrl-0 = <&ge1_rgmii_pins>;
pinctrl-names = "default";
phy = <&phy1>;
phy-mode = "rgmii-id";
};
i2c@11000 {
/* I2C GPIO expander (PCA9554A) */
pca9554: pca9554@21 {
compatible = "nxp,pca9554";
reg = <0x21>;
#gpio-cells = <2>;
gpio-controller;
};
};
};
};
regulator-3 {
compatible = "regulator-fixed";
regulator-name = "SATA2 power";
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
enable-active-high;
regulator-always-on;
regulator-boot-on;
gpio = <&pca9554 6 GPIO_ACTIVE_HIGH>;
};
regulator-4 {
compatible = "regulator-fixed";
regulator-name = "SATA3 power";
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
enable-active-high;
regulator-always-on;
regulator-boot-on;
gpio = <&pca9554 7 GPIO_ACTIVE_HIGH>;
};
gpio-leds {
led-red-sata2 {
label = "dart:red:sata2";
gpios = <&pca9554 0 GPIO_ACTIVE_LOW>;
};
led-red-sata3 {
label = "dart:red:sata3";
gpios = <&pca9554 3 GPIO_ACTIVE_LOW>;
};
};
leds-ns2 {
compatible = "lacie,ns2-leds";
white-sata2 {
label = "dart:white:sata2";
cmd-gpio = <&pca9554 1 GPIO_ACTIVE_HIGH>;
slow-gpio = <&pca9554 2 GPIO_ACTIVE_HIGH>;
num-modes = <4>;
modes-map = <NS_V2_LED_SATA 0 0
NS_V2_LED_OFF 0 1
NS_V2_LED_ON 1 0
NS_V2_LED_ON 1 1>;
};
white-sata3 {
label = "dart:white:sata3";
cmd-gpio = <&pca9554 4 GPIO_ACTIVE_HIGH>;
slow-gpio = <&pca9554 5 GPIO_ACTIVE_HIGH>;
num-modes = <4>;
modes-map = <NS_V2_LED_SATA 0 0
NS_V2_LED_OFF 0 1
NS_V2_LED_ON 1 0
NS_V2_LED_ON 1 1>;
};
};
gpio-fan {
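		/* speed-map rows are <RPM control-value> pairs, slowest first */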
gpio-fan,speed-map =
< 0 3>,
< 800 2>,
<1050 1>,
<1300 0>;
};
};
&pciec {
/* SATA AHCI controller 88SE9170 */
pcie@1,0 {
status = "okay";
};
};
&mdio {
phy1: ethernet-phy@1 {
reg = <1>;
};
};
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* HDMI driver definition for TI OMAP5 processors.
*
* Copyright (C) 2011-2012 Texas Instruments Incorporated - https://www.ti.com/
*/
#ifndef _HDMI5_CORE_H_
#define _HDMI5_CORE_H_
#include "hdmi.h"
/* HDMI IP Core System */
/* HDMI Identification */
#define HDMI_CORE_DESIGN_ID 0x00000
#define HDMI_CORE_REVISION_ID 0x00004
#define HDMI_CORE_PRODUCT_ID0 0x00008
#define HDMI_CORE_PRODUCT_ID1 0x0000C
#define HDMI_CORE_CONFIG0_ID 0x00010
#define HDMI_CORE_CONFIG1_ID 0x00014
#define HDMI_CORE_CONFIG2_ID 0x00018
#define HDMI_CORE_CONFIG3_ID 0x0001C
/* HDMI Interrupt */
#define HDMI_CORE_IH_FC_STAT0 0x00400
#define HDMI_CORE_IH_FC_STAT1 0x00404
#define HDMI_CORE_IH_FC_STAT2 0x00408
#define HDMI_CORE_IH_AS_STAT0 0x0040C
#define HDMI_CORE_IH_PHY_STAT0 0x00410
#define HDMI_CORE_IH_I2CM_STAT0 0x00414
#define HDMI_CORE_IH_CEC_STAT0 0x00418
#define HDMI_CORE_IH_VP_STAT0 0x0041C
#define HDMI_CORE_IH_I2CMPHY_STAT0 0x00420
#define HDMI_CORE_IH_MUTE 0x007FC
/* HDMI Video Sampler */
#define HDMI_CORE_TX_INVID0 0x00800
#define HDMI_CORE_TX_INSTUFFING 0x00804
#define HDMI_CORE_TX_RGYDATA0 0x00808
#define HDMI_CORE_TX_RGYDATA1 0x0080C
#define HDMI_CORE_TX_RCRDATA0 0x00810
#define HDMI_CORE_TX_RCRDATA1 0x00814
#define HDMI_CORE_TX_BCBDATA0 0x00818
#define HDMI_CORE_TX_BCBDATA1 0x0081C
/* HDMI Video Packetizer */
#define HDMI_CORE_VP_STATUS 0x02000
#define HDMI_CORE_VP_PR_CD 0x02004
#define HDMI_CORE_VP_STUFF 0x02008
#define HDMI_CORE_VP_REMAP 0x0200C
#define HDMI_CORE_VP_CONF 0x02010
#define HDMI_CORE_VP_STAT 0x02014
#define HDMI_CORE_VP_INT 0x02018
#define HDMI_CORE_VP_MASK 0x0201C
#define HDMI_CORE_VP_POL 0x02020
/* Frame Composer */
#define HDMI_CORE_FC_INVIDCONF 0x04000
#define HDMI_CORE_FC_INHACTIV0 0x04004
#define HDMI_CORE_FC_INHACTIV1 0x04008
#define HDMI_CORE_FC_INHBLANK0 0x0400C
#define HDMI_CORE_FC_INHBLANK1 0x04010
#define HDMI_CORE_FC_INVACTIV0 0x04014
#define HDMI_CORE_FC_INVACTIV1 0x04018
#define HDMI_CORE_FC_INVBLANK 0x0401C
#define HDMI_CORE_FC_HSYNCINDELAY0 0x04020
#define HDMI_CORE_FC_HSYNCINDELAY1 0x04024
#define HDMI_CORE_FC_HSYNCINWIDTH0 0x04028
#define HDMI_CORE_FC_HSYNCINWIDTH1 0x0402C
#define HDMI_CORE_FC_VSYNCINDELAY 0x04030
#define HDMI_CORE_FC_VSYNCINWIDTH 0x04034
#define HDMI_CORE_FC_INFREQ0 0x04038
#define HDMI_CORE_FC_INFREQ1 0x0403C
#define HDMI_CORE_FC_INFREQ2 0x04040
#define HDMI_CORE_FC_CTRLDUR 0x04044
#define HDMI_CORE_FC_EXCTRLDUR 0x04048
#define HDMI_CORE_FC_EXCTRLSPAC 0x0404C
#define HDMI_CORE_FC_CH0PREAM 0x04050
#define HDMI_CORE_FC_CH1PREAM 0x04054
#define HDMI_CORE_FC_CH2PREAM 0x04058
#define HDMI_CORE_FC_AVICONF3 0x0405C
#define HDMI_CORE_FC_GCP 0x04060
#define HDMI_CORE_FC_AVICONF0 0x04064
#define HDMI_CORE_FC_AVICONF1 0x04068
#define HDMI_CORE_FC_AVICONF2 0x0406C
#define HDMI_CORE_FC_AVIVID 0x04070
#define HDMI_CORE_FC_AVIETB0 0x04074
#define HDMI_CORE_FC_AVIETB1 0x04078
#define HDMI_CORE_FC_AVISBB0 0x0407C
#define HDMI_CORE_FC_AVISBB1 0x04080
#define HDMI_CORE_FC_AVIELB0 0x04084
#define HDMI_CORE_FC_AVIELB1 0x04088
#define HDMI_CORE_FC_AVISRB0 0x0408C
#define HDMI_CORE_FC_AVISRB1 0x04090
#define HDMI_CORE_FC_AUDICONF0 0x04094
#define HDMI_CORE_FC_AUDICONF1 0x04098
#define HDMI_CORE_FC_AUDICONF2 0x0409C
#define HDMI_CORE_FC_AUDICONF3 0x040A0
#define HDMI_CORE_FC_VSDIEEEID0 0x040A4
#define HDMI_CORE_FC_VSDSIZE 0x040A8
#define HDMI_CORE_FC_VSDIEEEID1 0x040C0
#define HDMI_CORE_FC_VSDIEEEID2 0x040C4
#define HDMI_CORE_FC_VSDPAYLOAD(n) (n * 4 + 0x040C8)
#define HDMI_CORE_FC_SPDVENDORNAME(n) (n * 4 + 0x04128)
#define HDMI_CORE_FC_SPDPRODUCTNAME(n) (n * 4 + 0x04148)
#define HDMI_CORE_FC_SPDDEVICEINF 0x04188
#define HDMI_CORE_FC_AUDSCONF 0x0418C
#define HDMI_CORE_FC_AUDSSTAT 0x04190
#define HDMI_CORE_FC_AUDSV 0x04194
#define HDMI_CORE_FC_AUDSU 0x04198
#define HDMI_CORE_FC_AUDSCHNLS(n) (n * 4 + 0x0419C)
#define HDMI_CORE_FC_CTRLQHIGH 0x041CC
#define HDMI_CORE_FC_CTRLQLOW 0x041D0
#define HDMI_CORE_FC_ACP0 0x041D4
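/* ACP and ISCR payload bytes are laid out top-down, hence the reverse indexing */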
#define HDMI_CORE_FC_ACP(n) ((16-n) * 4 + 0x04208)
#define HDMI_CORE_FC_ISCR1_0 0x04248
#define HDMI_CORE_FC_ISCR1(n) ((16-n) * 4 + 0x0424C)
#define HDMI_CORE_FC_ISCR2(n) ((15-n) * 4 + 0x0428C)
#define HDMI_CORE_FC_DATAUTO0 0x042CC
#define HDMI_CORE_FC_DATAUTO1 0x042D0
#define HDMI_CORE_FC_DATAUTO2 0x042D4
#define HDMI_CORE_FC_DATMAN 0x042D8
#define HDMI_CORE_FC_DATAUTO3 0x042DC
#define HDMI_CORE_FC_RDRB(n) (n * 4 + 0x042E0)
#define HDMI_CORE_FC_STAT0 0x04340
#define HDMI_CORE_FC_INT0 0x04344
#define HDMI_CORE_FC_MASK0 0x04348
#define HDMI_CORE_FC_POL0 0x0434C
#define HDMI_CORE_FC_STAT1 0x04350
#define HDMI_CORE_FC_INT1 0x04354
#define HDMI_CORE_FC_MASK1 0x04358
#define HDMI_CORE_FC_POL1 0x0435C
#define HDMI_CORE_FC_STAT2 0x04360
#define HDMI_CORE_FC_INT2 0x04364
#define HDMI_CORE_FC_MASK2 0x04368
#define HDMI_CORE_FC_POL2 0x0436C
#define HDMI_CORE_FC_PRCONF 0x04380
#define HDMI_CORE_FC_GMD_STAT 0x04400
#define HDMI_CORE_FC_GMD_EN 0x04404
#define HDMI_CORE_FC_GMD_UP 0x04408
#define HDMI_CORE_FC_GMD_CONF 0x0440C
#define HDMI_CORE_FC_GMD_HB 0x04410
#define HDMI_CORE_FC_GMD_PB(n) (n * 4 + 0x04414)
#define HDMI_CORE_FC_DBGFORCE 0x04800
#define HDMI_CORE_FC_DBGAUD0CH0 0x04804
#define HDMI_CORE_FC_DBGAUD1CH0 0x04808
#define HDMI_CORE_FC_DBGAUD2CH0 0x0480C
#define HDMI_CORE_FC_DBGAUD0CH1 0x04810
#define HDMI_CORE_FC_DBGAUD1CH1 0x04814
#define HDMI_CORE_FC_DBGAUD2CH1 0x04818
#define HDMI_CORE_FC_DBGAUD0CH2 0x0481C
#define HDMI_CORE_FC_DBGAUD1CH2 0x04820
#define HDMI_CORE_FC_DBGAUD2CH2 0x04824
#define HDMI_CORE_FC_DBGAUD0CH3 0x04828
#define HDMI_CORE_FC_DBGAUD1CH3 0x0482C
#define HDMI_CORE_FC_DBGAUD2CH3 0x04830
#define HDMI_CORE_FC_DBGAUD0CH4 0x04834
#define HDMI_CORE_FC_DBGAUD1CH4 0x04838
#define HDMI_CORE_FC_DBGAUD2CH4 0x0483C
#define HDMI_CORE_FC_DBGAUD0CH5 0x04840
#define HDMI_CORE_FC_DBGAUD1CH5 0x04844
#define HDMI_CORE_FC_DBGAUD2CH5 0x04848
#define HDMI_CORE_FC_DBGAUD0CH6 0x0484C
#define HDMI_CORE_FC_DBGAUD1CH6 0x04850
#define HDMI_CORE_FC_DBGAUD2CH6 0x04854
#define HDMI_CORE_FC_DBGAUD0CH7 0x04858
#define HDMI_CORE_FC_DBGAUD1CH7 0x0485C
#define HDMI_CORE_FC_DBGAUD2CH7 0x04860
#define HDMI_CORE_FC_DBGTMDS0 0x04864
#define HDMI_CORE_FC_DBGTMDS1 0x04868
#define HDMI_CORE_FC_DBGTMDS2 0x0486C
#define HDMI_CORE_PHY_MASK0 0x0C018
#define HDMI_CORE_PHY_I2CM_INT_ADDR 0x0C09C
#define HDMI_CORE_PHY_I2CM_CTLINT_ADDR 0x0C0A0
/* HDMI Audio */
#define HDMI_CORE_AUD_CONF0 0x0C400
#define HDMI_CORE_AUD_CONF1 0x0C404
#define HDMI_CORE_AUD_INT 0x0C408
#define HDMI_CORE_AUD_N1 0x0C800
#define HDMI_CORE_AUD_N2 0x0C804
#define HDMI_CORE_AUD_N3 0x0C808
#define HDMI_CORE_AUD_CTS1 0x0C80C
#define HDMI_CORE_AUD_CTS2 0x0C810
#define HDMI_CORE_AUD_CTS3 0x0C814
#define HDMI_CORE_AUD_INCLKFS 0x0C818
#define HDMI_CORE_AUD_CC08 0x0CC08
#define HDMI_CORE_AUD_GP_CONF0 0x0D400
#define HDMI_CORE_AUD_GP_CONF1 0x0D404
#define HDMI_CORE_AUD_GP_CONF2 0x0D408
#define HDMI_CORE_AUD_D010 0x0D010
#define HDMI_CORE_AUD_GP_STAT 0x0D40C
#define HDMI_CORE_AUD_GP_INT 0x0D410
#define HDMI_CORE_AUD_GP_POL 0x0D414
#define HDMI_CORE_AUD_GP_MASK 0x0D418
/* HDMI Main Controller */
#define HDMI_CORE_MC_CLKDIS 0x10004
#define HDMI_CORE_MC_SWRSTZREQ 0x10008
#define HDMI_CORE_MC_FLOWCTRL 0x10010
#define HDMI_CORE_MC_PHYRSTZ 0x10014
#define HDMI_CORE_MC_LOCKONCLOCK 0x10018
/* HDMI COLOR SPACE CONVERTER */
#define HDMI_CORE_CSC_CFG 0x10400
#define HDMI_CORE_CSC_SCALE 0x10404
#define HDMI_CORE_CSC_COEF_A1_MSB 0x10408
#define HDMI_CORE_CSC_COEF_A1_LSB 0x1040C
#define HDMI_CORE_CSC_COEF_A2_MSB 0x10410
#define HDMI_CORE_CSC_COEF_A2_LSB 0x10414
#define HDMI_CORE_CSC_COEF_A3_MSB 0x10418
#define HDMI_CORE_CSC_COEF_A3_LSB 0x1041C
#define HDMI_CORE_CSC_COEF_A4_MSB 0x10420
#define HDMI_CORE_CSC_COEF_A4_LSB 0x10424
#define HDMI_CORE_CSC_COEF_B1_MSB 0x10428
#define HDMI_CORE_CSC_COEF_B1_LSB 0x1042C
#define HDMI_CORE_CSC_COEF_B2_MSB 0x10430
#define HDMI_CORE_CSC_COEF_B2_LSB 0x10434
#define HDMI_CORE_CSC_COEF_B3_MSB 0x10438
#define HDMI_CORE_CSC_COEF_B3_LSB 0x1043C
#define HDMI_CORE_CSC_COEF_B4_MSB 0x10440
#define HDMI_CORE_CSC_COEF_B4_LSB 0x10444
#define HDMI_CORE_CSC_COEF_C1_MSB 0x10448
#define HDMI_CORE_CSC_COEF_C1_LSB 0x1044C
#define HDMI_CORE_CSC_COEF_C2_MSB 0x10450
#define HDMI_CORE_CSC_COEF_C2_LSB 0x10454
#define HDMI_CORE_CSC_COEF_C3_MSB 0x10458
#define HDMI_CORE_CSC_COEF_C3_LSB 0x1045C
#define HDMI_CORE_CSC_COEF_C4_MSB 0x10460
#define HDMI_CORE_CSC_COEF_C4_LSB 0x10464
/* HDMI HDCP */
#define HDMI_CORE_HDCP_MASK 0x14020
/* HDMI CEC */
#define HDMI_CORE_CEC_MASK 0x17408
/* HDMI I2C Master */
#define HDMI_CORE_I2CM_SLAVE 0x157C8
#define HDMI_CORE_I2CM_ADDRESS 0x157CC
#define HDMI_CORE_I2CM_DATAO 0x157D0
#define HDMI_CORE_I2CM_DATAI			0x157D4
#define HDMI_CORE_I2CM_OPERATION 0x157D8
#define HDMI_CORE_I2CM_INT 0x157DC
#define HDMI_CORE_I2CM_CTLINT 0x157E0
#define HDMI_CORE_I2CM_DIV 0x157E4
#define HDMI_CORE_I2CM_SEGADDR 0x157E8
#define HDMI_CORE_I2CM_SOFTRSTZ 0x157EC
#define HDMI_CORE_I2CM_SEGPTR 0x157F0
#define HDMI_CORE_I2CM_SS_SCL_HCNT_1_ADDR 0x157F4
#define HDMI_CORE_I2CM_SS_SCL_HCNT_0_ADDR 0x157F8
#define HDMI_CORE_I2CM_SS_SCL_LCNT_1_ADDR 0x157FC
#define HDMI_CORE_I2CM_SS_SCL_LCNT_0_ADDR 0x15800
#define HDMI_CORE_I2CM_FS_SCL_HCNT_1_ADDR 0x15804
#define HDMI_CORE_I2CM_FS_SCL_HCNT_0_ADDR 0x15808
#define HDMI_CORE_I2CM_FS_SCL_LCNT_1_ADDR 0x1580C
#define HDMI_CORE_I2CM_FS_SCL_LCNT_0_ADDR 0x15810
#define HDMI_CORE_I2CM_SDA_HOLD_ADDR 0x15814
enum hdmi_core_packet_mode {
HDMI_PACKETMODERESERVEDVALUE = 0,
HDMI_PACKETMODE24BITPERPIXEL = 4,
HDMI_PACKETMODE30BITPERPIXEL = 5,
HDMI_PACKETMODE36BITPERPIXEL = 6,
HDMI_PACKETMODE48BITPERPIXEL = 7,
};
struct hdmi_core_vid_config {
struct hdmi_config v_fc_config;
enum hdmi_core_packet_mode packet_mode;
int data_enable_pol;
int vblank_osc;
int hblank;
int vblank;
};
struct csc_table {
u16 a1, a2, a3, a4;
u16 b1, b2, b3, b4;
u16 c1, c2, c3, c4;
};
void hdmi5_core_ddc_init(struct hdmi_core_data *core);
int hdmi5_core_ddc_read(void *data, u8 *buf, unsigned int block, size_t len);
void hdmi5_core_ddc_uninit(struct hdmi_core_data *core);
void hdmi5_core_dump(struct hdmi_core_data *core, struct seq_file *s);
int hdmi5_core_handle_irqs(struct hdmi_core_data *core);
void hdmi5_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
struct hdmi_config *cfg);
int hdmi5_core_init(struct platform_device *pdev, struct hdmi_core_data *core);
int hdmi5_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
struct omap_dss_audio *audio, u32 pclk);
#endif
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* AM824 format in Audio and Music Data Transmission Protocol (IEC 61883-6)
*
* Copyright (c) Clemens Ladisch <[email protected]>
* Copyright (c) 2015 Takashi Sakamoto <[email protected]>
*/
#include <linux/slab.h>
#include "amdtp-am824.h"
#define CIP_FMT_AM 0x10
/* "Clock-based rate control mode" is just supported. */
#define AMDTP_FDF_AM824 0x00
/*
* Nominally 3125 bytes/second, but the MIDI port's clock might be
* 1% too slow, and the bus clock 100 ppm too fast.
*/
#define MIDI_BYTES_PER_SECOND 3093
/*
* Several devices look only at the first eight data blocks.
* In any case, this is more than enough for the MIDI data rate.
*/
#define MAX_MIDI_RX_BLOCKS 8
struct amdtp_am824 {
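	/* up to 8 MIDI ports are multiplexed into each MIDI data channel */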
struct snd_rawmidi_substream *midi[AM824_MAX_CHANNELS_FOR_MIDI * 8];
int midi_fifo_limit;
int midi_fifo_used[AM824_MAX_CHANNELS_FOR_MIDI * 8];
unsigned int pcm_channels;
unsigned int midi_ports;
u8 pcm_positions[AM824_MAX_CHANNELS_FOR_PCM];
u8 midi_position;
};
/**
* amdtp_am824_set_parameters - set stream parameters
* @s: the AMDTP stream to configure
* @rate: the sample rate
* @pcm_channels: the number of PCM samples in each data block, to be encoded
* as AM824 multi-bit linear audio
* @midi_ports: the number of MIDI ports (i.e., MPX-MIDI Data Channels)
* @double_pcm_frames: one data block transfers two PCM frames
*
* The parameters must be set before the stream is started, and must not be
* changed while the stream is running.
*/
int amdtp_am824_set_parameters(struct amdtp_stream *s, unsigned int rate,
unsigned int pcm_channels,
unsigned int midi_ports,
bool double_pcm_frames)
{
struct amdtp_am824 *p = s->protocol;
unsigned int midi_channels;
unsigned int pcm_frame_multiplier;
int i, err;
if (amdtp_stream_running(s))
return -EINVAL;
if (pcm_channels > AM824_MAX_CHANNELS_FOR_PCM)
return -EINVAL;
midi_channels = DIV_ROUND_UP(midi_ports, 8);
if (midi_channels > AM824_MAX_CHANNELS_FOR_MIDI)
return -EINVAL;
/*
 * In IEC 61883-6, one data block represents one event. In ALSA, one
 * event equals one PCM frame. But Dice has a quirk at higher sampling
 * rates, transferring two PCM frames in one data block.
*/
if (double_pcm_frames)
pcm_frame_multiplier = 2;
else
pcm_frame_multiplier = 1;
err = amdtp_stream_set_parameters(s, rate, pcm_channels + midi_channels,
pcm_frame_multiplier);
if (err < 0)
return err;
if (s->direction == AMDTP_OUT_STREAM)
s->ctx_data.rx.fdf = AMDTP_FDF_AM824 | s->sfc;
p->pcm_channels = pcm_channels;
p->midi_ports = midi_ports;
/* init the position map for PCM and MIDI channels */
for (i = 0; i < pcm_channels; i++)
p->pcm_positions[i] = i;
p->midi_position = p->pcm_channels;
/*
* We do not know the actual MIDI FIFO size of most devices. Just
* assume two bytes, i.e., one byte can be received over the bus while
* the previous one is transmitted over MIDI.
* (The value here is adjusted for midi_ratelimit_per_packet().)
*/
p->midi_fifo_limit = rate - MIDI_BYTES_PER_SECOND * s->syt_interval + 1;
return 0;
}
EXPORT_SYMBOL_GPL(amdtp_am824_set_parameters);
/**
 * amdtp_am824_set_pcm_position - set the index of the data channel used for a
 *				   channel of the PCM frame
 * @s: the AMDTP stream
 * @index: the index of the data channel in a data block
 * @position: the channel of the PCM frame
*/
void amdtp_am824_set_pcm_position(struct amdtp_stream *s, unsigned int index,
unsigned int position)
{
struct amdtp_am824 *p = s->protocol;
if (index < p->pcm_channels)
p->pcm_positions[index] = position;
}
EXPORT_SYMBOL_GPL(amdtp_am824_set_pcm_position);
/**
 * amdtp_am824_set_midi_position - set the index of the data channel used for
 *				    MIDI conformant data
 * @s: the AMDTP stream
 * @position: the index of the data channel in a data block
*/
void amdtp_am824_set_midi_position(struct amdtp_stream *s,
unsigned int position)
{
struct amdtp_am824 *p = s->protocol;
p->midi_position = position;
}
EXPORT_SYMBOL_GPL(amdtp_am824_set_midi_position);
static void write_pcm_s32(struct amdtp_stream *s, struct snd_pcm_substream *pcm,
__be32 *buffer, unsigned int frames,
unsigned int pcm_frames)
{
struct amdtp_am824 *p = s->protocol;
unsigned int channels = p->pcm_channels;
struct snd_pcm_runtime *runtime = pcm->runtime;
unsigned int pcm_buffer_pointer;
int remaining_frames;
const u32 *src;
int i, c;
pcm_buffer_pointer = s->pcm_buffer_pointer + pcm_frames;
pcm_buffer_pointer %= runtime->buffer_size;
src = (void *)runtime->dma_area +
frames_to_bytes(runtime, pcm_buffer_pointer);
remaining_frames = runtime->buffer_size - pcm_buffer_pointer;
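	/* label 0x40 in the top byte marks AM824 multi-bit linear audio */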
for (i = 0; i < frames; ++i) {
for (c = 0; c < channels; ++c) {
buffer[p->pcm_positions[c]] =
cpu_to_be32((*src >> 8) | 0x40000000);
src++;
}
buffer += s->data_block_quadlets;
if (--remaining_frames == 0)
src = (void *)runtime->dma_area;
}
}
static void read_pcm_s32(struct amdtp_stream *s, struct snd_pcm_substream *pcm,
__be32 *buffer, unsigned int frames,
unsigned int pcm_frames)
{
struct amdtp_am824 *p = s->protocol;
unsigned int channels = p->pcm_channels;
struct snd_pcm_runtime *runtime = pcm->runtime;
unsigned int pcm_buffer_pointer;
int remaining_frames;
u32 *dst;
int i, c;
pcm_buffer_pointer = s->pcm_buffer_pointer + pcm_frames;
pcm_buffer_pointer %= runtime->buffer_size;
dst = (void *)runtime->dma_area +
frames_to_bytes(runtime, pcm_buffer_pointer);
remaining_frames = runtime->buffer_size - pcm_buffer_pointer;
for (i = 0; i < frames; ++i) {
for (c = 0; c < channels; ++c) {
*dst = be32_to_cpu(buffer[p->pcm_positions[c]]) << 8;
dst++;
}
buffer += s->data_block_quadlets;
if (--remaining_frames == 0)
dst = (void *)runtime->dma_area;
}
}
static void write_pcm_silence(struct amdtp_stream *s,
__be32 *buffer, unsigned int frames)
{
struct amdtp_am824 *p = s->protocol;
unsigned int i, c, channels = p->pcm_channels;
for (i = 0; i < frames; ++i) {
for (c = 0; c < channels; ++c)
buffer[p->pcm_positions[c]] = cpu_to_be32(0x40000000);
buffer += s->data_block_quadlets;
}
}
/**
* amdtp_am824_add_pcm_hw_constraints - add hw constraints for PCM substream
* @s: the AMDTP stream for AM824 data block, must be initialized.
* @runtime: the PCM substream runtime
*
*/
int amdtp_am824_add_pcm_hw_constraints(struct amdtp_stream *s,
struct snd_pcm_runtime *runtime)
{
int err;
err = amdtp_stream_add_pcm_hw_constraints(s, runtime);
if (err < 0)
return err;
	/* AM824 in IEC 61883-6 can deliver 24-bit data. */
return snd_pcm_hw_constraint_msbits(runtime, 0, 32, 24);
}
EXPORT_SYMBOL_GPL(amdtp_am824_add_pcm_hw_constraints);
/**
* amdtp_am824_midi_trigger - start/stop playback/capture with a MIDI device
* @s: the AMDTP stream
* @port: index of MIDI port
* @midi: the MIDI device to be started, or %NULL to stop the current device
*
* Call this function on a running isochronous stream to enable the actual
* transmission of MIDI data. This function should be called from the MIDI
* device's .trigger callback.
*/
void amdtp_am824_midi_trigger(struct amdtp_stream *s, unsigned int port,
struct snd_rawmidi_substream *midi)
{
struct amdtp_am824 *p = s->protocol;
if (port < p->midi_ports)
WRITE_ONCE(p->midi[port], midi);
}
EXPORT_SYMBOL_GPL(amdtp_am824_midi_trigger);
/*
* To avoid sending MIDI bytes at too high a rate, assume that the receiving
 * device has a FIFO, and track how much it is filled. This value increases
* by one whenever we send one byte in a packet, but the FIFO empties at
* a constant rate independent of our packet rate. One packet has syt_interval
* samples, so the number of bytes that empty out of the FIFO, per packet(!),
* is MIDI_BYTES_PER_SECOND * syt_interval / sample_rate. To avoid storing
* fractional values, the values in midi_fifo_used[] are measured in bytes
* multiplied by the sample rate.
*/
static bool midi_ratelimit_per_packet(struct amdtp_stream *s, unsigned int port)
{
struct amdtp_am824 *p = s->protocol;
int used;
used = p->midi_fifo_used[port];
if (used == 0) /* common shortcut */
return true;
used -= MIDI_BYTES_PER_SECOND * s->syt_interval;
used = max(used, 0);
p->midi_fifo_used[port] = used;
return used < p->midi_fifo_limit;
}
static void midi_rate_use_one_byte(struct amdtp_stream *s, unsigned int port)
{
struct amdtp_am824 *p = s->protocol;
p->midi_fifo_used[port] += amdtp_rate_table[s->sfc];
}
static void write_midi_messages(struct amdtp_stream *s, __be32 *buffer,
unsigned int frames, unsigned int data_block_counter)
{
struct amdtp_am824 *p = s->protocol;
unsigned int f, port;
u8 *b;
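	/*
	 * The AM824 label byte is 0x80 plus the number of valid MIDI bytes
	 * in the quadlet (0x80 = no data, 0x81 = one byte).
	 */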
for (f = 0; f < frames; f++) {
b = (u8 *)&buffer[p->midi_position];
port = (data_block_counter + f) % 8;
if (f < MAX_MIDI_RX_BLOCKS &&
midi_ratelimit_per_packet(s, port) &&
p->midi[port] != NULL &&
snd_rawmidi_transmit(p->midi[port], &b[1], 1) == 1) {
midi_rate_use_one_byte(s, port);
b[0] = 0x81;
} else {
b[0] = 0x80;
b[1] = 0;
}
b[2] = 0;
b[3] = 0;
buffer += s->data_block_quadlets;
}
}
static void read_midi_messages(struct amdtp_stream *s, __be32 *buffer,
unsigned int frames, unsigned int data_block_counter)
{
struct amdtp_am824 *p = s->protocol;
int len;
u8 *b;
int f;
for (f = 0; f < frames; f++) {
unsigned int port = f;
if (!(s->flags & CIP_UNALIGHED_DBC))
port += data_block_counter;
port %= 8;
b = (u8 *)&buffer[p->midi_position];
len = b[0] - 0x80;
if ((1 <= len) && (len <= 3) && (p->midi[port]))
snd_rawmidi_receive(p->midi[port], b + 1, len);
buffer += s->data_block_quadlets;
}
}
static void process_it_ctx_payloads(struct amdtp_stream *s, const struct pkt_desc *desc,
unsigned int count, struct snd_pcm_substream *pcm)
{
struct amdtp_am824 *p = s->protocol;
unsigned int pcm_frames = 0;
int i;
for (i = 0; i < count; ++i) {
__be32 *buf = desc->ctx_payload;
unsigned int data_blocks = desc->data_blocks;
if (pcm) {
write_pcm_s32(s, pcm, buf, data_blocks, pcm_frames);
pcm_frames += data_blocks * s->pcm_frame_multiplier;
} else {
write_pcm_silence(s, buf, data_blocks);
}
if (p->midi_ports) {
write_midi_messages(s, buf, data_blocks,
desc->data_block_counter);
}
desc = amdtp_stream_next_packet_desc(s, desc);
}
}
static void process_ir_ctx_payloads(struct amdtp_stream *s, const struct pkt_desc *desc,
unsigned int count, struct snd_pcm_substream *pcm)
{
struct amdtp_am824 *p = s->protocol;
unsigned int pcm_frames = 0;
int i;
for (i = 0; i < count; ++i) {
__be32 *buf = desc->ctx_payload;
unsigned int data_blocks = desc->data_blocks;
if (pcm) {
read_pcm_s32(s, pcm, buf, data_blocks, pcm_frames);
pcm_frames += data_blocks * s->pcm_frame_multiplier;
}
if (p->midi_ports) {
read_midi_messages(s, buf, data_blocks,
desc->data_block_counter);
}
desc = amdtp_stream_next_packet_desc(s, desc);
}
}
/**
* amdtp_am824_init - initialize an AMDTP stream structure to handle AM824
* data block
* @s: the AMDTP stream to initialize
* @unit: the target of the stream
* @dir: the direction of stream
 * @flags: details of the streaming protocol, consisting of cip_flags
 *	   enumeration constants
*/
int amdtp_am824_init(struct amdtp_stream *s, struct fw_unit *unit,
enum amdtp_stream_direction dir, unsigned int flags)
{
amdtp_stream_process_ctx_payloads_t process_ctx_payloads;
if (dir == AMDTP_IN_STREAM)
process_ctx_payloads = process_ir_ctx_payloads;
else
process_ctx_payloads = process_it_ctx_payloads;
return amdtp_stream_init(s, unit, dir, flags, CIP_FMT_AM,
process_ctx_payloads, sizeof(struct amdtp_am824));
}
EXPORT_SYMBOL_GPL(amdtp_am824_init);
|
/*
* Copyright 2019 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/reboot.h>
#define SMU_11_0_PARTIAL_PPTABLE
#define SWSMU_CODE_LAYER_L3
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_atombios.h"
#include "smu_v11_0.h"
#include "soc15_common.h"
#include "atom.h"
#include "amdgpu_ras.h"
#include "smu_cmn.h"
#include "asic_reg/thm/thm_11_0_2_offset.h"
#include "asic_reg/thm/thm_11_0_2_sh_mask.h"
#include "asic_reg/mp/mp_11_0_offset.h"
#include "asic_reg/mp/mp_11_0_sh_mask.h"
#include "asic_reg/smuio/smuio_11_0_0_offset.h"
#include "asic_reg/smuio/smuio_11_0_0_sh_mask.h"
/*
* DO NOT use these for err/warn/info/debug messages.
* Use dev_err, dev_warn, dev_info and dev_dbg instead.
* They are more MGPU friendly.
*/
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug
MODULE_FIRMWARE("amdgpu/arcturus_smc.bin");
MODULE_FIRMWARE("amdgpu/navi10_smc.bin");
MODULE_FIRMWARE("amdgpu/navi14_smc.bin");
MODULE_FIRMWARE("amdgpu/navi12_smc.bin");
MODULE_FIRMWARE("amdgpu/sienna_cichlid_smc.bin");
MODULE_FIRMWARE("amdgpu/navy_flounder_smc.bin");
MODULE_FIRMWARE("amdgpu/dimgrey_cavefish_smc.bin");
MODULE_FIRMWARE("amdgpu/beige_goby_smc.bin");
#define SMU11_VOLTAGE_SCALE 4
#define SMU11_MODE1_RESET_WAIT_TIME_IN_MS 500 //500ms
#define smnPCIE_LC_LINK_WIDTH_CNTL 0x11140288
#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK 0x00000070L
#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT 0x4
#define smnPCIE_LC_SPEED_CNTL 0x11140290
#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK 0xC000
#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0xE
#define mmTHM_BACO_CNTL_ARCT 0xA7
#define mmTHM_BACO_CNTL_ARCT_BASE_IDX 0
static void smu_v11_0_poll_baco_exit(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
uint32_t data, loop = 0;
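	/* poll the BACO status bit until the exit completes, up to ~100 ms */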
do {
usleep_range(1000, 1100);
data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL);
} while ((data & 0x100) && (++loop < 100));
}
int smu_v11_0_init_microcode(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
char ucode_prefix[25];
int err = 0;
const struct smc_firmware_header_v1_0 *hdr;
const struct common_firmware_header *header;
struct amdgpu_firmware_info *ucode = NULL;
if (amdgpu_sriov_vf(adev) &&
((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 9)) ||
(amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 7))))
return 0;
amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix));
err = amdgpu_ucode_request(adev, &adev->pm.fw, "amdgpu/%s.bin", ucode_prefix);
if (err)
goto out;
hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
amdgpu_ucode_print_smc_hdr(&hdr->header);
adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
ucode->ucode_id = AMDGPU_UCODE_ID_SMC;
ucode->fw = adev->pm.fw;
header = (const struct common_firmware_header *)ucode->fw->data;
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
}
out:
if (err)
amdgpu_ucode_release(&adev->pm.fw);
return err;
}
void smu_v11_0_fini_microcode(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
amdgpu_ucode_release(&adev->pm.fw);
adev->pm.fw_version = 0;
}
int smu_v11_0_load_microcode(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
const uint32_t *src;
const struct smc_firmware_header_v1_0 *hdr;
uint32_t addr_start = MP1_SRAM;
uint32_t i;
uint32_t smc_fw_size;
uint32_t mp1_fw_flags;
hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
src = (const uint32_t *)(adev->pm.fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
smc_fw_size = hdr->header.ucode_size_bytes;
for (i = 1; i < smc_fw_size/4 - 1; i++) {
WREG32_PCIE(addr_start, src[i]);
addr_start += 4;
}
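	/* pulse MP1 reset: assert then deassert the RESET bit */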
WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff),
1 & MP1_SMN_PUB_CTRL__RESET_MASK);
WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff),
1 & ~MP1_SMN_PUB_CTRL__RESET_MASK);
for (i = 0; i < adev->usec_timeout; i++) {
mp1_fw_flags = RREG32_PCIE(MP1_Public |
(smnMP1_FIRMWARE_FLAGS & 0xffffffff));
if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
break;
udelay(1);
}
if (i == adev->usec_timeout)
return -ETIME;
return 0;
}
int smu_v11_0_check_fw_status(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
uint32_t mp1_fw_flags;
mp1_fw_flags = RREG32_PCIE(MP1_Public |
(smnMP1_FIRMWARE_FLAGS & 0xffffffff));
if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
return 0;
return -EIO;
}
int smu_v11_0_check_fw_version(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
uint32_t if_version = 0xff, smu_version = 0xff;
uint8_t smu_program, smu_major, smu_minor, smu_debug;
int ret = 0;
ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
if (ret)
return ret;
smu_program = (smu_version >> 24) & 0xff;
smu_major = (smu_version >> 16) & 0xff;
smu_minor = (smu_version >> 8) & 0xff;
smu_debug = (smu_version >> 0) & 0xff;
if (smu->is_apu)
adev->pm.fw_version = smu_version;
switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(11, 0, 0):
smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV10;
break;
case IP_VERSION(11, 0, 9):
smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV12;
break;
case IP_VERSION(11, 0, 5):
smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV14;
break;
case IP_VERSION(11, 0, 7):
smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Sienna_Cichlid;
break;
case IP_VERSION(11, 0, 11):
smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Navy_Flounder;
break;
case IP_VERSION(11, 5, 0):
smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_VANGOGH;
break;
case IP_VERSION(11, 0, 12):
smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Dimgrey_Cavefish;
break;
case IP_VERSION(11, 0, 13):
smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Beige_Goby;
break;
case IP_VERSION(11, 0, 8):
smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Cyan_Skillfish;
break;
case IP_VERSION(11, 0, 2):
smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_ARCT;
break;
default:
dev_err(smu->adev->dev, "smu unsupported IP version: 0x%x.\n",
amdgpu_ip_version(adev, MP1_HWIP, 0));
smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_INV;
break;
}
/*
 * 1. An if_version mismatch is not critical as our fw is designed
 * to be backward compatible.
 * 2. New fw usually brings some optimizations. But those are
 * visible only with the paired driver.
 * Considering the above, we just leave the user a message instead
 * of halting driver loading.
*/
if (if_version != smu->smc_driver_if_version) {
dev_info(smu->adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
"smu fw program = %d, version = 0x%08x (%d.%d.%d)\n",
smu->smc_driver_if_version, if_version,
smu_program, smu_version, smu_major, smu_minor, smu_debug);
dev_info(smu->adev->dev, "SMU driver if version not matched\n");
}
return ret;
}
static int smu_v11_0_set_pptable_v2_0(struct smu_context *smu, void **table, uint32_t *size)
{
struct amdgpu_device *adev = smu->adev;
uint32_t ppt_offset_bytes;
const struct smc_firmware_header_v2_0 *v2;
v2 = (const struct smc_firmware_header_v2_0 *) adev->pm.fw->data;
ppt_offset_bytes = le32_to_cpu(v2->ppt_offset_bytes);
*size = le32_to_cpu(v2->ppt_size_bytes);
*table = (uint8_t *)v2 + ppt_offset_bytes;
return 0;
}
static int smu_v11_0_set_pptable_v2_1(struct smu_context *smu, void **table,
uint32_t *size, uint32_t pptable_id)
{
struct amdgpu_device *adev = smu->adev;
const struct smc_firmware_header_v2_1 *v2_1;
struct smc_soft_pptable_entry *entries;
uint32_t pptable_count = 0;
int i = 0;
v2_1 = (const struct smc_firmware_header_v2_1 *) adev->pm.fw->data;
entries = (struct smc_soft_pptable_entry *)
((uint8_t *)v2_1 + le32_to_cpu(v2_1->pptable_entry_offset));
pptable_count = le32_to_cpu(v2_1->pptable_count);
for (i = 0; i < pptable_count; i++) {
if (le32_to_cpu(entries[i].id) == pptable_id) {
*table = ((uint8_t *)v2_1 + le32_to_cpu(entries[i].ppt_offset_bytes));
*size = le32_to_cpu(entries[i].ppt_size_bytes);
break;
}
}
if (i == pptable_count)
return -EINVAL;
return 0;
}
int smu_v11_0_setup_pptable(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
const struct smc_firmware_header_v1_0 *hdr;
int ret, index;
uint32_t size = 0;
uint16_t atom_table_size;
uint8_t frev, crev;
void *table;
uint16_t version_major, version_minor;
if (!amdgpu_sriov_vf(adev)) {
hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
version_major = le16_to_cpu(hdr->header.header_version_major);
version_minor = le16_to_cpu(hdr->header.header_version_minor);
if (version_major == 2 && smu->smu_table.boot_values.pp_table_id > 0) {
dev_info(adev->dev, "use driver provided pptable %d\n", smu->smu_table.boot_values.pp_table_id);
switch (version_minor) {
case 0:
ret = smu_v11_0_set_pptable_v2_0(smu, &table, &size);
break;
case 1:
ret = smu_v11_0_set_pptable_v2_1(smu, &table, &size,
smu->smu_table.boot_values.pp_table_id);
break;
default:
ret = -EINVAL;
break;
}
if (ret)
return ret;
goto out;
}
}
dev_info(adev->dev, "use vbios provided pptable\n");
index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
powerplayinfo);
ret = amdgpu_atombios_get_data_table(adev, index, &atom_table_size, &frev, &crev,
(uint8_t **)&table);
if (ret)
return ret;
size = atom_table_size;
out:
if (!smu->smu_table.power_play_table)
smu->smu_table.power_play_table = table;
if (!smu->smu_table.power_play_table_size)
smu->smu_table.power_play_table_size = size;
return 0;
}
int smu_v11_0_init_smc_tables(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *tables = smu_table->tables;
int ret = 0;
smu_table->driver_pptable =
kzalloc(tables[SMU_TABLE_PPTABLE].size, GFP_KERNEL);
if (!smu_table->driver_pptable) {
ret = -ENOMEM;
goto err0_out;
}
smu_table->max_sustainable_clocks =
kzalloc(sizeof(struct smu_11_0_max_sustainable_clocks), GFP_KERNEL);
if (!smu_table->max_sustainable_clocks) {
ret = -ENOMEM;
goto err1_out;
}
/* Arcturus does not support OVERDRIVE */
if (tables[SMU_TABLE_OVERDRIVE].size) {
smu_table->overdrive_table =
kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
if (!smu_table->overdrive_table) {
ret = -ENOMEM;
goto err2_out;
}
smu_table->boot_overdrive_table =
kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
if (!smu_table->boot_overdrive_table) {
ret = -ENOMEM;
goto err3_out;
}
smu_table->user_overdrive_table =
kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
if (!smu_table->user_overdrive_table) {
ret = -ENOMEM;
goto err4_out;
}
}
return 0;
err4_out:
kfree(smu_table->boot_overdrive_table);
err3_out:
kfree(smu_table->overdrive_table);
err2_out:
kfree(smu_table->max_sustainable_clocks);
err1_out:
kfree(smu_table->driver_pptable);
err0_out:
return ret;
}
int smu_v11_0_fini_smc_tables(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
kfree(smu_table->gpu_metrics_table);
kfree(smu_table->user_overdrive_table);
kfree(smu_table->boot_overdrive_table);
kfree(smu_table->overdrive_table);
kfree(smu_table->max_sustainable_clocks);
kfree(smu_table->driver_pptable);
kfree(smu_table->clocks_table);
smu_table->gpu_metrics_table = NULL;
smu_table->user_overdrive_table = NULL;
smu_table->boot_overdrive_table = NULL;
smu_table->overdrive_table = NULL;
smu_table->max_sustainable_clocks = NULL;
smu_table->driver_pptable = NULL;
smu_table->clocks_table = NULL;
kfree(smu_table->hardcode_pptable);
smu_table->hardcode_pptable = NULL;
kfree(smu_table->driver_smu_config_table);
kfree(smu_table->ecc_table);
kfree(smu_table->metrics_table);
kfree(smu_table->watermarks_table);
smu_table->driver_smu_config_table = NULL;
smu_table->ecc_table = NULL;
smu_table->metrics_table = NULL;
smu_table->watermarks_table = NULL;
smu_table->metrics_time = 0;
kfree(smu_dpm->dpm_context);
kfree(smu_dpm->golden_dpm_context);
kfree(smu_dpm->dpm_current_power_state);
kfree(smu_dpm->dpm_request_power_state);
smu_dpm->dpm_context = NULL;
smu_dpm->golden_dpm_context = NULL;
smu_dpm->dpm_context_size = 0;
smu_dpm->dpm_current_power_state = NULL;
smu_dpm->dpm_request_power_state = NULL;
return 0;
}
int smu_v11_0_init_power(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
struct smu_power_context *smu_power = &smu->smu_power;
size_t size = amdgpu_ip_version(adev, MP1_HWIP, 0) ==
IP_VERSION(11, 5, 0) ?
sizeof(struct smu_11_5_power_context) :
sizeof(struct smu_11_0_power_context);
smu_power->power_context = kzalloc(size, GFP_KERNEL);
if (!smu_power->power_context)
return -ENOMEM;
smu_power->power_context_size = size;
return 0;
}
int smu_v11_0_fini_power(struct smu_context *smu)
{
struct smu_power_context *smu_power = &smu->smu_power;
kfree(smu_power->power_context);
smu_power->power_context = NULL;
smu_power->power_context_size = 0;
return 0;
}
static int smu_v11_0_atom_get_smu_clockinfo(struct amdgpu_device *adev,
uint8_t clk_id,
uint8_t syspll_id,
uint32_t *clk_freq)
{
struct atom_get_smu_clock_info_parameters_v3_1 input = {0};
struct atom_get_smu_clock_info_output_parameters_v3_1 *output;
int ret, index;
input.clk_id = clk_id;
input.syspll_id = syspll_id;
input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
getsmuclockinfo);
ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
(uint32_t *)&input, sizeof(input));
if (ret)
return -EINVAL;
output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
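	/* the VBIOS reports the clock in Hz; boot values use 10 kHz units */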
*clk_freq = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
return 0;
}
int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu)
{
int ret, index;
uint16_t size;
uint8_t frev, crev;
struct atom_common_table_header *header;
struct atom_firmware_info_v3_3 *v_3_3;
struct atom_firmware_info_v3_1 *v_3_1;
index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
firmwareinfo);
ret = amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev,
(uint8_t **)&header);
if (ret)
return ret;
if (header->format_revision != 3) {
dev_err(smu->adev->dev, "unknown atom_firmware_info version! for smu11\n");
return -EINVAL;
}
switch (header->content_revision) {
case 0:
case 1:
case 2:
v_3_1 = (struct atom_firmware_info_v3_1 *)header;
smu->smu_table.boot_values.revision = v_3_1->firmware_revision;
smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz;
smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz;
smu->smu_table.boot_values.socclk = 0;
smu->smu_table.boot_values.dcefclk = 0;
smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv;
smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv;
smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv;
smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv;
smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id;
smu->smu_table.boot_values.pp_table_id = 0;
smu->smu_table.boot_values.firmware_caps = v_3_1->firmware_capability;
break;
case 3:
case 4:
default:
v_3_3 = (struct atom_firmware_info_v3_3 *)header;
smu->smu_table.boot_values.revision = v_3_3->firmware_revision;
smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz;
smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz;
smu->smu_table.boot_values.socclk = 0;
smu->smu_table.boot_values.dcefclk = 0;
smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv;
smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv;
smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv;
smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv;
smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id;
smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id;
smu->smu_table.boot_values.firmware_caps = v_3_3->firmware_capability;
}
smu->smu_table.boot_values.format_revision = header->format_revision;
smu->smu_table.boot_values.content_revision = header->content_revision;
smu_v11_0_atom_get_smu_clockinfo(smu->adev,
(uint8_t)SMU11_SYSPLL0_SOCCLK_ID,
(uint8_t)0,
&smu->smu_table.boot_values.socclk);
smu_v11_0_atom_get_smu_clockinfo(smu->adev,
(uint8_t)SMU11_SYSPLL0_DCEFCLK_ID,
(uint8_t)0,
&smu->smu_table.boot_values.dcefclk);
smu_v11_0_atom_get_smu_clockinfo(smu->adev,
(uint8_t)SMU11_SYSPLL0_ECLK_ID,
(uint8_t)0,
&smu->smu_table.boot_values.eclk);
smu_v11_0_atom_get_smu_clockinfo(smu->adev,
(uint8_t)SMU11_SYSPLL0_VCLK_ID,
(uint8_t)0,
&smu->smu_table.boot_values.vclk);
smu_v11_0_atom_get_smu_clockinfo(smu->adev,
(uint8_t)SMU11_SYSPLL0_DCLK_ID,
(uint8_t)0,
&smu->smu_table.boot_values.dclk);
if ((smu->smu_table.boot_values.format_revision == 3) &&
(smu->smu_table.boot_values.content_revision >= 2))
smu_v11_0_atom_get_smu_clockinfo(smu->adev,
(uint8_t)SMU11_SYSPLL1_0_FCLK_ID,
(uint8_t)SMU11_SYSPLL1_2_ID,
&smu->smu_table.boot_values.fclk);
smu_v11_0_atom_get_smu_clockinfo(smu->adev,
(uint8_t)SMU11_SYSPLL3_1_LCLK_ID,
(uint8_t)SMU11_SYSPLL3_1_ID,
&smu->smu_table.boot_values.lclk);
return 0;
}
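/*
 * Hand the reserved memory pool over to the SMU. Both the CPU virtual
 * address and the GPU (MC) address are sent as high/low 32-bit pairs,
 * followed by the pool size; judging by the message names, the
 * firmware uses this pool for DRAM logging.
 */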
int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *memory_pool = &smu_table->memory_pool;
int ret = 0;
uint64_t address;
uint32_t address_low, address_high;
if (memory_pool->size == 0 || memory_pool->cpu_addr == NULL)
return ret;
address = (uintptr_t)memory_pool->cpu_addr;
address_high = (uint32_t)upper_32_bits(address);
address_low = (uint32_t)lower_32_bits(address);
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetSystemVirtualDramAddrHigh,
address_high,
NULL);
if (ret)
return ret;
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetSystemVirtualDramAddrLow,
address_low,
NULL);
if (ret)
return ret;
address = memory_pool->mc_address;
address_high = (uint32_t)upper_32_bits(address);
address_low = (uint32_t)lower_32_bits(address);
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
address_high, NULL);
if (ret)
return ret;
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
address_low, NULL);
if (ret)
return ret;
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
(uint32_t)memory_pool->size, NULL);
if (ret)
return ret;
return ret;
}
int smu_v11_0_set_min_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
{
int ret;
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetMinDeepSleepDcefclk, clk, NULL);
if (ret)
dev_err(smu->adev->dev, "SMU11 attempt to set divider for DCEFCLK Failed!");
return ret;
}
int smu_v11_0_set_driver_table_location(struct smu_context *smu)
{
struct smu_table *driver_table = &smu->smu_table.driver_table;
int ret = 0;
if (driver_table->mc_address) {
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetDriverDramAddrHigh,
upper_32_bits(driver_table->mc_address),
NULL);
if (!ret)
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetDriverDramAddrLow,
lower_32_bits(driver_table->mc_address),
NULL);
}
return ret;
}
int smu_v11_0_set_tool_table_location(struct smu_context *smu)
{
int ret = 0;
struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG];
if (tool_table->mc_address) {
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetToolsDramAddrHigh,
upper_32_bits(tool_table->mc_address),
NULL);
if (!ret)
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetToolsDramAddrLow,
lower_32_bits(tool_table->mc_address),
NULL);
}
return ret;
}
int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
{
struct amdgpu_device *adev = smu->adev;
/* Navy_Flounder/Dimgrey_Cavefish do not support to change
* display num currently
*/
if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 11) ||
amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 5, 0) ||
amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 12) ||
amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 13))
return 0;
return smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_NumOfDisplays,
count,
NULL);
}
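/*
 * The allowed-features bitmap is 64 bits wide: split it into two
 * 32-bit words and send the high word first.
 */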
int smu_v11_0_set_allowed_mask(struct smu_context *smu)
{
struct smu_feature *feature = &smu->smu_feature;
int ret = 0;
uint32_t feature_mask[2];
if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) || feature->feature_num < 64) {
ret = -EINVAL;
goto failed;
}
bitmap_to_arr32(feature_mask, feature->allowed, 64);
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
feature_mask[1], NULL);
if (ret)
goto failed;
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskLow,
feature_mask[0], NULL);
if (ret)
goto failed;
failed:
return ret;
}
int smu_v11_0_system_features_control(struct smu_context *smu,
bool en)
{
return smu_cmn_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
SMU_MSG_DisableAllSmuFeatures), NULL);
}
int smu_v11_0_notify_display_change(struct smu_context *smu)
{
int ret = 0;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM)
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1, NULL);
return ret;
}
static int
smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
enum smu_clk_type clock_select)
{
int ret = 0;
int clk_id;
if ((smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, SMU_MSG_GetDcModeMaxDpmFreq) < 0) ||
(smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, SMU_MSG_GetMaxDpmFreq) < 0))
return 0;
clk_id = smu_cmn_to_asic_specific_index(smu,
CMN2ASIC_MAPPING_CLK,
clock_select);
if (clk_id < 0)
return -EINVAL;
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq,
clk_id << 16, clock);
if (ret) {
dev_err(smu->adev->dev, "[GetMaxSustainableClock] Failed to get max DC clock from SMC!");
return ret;
}
if (*clock != 0)
return 0;
/* if DC limit is zero, return AC limit */
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
clk_id << 16, clock);
if (ret) {
dev_err(smu->adev->dev, "[GetMaxSustainableClock] failed to get max AC clock from SMC!");
return ret;
}
return 0;
}
int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
{
struct smu_11_0_max_sustainable_clocks *max_sustainable_clocks =
smu->smu_table.max_sustainable_clocks;
int ret = 0;
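/* Bootup clock values are in 10 kHz units; convert them to MHz. */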
max_sustainable_clocks->uclock = smu->smu_table.boot_values.uclk / 100;
max_sustainable_clocks->soc_clock = smu->smu_table.boot_values.socclk / 100;
max_sustainable_clocks->dcef_clock = smu->smu_table.boot_values.dcefclk / 100;
max_sustainable_clocks->display_clock = 0xFFFFFFFF;
max_sustainable_clocks->phy_clock = 0xFFFFFFFF;
max_sustainable_clocks->pixel_clock = 0xFFFFFFFF;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
ret = smu_v11_0_get_max_sustainable_clock(smu,
&(max_sustainable_clocks->uclock),
SMU_UCLK);
if (ret) {
dev_err(smu->adev->dev, "[%s] failed to get max UCLK from SMC!",
__func__);
return ret;
}
}
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
ret = smu_v11_0_get_max_sustainable_clock(smu,
&(max_sustainable_clocks->soc_clock),
SMU_SOCCLK);
if (ret) {
dev_err(smu->adev->dev, "[%s] failed to get max SOCCLK from SMC!",
__func__);
return ret;
}
}
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
ret = smu_v11_0_get_max_sustainable_clock(smu,
&(max_sustainable_clocks->dcef_clock),
SMU_DCEFCLK);
if (ret) {
dev_err(smu->adev->dev, "[%s] failed to get max DCEFCLK from SMC!",
__func__);
return ret;
}
ret = smu_v11_0_get_max_sustainable_clock(smu,
&(max_sustainable_clocks->display_clock),
SMU_DISPCLK);
if (ret) {
dev_err(smu->adev->dev, "[%s] failed to get max DISPCLK from SMC!",
__func__);
return ret;
}
ret = smu_v11_0_get_max_sustainable_clock(smu,
&(max_sustainable_clocks->phy_clock),
SMU_PHYCLK);
if (ret) {
dev_err(smu->adev->dev, "[%s] failed to get max PHYCLK from SMC!",
__func__);
return ret;
}
ret = smu_v11_0_get_max_sustainable_clock(smu,
&(max_sustainable_clocks->pixel_clock),
SMU_PIXCLK);
if (ret) {
dev_err(smu->adev->dev, "[%s] failed to get max PIXCLK from SMC!",
__func__);
return ret;
}
}
if (max_sustainable_clocks->soc_clock < max_sustainable_clocks->uclock)
max_sustainable_clocks->uclock = max_sustainable_clocks->soc_clock;
return 0;
}
int smu_v11_0_get_current_power_limit(struct smu_context *smu,
uint32_t *power_limit)
{
int power_src;
int ret = 0;
if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT))
return -EINVAL;
power_src = smu_cmn_to_asic_specific_index(smu,
CMN2ASIC_MAPPING_PWR,
smu->adev->pm.ac_power ?
SMU_POWER_SOURCE_AC :
SMU_POWER_SOURCE_DC);
if (power_src < 0)
return -EINVAL;
/*
* BIT 24-31: ControllerId (only PPT0 is supported for now)
* BIT 16-23: PowerSource
*/
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_GetPptLimit,
(0 << 24) | (power_src << 16),
power_limit);
if (ret)
dev_err(smu->adev->dev, "[%s] get PPT limit failed!", __func__);
return ret;
}
int smu_v11_0_set_power_limit(struct smu_context *smu,
enum smu_ppt_limit_type limit_type,
uint32_t limit)
{
int power_src;
int ret = 0;
uint32_t limit_param;
if (limit_type != SMU_DEFAULT_PPT_LIMIT)
return -EINVAL;
if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
dev_err(smu->adev->dev, "Setting new power limit is not supported!\n");
return -EOPNOTSUPP;
}
power_src = smu_cmn_to_asic_specific_index(smu,
CMN2ASIC_MAPPING_PWR,
smu->adev->pm.ac_power ?
SMU_POWER_SOURCE_AC :
SMU_POWER_SOURCE_DC);
if (power_src < 0)
return -EINVAL;
/*
* BIT 24-31: ControllerId (only PPT0 is supported for now)
* BIT 16-23: PowerSource
* BIT 0-15: PowerLimit
*/
limit_param = (limit & 0xFFFF);
limit_param |= 0 << 24;
limit_param |= (power_src) << 16;
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, limit_param, NULL);
if (ret) {
dev_err(smu->adev->dev, "[%s] Set power limit Failed!\n", __func__);
return ret;
}
smu->current_power_limit = limit;
return 0;
}
static int smu_v11_0_ack_ac_dc_interrupt(struct smu_context *smu)
{
return smu_cmn_send_smc_msg(smu,
SMU_MSG_ReenableAcDcInterrupt,
NULL);
}
static int smu_v11_0_process_pending_interrupt(struct smu_context *smu)
{
int ret = 0;
if (smu->dc_controlled_by_gpio &&
smu_cmn_feature_is_enabled(smu, SMU_FEATURE_ACDC_BIT))
ret = smu_v11_0_ack_ac_dc_interrupt(smu);
return ret;
}
void smu_v11_0_interrupt_work(struct smu_context *smu)
{
if (smu_v11_0_ack_ac_dc_interrupt(smu))
dev_err(smu->adev->dev, "Ack AC/DC interrupt Failed!\n");
}
int smu_v11_0_enable_thermal_alert(struct smu_context *smu)
{
int ret = 0;
if (smu->smu_table.thermal_controller_type) {
ret = amdgpu_irq_get(smu->adev, &smu->irq_source, 0);
if (ret)
return ret;
}
/*
* After init there might have been missed interrupts triggered
* before driver registers for interrupt (Ex. AC/DC).
*/
return smu_v11_0_process_pending_interrupt(smu);
}
int smu_v11_0_disable_thermal_alert(struct smu_context *smu)
{
return amdgpu_irq_put(smu->adev, &smu->irq_source, 0);
}
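/*
 * Decode an SVI2 telemetry VID: the constants 6200 and 25 appear to
 * be in 0.25 mV units (the standard SVI2 encoding: 1.55 V max,
 * 6.25 mV per step), which SMU11_VOLTAGE_SCALE converts to mV.
 */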
static uint16_t convert_to_vddc(uint8_t vid)
{
return (uint16_t) ((6200 - (vid * 25)) / SMU11_VOLTAGE_SCALE);
}
int smu_v11_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value)
{
struct amdgpu_device *adev = smu->adev;
uint32_t vdd = 0, val_vid = 0;
if (!value)
return -EINVAL;
val_vid = (RREG32_SOC15(SMUIO, 0, mmSMUSVI0_TEL_PLANE0) &
SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR_MASK) >>
SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR__SHIFT;
vdd = (uint32_t)convert_to_vddc((uint8_t)val_vid);
*value = vdd;
return 0;
}
int
smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
struct pp_display_clock_request
*clock_req)
{
enum amd_pp_clock_type clk_type = clock_req->clock_type;
int ret = 0;
enum smu_clk_type clk_select = 0;
uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) ||
smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
switch (clk_type) {
case amd_pp_dcef_clock:
clk_select = SMU_DCEFCLK;
break;
case amd_pp_disp_clock:
clk_select = SMU_DISPCLK;
break;
case amd_pp_pixel_clock:
clk_select = SMU_PIXCLK;
break;
case amd_pp_phy_clock:
clk_select = SMU_PHYCLK;
break;
case amd_pp_mem_clock:
clk_select = SMU_UCLK;
break;
default:
dev_info(smu->adev->dev, "[%s] Invalid Clock Type!", __func__);
ret = -EINVAL;
break;
}
if (ret)
goto failed;
if (clk_select == SMU_UCLK && smu->disable_uclk_switch)
return 0;
ret = smu_v11_0_set_hard_freq_limited_range(smu, clk_select, clk_freq, 0);
if (clk_select == SMU_UCLK)
smu->hard_min_uclk_req_from_dal = clk_freq;
}
failed:
return ret;
}
int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
{
int ret = 0;
struct amdgpu_device *adev = smu->adev;
switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 5):
case IP_VERSION(11, 0, 9):
case IP_VERSION(11, 0, 7):
case IP_VERSION(11, 0, 11):
case IP_VERSION(11, 0, 12):
case IP_VERSION(11, 0, 13):
case IP_VERSION(11, 5, 0):
if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
return 0;
if (enable)
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);
else
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);
break;
default:
break;
}
return ret;
}
uint32_t
smu_v11_0_get_fan_control_mode(struct smu_context *smu)
{
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT))
return AMD_FAN_CTRL_AUTO;
else
return smu->user_dpm_profile.fan_mode;
}
static int
smu_v11_0_auto_fan_control(struct smu_context *smu, bool auto_fan_control)
{
int ret = 0;
if (!smu_cmn_feature_is_supported(smu, SMU_FEATURE_FAN_CONTROL_BIT))
return 0;
ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT, auto_fan_control);
if (ret)
dev_err(smu->adev->dev, "[%s]%s smc FAN CONTROL feature failed!",
__func__, (auto_fan_control ? "Start" : "Stop"));
return ret;
}
static int
smu_v11_0_set_fan_static_mode(struct smu_context *smu, uint32_t mode)
{
struct amdgpu_device *adev = smu->adev;
WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
CG_FDO_CTRL2, TMIN, 0));
WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
CG_FDO_CTRL2, FDO_PWM_MODE, mode));
return 0;
}
int
smu_v11_0_set_fan_speed_pwm(struct smu_context *smu, uint32_t speed)
{
struct amdgpu_device *adev = smu->adev;
uint32_t duty100, duty;
uint64_t tmp64;
speed = min_t(uint32_t, speed, 255);
duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
CG_FDO_CTRL1, FMAX_DUTY100);
if (!duty100)
return -EINVAL;
tmp64 = (uint64_t)speed * duty100;
do_div(tmp64, 255);
duty = (uint32_t)tmp64;
WREG32_SOC15(THM, 0, mmCG_FDO_CTRL0,
REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL0),
CG_FDO_CTRL0, FDO_STATIC_DUTY, duty));
return smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC);
}
int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
uint32_t speed)
{
struct amdgpu_device *adev = smu->adev;
/*
* The crystal clock frequency used for fan speed RPM calculation
* is always 25 MHz, so hardcode it as 2500 (in 10 kHz units).
*/
uint32_t crystal_clock_freq = 2500;
uint32_t tach_period;
if (speed == 0)
return -EINVAL;
/*
* To prevent from possible overheat, some ASICs may have requirement
* for minimum fan speed:
* - For some NV10 SKU, the fan speed cannot be set lower than
* 700 RPM.
* - For some Sienna Cichlid SKU, the fan speed cannot be set
* lower than 500 RPM.
*/
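/*
 * tach_period is in crystal clock ticks per tach pulse: ticks per
 * minute (crystal_clock_freq is in 10 kHz units) divided by pulses
 * per minute (this assumes 8 tach pulses per fan revolution).
 */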
tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
WREG32_SOC15(THM, 0, mmCG_TACH_CTRL,
REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL),
CG_TACH_CTRL, TARGET_PERIOD,
tach_period));
return smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC_RPM);
}
int smu_v11_0_get_fan_speed_pwm(struct smu_context *smu,
uint32_t *speed)
{
struct amdgpu_device *adev = smu->adev;
uint32_t duty100, duty;
uint64_t tmp64;
/*
* On pre-Sienna Cichlid ASICs, a 0 RPM fan speed may not be correctly
* detected by reading the registers. To work around this, report the
* fan speed as 0 PWM if that is what the user requested.
*/
if ((smu->user_dpm_profile.flags & SMU_CUSTOM_FAN_SPEED_PWM)
&& !smu->user_dpm_profile.fan_speed_pwm) {
*speed = 0;
return 0;
}
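/* Scale the current duty cycle to the 0-255 PWM range. */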
duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
CG_FDO_CTRL1, FMAX_DUTY100);
duty = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_THERMAL_STATUS),
CG_THERMAL_STATUS, FDO_PWM_DUTY);
if (!duty100)
return -EINVAL;
tmp64 = (uint64_t)duty * 255;
do_div(tmp64, duty100);
*speed = min_t(uint32_t, tmp64, 255);
return 0;
}
int smu_v11_0_get_fan_speed_rpm(struct smu_context *smu,
uint32_t *speed)
{
struct amdgpu_device *adev = smu->adev;
uint32_t crystal_clock_freq = 2500;
uint32_t tach_status;
uint64_t tmp64;
/*
* On pre-Sienna Cichlid ASICs, a 0 RPM fan speed may not be correctly
* detected by reading the registers. To work around this, report the
* fan speed as 0 RPM if that is what the user requested.
*/
if ((smu->user_dpm_profile.flags & SMU_CUSTOM_FAN_SPEED_RPM)
&& !smu->user_dpm_profile.fan_speed_rpm) {
*speed = 0;
return 0;
}
tmp64 = (uint64_t)crystal_clock_freq * 60 * 10000;
tach_status = RREG32_SOC15(THM, 0, mmCG_TACH_STATUS);
if (tach_status) {
do_div(tmp64, tach_status);
*speed = (uint32_t)tmp64;
} else {
dev_warn_once(adev->dev, "Got zero output on CG_TACH_STATUS reading!\n");
*speed = 0;
}
return 0;
}
int
smu_v11_0_set_fan_control_mode(struct smu_context *smu,
uint32_t mode)
{
int ret = 0;
switch (mode) {
case AMD_FAN_CTRL_NONE:
ret = smu_v11_0_auto_fan_control(smu, 0);
if (!ret)
ret = smu_v11_0_set_fan_speed_pwm(smu, 255);
break;
case AMD_FAN_CTRL_MANUAL:
ret = smu_v11_0_auto_fan_control(smu, 0);
break;
case AMD_FAN_CTRL_AUTO:
ret = smu_v11_0_auto_fan_control(smu, 1);
break;
default:
break;
}
if (ret) {
dev_err(smu->adev->dev, "[%s]Set fan control mode failed!", __func__);
return -EINVAL;
}
return ret;
}
int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
uint32_t pstate)
{
return smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetXgmiMode,
pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3,
NULL);
}
static int smu_v11_0_set_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
enum amdgpu_interrupt_state state)
{
struct smu_context *smu = adev->powerplay.pp_handle;
uint32_t low, high;
uint32_t val = 0;
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
/* For THM irqs */
val = RREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL);
val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 1);
val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 1);
WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);
WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, 0);
/* For MP1 SW irqs */
val = RREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL);
val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1);
WREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL, val);
break;
case AMDGPU_IRQ_STATE_ENABLE:
/* For THM irqs */
low = max(SMU_THERMAL_MINIMUM_ALERT_TEMP,
smu->thermal_range.min / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES);
high = min(SMU_THERMAL_MAXIMUM_ALERT_TEMP,
smu->thermal_range.software_shutdown_temp);
val = RREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL);
val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 0);
val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 0);
val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high & 0xff));
val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low & 0xff));
val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);
val = (1 << THM_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT);
val |= (1 << THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT);
val |= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT);
WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, val);
/* For MP1 SW irqs */
val = RREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT);
val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE);
val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0);
WREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT, val);
val = RREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL);
val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0);
WREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL, val);
break;
default:
break;
}
return 0;
}
#define THM_11_0__SRCID__THM_DIG_THERM_L2H 0 /* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH */
#define THM_11_0__SRCID__THM_DIG_THERM_H2L 1 /* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL */
#define SMUIO_11_0__SRCID__SMUIO_GPIO19 83
static int smu_v11_0_irq_process(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
struct smu_context *smu = adev->powerplay.pp_handle;
uint32_t client_id = entry->client_id;
uint32_t src_id = entry->src_id;
/*
* ctxid is used to distinguish different
* events for SMCToHost interrupt.
*/
uint32_t ctxid = entry->src_data[0];
uint32_t data;
if (client_id == SOC15_IH_CLIENTID_THM) {
switch (src_id) {
case THM_11_0__SRCID__THM_DIG_THERM_L2H:
schedule_delayed_work(&smu->swctf_delayed_work,
msecs_to_jiffies(AMDGPU_SWCTF_EXTRA_DELAY));
break;
case THM_11_0__SRCID__THM_DIG_THERM_H2L:
dev_emerg(adev->dev, "ERROR: GPU under temperature range detected\n");
break;
default:
dev_emerg(adev->dev, "ERROR: GPU under temperature range unknown src id (%d)\n",
src_id);
break;
}
} else if (client_id == SOC15_IH_CLIENTID_ROM_SMUIO) {
dev_emerg(adev->dev, "ERROR: GPU HW Critical Temperature Fault(aka CTF) detected!\n");
/*
* HW CTF just occurred. Shutdown to prevent further damage.
*/
dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU HW CTF!\n");
orderly_poweroff(true);
} else if (client_id == SOC15_IH_CLIENTID_MP1) {
if (src_id == SMU_IH_INTERRUPT_ID_TO_DRIVER) {
/* ACK SMUToHost interrupt */
data = RREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL);
data = REG_SET_FIELD(data, MP1_SMN_IH_SW_INT_CTRL, INT_ACK, 1);
WREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL, data);
switch (ctxid) {
case SMU_IH_INTERRUPT_CONTEXT_ID_AC:
dev_dbg(adev->dev, "Switched to AC mode!\n");
schedule_work(&smu->interrupt_work);
adev->pm.ac_power = true;
break;
case SMU_IH_INTERRUPT_CONTEXT_ID_DC:
dev_dbg(adev->dev, "Switched to DC mode!\n");
schedule_work(&smu->interrupt_work);
adev->pm.ac_power = false;
break;
case SMU_IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING:
/*
* Increment the throttle interrupt counter
*/
atomic64_inc(&smu->throttle_int_counter);
if (!atomic_read(&adev->throttling_logging_enabled))
return 0;
if (__ratelimit(&adev->throttling_logging_rs))
schedule_work(&smu->throttling_logging_work);
break;
default:
dev_dbg(adev->dev, "Unhandled context id %d from client:%d!\n",
ctxid, client_id);
break;
}
}
}
return 0;
}
static const struct amdgpu_irq_src_funcs smu_v11_0_irq_funcs =
{
.set = smu_v11_0_set_irq_state,
.process = smu_v11_0_irq_process,
};
int smu_v11_0_register_irq_handler(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
struct amdgpu_irq_src *irq_src = &smu->irq_source;
int ret = 0;
irq_src->num_types = 1;
irq_src->funcs = &smu_v11_0_irq_funcs;
ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
THM_11_0__SRCID__THM_DIG_THERM_L2H,
irq_src);
if (ret)
return ret;
ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
THM_11_0__SRCID__THM_DIG_THERM_H2L,
irq_src);
if (ret)
return ret;
/* Register CTF(GPIO_19) interrupt */
ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_ROM_SMUIO,
SMUIO_11_0__SRCID__SMUIO_GPIO19,
irq_src);
if (ret)
return ret;
ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1,
SMU_IH_INTERRUPT_ID_TO_DRIVER,
irq_src);
if (ret)
return ret;
return ret;
}
int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
struct pp_smu_nv_clock_table *max_clocks)
{
struct smu_table_context *table_context = &smu->smu_table;
struct smu_11_0_max_sustainable_clocks *sustainable_clocks = NULL;
if (!max_clocks || !table_context->max_sustainable_clocks)
return -EINVAL;
sustainable_clocks = table_context->max_sustainable_clocks;
max_clocks->dcfClockInKhz =
(unsigned int) sustainable_clocks->dcef_clock * 1000;
max_clocks->displayClockInKhz =
(unsigned int) sustainable_clocks->display_clock * 1000;
max_clocks->phyClockInKhz =
(unsigned int) sustainable_clocks->phy_clock * 1000;
max_clocks->pixelClockInKhz =
(unsigned int) sustainable_clocks->pixel_clock * 1000;
max_clocks->uClockInKhz =
(unsigned int) sustainable_clocks->uclock * 1000;
max_clocks->socClockInKhz =
(unsigned int) sustainable_clocks->soc_clock * 1000;
max_clocks->dscClockInKhz = 0;
max_clocks->dppClockInKhz = 0;
max_clocks->fabricClockInKhz = 0;
return 0;
}
int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu)
{
return smu_cmn_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME, NULL);
}
int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu,
enum smu_baco_seq baco_seq)
{
return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq, NULL);
}
int smu_v11_0_get_bamaco_support(struct smu_context *smu)
{
struct smu_baco_context *smu_baco = &smu->smu_baco;
int bamaco_support = 0;
if (amdgpu_sriov_vf(smu->adev) || !smu_baco->platform_support)
return 0;
if (smu_baco->maco_support)
bamaco_support |= MACO_SUPPORT;
/* report BACO (plus MACO if available) if the ASIC is already in BACO state */
if (smu_v11_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER)
return bamaco_support | BACO_SUPPORT;
/* Arcturus does not support this bit mask */
if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
return 0;
return bamaco_support | BACO_SUPPORT;
}
enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu)
{
struct smu_baco_context *smu_baco = &smu->smu_baco;
return smu_baco->state;
}
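/* Argument to SMU_MSG_EnterBaco: plain BACO vs BACO+MACO (BAMACO) */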
#define D3HOT_BACO_SEQUENCE 0
#define D3HOT_BAMACO_SEQUENCE 2
int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
{
struct smu_baco_context *smu_baco = &smu->smu_baco;
struct amdgpu_device *adev = smu->adev;
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
uint32_t data;
int ret = 0;
if (smu_v11_0_baco_get_state(smu) == state)
return 0;
if (state == SMU_BACO_STATE_ENTER) {
switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(11, 0, 7):
case IP_VERSION(11, 0, 11):
case IP_VERSION(11, 0, 12):
case IP_VERSION(11, 0, 13):
if (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_EnterBaco,
D3HOT_BAMACO_SEQUENCE,
NULL);
else
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_EnterBaco,
D3HOT_BACO_SEQUENCE,
NULL);
break;
default:
if (!ras || !adev->ras_enabled ||
(adev->init_lvl->level ==
AMDGPU_INIT_LEVEL_MINIMAL_XGMI)) {
if (amdgpu_ip_version(adev, MP1_HWIP, 0) ==
IP_VERSION(11, 0, 2)) {
data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL_ARCT);
data |= 0x80000000;
WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL_ARCT, data);
} else {
data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL);
data |= 0x80000000;
WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data);
}
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 0, NULL);
} else {
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 1, NULL);
}
break;
}
} else {
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_ExitBaco, NULL);
if (ret)
return ret;
/* clear vbios scratch 6 and 7 for coming asic reinit */
WREG32(adev->bios_scratch_reg_offset + 6, 0);
WREG32(adev->bios_scratch_reg_offset + 7, 0);
}
if (!ret)
smu_baco->state = state;
return ret;
}
int smu_v11_0_baco_enter(struct smu_context *smu)
{
int ret = 0;
ret = smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_ENTER);
if (ret)
return ret;
msleep(10);
return ret;
}
int smu_v11_0_baco_exit(struct smu_context *smu)
{
int ret;
ret = smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_EXIT);
if (!ret) {
/*
* Poll BACO exit status to ensure FW has completed
* BACO exit process to avoid timing issues.
*/
smu_v11_0_poll_baco_exit(smu);
}
return ret;
}
int smu_v11_0_mode1_reset(struct smu_context *smu)
{
int ret = 0;
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL);
if (!ret)
msleep(SMU11_MODE1_RESET_WAIT_TIME_IN_MS);
return ret;
}
int smu_v11_0_handle_passthrough_sbr(struct smu_context *smu, bool enable)
{
int ret = 0;
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_LightSBR, enable ? 1 : 0, NULL);
return ret;
}
int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t *min, uint32_t *max)
{
int ret = 0, clk_id = 0;
uint32_t param = 0;
uint32_t clock_limit;
if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) {
switch (clk_type) {
case SMU_MCLK:
case SMU_UCLK:
clock_limit = smu->smu_table.boot_values.uclk;
break;
case SMU_GFXCLK:
case SMU_SCLK:
clock_limit = smu->smu_table.boot_values.gfxclk;
break;
case SMU_SOCCLK:
clock_limit = smu->smu_table.boot_values.socclk;
break;
default:
clock_limit = 0;
break;
}
/* clock in MHz units */
if (min)
*min = clock_limit / 100;
if (max)
*max = clock_limit / 100;
return 0;
}
clk_id = smu_cmn_to_asic_specific_index(smu,
CMN2ASIC_MAPPING_CLK,
clk_type);
if (clk_id < 0) {
ret = -EINVAL;
goto failed;
}
param = (clk_id & 0xffff) << 16;
if (max) {
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param, max);
if (ret)
goto failed;
}
if (min) {
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param, min);
if (ret)
goto failed;
}
failed:
return ret;
}
int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t min,
uint32_t max,
bool automatic)
{
int ret = 0, clk_id = 0;
uint32_t param;
if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
return 0;
clk_id = smu_cmn_to_asic_specific_index(smu,
CMN2ASIC_MAPPING_CLK,
clk_type);
if (clk_id < 0)
return clk_id;
if (max > 0) {
if (automatic)
param = (uint32_t)((clk_id << 16) | 0xffff);
else
param = (uint32_t)((clk_id << 16) | (max & 0xffff));
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
param, NULL);
if (ret)
goto out;
}
if (min > 0) {
if (automatic)
param = (uint32_t)((clk_id << 16) | 0);
else
param = (uint32_t)((clk_id << 16) | (min & 0xffff));
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
param, NULL);
if (ret)
goto out;
}
out:
return ret;
}
int smu_v11_0_set_hard_freq_limited_range(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t min,
uint32_t max)
{
int ret = 0, clk_id = 0;
uint32_t param;
if (min <= 0 && max <= 0)
return -EINVAL;
if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
return 0;
clk_id = smu_cmn_to_asic_specific_index(smu,
CMN2ASIC_MAPPING_CLK,
clk_type);
if (clk_id < 0)
return clk_id;
if (max > 0) {
param = (uint32_t)((clk_id << 16) | (max & 0xffff));
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
param, NULL);
if (ret)
return ret;
}
if (min > 0) {
param = (uint32_t)((clk_id << 16) | (min & 0xffff));
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
param, NULL);
if (ret)
return ret;
}
return ret;
}
int smu_v11_0_set_performance_level(struct smu_context *smu,
enum amd_dpm_forced_level level)
{
struct smu_11_0_dpm_context *dpm_context =
smu->smu_dpm.dpm_context;
struct smu_11_0_dpm_table *gfx_table =
&dpm_context->dpm_tables.gfx_table;
struct smu_11_0_dpm_table *mem_table =
&dpm_context->dpm_tables.uclk_table;
struct smu_11_0_dpm_table *soc_table =
&dpm_context->dpm_tables.soc_table;
struct smu_umd_pstate_table *pstate_table =
&smu->pstate_table;
struct amdgpu_device *adev = smu->adev;
uint32_t sclk_min = 0, sclk_max = 0;
uint32_t mclk_min = 0, mclk_max = 0;
uint32_t socclk_min = 0, socclk_max = 0;
int ret = 0;
bool auto_level = false;
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
sclk_min = sclk_max = gfx_table->max;
mclk_min = mclk_max = mem_table->max;
socclk_min = socclk_max = soc_table->max;
break;
case AMD_DPM_FORCED_LEVEL_LOW:
sclk_min = sclk_max = gfx_table->min;
mclk_min = mclk_max = mem_table->min;
socclk_min = socclk_max = soc_table->min;
break;
case AMD_DPM_FORCED_LEVEL_AUTO:
sclk_min = gfx_table->min;
sclk_max = gfx_table->max;
mclk_min = mem_table->min;
mclk_max = mem_table->max;
socclk_min = soc_table->min;
socclk_max = soc_table->max;
auto_level = true;
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
sclk_min = sclk_max = pstate_table->gfxclk_pstate.standard;
mclk_min = mclk_max = pstate_table->uclk_pstate.standard;
socclk_min = socclk_max = pstate_table->socclk_pstate.standard;
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
sclk_min = sclk_max = pstate_table->gfxclk_pstate.min;
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
mclk_min = mclk_max = pstate_table->uclk_pstate.min;
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
sclk_min = sclk_max = pstate_table->gfxclk_pstate.peak;
mclk_min = mclk_max = pstate_table->uclk_pstate.peak;
socclk_min = socclk_max = pstate_table->socclk_pstate.peak;
break;
case AMD_DPM_FORCED_LEVEL_MANUAL:
case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
return 0;
default:
dev_err(adev->dev, "Invalid performance level %d\n", level);
return -EINVAL;
}
/*
* Separate MCLK and SOCCLK soft min/max settings are not allowed
* on Arcturus.
*/
if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 2)) {
mclk_min = mclk_max = 0;
socclk_min = socclk_max = 0;
auto_level = false;
}
if (sclk_min && sclk_max) {
ret = smu_v11_0_set_soft_freq_limited_range(smu,
SMU_GFXCLK,
sclk_min,
sclk_max,
auto_level);
if (ret)
return ret;
}
if (mclk_min && mclk_max) {
ret = smu_v11_0_set_soft_freq_limited_range(smu,
SMU_MCLK,
mclk_min,
mclk_max,
auto_level);
if (ret)
return ret;
}
if (socclk_min && socclk_max) {
ret = smu_v11_0_set_soft_freq_limited_range(smu,
SMU_SOCCLK,
socclk_min,
socclk_max,
auto_level);
if (ret)
return ret;
}
return ret;
}
int smu_v11_0_set_power_source(struct smu_context *smu,
enum smu_power_src_type power_src)
{
int pwr_source;
pwr_source = smu_cmn_to_asic_specific_index(smu,
CMN2ASIC_MAPPING_PWR,
(uint32_t)power_src);
if (pwr_source < 0)
return -EINVAL;
return smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_NotifyPowerSource,
pwr_source,
NULL);
}
int smu_v11_0_get_dpm_freq_by_index(struct smu_context *smu,
enum smu_clk_type clk_type,
uint16_t level,
uint32_t *value)
{
int ret = 0, clk_id = 0;
uint32_t param;
if (!value)
return -EINVAL;
if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
return 0;
clk_id = smu_cmn_to_asic_specific_index(smu,
CMN2ASIC_MAPPING_CLK,
clk_type);
if (clk_id < 0)
return clk_id;
param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_GetDpmFreqByIndex,
param,
value);
if (ret)
return ret;
/*
* BIT31: 0 - fine grained DPM, 1 - discrete DPM.
* Discrete DPM is not supported for now, so mask the bit off.
*/
*value = *value & 0x7fffffff;
return ret;
}
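/*
 * Passing level 0xff to GetDpmFreqByIndex makes the firmware return
 * the number of DPM levels instead of a frequency.
 */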
int smu_v11_0_get_dpm_level_count(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t *value)
{
return smu_v11_0_get_dpm_freq_by_index(smu,
clk_type,
0xff,
value);
}
int smu_v11_0_set_single_dpm_table(struct smu_context *smu,
enum smu_clk_type clk_type,
struct smu_11_0_dpm_table *single_dpm_table)
{
int ret = 0;
uint32_t clk;
int i;
ret = smu_v11_0_get_dpm_level_count(smu,
clk_type,
&single_dpm_table->count);
if (ret) {
dev_err(smu->adev->dev, "[%s] failed to get dpm levels!\n", __func__);
return ret;
}
for (i = 0; i < single_dpm_table->count; i++) {
ret = smu_v11_0_get_dpm_freq_by_index(smu,
clk_type,
i,
&clk);
if (ret) {
dev_err(smu->adev->dev, "[%s] failed to get dpm freq by index!\n", __func__);
return ret;
}
single_dpm_table->dpm_levels[i].value = clk;
single_dpm_table->dpm_levels[i].enabled = true;
if (i == 0)
single_dpm_table->min = clk;
else if (i == single_dpm_table->count - 1)
single_dpm_table->max = clk;
}
return 0;
}
int smu_v11_0_get_dpm_level_range(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t *min_value,
uint32_t *max_value)
{
uint32_t level_count = 0;
int ret = 0;
if (!min_value && !max_value)
return -EINVAL;
if (min_value) {
/* by default, level 0 clock value as min value */
ret = smu_v11_0_get_dpm_freq_by_index(smu,
clk_type,
0,
min_value);
if (ret)
return ret;
}
if (max_value) {
ret = smu_v11_0_get_dpm_level_count(smu,
clk_type,
&level_count);
if (ret)
return ret;
ret = smu_v11_0_get_dpm_freq_by_index(smu,
clk_type,
level_count - 1,
max_value);
if (ret)
return ret;
}
return ret;
}
int smu_v11_0_get_current_pcie_link_width_level(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
return (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
>> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
}
uint16_t smu_v11_0_get_current_pcie_link_width(struct smu_context *smu)
{
uint32_t width_level;
width_level = smu_v11_0_get_current_pcie_link_width_level(smu);
if (width_level > LINK_WIDTH_MAX)
width_level = 0;
return link_width[width_level];
}
int smu_v11_0_get_current_pcie_link_speed_level(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
return (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
>> PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
}
uint16_t smu_v11_0_get_current_pcie_link_speed(struct smu_context *smu)
{
uint32_t speed_level;
speed_level = smu_v11_0_get_current_pcie_link_speed_level(smu);
if (speed_level > LINK_SPEED_MAX)
speed_level = 0;
return link_speed[speed_level];
}
int smu_v11_0_gfx_ulv_control(struct smu_context *smu,
bool enablement)
{
int ret = 0;
if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_GFX_ULV_BIT))
ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_GFX_ULV_BIT, enablement);
return ret;
}
int smu_v11_0_deep_sleep_control(struct smu_context *smu,
bool enablement)
{
struct amdgpu_device *adev = smu->adev;
int ret = 0;
if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_GFXCLK_BIT)) {
ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_GFXCLK_BIT, enablement);
if (ret) {
dev_err(adev->dev, "Failed to %s GFXCLK DS!\n", enablement ? "enable" : "disable");
return ret;
}
}
if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_UCLK_BIT)) {
ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_UCLK_BIT, enablement);
if (ret) {
dev_err(adev->dev, "Failed to %s UCLK DS!\n", enablement ? "enable" : "disable");
return ret;
}
}
if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_FCLK_BIT)) {
ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_FCLK_BIT, enablement);
if (ret) {
dev_err(adev->dev, "Failed to %s FCLK DS!\n", enablement ? "enable" : "disable");
return ret;
}
}
if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_SOCCLK_BIT)) {
ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_SOCCLK_BIT, enablement);
if (ret) {
dev_err(adev->dev, "Failed to %s SOCCLK DS!\n", enablement ? "enable" : "disable");
return ret;
}
}
if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_LCLK_BIT)) {
ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_LCLK_BIT, enablement);
if (ret) {
dev_err(adev->dev, "Failed to %s LCLK DS!\n", enablement ? "enable" : "disable");
return ret;
}
}
return ret;
}
int smu_v11_0_restore_user_od_settings(struct smu_context *smu)
{
struct smu_table_context *table_context = &smu->smu_table;
void *user_od_table = table_context->user_overdrive_table;
int ret = 0;
ret = smu_cmn_update_table(smu, SMU_TABLE_OVERDRIVE, 0, (void *)user_od_table, true);
if (ret)
dev_err(smu->adev->dev, "Failed to import overdrive table!\n");
return ret;
}
void smu_v11_0_set_smu_mailbox_registers(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
smu->param_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_82);
smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_66);
smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
}
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* IDT Interprise 79RC32434 watchdog driver
*
* Copyright (C) 2006, Ondrej Zajicek <[email protected]>
* Copyright (C) 2008, Florian Fainelli <[email protected]>
*
* based on
* SoftDog 0.05: A Software Watchdog Device
*
* (c) Copyright 1996 Alan Cox <[email protected]>,
* All Rights Reserved.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h> /* For module specific items */
#include <linux/moduleparam.h> /* For new moduleparam's */
#include <linux/types.h> /* For standard types (like size_t) */
#include <linux/errno.h> /* For the -ENODEV/... values */
#include <linux/kernel.h> /* For printk/panic/... */
#include <linux/fs.h> /* For file operations */
#include <linux/miscdevice.h> /* For struct miscdevice */
#include <linux/watchdog.h> /* For the watchdog specific items */
#include <linux/init.h> /* For __init/__exit/... */
#include <linux/platform_device.h> /* For platform_driver framework */
#include <linux/spinlock.h> /* For spin_lock/spin_unlock/... */
#include <linux/uaccess.h> /* For copy_to_user/put_user/... */
#include <linux/io.h> /* For devm_ioremap */
#include <asm/mach-rc32434/integ.h> /* For the Watchdog registers */
#define VERSION "1.0"
static struct {
unsigned long inuse;
spinlock_t io_lock;
} rc32434_wdt_device;
static struct integ __iomem *wdt_reg;
static int expect_close;
/* Board internal clock speed in Hz,
* at which the watchdog timer ticks. */
extern unsigned int idt_cpu_freq;
/* translate wtcompare value to seconds and vice versa */
#define WTCOMP2SEC(x) (x / idt_cpu_freq)
#define SEC2WTCOMP(x) (x * idt_cpu_freq)
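/* e.g. with idt_cpu_freq = 200 MHz, SEC2WTCOMP(20) = 4,000,000,000,
* which still fits in a u32. */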
/* Use a default timeout of 20s. This should be
* safe for CPU clock speeds up to 400MHz, as
* ((2 ^ 32) - 1) / (400MHz / 2) = 21s. */
#define WATCHDOG_TIMEOUT 20
static int timeout = WATCHDOG_TIMEOUT;
module_param(timeout, int, 0);
MODULE_PARM_DESC(timeout, "Watchdog timeout value, in seconds (default="
__MODULE_STRING(WATCHDOG_TIMEOUT) ")");
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
/* Apply the OR and NAND masks to the value read from addr and write it back */
#define SET_BITS(addr, or, nand) \
writel((readl(&addr) | or) & ~nand, &addr)
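/* e.g. SET_BITS(wdt_reg->wtc, 1 << RC32434_WTC_EN, 1 << RC32434_WTC_TO)
* enables the watchdog while clearing its timeout flag. */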
static int rc32434_wdt_set(int new_timeout)
{
int max_to = WTCOMP2SEC((u32)-1);
if (new_timeout < 0 || new_timeout > max_to) {
pr_err("timeout value must be between 0 and %d\n", max_to);
return -EINVAL;
}
timeout = new_timeout;
spin_lock(&rc32434_wdt_device.io_lock);
writel(SEC2WTCOMP(timeout), &wdt_reg->wtcompare);
spin_unlock(&rc32434_wdt_device.io_lock);
return 0;
}
static void rc32434_wdt_start(void)
{
u32 or, nand;
spin_lock(&rc32434_wdt_device.io_lock);
/* zero the counter before enabling */
writel(0, &wdt_reg->wtcount);
/* don't generate a non-maskable interrupt,
* do a warm reset instead */
nand = 1 << RC32434_ERR_WNE;
or = 1 << RC32434_ERR_WRE;
/* reset the ERRCS timeout bit in case it's set */
nand |= 1 << RC32434_ERR_WTO;
SET_BITS(wdt_reg->errcs, or, nand);
/* set the timeout (either default or based on module param) */
rc32434_wdt_set(timeout);
/* reset WTC timeout bit and enable WDT */
nand = 1 << RC32434_WTC_TO;
or = 1 << RC32434_WTC_EN;
SET_BITS(wdt_reg->wtc, or, nand);
spin_unlock(&rc32434_wdt_device.io_lock);
pr_info("Started watchdog timer\n");
}
static void rc32434_wdt_stop(void)
{
spin_lock(&rc32434_wdt_device.io_lock);
/* Disable WDT */
SET_BITS(wdt_reg->wtc, 0, 1 << RC32434_WTC_EN);
spin_unlock(&rc32434_wdt_device.io_lock);
pr_info("Stopped watchdog timer\n");
}
static void rc32434_wdt_ping(void)
{
spin_lock(&rc32434_wdt_device.io_lock);
writel(0, &wdt_reg->wtcount);
spin_unlock(&rc32434_wdt_device.io_lock);
}
static int rc32434_wdt_open(struct inode *inode, struct file *file)
{
if (test_and_set_bit(0, &rc32434_wdt_device.inuse))
return -EBUSY;
if (nowayout)
__module_get(THIS_MODULE);
rc32434_wdt_start();
rc32434_wdt_ping();
return stream_open(inode, file);
}
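/*
 * Standard watchdog magic-close protocol: the timer is stopped on
 * release only if a 'V' was written since open (expect_close == 42,
 * see rc32434_wdt_write); otherwise it keeps running.
 */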
static int rc32434_wdt_release(struct inode *inode, struct file *file)
{
if (expect_close == 42) {
rc32434_wdt_stop();
module_put(THIS_MODULE);
} else {
pr_crit("device closed unexpectedly. WDT will not stop!\n");
rc32434_wdt_ping();
}
clear_bit(0, &rc32434_wdt_device.inuse);
return 0;
}
static ssize_t rc32434_wdt_write(struct file *file, const char *data,
size_t len, loff_t *ppos)
{
if (len) {
if (!nowayout) {
size_t i;
/* In case it was set long ago */
expect_close = 0;
for (i = 0; i != len; i++) {
char c;
if (get_user(c, data + i))
return -EFAULT;
if (c == 'V')
expect_close = 42;
}
}
rc32434_wdt_ping();
return len;
}
return 0;
}
static long rc32434_wdt_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
void __user *argp = (void __user *)arg;
int new_timeout;
unsigned int value;
static const struct watchdog_info ident = {
.options = WDIOF_SETTIMEOUT |
WDIOF_KEEPALIVEPING |
WDIOF_MAGICCLOSE,
.identity = "RC32434_WDT Watchdog",
};
switch (cmd) {
case WDIOC_GETSUPPORT:
if (copy_to_user(argp, &ident, sizeof(ident)))
return -EFAULT;
break;
case WDIOC_GETSTATUS:
case WDIOC_GETBOOTSTATUS:
value = 0;
if (copy_to_user(argp, &value, sizeof(int)))
return -EFAULT;
break;
case WDIOC_SETOPTIONS:
if (copy_from_user(&value, argp, sizeof(int)))
return -EFAULT;
switch (value) {
case WDIOS_ENABLECARD:
rc32434_wdt_start();
break;
case WDIOS_DISABLECARD:
rc32434_wdt_stop();
break;
default:
return -EINVAL;
}
break;
case WDIOC_KEEPALIVE:
rc32434_wdt_ping();
break;
case WDIOC_SETTIMEOUT:
if (copy_from_user(&new_timeout, argp, sizeof(int)))
return -EFAULT;
if (rc32434_wdt_set(new_timeout))
return -EINVAL;
fallthrough;
case WDIOC_GETTIMEOUT:
return copy_to_user(argp, &timeout, sizeof(int)) ? -EFAULT : 0;
default:
return -ENOTTY;
}
return 0;
}
static const struct file_operations rc32434_wdt_fops = {
.owner = THIS_MODULE,
.write = rc32434_wdt_write,
.unlocked_ioctl = rc32434_wdt_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.open = rc32434_wdt_open,
.release = rc32434_wdt_release,
};
static struct miscdevice rc32434_wdt_miscdev = {
.minor = WATCHDOG_MINOR,
.name = "watchdog",
.fops = &rc32434_wdt_fops,
};
static int rc32434_wdt_probe(struct platform_device *pdev)
{
int ret;
struct resource *r;
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rb532_wdt_res");
if (!r) {
pr_err("failed to retrieve resources\n");
return -ENODEV;
}
wdt_reg = devm_ioremap(&pdev->dev, r->start, resource_size(r));
if (!wdt_reg) {
pr_err("failed to remap I/O resources\n");
return -ENXIO;
}
spin_lock_init(&rc32434_wdt_device.io_lock);
/* Make sure the watchdog is not running */
rc32434_wdt_stop();
/* Check that the heartbeat value is within its range;
* if not, reset it to the default */
if (rc32434_wdt_set(timeout)) {
rc32434_wdt_set(WATCHDOG_TIMEOUT);
pr_info("timeout value must be between 0 and %d\n",
WTCOMP2SEC((u32)-1));
}
ret = misc_register(&rc32434_wdt_miscdev);
if (ret < 0) {
pr_err("failed to register watchdog device\n");
return ret;
}
pr_info("Watchdog Timer version " VERSION ", timer margin: %d sec\n",
timeout);
return 0;
}
static void rc32434_wdt_remove(struct platform_device *pdev)
{
misc_deregister(&rc32434_wdt_miscdev);
}
static void rc32434_wdt_shutdown(struct platform_device *pdev)
{
rc32434_wdt_stop();
}
static struct platform_driver rc32434_wdt_driver = {
.probe = rc32434_wdt_probe,
.remove = rc32434_wdt_remove,
.shutdown = rc32434_wdt_shutdown,
.driver = {
.name = "rc32434_wdt",
}
};
module_platform_driver(rc32434_wdt_driver);
MODULE_AUTHOR("Ondrej Zajicek <[email protected]>,"
"Florian Fainelli <[email protected]>");
MODULE_DESCRIPTION("Driver for the IDT RC32434 SoC watchdog");
MODULE_LICENSE("GPL");
// SPDX-License-Identifier: GPL-2.0
/*
* Samsung Exynos850 SoC device tree source
*
* Copyright (C) 2018 Samsung Electronics Co., Ltd.
* Copyright (C) 2021 Linaro Ltd.
*
* Samsung Exynos850 SoC device nodes are listed in this file.
* Exynos850 based board files can include this file and provide
* values for board specific bindings.
*/
#include <dt-bindings/clock/exynos850.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/soc/samsung,exynos-usi.h>
/ {
/* Also known under engineering name Exynos3830 */
compatible = "samsung,exynos850";
#address-cells = <2>;
#size-cells = <1>;
interrupt-parent = <&gic>;
aliases {
pinctrl0 = &pinctrl_alive;
pinctrl1 = &pinctrl_cmgp;
pinctrl2 = &pinctrl_aud;
pinctrl3 = &pinctrl_hsi;
pinctrl4 = &pinctrl_core;
pinctrl5 = &pinctrl_peri;
};
arm-pmu {
compatible = "arm,cortex-a55-pmu";
interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>;
interrupt-affinity = <&cpu0>, <&cpu1>, <&cpu2>, <&cpu3>,
<&cpu4>, <&cpu5>, <&cpu6>, <&cpu7>;
};
/* Main system clock (XTCXO); external, must be 26 MHz */
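/* The clock-frequency property is expected to be filled in by the board dts */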
oscclk: clock-oscclk {
compatible = "fixed-clock";
clock-output-names = "oscclk";
#clock-cells = <0>;
};
cpus {
#address-cells = <1>;
#size-cells = <0>;
cpu-map {
cluster0 {
core0 {
cpu = <&cpu0>;
};
core1 {
cpu = <&cpu1>;
};
core2 {
cpu = <&cpu2>;
};
core3 {
cpu = <&cpu3>;
};
};
cluster1 {
core0 {
cpu = <&cpu4>;
};
core1 {
cpu = <&cpu5>;
};
core2 {
cpu = <&cpu6>;
};
core3 {
cpu = <&cpu7>;
};
};
};
cpu0: cpu@0 {
device_type = "cpu";
compatible = "arm,cortex-a55";
reg = <0x0>;
enable-method = "psci";
clocks = <&cmu_cpucl0 CLK_CLUSTER0_SCLK>;
clock-names = "cluster0_clk";
};
cpu1: cpu@1 {
device_type = "cpu";
compatible = "arm,cortex-a55";
reg = <0x1>;
enable-method = "psci";
};
cpu2: cpu@2 {
device_type = "cpu";
compatible = "arm,cortex-a55";
reg = <0x2>;
enable-method = "psci";
};
cpu3: cpu@3 {
device_type = "cpu";
compatible = "arm,cortex-a55";
reg = <0x3>;
enable-method = "psci";
};
cpu4: cpu@100 {
device_type = "cpu";
compatible = "arm,cortex-a55";
reg = <0x100>;
enable-method = "psci";
clocks = <&cmu_cpucl1 CLK_CLUSTER1_SCLK>;
clock-names = "cluster1_clk";
};
cpu5: cpu@101 {
device_type = "cpu";
compatible = "arm,cortex-a55";
reg = <0x101>;
enable-method = "psci";
};
cpu6: cpu@102 {
device_type = "cpu";
compatible = "arm,cortex-a55";
reg = <0x102>;
enable-method = "psci";
};
cpu7: cpu@103 {
device_type = "cpu";
compatible = "arm,cortex-a55";
reg = <0x103>;
enable-method = "psci";
};
};
psci {
compatible = "arm,psci-1.0";
method = "smc";
};
timer {
compatible = "arm,armv8-timer";
/* Hypervisor Virtual Timer interrupt is not wired to GIC */
interrupts =
<GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>;
};
soc: soc@0 {
compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x0 0x0 0x0 0x20000000>;
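/* Peripherals are mapped 1:1 into the first 512 MiB of the address space */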
chipid@10000000 {
compatible = "samsung,exynos850-chipid";
reg = <0x10000000 0x100>;
};
timer@10040000 {
compatible = "samsung,exynos850-mct",
"samsung,exynos4210-mct";
reg = <0x10040000 0x800>;
interrupts = <GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&oscclk>, <&cmu_peri CLK_GOUT_MCT_PCLK>;
clock-names = "fin_pll", "mct";
};
pdma0: dma-controller@120c0000 {
compatible = "arm,pl330", "arm,primecell";
reg = <0x120c0000 0x1000>;
clocks = <&cmu_core CLK_GOUT_PDMA_CORE_ACLK>;
clock-names = "apb_pclk";
#dma-cells = <1>;
interrupts = <GIC_SPI 479 IRQ_TYPE_LEVEL_HIGH>;
arm,pl330-broken-no-flushp;
};
gic: interrupt-controller@12a01000 {
compatible = "arm,gic-400";
#interrupt-cells = <3>;
#address-cells = <0>;
reg = <0x12a01000 0x1000>,
<0x12a02000 0x2000>,
<0x12a04000 0x2000>,
<0x12a06000 0x2000>;
interrupt-controller;
interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(8) |
IRQ_TYPE_LEVEL_HIGH)>;
};
pmu_system_controller: system-controller@11860000 {
compatible = "samsung,exynos850-pmu", "syscon";
reg = <0x11860000 0x10000>;
reboot: syscon-reboot {
compatible = "syscon-reboot";
regmap = <&pmu_system_controller>;
offset = <0x3a00>; /* SYSTEM_CONFIGURATION */
mask = <0x2>; /* SWRESET_SYSTEM */
value = <0x2>; /* reset value */
};
};
watchdog_cl0: watchdog@10050000 {
compatible = "samsung,exynos850-wdt";
reg = <0x10050000 0x100>;
interrupts = <GIC_SPI 228 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cmu_peri CLK_GOUT_WDT0_PCLK>, <&oscclk>;
clock-names = "watchdog", "watchdog_src";
samsung,syscon-phandle = <&pmu_system_controller>;
samsung,cluster-index = <0>;
status = "disabled";
};
watchdog_cl1: watchdog@10060000 {
compatible = "samsung,exynos850-wdt";
reg = <0x10060000 0x100>;
interrupts = <GIC_SPI 229 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cmu_peri CLK_GOUT_WDT1_PCLK>, <&oscclk>;
clock-names = "watchdog", "watchdog_src";
samsung,syscon-phandle = <&pmu_system_controller>;
samsung,cluster-index = <1>;
status = "disabled";
};
cmu_peri: clock-controller@10030000 {
compatible = "samsung,exynos850-cmu-peri";
reg = <0x10030000 0x8000>;
#clock-cells = <1>;
clocks = <&oscclk>, <&cmu_top CLK_DOUT_PERI_BUS>,
<&cmu_top CLK_DOUT_PERI_UART>,
<&cmu_top CLK_DOUT_PERI_IP>;
clock-names = "oscclk", "dout_peri_bus",
"dout_peri_uart", "dout_peri_ip";
};
cmu_cpucl1: clock-controller@10800000 {
compatible = "samsung,exynos850-cmu-cpucl1";
reg = <0x10800000 0x8000>;
#clock-cells = <1>;
clocks = <&oscclk>, <&cmu_top CLK_DOUT_CPUCL1_SWITCH>,
<&cmu_top CLK_DOUT_CPUCL1_DBG>;
clock-names = "oscclk", "dout_cpucl1_switch",
"dout_cpucl1_dbg";
};
cmu_cpucl0: clock-controller@10900000 {
compatible = "samsung,exynos850-cmu-cpucl0";
reg = <0x10900000 0x8000>;
#clock-cells = <1>;
clocks = <&oscclk>, <&cmu_top CLK_DOUT_CPUCL0_SWITCH>,
<&cmu_top CLK_DOUT_CPUCL0_DBG>;
clock-names = "oscclk", "dout_cpucl0_switch",
"dout_cpucl0_dbg";
};
cmu_g3d: clock-controller@11400000 {
compatible = "samsung,exynos850-cmu-g3d";
reg = <0x11400000 0x8000>;
#clock-cells = <1>;
clocks = <&oscclk>, <&cmu_top CLK_DOUT_G3D_SWITCH>;
clock-names = "oscclk", "dout_g3d_switch";
};
cmu_apm: clock-controller@11800000 {
compatible = "samsung,exynos850-cmu-apm";
reg = <0x11800000 0x8000>;
#clock-cells = <1>;
clocks = <&oscclk>, <&cmu_top CLK_DOUT_CLKCMU_APM_BUS>;
clock-names = "oscclk", "dout_clkcmu_apm_bus";
};
cmu_cmgp: clock-controller@11c00000 {
compatible = "samsung,exynos850-cmu-cmgp";
reg = <0x11c00000 0x8000>;
#clock-cells = <1>;
clocks = <&oscclk>, <&cmu_apm CLK_GOUT_CLKCMU_CMGP_BUS>;
clock-names = "oscclk", "gout_clkcmu_cmgp_bus";
};
cmu_core: clock-controller@12000000 {
compatible = "samsung,exynos850-cmu-core";
reg = <0x12000000 0x8000>;
#clock-cells = <1>;
clocks = <&oscclk>, <&cmu_top CLK_DOUT_CORE_BUS>,
<&cmu_top CLK_DOUT_CORE_CCI>,
<&cmu_top CLK_DOUT_CORE_MMC_EMBD>,
<&cmu_top CLK_DOUT_CORE_SSS>;
clock-names = "oscclk", "dout_core_bus",
"dout_core_cci", "dout_core_mmc_embd",
"dout_core_sss";
};
cmu_top: clock-controller@120e0000 {
compatible = "samsung,exynos850-cmu-top";
reg = <0x120e0000 0x8000>;
#clock-cells = <1>;
clocks = <&oscclk>;
clock-names = "oscclk";
};
cmu_mfcmscl: clock-controller@12c00000 {
compatible = "samsung,exynos850-cmu-mfcmscl";
reg = <0x12c00000 0x8000>;
#clock-cells = <1>;
clocks = <&oscclk>,
<&cmu_top CLK_DOUT_MFCMSCL_MFC>,
<&cmu_top CLK_DOUT_MFCMSCL_M2M>,
<&cmu_top CLK_DOUT_MFCMSCL_MCSC>,
<&cmu_top CLK_DOUT_MFCMSCL_JPEG>;
clock-names = "oscclk", "dout_mfcmscl_mfc",
"dout_mfcmscl_m2m", "dout_mfcmscl_mcsc",
"dout_mfcmscl_jpeg";
};
cmu_dpu: clock-controller@13000000 {
compatible = "samsung,exynos850-cmu-dpu";
reg = <0x13000000 0x8000>;
#clock-cells = <1>;
clocks = <&oscclk>, <&cmu_top CLK_DOUT_DPU>;
clock-names = "oscclk", "dout_dpu";
};
cmu_hsi: clock-controller@13400000 {
compatible = "samsung,exynos850-cmu-hsi";
reg = <0x13400000 0x8000>;
#clock-cells = <1>;
clocks = <&oscclk>,
<&cmu_top CLK_DOUT_HSI_BUS>,
<&cmu_top CLK_DOUT_HSI_MMC_CARD>,
<&cmu_top CLK_DOUT_HSI_USB20DRD>;
clock-names = "oscclk", "dout_hsi_bus",
"dout_hsi_mmc_card", "dout_hsi_usb20drd";
};
cmu_is: clock-controller@14500000 {
compatible = "samsung,exynos850-cmu-is";
reg = <0x14500000 0x8000>;
#clock-cells = <1>;
clocks = <&oscclk>,
<&cmu_top CLK_DOUT_IS_BUS>,
<&cmu_top CLK_DOUT_IS_ITP>,
<&cmu_top CLK_DOUT_IS_VRA>,
<&cmu_top CLK_DOUT_IS_GDC>;
clock-names = "oscclk", "dout_is_bus", "dout_is_itp",
"dout_is_vra", "dout_is_gdc";
};
cmu_aud: clock-controller@14a00000 {
compatible = "samsung,exynos850-cmu-aud";
reg = <0x14a00000 0x8000>;
#clock-cells = <1>;
clocks = <&oscclk>, <&cmu_top CLK_DOUT_AUD>;
clock-names = "oscclk", "dout_aud";
};
pinctrl_alive: pinctrl@11850000 {
compatible = "samsung,exynos850-pinctrl";
reg = <0x11850000 0x1000>;
wakeup-interrupt-controller {
compatible = "samsung,exynos850-wakeup-eint",
"samsung,exynos7-wakeup-eint";
};
};
pinctrl_cmgp: pinctrl@11c30000 {
compatible = "samsung,exynos850-pinctrl";
reg = <0x11c30000 0x1000>;
wakeup-interrupt-controller {
compatible = "samsung,exynos850-wakeup-eint",
"samsung,exynos7-wakeup-eint";
};
};
pinctrl_core: pinctrl@12070000 {
compatible = "samsung,exynos850-pinctrl";
reg = <0x12070000 0x1000>;
interrupts = <GIC_SPI 451 IRQ_TYPE_LEVEL_HIGH>;
};
trng: rng@12081400 {
compatible = "samsung,exynos850-trng";
reg = <0x12081400 0x100>;
clocks = <&cmu_core CLK_GOUT_SSS_ACLK>,
<&cmu_core CLK_GOUT_SSS_PCLK>;
clock-names = "secss", "pclk";
};
pinctrl_hsi: pinctrl@13430000 {
compatible = "samsung,exynos850-pinctrl";
reg = <0x13430000 0x1000>;
interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>;
};
pinctrl_peri: pinctrl@139b0000 {
compatible = "samsung,exynos850-pinctrl";
reg = <0x139b0000 0x1000>;
interrupts = <GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>;
};
pinctrl_aud: pinctrl@14a60000 {
compatible = "samsung,exynos850-pinctrl";
reg = <0x14a60000 0x1000>;
};
rtc: rtc@11a30000 {
compatible = "samsung,exynos850-rtc", "samsung,s3c6410-rtc";
reg = <0x11a30000 0x100>;
interrupts = <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cmu_apm CLK_GOUT_RTC_PCLK>;
clock-names = "rtc";
status = "disabled";
};
mmc_0: mmc@12100000 {
compatible = "samsung,exynos850-dw-mshc-smu",
"samsung,exynos7-dw-mshc-smu";
reg = <0x12100000 0x2000>;
interrupts = <GIC_SPI 452 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
clocks = <&cmu_core CLK_GOUT_MMC_EMBD_ACLK>,
<&cmu_core CLK_GOUT_MMC_EMBD_SDCLKIN>;
clock-names = "biu", "ciu";
fifo-depth = <0x40>;
status = "disabled";
};
i2c_0: i2c@13830000 {
compatible = "samsung,exynos850-i2c", "samsung,s3c2440-i2c";
reg = <0x13830000 0x100>;
interrupts = <GIC_SPI 196 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
pinctrl-names = "default";
pinctrl-0 = <&i2c0_pins>;
clocks = <&cmu_peri CLK_GOUT_I2C0_PCLK>;
clock-names = "i2c";
status = "disabled";
};
i2c_1: i2c@13840000 {
compatible = "samsung,exynos850-i2c", "samsung,s3c2440-i2c";
reg = <0x13840000 0x100>;
interrupts = <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
pinctrl-names = "default";
pinctrl-0 = <&i2c1_pins>;
clocks = <&cmu_peri CLK_GOUT_I2C1_PCLK>;
clock-names = "i2c";
status = "disabled";
};
i2c_2: i2c@13850000 {
compatible = "samsung,exynos850-i2c", "samsung,s3c2440-i2c";
reg = <0x13850000 0x100>;
interrupts = <GIC_SPI 198 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
pinctrl-names = "default";
pinctrl-0 = <&i2c2_pins>;
clocks = <&cmu_peri CLK_GOUT_I2C2_PCLK>;
clock-names = "i2c";
status = "disabled";
};
i2c_3: i2c@13860000 {
compatible = "samsung,exynos850-i2c", "samsung,s3c2440-i2c";
reg = <0x13860000 0x100>;
interrupts = <GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
pinctrl-names = "default";
pinctrl-0 = <&i2c3_pins>;
clocks = <&cmu_peri CLK_GOUT_I2C3_PCLK>;
clock-names = "i2c";
status = "disabled";
};
i2c_4: i2c@13870000 {
compatible = "samsung,exynos850-i2c", "samsung,s3c2440-i2c";
reg = <0x13870000 0x100>;
interrupts = <GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
pinctrl-names = "default";
pinctrl-0 = <&i2c4_pins>;
clocks = <&cmu_peri CLK_GOUT_I2C4_PCLK>;
clock-names = "i2c";
status = "disabled";
};
/* I2C_5 (also called CAM_PMIC_I2C in TRM) */
i2c_5: i2c@13880000 {
compatible = "samsung,exynos850-i2c", "samsung,s3c2440-i2c";
reg = <0x13880000 0x100>;
interrupts = <GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
pinctrl-names = "default";
pinctrl-0 = <&i2c5_pins>;
clocks = <&cmu_peri CLK_GOUT_I2C5_PCLK>;
clock-names = "i2c";
status = "disabled";
};
/* I2C_6 (also called MOTOR_I2C in TRM) */
i2c_6: i2c@13890000 {
compatible = "samsung,exynos850-i2c", "samsung,s3c2440-i2c";
reg = <0x13890000 0x100>;
interrupts = <GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
pinctrl-names = "default";
pinctrl-0 = <&i2c6_pins>;
clocks = <&cmu_peri CLK_GOUT_I2C6_PCLK>;
clock-names = "i2c";
status = "disabled";
};
sysmmu_mfcmscl: sysmmu@12c50000 {
compatible = "samsung,exynos-sysmmu";
reg = <0x12c50000 0x9000>;
interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "sysmmu";
clocks = <&cmu_mfcmscl CLK_GOUT_MFCMSCL_SYSMMU_CLK>;
#iommu-cells = <0>;
};
sysmmu_dpu: sysmmu@130c0000 {
compatible = "samsung,exynos-sysmmu";
reg = <0x130c0000 0x9000>;
interrupts = <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "sysmmu";
clocks = <&cmu_dpu CLK_GOUT_DPU_SMMU_CLK>;
#iommu-cells = <0>;
};
sysmmu_is0: sysmmu@14550000 {
compatible = "samsung,exynos-sysmmu";
reg = <0x14550000 0x9000>;
interrupts = <GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "sysmmu";
clocks = <&cmu_is CLK_GOUT_IS_SYSMMU_IS0_CLK>;
#iommu-cells = <0>;
};
sysmmu_is1: sysmmu@14570000 {
compatible = "samsung,exynos-sysmmu";
reg = <0x14570000 0x9000>;
interrupts = <GIC_SPI 166 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "sysmmu";
clocks = <&cmu_is CLK_GOUT_IS_SYSMMU_IS1_CLK>;
#iommu-cells = <0>;
};
sysmmu_aud: sysmmu@14850000 {
compatible = "samsung,exynos-sysmmu";
reg = <0x14850000 0x9000>;
interrupts = <GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "sysmmu";
clocks = <&cmu_aud CLK_GOUT_AUD_SYSMMU_CLK>;
#iommu-cells = <0>;
};
sysreg_peri: syscon@10020000 {
compatible = "samsung,exynos850-peri-sysreg",
"samsung,exynos850-sysreg", "syscon";
reg = <0x10020000 0x10000>;
clocks = <&cmu_peri CLK_GOUT_SYSREG_PERI_PCLK>;
};
sysreg_cmgp: syscon@11c20000 {
compatible = "samsung,exynos850-cmgp-sysreg",
"samsung,exynos850-sysreg", "syscon";
reg = <0x11c20000 0x10000>;
clocks = <&cmu_cmgp CLK_GOUT_SYSREG_CMGP_PCLK>;
};
usbdrd: usb@13600000 {
compatible = "samsung,exynos850-dwusb3";
ranges = <0x0 0x13600000 0x10000>;
clocks = <&cmu_hsi CLK_GOUT_USB_BUS_EARLY_CLK>,
<&cmu_hsi CLK_GOUT_USB_REF_CLK>;
clock-names = "bus_early", "ref";
#address-cells = <1>;
#size-cells = <1>;
status = "disabled";
usbdrd_dwc3: usb@0 {
compatible = "snps,dwc3";
reg = <0x0 0x10000>;
interrupts = <GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>;
phys = <&usbdrd_phy 0>;
phy-names = "usb2-phy";
};
};
usbdrd_phy: phy@135d0000 {
compatible = "samsung,exynos850-usbdrd-phy";
reg = <0x135d0000 0x100>;
clocks = <&cmu_hsi CLK_GOUT_USB_PHY_ACLK>,
<&cmu_hsi CLK_GOUT_USB_PHY_REF_CLK>;
clock-names = "phy", "ref";
samsung,pmu-syscon = <&pmu_system_controller>;
#phy-cells = <1>;
status = "disabled";
};
usi_uart: usi@138200c0 {
compatible = "samsung,exynos850-usi";
reg = <0x138200c0 0x20>;
samsung,sysreg = <&sysreg_peri 0x1010>;
samsung,mode = <USI_V2_UART>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
clocks = <&cmu_peri CLK_GOUT_UART_PCLK>,
<&cmu_peri CLK_GOUT_UART_IPCLK>;
clock-names = "pclk", "ipclk";
status = "disabled";
serial_0: serial@13820000 {
compatible = "samsung,exynos850-uart";
reg = <0x13820000 0xc0>;
interrupts = <GIC_SPI 227 IRQ_TYPE_LEVEL_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&uart0_pins>;
clocks = <&cmu_peri CLK_GOUT_UART_PCLK>,
<&cmu_peri CLK_GOUT_UART_IPCLK>;
clock-names = "uart", "clk_uart_baud0";
status = "disabled";
};
};
usi_hsi2c_0: usi@138a00c0 {
compatible = "samsung,exynos850-usi";
reg = <0x138a00c0 0x20>;
samsung,sysreg = <&sysreg_peri 0x1020>;
samsung,mode = <USI_V2_I2C>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
clocks = <&cmu_peri CLK_GOUT_HSI2C0_PCLK>,
<&cmu_peri CLK_GOUT_HSI2C0_IPCLK>;
clock-names = "pclk", "ipclk";
status = "disabled";
hsi2c_0: i2c@138a0000 {
compatible = "samsung,exynos850-hsi2c",
"samsung,exynosautov9-hsi2c";
reg = <0x138a0000 0xc0>;
interrupts = <GIC_SPI 193 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
pinctrl-names = "default";
pinctrl-0 = <&hsi2c0_pins>;
clocks = <&cmu_peri CLK_GOUT_HSI2C0_IPCLK>,
<&cmu_peri CLK_GOUT_HSI2C0_PCLK>;
clock-names = "hsi2c", "hsi2c_pclk";
status = "disabled";
};
};
usi_hsi2c_1: usi@138b00c0 {
compatible = "samsung,exynos850-usi";
reg = <0x138b00c0 0x20>;
samsung,sysreg = <&sysreg_peri 0x1030>;
samsung,mode = <USI_V2_I2C>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
clocks = <&cmu_peri CLK_GOUT_HSI2C1_PCLK>,
<&cmu_peri CLK_GOUT_HSI2C1_IPCLK>;
clock-names = "pclk", "ipclk";
status = "disabled";
hsi2c_1: i2c@138b0000 {
compatible = "samsung,exynos850-hsi2c",
"samsung,exynosautov9-hsi2c";
reg = <0x138b0000 0xc0>;
interrupts = <GIC_SPI 194 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
pinctrl-names = "default";
pinctrl-0 = <&hsi2c1_pins>;
clocks = <&cmu_peri CLK_GOUT_HSI2C1_IPCLK>,
<&cmu_peri CLK_GOUT_HSI2C1_PCLK>;
clock-names = "hsi2c", "hsi2c_pclk";
status = "disabled";
};
};
usi_hsi2c_2: usi@138c00c0 {
compatible = "samsung,exynos850-usi";
reg = <0x138c00c0 0x20>;
samsung,sysreg = <&sysreg_peri 0x1040>;
samsung,mode = <USI_V2_I2C>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
clocks = <&cmu_peri CLK_GOUT_HSI2C2_PCLK>,
<&cmu_peri CLK_GOUT_HSI2C2_IPCLK>;
clock-names = "pclk", "ipclk";
status = "disabled";
hsi2c_2: i2c@138c0000 {
compatible = "samsung,exynos850-hsi2c",
"samsung,exynosautov9-hsi2c";
reg = <0x138c0000 0xc0>;
interrupts = <GIC_SPI 195 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
pinctrl-names = "default";
pinctrl-0 = <&hsi2c2_pins>;
clocks = <&cmu_peri CLK_GOUT_HSI2C2_IPCLK>,
<&cmu_peri CLK_GOUT_HSI2C2_PCLK>;
clock-names = "hsi2c", "hsi2c_pclk";
status = "disabled";
};
};
usi_spi_0: usi@139400c0 {
compatible = "samsung,exynos850-usi";
reg = <0x139400c0 0x20>;
samsung,sysreg = <&sysreg_peri 0x1050>;
samsung,mode = <USI_V2_SPI>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
clocks = <&cmu_peri CLK_GOUT_SPI0_PCLK>,
<&cmu_peri CLK_GOUT_SPI0_IPCLK>;
clock-names = "pclk", "ipclk";
status = "disabled";
spi_0: spi@13940000 {
compatible = "samsung,exynos850-spi";
reg = <0x13940000 0x30>;
clocks = <&cmu_peri CLK_GOUT_SPI0_PCLK>,
<&cmu_peri CLK_GOUT_SPI0_IPCLK>;
clock-names = "spi", "spi_busclk0";
dmas = <&pdma0 5>, <&pdma0 4>;
dma-names = "tx", "rx";
interrupts = <GIC_SPI 221 IRQ_TYPE_LEVEL_HIGH>;
pinctrl-0 = <&spi0_pins>;
pinctrl-names = "default";
num-cs = <1>;
samsung,spi-src-clk = <0>;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
};
};
usi_cmgp0: usi@11d000c0 {
compatible = "samsung,exynos850-usi";
reg = <0x11d000c0 0x20>;
samsung,sysreg = <&sysreg_cmgp 0x2000>;
samsung,mode = <USI_V2_I2C>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
clocks = <&cmu_cmgp CLK_GOUT_CMGP_USI0_PCLK>,
<&cmu_cmgp CLK_GOUT_CMGP_USI0_IPCLK>;
clock-names = "pclk", "ipclk";
status = "disabled";
hsi2c_3: i2c@11d00000 {
compatible = "samsung,exynos850-hsi2c",
"samsung,exynosautov9-hsi2c";
reg = <0x11d00000 0xc0>;
interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
pinctrl-names = "default";
pinctrl-0 = <&hsi2c3_pins>;
clocks = <&cmu_cmgp CLK_GOUT_CMGP_USI0_IPCLK>,
<&cmu_cmgp CLK_GOUT_CMGP_USI0_PCLK>;
clock-names = "hsi2c", "hsi2c_pclk";
status = "disabled";
};
serial_1: serial@11d00000 {
compatible = "samsung,exynos850-uart";
reg = <0x11d00000 0xc0>;
interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&uart1_single_pins>;
clocks = <&cmu_cmgp CLK_GOUT_CMGP_USI0_PCLK>,
<&cmu_cmgp CLK_GOUT_CMGP_USI0_IPCLK>;
clock-names = "uart", "clk_uart_baud0";
status = "disabled";
};
spi_1: spi@11d00000 {
compatible = "samsung,exynos850-spi";
reg = <0x11d00000 0x30>;
clocks = <&cmu_cmgp CLK_GOUT_CMGP_USI0_PCLK>,
<&cmu_cmgp CLK_GOUT_CMGP_USI0_IPCLK>;
clock-names = "spi", "spi_busclk0";
dmas = <&pdma0 12>, <&pdma0 13>;
dma-names = "tx", "rx";
interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
pinctrl-0 = <&spi1_pins>;
pinctrl-names = "default";
num-cs = <1>;
samsung,spi-src-clk = <0>;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
};
};
usi_cmgp1: usi@11d200c0 {
compatible = "samsung,exynos850-usi";
reg = <0x11d200c0 0x20>;
samsung,sysreg = <&sysreg_cmgp 0x2010>;
samsung,mode = <USI_V2_I2C>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
clocks = <&cmu_cmgp CLK_GOUT_CMGP_USI1_PCLK>,
<&cmu_cmgp CLK_GOUT_CMGP_USI1_IPCLK>;
clock-names = "pclk", "ipclk";
status = "disabled";
hsi2c_4: i2c@11d20000 {
compatible = "samsung,exynos850-hsi2c",
"samsung,exynosautov9-hsi2c";
reg = <0x11d20000 0xc0>;
interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
pinctrl-names = "default";
pinctrl-0 = <&hsi2c4_pins>;
clocks = <&cmu_cmgp CLK_GOUT_CMGP_USI1_IPCLK>,
<&cmu_cmgp CLK_GOUT_CMGP_USI1_PCLK>;
clock-names = "hsi2c", "hsi2c_pclk";
status = "disabled";
};
serial_2: serial@11d20000 {
compatible = "samsung,exynos850-uart";
reg = <0x11d20000 0xc0>;
interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&uart2_single_pins>;
clocks = <&cmu_cmgp CLK_GOUT_CMGP_USI1_PCLK>,
<&cmu_cmgp CLK_GOUT_CMGP_USI1_IPCLK>;
clock-names = "uart", "clk_uart_baud0";
status = "disabled";
};
spi_2: spi@11d20000 {
compatible = "samsung,exynos850-spi";
reg = <0x11d20000 0x30>;
clocks = <&cmu_cmgp CLK_GOUT_CMGP_USI1_PCLK>,
<&cmu_cmgp CLK_GOUT_CMGP_USI1_IPCLK>;
clock-names = "spi", "spi_busclk0";
dmas = <&pdma0 14>, <&pdma0 15>;
dma-names = "tx", "rx";
interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
pinctrl-0 = <&spi2_pins>;
pinctrl-names = "default";
num-cs = <1>;
samsung,spi-src-clk = <0>;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
};
};
};
};
#include "exynos850-pinctrl.dtsi"
/*
* Copyright (C) 2019 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN
* AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef _mmhub_2_0_0_SH_MASK_HEADER
#define _mmhub_2_0_0_SH_MASK_HEADER
// addressBlock: mmhub_dagbdec
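/*
 * Usage sketch (illustrative; this is the generic mask/shift idiom, not an
 * API defined in this header): every register field below is described by a
 * __SHIFT/_MASK pair, so e.g. the URG_HIGH field of DAGB0_RDCLI0 is read and
 * updated as:
 *
 *	urg = (val & DAGB0_RDCLI0__URG_HIGH_MASK) >>
 *	      DAGB0_RDCLI0__URG_HIGH__SHIFT;
 *	val = (val & ~DAGB0_RDCLI0__URG_HIGH_MASK) |
 *	      ((urg << DAGB0_RDCLI0__URG_HIGH__SHIFT) &
 *	       DAGB0_RDCLI0__URG_HIGH_MASK);
 */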
//DAGB0_RDCLI0
#define DAGB0_RDCLI0__VIRT_CHAN__SHIFT 0x0
#define DAGB0_RDCLI0__CHECK_TLB_CREDIT__SHIFT 0x3
#define DAGB0_RDCLI0__URG_HIGH__SHIFT 0x4
#define DAGB0_RDCLI0__URG_LOW__SHIFT 0x8
#define DAGB0_RDCLI0__MAX_BW_ENABLE__SHIFT 0xc
#define DAGB0_RDCLI0__MAX_BW__SHIFT 0xd
#define DAGB0_RDCLI0__MIN_BW_ENABLE__SHIFT 0x15
#define DAGB0_RDCLI0__MIN_BW__SHIFT 0x16
#define DAGB0_RDCLI0__OSD_LIMITER_ENABLE__SHIFT 0x19
#define DAGB0_RDCLI0__MAX_OSD__SHIFT 0x1a
#define DAGB0_RDCLI0__VIRT_CHAN_MASK 0x00000007L
#define DAGB0_RDCLI0__CHECK_TLB_CREDIT_MASK 0x00000008L
#define DAGB0_RDCLI0__URG_HIGH_MASK 0x000000F0L
#define DAGB0_RDCLI0__URG_LOW_MASK 0x00000F00L
#define DAGB0_RDCLI0__MAX_BW_ENABLE_MASK 0x00001000L
#define DAGB0_RDCLI0__MAX_BW_MASK 0x001FE000L
#define DAGB0_RDCLI0__MIN_BW_ENABLE_MASK 0x00200000L
#define DAGB0_RDCLI0__MIN_BW_MASK 0x01C00000L
#define DAGB0_RDCLI0__OSD_LIMITER_ENABLE_MASK 0x02000000L
#define DAGB0_RDCLI0__MAX_OSD_MASK 0xFC000000L
//DAGB0_RDCLI1
#define DAGB0_RDCLI1__VIRT_CHAN__SHIFT 0x0
#define DAGB0_RDCLI1__CHECK_TLB_CREDIT__SHIFT 0x3
#define DAGB0_RDCLI1__URG_HIGH__SHIFT 0x4
#define DAGB0_RDCLI1__URG_LOW__SHIFT 0x8
#define DAGB0_RDCLI1__MAX_BW_ENABLE__SHIFT 0xc
#define DAGB0_RDCLI1__MAX_BW__SHIFT 0xd
#define DAGB0_RDCLI1__MIN_BW_ENABLE__SHIFT 0x15
#define DAGB0_RDCLI1__MIN_BW__SHIFT 0x16
#define DAGB0_RDCLI1__OSD_LIMITER_ENABLE__SHIFT 0x19
#define DAGB0_RDCLI1__MAX_OSD__SHIFT 0x1a
#define DAGB0_RDCLI1__VIRT_CHAN_MASK 0x00000007L
#define DAGB0_RDCLI1__CHECK_TLB_CREDIT_MASK 0x00000008L
#define DAGB0_RDCLI1__URG_HIGH_MASK 0x000000F0L
#define DAGB0_RDCLI1__URG_LOW_MASK 0x00000F00L
#define DAGB0_RDCLI1__MAX_BW_ENABLE_MASK 0x00001000L
#define DAGB0_RDCLI1__MAX_BW_MASK 0x001FE000L
#define DAGB0_RDCLI1__MIN_BW_ENABLE_MASK 0x00200000L
#define DAGB0_RDCLI1__MIN_BW_MASK 0x01C00000L
#define DAGB0_RDCLI1__OSD_LIMITER_ENABLE_MASK 0x02000000L
#define DAGB0_RDCLI1__MAX_OSD_MASK 0xFC000000L
//DAGB0_RDCLI2
#define DAGB0_RDCLI2__VIRT_CHAN__SHIFT 0x0
#define DAGB0_RDCLI2__CHECK_TLB_CREDIT__SHIFT 0x3
#define DAGB0_RDCLI2__URG_HIGH__SHIFT 0x4
#define DAGB0_RDCLI2__URG_LOW__SHIFT 0x8
#define DAGB0_RDCLI2__MAX_BW_ENABLE__SHIFT 0xc
#define DAGB0_RDCLI2__MAX_BW__SHIFT 0xd
#define DAGB0_RDCLI2__MIN_BW_ENABLE__SHIFT 0x15
#define DAGB0_RDCLI2__MIN_BW__SHIFT 0x16
#define DAGB0_RDCLI2__OSD_LIMITER_ENABLE__SHIFT 0x19
#define DAGB0_RDCLI2__MAX_OSD__SHIFT 0x1a
#define DAGB0_RDCLI2__VIRT_CHAN_MASK 0x00000007L
#define DAGB0_RDCLI2__CHECK_TLB_CREDIT_MASK 0x00000008L
#define DAGB0_RDCLI2__URG_HIGH_MASK 0x000000F0L
#define DAGB0_RDCLI2__URG_LOW_MASK 0x00000F00L
#define DAGB0_RDCLI2__MAX_BW_ENABLE_MASK 0x00001000L
#define DAGB0_RDCLI2__MAX_BW_MASK 0x001FE000L
#define DAGB0_RDCLI2__MIN_BW_ENABLE_MASK 0x00200000L
#define DAGB0_RDCLI2__MIN_BW_MASK 0x01C00000L
#define DAGB0_RDCLI2__OSD_LIMITER_ENABLE_MASK 0x02000000L
#define DAGB0_RDCLI2__MAX_OSD_MASK 0xFC000000L
//DAGB0_RDCLI3
#define DAGB0_RDCLI3__VIRT_CHAN__SHIFT 0x0
#define DAGB0_RDCLI3__CHECK_TLB_CREDIT__SHIFT 0x3
#define DAGB0_RDCLI3__URG_HIGH__SHIFT 0x4
#define DAGB0_RDCLI3__URG_LOW__SHIFT 0x8
#define DAGB0_RDCLI3__MAX_BW_ENABLE__SHIFT 0xc
#define DAGB0_RDCLI3__MAX_BW__SHIFT 0xd
#define DAGB0_RDCLI3__MIN_BW_ENABLE__SHIFT 0x15
#define DAGB0_RDCLI3__MIN_BW__SHIFT 0x16
#define DAGB0_RDCLI3__OSD_LIMITER_ENABLE__SHIFT 0x19
#define DAGB0_RDCLI3__MAX_OSD__SHIFT 0x1a
#define DAGB0_RDCLI3__VIRT_CHAN_MASK 0x00000007L
#define DAGB0_RDCLI3__CHECK_TLB_CREDIT_MASK 0x00000008L
#define DAGB0_RDCLI3__URG_HIGH_MASK 0x000000F0L
#define DAGB0_RDCLI3__URG_LOW_MASK 0x00000F00L
#define DAGB0_RDCLI3__MAX_BW_ENABLE_MASK 0x00001000L
#define DAGB0_RDCLI3__MAX_BW_MASK 0x001FE000L
#define DAGB0_RDCLI3__MIN_BW_ENABLE_MASK 0x00200000L
#define DAGB0_RDCLI3__MIN_BW_MASK 0x01C00000L
#define DAGB0_RDCLI3__OSD_LIMITER_ENABLE_MASK 0x02000000L
#define DAGB0_RDCLI3__MAX_OSD_MASK 0xFC000000L
//DAGB0_RDCLI4
#define DAGB0_RDCLI4__VIRT_CHAN__SHIFT 0x0
#define DAGB0_RDCLI4__CHECK_TLB_CREDIT__SHIFT 0x3
#define DAGB0_RDCLI4__URG_HIGH__SHIFT 0x4
#define DAGB0_RDCLI4__URG_LOW__SHIFT 0x8
#define DAGB0_RDCLI4__MAX_BW_ENABLE__SHIFT 0xc
#define DAGB0_RDCLI4__MAX_BW__SHIFT 0xd
#define DAGB0_RDCLI4__MIN_BW_ENABLE__SHIFT 0x15
#define DAGB0_RDCLI4__MIN_BW__SHIFT 0x16
#define DAGB0_RDCLI4__OSD_LIMITER_ENABLE__SHIFT 0x19
#define DAGB0_RDCLI4__MAX_OSD__SHIFT 0x1a
#define DAGB0_RDCLI4__VIRT_CHAN_MASK 0x00000007L
#define DAGB0_RDCLI4__CHECK_TLB_CREDIT_MASK 0x00000008L
#define DAGB0_RDCLI4__URG_HIGH_MASK 0x000000F0L
#define DAGB0_RDCLI4__URG_LOW_MASK 0x00000F00L
#define DAGB0_RDCLI4__MAX_BW_ENABLE_MASK 0x00001000L
#define DAGB0_RDCLI4__MAX_BW_MASK 0x001FE000L
#define DAGB0_RDCLI4__MIN_BW_ENABLE_MASK 0x00200000L
#define DAGB0_RDCLI4__MIN_BW_MASK 0x01C00000L
#define DAGB0_RDCLI4__OSD_LIMITER_ENABLE_MASK 0x02000000L
#define DAGB0_RDCLI4__MAX_OSD_MASK 0xFC000000L
//DAGB0_RDCLI5
#define DAGB0_RDCLI5__VIRT_CHAN__SHIFT 0x0
#define DAGB0_RDCLI5__CHECK_TLB_CREDIT__SHIFT 0x3
#define DAGB0_RDCLI5__URG_HIGH__SHIFT 0x4
#define DAGB0_RDCLI5__URG_LOW__SHIFT 0x8
#define DAGB0_RDCLI5__MAX_BW_ENABLE__SHIFT 0xc
#define DAGB0_RDCLI5__MAX_BW__SHIFT 0xd
#define DAGB0_RDCLI5__MIN_BW_ENABLE__SHIFT 0x15
#define DAGB0_RDCLI5__MIN_BW__SHIFT 0x16
#define DAGB0_RDCLI5__OSD_LIMITER_ENABLE__SHIFT 0x19
#define DAGB0_RDCLI5__MAX_OSD__SHIFT 0x1a
#define DAGB0_RDCLI5__VIRT_CHAN_MASK 0x00000007L
#define DAGB0_RDCLI5__CHECK_TLB_CREDIT_MASK 0x00000008L
#define DAGB0_RDCLI5__URG_HIGH_MASK 0x000000F0L
#define DAGB0_RDCLI5__URG_LOW_MASK 0x00000F00L
#define DAGB0_RDCLI5__MAX_BW_ENABLE_MASK 0x00001000L
#define DAGB0_RDCLI5__MAX_BW_MASK 0x001FE000L
#define DAGB0_RDCLI5__MIN_BW_ENABLE_MASK 0x00200000L
#define DAGB0_RDCLI5__MIN_BW_MASK 0x01C00000L
#define DAGB0_RDCLI5__OSD_LIMITER_ENABLE_MASK 0x02000000L
#define DAGB0_RDCLI5__MAX_OSD_MASK 0xFC000000L
//DAGB0_RDCLI6
#define DAGB0_RDCLI6__VIRT_CHAN__SHIFT 0x0
#define DAGB0_RDCLI6__CHECK_TLB_CREDIT__SHIFT 0x3
#define DAGB0_RDCLI6__URG_HIGH__SHIFT 0x4
#define DAGB0_RDCLI6__URG_LOW__SHIFT 0x8
#define DAGB0_RDCLI6__MAX_BW_ENABLE__SHIFT 0xc
#define DAGB0_RDCLI6__MAX_BW__SHIFT 0xd
#define DAGB0_RDCLI6__MIN_BW_ENABLE__SHIFT 0x15
#define DAGB0_RDCLI6__MIN_BW__SHIFT 0x16
#define DAGB0_RDCLI6__OSD_LIMITER_ENABLE__SHIFT 0x19
#define DAGB0_RDCLI6__MAX_OSD__SHIFT 0x1a
#define DAGB0_RDCLI6__VIRT_CHAN_MASK 0x00000007L
#define DAGB0_RDCLI6__CHECK_TLB_CREDIT_MASK 0x00000008L
#define DAGB0_RDCLI6__URG_HIGH_MASK 0x000000F0L
#define DAGB0_RDCLI6__URG_LOW_MASK 0x00000F00L
#define DAGB0_RDCLI6__MAX_BW_ENABLE_MASK 0x00001000L
#define DAGB0_RDCLI6__MAX_BW_MASK 0x001FE000L
#define DAGB0_RDCLI6__MIN_BW_ENABLE_MASK 0x00200000L
#define DAGB0_RDCLI6__MIN_BW_MASK 0x01C00000L
#define DAGB0_RDCLI6__OSD_LIMITER_ENABLE_MASK 0x02000000L
#define DAGB0_RDCLI6__MAX_OSD_MASK 0xFC000000L
//DAGB0_RDCLI7
#define DAGB0_RDCLI7__VIRT_CHAN__SHIFT 0x0
#define DAGB0_RDCLI7__CHECK_TLB_CREDIT__SHIFT 0x3
#define DAGB0_RDCLI7__URG_HIGH__SHIFT 0x4
#define DAGB0_RDCLI7__URG_LOW__SHIFT 0x8
#define DAGB0_RDCLI7__MAX_BW_ENABLE__SHIFT 0xc
#define DAGB0_RDCLI7__MAX_BW__SHIFT 0xd
#define DAGB0_RDCLI7__MIN_BW_ENABLE__SHIFT 0x15
#define DAGB0_RDCLI7__MIN_BW__SHIFT 0x16
#define DAGB0_RDCLI7__OSD_LIMITER_ENABLE__SHIFT 0x19
#define DAGB0_RDCLI7__MAX_OSD__SHIFT 0x1a
#define DAGB0_RDCLI7__VIRT_CHAN_MASK 0x00000007L
#define DAGB0_RDCLI7__CHECK_TLB_CREDIT_MASK 0x00000008L
#define DAGB0_RDCLI7__URG_HIGH_MASK 0x000000F0L
#define DAGB0_RDCLI7__URG_LOW_MASK 0x00000F00L
#define DAGB0_RDCLI7__MAX_BW_ENABLE_MASK 0x00001000L
#define DAGB0_RDCLI7__MAX_BW_MASK 0x001FE000L
#define DAGB0_RDCLI7__MIN_BW_ENABLE_MASK 0x00200000L
#define DAGB0_RDCLI7__MIN_BW_MASK 0x01C00000L
#define DAGB0_RDCLI7__OSD_LIMITER_ENABLE_MASK 0x02000000L
#define DAGB0_RDCLI7__MAX_OSD_MASK 0xFC000000L
//DAGB0_RDCLI8
#define DAGB0_RDCLI8__VIRT_CHAN__SHIFT 0x0
#define DAGB0_RDCLI8__CHECK_TLB_CREDIT__SHIFT 0x3
#define DAGB0_RDCLI8__URG_HIGH__SHIFT 0x4
#define DAGB0_RDCLI8__URG_LOW__SHIFT 0x8
#define DAGB0_RDCLI8__MAX_BW_ENABLE__SHIFT 0xc
#define DAGB0_RDCLI8__MAX_BW__SHIFT 0xd
#define DAGB0_RDCLI8__MIN_BW_ENABLE__SHIFT 0x15
#define DAGB0_RDCLI8__MIN_BW__SHIFT 0x16
#define DAGB0_RDCLI8__OSD_LIMITER_ENABLE__SHIFT 0x19
#define DAGB0_RDCLI8__MAX_OSD__SHIFT 0x1a
#define DAGB0_RDCLI8__VIRT_CHAN_MASK 0x00000007L
#define DAGB0_RDCLI8__CHECK_TLB_CREDIT_MASK 0x00000008L
#define DAGB0_RDCLI8__URG_HIGH_MASK 0x000000F0L
#define DAGB0_RDCLI8__URG_LOW_MASK 0x00000F00L
#define DAGB0_RDCLI8__MAX_BW_ENABLE_MASK 0x00001000L
#define DAGB0_RDCLI8__MAX_BW_MASK 0x001FE000L
#define DAGB0_RDCLI8__MIN_BW_ENABLE_MASK 0x00200000L
#define DAGB0_RDCLI8__MIN_BW_MASK 0x01C00000L
#define DAGB0_RDCLI8__OSD_LIMITER_ENABLE_MASK 0x02000000L
#define DAGB0_RDCLI8__MAX_OSD_MASK 0xFC000000L
//DAGB0_RDCLI9
#define DAGB0_RDCLI9__VIRT_CHAN__SHIFT 0x0
#define DAGB0_RDCLI9__CHECK_TLB_CREDIT__SHIFT 0x3
#define DAGB0_RDCLI9__URG_HIGH__SHIFT 0x4
#define DAGB0_RDCLI9__URG_LOW__SHIFT 0x8
#define DAGB0_RDCLI9__MAX_BW_ENABLE__SHIFT 0xc
#define DAGB0_RDCLI9__MAX_BW__SHIFT 0xd
#define DAGB0_RDCLI9__MIN_BW_ENABLE__SHIFT 0x15
#define DAGB0_RDCLI9__MIN_BW__SHIFT 0x16
#define DAGB0_RDCLI9__OSD_LIMITER_ENABLE__SHIFT 0x19
#define DAGB0_RDCLI9__MAX_OSD__SHIFT 0x1a
#define DAGB0_RDCLI9__VIRT_CHAN_MASK 0x00000007L
#define DAGB0_RDCLI9__CHECK_TLB_CREDIT_MASK 0x00000008L
#define DAGB0_RDCLI9__URG_HIGH_MASK 0x000000F0L
#define DAGB0_RDCLI9__URG_LOW_MASK 0x00000F00L
#define DAGB0_RDCLI9__MAX_BW_ENABLE_MASK 0x00001000L
#define DAGB0_RDCLI9__MAX_BW_MASK 0x001FE000L
#define DAGB0_RDCLI9__MIN_BW_ENABLE_MASK 0x00200000L
#define DAGB0_RDCLI9__MIN_BW_MASK 0x01C00000L
#define DAGB0_RDCLI9__OSD_LIMITER_ENABLE_MASK 0x02000000L
#define DAGB0_RDCLI9__MAX_OSD_MASK 0xFC000000L
//DAGB0_RDCLI10
#define DAGB0_RDCLI10__VIRT_CHAN__SHIFT 0x0
#define DAGB0_RDCLI10__CHECK_TLB_CREDIT__SHIFT 0x3
#define DAGB0_RDCLI10__URG_HIGH__SHIFT 0x4
#define DAGB0_RDCLI10__URG_LOW__SHIFT 0x8
#define DAGB0_RDCLI10__MAX_BW_ENABLE__SHIFT 0xc
#define DAGB0_RDCLI10__MAX_BW__SHIFT 0xd
#define DAGB0_RDCLI10__MIN_BW_ENABLE__SHIFT 0x15
#define DAGB0_RDCLI10__MIN_BW__SHIFT 0x16
#define DAGB0_RDCLI10__OSD_LIMITER_ENABLE__SHIFT 0x19
#define DAGB0_RDCLI10__MAX_OSD__SHIFT 0x1a
#define DAGB0_RDCLI10__VIRT_CHAN_MASK 0x00000007L
#define DAGB0_RDCLI10__CHECK_TLB_CREDIT_MASK 0x00000008L
#define DAGB0_RDCLI10__URG_HIGH_MASK 0x000000F0L
#define DAGB0_RDCLI10__URG_LOW_MASK 0x00000F00L
#define DAGB0_RDCLI10__MAX_BW_ENABLE_MASK 0x00001000L
#define DAGB0_RDCLI10__MAX_BW_MASK 0x001FE000L
#define DAGB0_RDCLI10__MIN_BW_ENABLE_MASK 0x00200000L
#define DAGB0_RDCLI10__MIN_BW_MASK 0x01C00000L
#define DAGB0_RDCLI10__OSD_LIMITER_ENABLE_MASK 0x02000000L
#define DAGB0_RDCLI10__MAX_OSD_MASK 0xFC000000L
//DAGB0_RDCLI11
#define DAGB0_RDCLI11__VIRT_CHAN__SHIFT 0x0
#define DAGB0_RDCLI11__CHECK_TLB_CREDIT__SHIFT 0x3
#define DAGB0_RDCLI11__URG_HIGH__SHIFT 0x4
#define DAGB0_RDCLI11__URG_LOW__SHIFT 0x8
#define DAGB0_RDCLI11__MAX_BW_ENABLE__SHIFT 0xc
#define DAGB0_RDCLI11__MAX_BW__SHIFT 0xd
#define DAGB0_RDCLI11__MIN_BW_ENABLE__SHIFT 0x15
#define DAGB0_RDCLI11__MIN_BW__SHIFT 0x16
#define DAGB0_RDCLI11__OSD_LIMITER_ENABLE__SHIFT 0x19
#define DAGB0_RDCLI11__MAX_OSD__SHIFT 0x1a
#define DAGB0_RDCLI11__VIRT_CHAN_MASK 0x00000007L
#define DAGB0_RDCLI11__CHECK_TLB_CREDIT_MASK 0x00000008L
#define DAGB0_RDCLI11__URG_HIGH_MASK 0x000000F0L
#define DAGB0_RDCLI11__URG_LOW_MASK 0x00000F00L
#define DAGB0_RDCLI11__MAX_BW_ENABLE_MASK 0x00001000L
#define DAGB0_RDCLI11__MAX_BW_MASK 0x001FE000L
#define DAGB0_RDCLI11__MIN_BW_ENABLE_MASK 0x00200000L
#define DAGB0_RDCLI11__MIN_BW_MASK 0x01C00000L
#define DAGB0_RDCLI11__OSD_LIMITER_ENABLE_MASK 0x02000000L
#define DAGB0_RDCLI11__MAX_OSD_MASK 0xFC000000L
//DAGB0_RDCLI12
#define DAGB0_RDCLI12__VIRT_CHAN__SHIFT 0x0
#define DAGB0_RDCLI12__CHECK_TLB_CREDIT__SHIFT 0x3
#define DAGB0_RDCLI12__URG_HIGH__SHIFT 0x4
#define DAGB0_RDCLI12__URG_LOW__SHIFT 0x8
#define DAGB0_RDCLI12__MAX_BW_ENABLE__SHIFT 0xc
#define DAGB0_RDCLI12__MAX_BW__SHIFT 0xd
#define DAGB0_RDCLI12__MIN_BW_ENABLE__SHIFT 0x15
#define DAGB0_RDCLI12__MIN_BW__SHIFT 0x16
#define DAGB0_RDCLI12__OSD_LIMITER_ENABLE__SHIFT 0x19
#define DAGB0_RDCLI12__MAX_OSD__SHIFT 0x1a
#define DAGB0_RDCLI12__VIRT_CHAN_MASK 0x00000007L
#define DAGB0_RDCLI12__CHECK_TLB_CREDIT_MASK 0x00000008L
#define DAGB0_RDCLI12__URG_HIGH_MASK 0x000000F0L
#define DAGB0_RDCLI12__URG_LOW_MASK 0x00000F00L
#define DAGB0_RDCLI12__MAX_BW_ENABLE_MASK 0x00001000L
#define DAGB0_RDCLI12__MAX_BW_MASK 0x001FE000L
#define DAGB0_RDCLI12__MIN_BW_ENABLE_MASK 0x00200000L
#define DAGB0_RDCLI12__MIN_BW_MASK 0x01C00000L
#define DAGB0_RDCLI12__OSD_LIMITER_ENABLE_MASK 0x02000000L
#define DAGB0_RDCLI12__MAX_OSD_MASK 0xFC000000L
//DAGB0_RDCLI13
#define DAGB0_RDCLI13__VIRT_CHAN__SHIFT 0x0
#define DAGB0_RDCLI13__CHECK_TLB_CREDIT__SHIFT 0x3
#define DAGB0_RDCLI13__URG_HIGH__SHIFT 0x4
#define DAGB0_RDCLI13__URG_LOW__SHIFT 0x8
#define DAGB0_RDCLI13__MAX_BW_ENABLE__SHIFT 0xc
#define DAGB0_RDCLI13__MAX_BW__SHIFT 0xd
#define DAGB0_RDCLI13__MIN_BW_ENABLE__SHIFT 0x15
#define DAGB0_RDCLI13__MIN_BW__SHIFT 0x16
#define DAGB0_RDCLI13__OSD_LIMITER_ENABLE__SHIFT 0x19
#define DAGB0_RDCLI13__MAX_OSD__SHIFT 0x1a
#define DAGB0_RDCLI13__VIRT_CHAN_MASK 0x00000007L
#define DAGB0_RDCLI13__CHECK_TLB_CREDIT_MASK 0x00000008L
#define DAGB0_RDCLI13__URG_HIGH_MASK 0x000000F0L
#define DAGB0_RDCLI13__URG_LOW_MASK 0x00000F00L
#define DAGB0_RDCLI13__MAX_BW_ENABLE_MASK 0x00001000L
#define DAGB0_RDCLI13__MAX_BW_MASK 0x001FE000L
#define DAGB0_RDCLI13__MIN_BW_ENABLE_MASK 0x00200000L
#define DAGB0_RDCLI13__MIN_BW_MASK 0x01C00000L
#define DAGB0_RDCLI13__OSD_LIMITER_ENABLE_MASK 0x02000000L
#define DAGB0_RDCLI13__MAX_OSD_MASK 0xFC000000L
//DAGB0_RDCLI14
#define DAGB0_RDCLI14__VIRT_CHAN__SHIFT 0x0
#define DAGB0_RDCLI14__CHECK_TLB_CREDIT__SHIFT 0x3
#define DAGB0_RDCLI14__URG_HIGH__SHIFT 0x4
#define DAGB0_RDCLI14__URG_LOW__SHIFT 0x8
#define DAGB0_RDCLI14__MAX_BW_ENABLE__SHIFT 0xc
#define DAGB0_RDCLI14__MAX_BW__SHIFT 0xd
#define DAGB0_RDCLI14__MIN_BW_ENABLE__SHIFT 0x15
#define DAGB0_RDCLI14__MIN_BW__SHIFT 0x16
#define DAGB0_RDCLI14__OSD_LIMITER_ENABLE__SHIFT 0x19
#define DAGB0_RDCLI14__MAX_OSD__SHIFT 0x1a
#define DAGB0_RDCLI14__VIRT_CHAN_MASK 0x00000007L
#define DAGB0_RDCLI14__CHECK_TLB_CREDIT_MASK 0x00000008L
#define DAGB0_RDCLI14__URG_HIGH_MASK 0x000000F0L
#define DAGB0_RDCLI14__URG_LOW_MASK 0x00000F00L
#define DAGB0_RDCLI14__MAX_BW_ENABLE_MASK 0x00001000L
#define DAGB0_RDCLI14__MAX_BW_MASK 0x001FE000L
#define DAGB0_RDCLI14__MIN_BW_ENABLE_MASK 0x00200000L
#define DAGB0_RDCLI14__MIN_BW_MASK 0x01C00000L
#define DAGB0_RDCLI14__OSD_LIMITER_ENABLE_MASK 0x02000000L
#define DAGB0_RDCLI14__MAX_OSD_MASK 0xFC000000L
//DAGB0_RDCLI15
#define DAGB0_RDCLI15__VIRT_CHAN__SHIFT 0x0
#define DAGB0_RDCLI15__CHECK_TLB_CREDIT__SHIFT 0x3
#define DAGB0_RDCLI15__URG_HIGH__SHIFT 0x4
#define DAGB0_RDCLI15__URG_LOW__SHIFT 0x8
#define DAGB0_RDCLI15__MAX_BW_ENABLE__SHIFT 0xc
#define DAGB0_RDCLI15__MAX_BW__SHIFT 0xd
#define DAGB0_RDCLI15__MIN_BW_ENABLE__SHIFT 0x15
#define DAGB0_RDCLI15__MIN_BW__SHIFT 0x16
#define DAGB0_RDCLI15__OSD_LIMITER_ENABLE__SHIFT 0x19
#define DAGB0_RDCLI15__MAX_OSD__SHIFT 0x1a
#define DAGB0_RDCLI15__VIRT_CHAN_MASK 0x00000007L
#define DAGB0_RDCLI15__CHECK_TLB_CREDIT_MASK 0x00000008L
#define DAGB0_RDCLI15__URG_HIGH_MASK 0x000000F0L
#define DAGB0_RDCLI15__URG_LOW_MASK 0x00000F00L
#define DAGB0_RDCLI15__MAX_BW_ENABLE_MASK 0x00001000L
#define DAGB0_RDCLI15__MAX_BW_MASK 0x001FE000L
#define DAGB0_RDCLI15__MIN_BW_ENABLE_MASK 0x00200000L
#define DAGB0_RDCLI15__MIN_BW_MASK 0x01C00000L
#define DAGB0_RDCLI15__OSD_LIMITER_ENABLE_MASK 0x02000000L
#define DAGB0_RDCLI15__MAX_OSD_MASK 0xFC000000L
//DAGB0_RDCLI16
#define DAGB0_RDCLI16__VIRT_CHAN__SHIFT 0x0
#define DAGB0_RDCLI16__CHECK_TLB_CREDIT__SHIFT 0x3
#define DAGB0_RDCLI16__URG_HIGH__SHIFT 0x4
#define DAGB0_RDCLI16__URG_LOW__SHIFT 0x8
#define DAGB0_RDCLI16__MAX_BW_ENABLE__SHIFT 0xc
#define DAGB0_RDCLI16__MAX_BW__SHIFT 0xd
#define DAGB0_RDCLI16__MIN_BW_ENABLE__SHIFT 0x15
#define DAGB0_RDCLI16__MIN_BW__SHIFT 0x16
#define DAGB0_RDCLI16__OSD_LIMITER_ENABLE__SHIFT 0x19
#define DAGB0_RDCLI16__MAX_OSD__SHIFT 0x1a
#define DAGB0_RDCLI16__VIRT_CHAN_MASK 0x00000007L
#define DAGB0_RDCLI16__CHECK_TLB_CREDIT_MASK 0x00000008L
#define DAGB0_RDCLI16__URG_HIGH_MASK 0x000000F0L
#define DAGB0_RDCLI16__URG_LOW_MASK 0x00000F00L
#define DAGB0_RDCLI16__MAX_BW_ENABLE_MASK 0x00001000L
#define DAGB0_RDCLI16__MAX_BW_MASK 0x001FE000L
#define DAGB0_RDCLI16__MIN_BW_ENABLE_MASK 0x00200000L
#define DAGB0_RDCLI16__MIN_BW_MASK 0x01C00000L
#define DAGB0_RDCLI16__OSD_LIMITER_ENABLE_MASK 0x02000000L
#define DAGB0_RDCLI16__MAX_OSD_MASK 0xFC000000L
//DAGB0_RDCLI17
#define DAGB0_RDCLI17__VIRT_CHAN__SHIFT 0x0
#define DAGB0_RDCLI17__CHECK_TLB_CREDIT__SHIFT 0x3
#define DAGB0_RDCLI17__URG_HIGH__SHIFT 0x4
#define DAGB0_RDCLI17__URG_LOW__SHIFT 0x8
#define DAGB0_RDCLI17__MAX_BW_ENABLE__SHIFT 0xc
#define DAGB0_RDCLI17__MAX_BW__SHIFT 0xd
#define DAGB0_RDCLI17__MIN_BW_ENABLE__SHIFT 0x15
#define DAGB0_RDCLI17__MIN_BW__SHIFT 0x16
#define DAGB0_RDCLI17__OSD_LIMITER_ENABLE__SHIFT 0x19
#define DAGB0_RDCLI17__MAX_OSD__SHIFT 0x1a
#define DAGB0_RDCLI17__VIRT_CHAN_MASK 0x00000007L
#define DAGB0_RDCLI17__CHECK_TLB_CREDIT_MASK 0x00000008L
#define DAGB0_RDCLI17__URG_HIGH_MASK 0x000000F0L
#define DAGB0_RDCLI17__URG_LOW_MASK 0x00000F00L
#define DAGB0_RDCLI17__MAX_BW_ENABLE_MASK 0x00001000L
#define DAGB0_RDCLI17__MAX_BW_MASK 0x001FE000L
#define DAGB0_RDCLI17__MIN_BW_ENABLE_MASK 0x00200000L
#define DAGB0_RDCLI17__MIN_BW_MASK 0x01C00000L
#define DAGB0_RDCLI17__OSD_LIMITER_ENABLE_MASK 0x02000000L
#define DAGB0_RDCLI17__MAX_OSD_MASK 0xFC000000L
//DAGB0_RDCLI18
#define DAGB0_RDCLI18__VIRT_CHAN__SHIFT 0x0
#define DAGB0_RDCLI18__CHECK_TLB_CREDIT__SHIFT 0x3
#define DAGB0_RDCLI18__URG_HIGH__SHIFT 0x4
#define DAGB0_RDCLI18__URG_LOW__SHIFT 0x8
#define DAGB0_RDCLI18__MAX_BW_ENABLE__SHIFT 0xc
#define DAGB0_RDCLI18__MAX_BW__SHIFT 0xd
#define DAGB0_RDCLI18__MIN_BW_ENABLE__SHIFT 0x15
#define DAGB0_RDCLI18__MIN_BW__SHIFT 0x16
#define DAGB0_RDCLI18__OSD_LIMITER_ENABLE__SHIFT 0x19
#define DAGB0_RDCLI18__MAX_OSD__SHIFT 0x1a
#define DAGB0_RDCLI18__VIRT_CHAN_MASK 0x00000007L
#define DAGB0_RDCLI18__CHECK_TLB_CREDIT_MASK 0x00000008L
#define DAGB0_RDCLI18__URG_HIGH_MASK 0x000000F0L
#define DAGB0_RDCLI18__URG_LOW_MASK 0x00000F00L
#define DAGB0_RDCLI18__MAX_BW_ENABLE_MASK 0x00001000L
#define DAGB0_RDCLI18__MAX_BW_MASK 0x001FE000L
#define DAGB0_RDCLI18__MIN_BW_ENABLE_MASK 0x00200000L
#define DAGB0_RDCLI18__MIN_BW_MASK 0x01C00000L
#define DAGB0_RDCLI18__OSD_LIMITER_ENABLE_MASK 0x02000000L
#define DAGB0_RDCLI18__MAX_OSD_MASK 0xFC000000L
//DAGB0_RD_CNTL
#define DAGB0_RD_CNTL__SCLK_FREQ__SHIFT 0x0
#define DAGB0_RD_CNTL__CLI_MAX_BW_WINDOW__SHIFT 0x4
#define DAGB0_RD_CNTL__VC_MAX_BW_WINDOW__SHIFT 0xa
#define DAGB0_RD_CNTL__IO_LEVEL_OVERRIDE_ENABLE__SHIFT 0x10
#define DAGB0_RD_CNTL__IO_LEVEL__SHIFT 0x11
#define DAGB0_RD_CNTL__IO_LEVEL_COMPLY_VC__SHIFT 0x14
#define DAGB0_RD_CNTL__SHARE_VC_NUM__SHIFT 0x17
#define DAGB0_RD_CNTL__SCLK_FREQ_MASK 0x0000000FL
#define DAGB0_RD_CNTL__CLI_MAX_BW_WINDOW_MASK 0x000003F0L
#define DAGB0_RD_CNTL__VC_MAX_BW_WINDOW_MASK 0x0000FC00L
#define DAGB0_RD_CNTL__IO_LEVEL_OVERRIDE_ENABLE_MASK 0x00010000L
#define DAGB0_RD_CNTL__IO_LEVEL_MASK 0x000E0000L
#define DAGB0_RD_CNTL__IO_LEVEL_COMPLY_VC_MASK 0x00700000L
#define DAGB0_RD_CNTL__SHARE_VC_NUM_MASK 0x03800000L
//DAGB0_RD_GMI_CNTL
#define DAGB0_RD_GMI_CNTL__EA_CREDIT__SHIFT 0x0
#define DAGB0_RD_GMI_CNTL__LEVEL__SHIFT 0x6
#define DAGB0_RD_GMI_CNTL__MAX_BURST__SHIFT 0x9
#define DAGB0_RD_GMI_CNTL__LAZY_TIMER__SHIFT 0xd
#define DAGB0_RD_GMI_CNTL__EA_CREDIT_MASK 0x0000003FL
#define DAGB0_RD_GMI_CNTL__LEVEL_MASK 0x000001C0L
#define DAGB0_RD_GMI_CNTL__MAX_BURST_MASK 0x00001E00L
#define DAGB0_RD_GMI_CNTL__LAZY_TIMER_MASK 0x0001E000L
//DAGB0_RD_ADDR_DAGB
#define DAGB0_RD_ADDR_DAGB__DAGB_ENABLE__SHIFT 0x0
#define DAGB0_RD_ADDR_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3
#define DAGB0_RD_ADDR_DAGB__DISABLE_SELF_INIT__SHIFT 0x6
#define DAGB0_RD_ADDR_DAGB__WHOAMI__SHIFT 0x7
#define DAGB0_RD_ADDR_DAGB__DAGB_ENABLE_MASK 0x00000007L
#define DAGB0_RD_ADDR_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L
#define DAGB0_RD_ADDR_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L
#define DAGB0_RD_ADDR_DAGB__WHOAMI_MASK 0x00001F80L
//DAGB0_RD_OUTPUT_DAGB_MAX_BURST
#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC0__SHIFT 0x0
#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC1__SHIFT 0x4
#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC2__SHIFT 0x8
#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC3__SHIFT 0xc
#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC4__SHIFT 0x10
#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC5__SHIFT 0x14
#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC6__SHIFT 0x18
#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC7__SHIFT 0x1c
#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC0_MASK 0x0000000FL
#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC1_MASK 0x000000F0L
#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC2_MASK 0x00000F00L
#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC3_MASK 0x0000F000L
#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC4_MASK 0x000F0000L
#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC5_MASK 0x00F00000L
#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC6_MASK 0x0F000000L
#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC7_MASK 0xF0000000L
//DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER
#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC0__SHIFT 0x0
#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC1__SHIFT 0x4
#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC2__SHIFT 0x8
#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC3__SHIFT 0xc
#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC4__SHIFT 0x10
#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC5__SHIFT 0x14
#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC6__SHIFT 0x18
#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC7__SHIFT 0x1c
#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC0_MASK 0x0000000FL
#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC1_MASK 0x000000F0L
#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC2_MASK 0x00000F00L
#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC3_MASK 0x0000F000L
#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC4_MASK 0x000F0000L
#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC5_MASK 0x00F00000L
#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC6_MASK 0x0F000000L
#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC7_MASK 0xF0000000L
//DAGB0_RD_CGTT_CLK_CTRL
#define DAGB0_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
#define DAGB0_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
#define DAGB0_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
#define DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
#define DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
#define DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
#define DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
#define DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
#define DAGB0_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
#define DAGB0_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
#define DAGB0_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
#define DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
#define DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
#define DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
#define DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
#define DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
//DAGB0_L1TLB_RD_CGTT_CLK_CTRL
#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
//DAGB0_ATCVM_RD_CGTT_CLK_CTRL
#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
//DAGB0_RD_ADDR_DAGB_MAX_BURST0
#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0
#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4
#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8
#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc
#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10
#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14
#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18
#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c
#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL
#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L
#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L
#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L
#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L
#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L
#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L
#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L
//DAGB0_RD_ADDR_DAGB_LAZY_TIMER0
#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0
#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4
#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8
#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc
#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10
#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14
#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18
#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c
#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL
#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L
#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L
#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L
#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L
#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L
#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L
#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L
//DAGB0_RD_ADDR_DAGB_MAX_BURST1
#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0
#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4
#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8
#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc
#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10
#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14
#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18
#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c
#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL
#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L
#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L
#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L
#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L
#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L
#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L
#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L
//DAGB0_RD_ADDR_DAGB_LAZY_TIMER1
#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0
#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4
#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8
#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc
#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10
#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14
#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18
#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c
#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL
#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L
#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L
#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L
#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L
#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L
#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L
#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L
//DAGB0_RD_ADDR_DAGB_MAX_BURST2
#define DAGB0_RD_ADDR_DAGB_MAX_BURST2__CLIENT16__SHIFT 0x0
#define DAGB0_RD_ADDR_DAGB_MAX_BURST2__CLIENT17__SHIFT 0x4
#define DAGB0_RD_ADDR_DAGB_MAX_BURST2__CLIENT18__SHIFT 0x8
#define DAGB0_RD_ADDR_DAGB_MAX_BURST2__CLIENT19__SHIFT 0xc
#define DAGB0_RD_ADDR_DAGB_MAX_BURST2__CLIENT20__SHIFT 0x10
#define DAGB0_RD_ADDR_DAGB_MAX_BURST2__CLIENT21__SHIFT 0x14
#define DAGB0_RD_ADDR_DAGB_MAX_BURST2__CLIENT22__SHIFT 0x18
#define DAGB0_RD_ADDR_DAGB_MAX_BURST2__CLIENT23__SHIFT 0x1c
#define DAGB0_RD_ADDR_DAGB_MAX_BURST2__CLIENT16_MASK 0x0000000FL
#define DAGB0_RD_ADDR_DAGB_MAX_BURST2__CLIENT17_MASK 0x000000F0L
#define DAGB0_RD_ADDR_DAGB_MAX_BURST2__CLIENT18_MASK 0x00000F00L
#define DAGB0_RD_ADDR_DAGB_MAX_BURST2__CLIENT19_MASK 0x0000F000L
#define DAGB0_RD_ADDR_DAGB_MAX_BURST2__CLIENT20_MASK 0x000F0000L
#define DAGB0_RD_ADDR_DAGB_MAX_BURST2__CLIENT21_MASK 0x00F00000L
#define DAGB0_RD_ADDR_DAGB_MAX_BURST2__CLIENT22_MASK 0x0F000000L
#define DAGB0_RD_ADDR_DAGB_MAX_BURST2__CLIENT23_MASK 0xF0000000L
//DAGB0_RD_ADDR_DAGB_LAZY_TIMER2
#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER2__CLIENT16__SHIFT 0x0
#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER2__CLIENT17__SHIFT 0x4
#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER2__CLIENT18__SHIFT 0x8
#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER2__CLIENT19__SHIFT 0xc
#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER2__CLIENT20__SHIFT 0x10
#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER2__CLIENT21__SHIFT 0x14
#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER2__CLIENT22__SHIFT 0x18
#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER2__CLIENT23__SHIFT 0x1c
#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER2__CLIENT16_MASK 0x0000000FL
#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER2__CLIENT17_MASK 0x000000F0L
#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER2__CLIENT18_MASK 0x00000F00L
#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER2__CLIENT19_MASK 0x0000F000L
#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER2__CLIENT20_MASK 0x000F0000L
#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER2__CLIENT21_MASK 0x00F00000L
#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER2__CLIENT22_MASK 0x0F000000L
#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER2__CLIENT23_MASK 0xF0000000L
//DAGB0_RD_VC0_CNTL
#define DAGB0_RD_VC0_CNTL__STOR_CREDIT__SHIFT 0x0
#define DAGB0_RD_VC0_CNTL__EA_CREDIT__SHIFT 0x5
#define DAGB0_RD_VC0_CNTL__MAX_BW_ENABLE__SHIFT 0xb
#define DAGB0_RD_VC0_CNTL__MAX_BW__SHIFT 0xc
#define DAGB0_RD_VC0_CNTL__MIN_BW_ENABLE__SHIFT 0x14
#define DAGB0_RD_VC0_CNTL__MIN_BW__SHIFT 0x15
#define DAGB0_RD_VC0_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
#define DAGB0_RD_VC0_CNTL__MAX_OSD__SHIFT 0x19
#define DAGB0_RD_VC0_CNTL__STOR_CREDIT_MASK 0x0000001FL
#define DAGB0_RD_VC0_CNTL__EA_CREDIT_MASK 0x000007E0L
#define DAGB0_RD_VC0_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
#define DAGB0_RD_VC0_CNTL__MAX_BW_MASK 0x000FF000L
#define DAGB0_RD_VC0_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
#define DAGB0_RD_VC0_CNTL__MIN_BW_MASK 0x00E00000L
#define DAGB0_RD_VC0_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
#define DAGB0_RD_VC0_CNTL__MAX_OSD_MASK 0xFE000000L
//DAGB0_RD_VC1_CNTL
#define DAGB0_RD_VC1_CNTL__STOR_CREDIT__SHIFT 0x0
#define DAGB0_RD_VC1_CNTL__EA_CREDIT__SHIFT 0x5
#define DAGB0_RD_VC1_CNTL__MAX_BW_ENABLE__SHIFT 0xb
#define DAGB0_RD_VC1_CNTL__MAX_BW__SHIFT 0xc
#define DAGB0_RD_VC1_CNTL__MIN_BW_ENABLE__SHIFT 0x14
#define DAGB0_RD_VC1_CNTL__MIN_BW__SHIFT 0x15
#define DAGB0_RD_VC1_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
#define DAGB0_RD_VC1_CNTL__MAX_OSD__SHIFT 0x19
#define DAGB0_RD_VC1_CNTL__STOR_CREDIT_MASK 0x0000001FL
#define DAGB0_RD_VC1_CNTL__EA_CREDIT_MASK 0x000007E0L
#define DAGB0_RD_VC1_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
#define DAGB0_RD_VC1_CNTL__MAX_BW_MASK 0x000FF000L
#define DAGB0_RD_VC1_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
#define DAGB0_RD_VC1_CNTL__MIN_BW_MASK 0x00E00000L
#define DAGB0_RD_VC1_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
#define DAGB0_RD_VC1_CNTL__MAX_OSD_MASK 0xFE000000L
//DAGB0_RD_VC2_CNTL
#define DAGB0_RD_VC2_CNTL__STOR_CREDIT__SHIFT 0x0
#define DAGB0_RD_VC2_CNTL__EA_CREDIT__SHIFT 0x5
#define DAGB0_RD_VC2_CNTL__MAX_BW_ENABLE__SHIFT 0xb
#define DAGB0_RD_VC2_CNTL__MAX_BW__SHIFT 0xc
#define DAGB0_RD_VC2_CNTL__MIN_BW_ENABLE__SHIFT 0x14
#define DAGB0_RD_VC2_CNTL__MIN_BW__SHIFT 0x15
#define DAGB0_RD_VC2_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
#define DAGB0_RD_VC2_CNTL__MAX_OSD__SHIFT 0x19
#define DAGB0_RD_VC2_CNTL__STOR_CREDIT_MASK 0x0000001FL
#define DAGB0_RD_VC2_CNTL__EA_CREDIT_MASK 0x000007E0L
#define DAGB0_RD_VC2_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
#define DAGB0_RD_VC2_CNTL__MAX_BW_MASK 0x000FF000L
#define DAGB0_RD_VC2_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
#define DAGB0_RD_VC2_CNTL__MIN_BW_MASK 0x00E00000L
#define DAGB0_RD_VC2_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
#define DAGB0_RD_VC2_CNTL__MAX_OSD_MASK 0xFE000000L
//DAGB0_RD_VC3_CNTL
#define DAGB0_RD_VC3_CNTL__STOR_CREDIT__SHIFT 0x0
#define DAGB0_RD_VC3_CNTL__EA_CREDIT__SHIFT 0x5
#define DAGB0_RD_VC3_CNTL__MAX_BW_ENABLE__SHIFT 0xb
#define DAGB0_RD_VC3_CNTL__MAX_BW__SHIFT 0xc
#define DAGB0_RD_VC3_CNTL__MIN_BW_ENABLE__SHIFT 0x14
#define DAGB0_RD_VC3_CNTL__MIN_BW__SHIFT 0x15
#define DAGB0_RD_VC3_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
#define DAGB0_RD_VC3_CNTL__MAX_OSD__SHIFT 0x19
#define DAGB0_RD_VC3_CNTL__STOR_CREDIT_MASK 0x0000001FL
#define DAGB0_RD_VC3_CNTL__EA_CREDIT_MASK 0x000007E0L
#define DAGB0_RD_VC3_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
#define DAGB0_RD_VC3_CNTL__MAX_BW_MASK 0x000FF000L
#define DAGB0_RD_VC3_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
#define DAGB0_RD_VC3_CNTL__MIN_BW_MASK 0x00E00000L
#define DAGB0_RD_VC3_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
#define DAGB0_RD_VC3_CNTL__MAX_OSD_MASK 0xFE000000L
//DAGB0_RD_VC4_CNTL
#define DAGB0_RD_VC4_CNTL__STOR_CREDIT__SHIFT 0x0
#define DAGB0_RD_VC4_CNTL__EA_CREDIT__SHIFT 0x5
#define DAGB0_RD_VC4_CNTL__MAX_BW_ENABLE__SHIFT 0xb
#define DAGB0_RD_VC4_CNTL__MAX_BW__SHIFT 0xc
#define DAGB0_RD_VC4_CNTL__MIN_BW_ENABLE__SHIFT 0x14
#define DAGB0_RD_VC4_CNTL__MIN_BW__SHIFT 0x15
#define DAGB0_RD_VC4_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
#define DAGB0_RD_VC4_CNTL__MAX_OSD__SHIFT 0x19
#define DAGB0_RD_VC4_CNTL__STOR_CREDIT_MASK 0x0000001FL
#define DAGB0_RD_VC4_CNTL__EA_CREDIT_MASK 0x000007E0L
#define DAGB0_RD_VC4_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
#define DAGB0_RD_VC4_CNTL__MAX_BW_MASK 0x000FF000L
#define DAGB0_RD_VC4_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
#define DAGB0_RD_VC4_CNTL__MIN_BW_MASK 0x00E00000L
#define DAGB0_RD_VC4_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
#define DAGB0_RD_VC4_CNTL__MAX_OSD_MASK 0xFE000000L
//DAGB0_RD_VC5_CNTL
#define DAGB0_RD_VC5_CNTL__STOR_CREDIT__SHIFT 0x0
#define DAGB0_RD_VC5_CNTL__EA_CREDIT__SHIFT 0x5
#define DAGB0_RD_VC5_CNTL__MAX_BW_ENABLE__SHIFT 0xb
#define DAGB0_RD_VC5_CNTL__MAX_BW__SHIFT 0xc
#define DAGB0_RD_VC5_CNTL__MIN_BW_ENABLE__SHIFT 0x14
#define DAGB0_RD_VC5_CNTL__MIN_BW__SHIFT 0x15
#define DAGB0_RD_VC5_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
#define DAGB0_RD_VC5_CNTL__MAX_OSD__SHIFT 0x19
#define DAGB0_RD_VC5_CNTL__STOR_CREDIT_MASK 0x0000001FL
#define DAGB0_RD_VC5_CNTL__EA_CREDIT_MASK 0x000007E0L
#define DAGB0_RD_VC5_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
#define DAGB0_RD_VC5_CNTL__MAX_BW_MASK 0x000FF000L
#define DAGB0_RD_VC5_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
#define DAGB0_RD_VC5_CNTL__MIN_BW_MASK 0x00E00000L
#define DAGB0_RD_VC5_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
#define DAGB0_RD_VC5_CNTL__MAX_OSD_MASK 0xFE000000L
//DAGB0_RD_VC6_CNTL
#define DAGB0_RD_VC6_CNTL__STOR_CREDIT__SHIFT 0x0
#define DAGB0_RD_VC6_CNTL__EA_CREDIT__SHIFT 0x5
#define DAGB0_RD_VC6_CNTL__MAX_BW_ENABLE__SHIFT 0xb
#define DAGB0_RD_VC6_CNTL__MAX_BW__SHIFT 0xc
#define DAGB0_RD_VC6_CNTL__MIN_BW_ENABLE__SHIFT 0x14
#define DAGB0_RD_VC6_CNTL__MIN_BW__SHIFT 0x15
#define DAGB0_RD_VC6_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
#define DAGB0_RD_VC6_CNTL__MAX_OSD__SHIFT 0x19
#define DAGB0_RD_VC6_CNTL__STOR_CREDIT_MASK 0x0000001FL
#define DAGB0_RD_VC6_CNTL__EA_CREDIT_MASK 0x000007E0L
#define DAGB0_RD_VC6_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
#define DAGB0_RD_VC6_CNTL__MAX_BW_MASK 0x000FF000L
#define DAGB0_RD_VC6_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
#define DAGB0_RD_VC6_CNTL__MIN_BW_MASK 0x00E00000L
#define DAGB0_RD_VC6_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
#define DAGB0_RD_VC6_CNTL__MAX_OSD_MASK 0xFE000000L
//DAGB0_RD_VC7_CNTL
#define DAGB0_RD_VC7_CNTL__STOR_CREDIT__SHIFT 0x0
#define DAGB0_RD_VC7_CNTL__EA_CREDIT__SHIFT 0x5
#define DAGB0_RD_VC7_CNTL__MAX_BW_ENABLE__SHIFT 0xb
#define DAGB0_RD_VC7_CNTL__MAX_BW__SHIFT 0xc
#define DAGB0_RD_VC7_CNTL__MIN_BW_ENABLE__SHIFT 0x14
#define DAGB0_RD_VC7_CNTL__MIN_BW__SHIFT 0x15
#define DAGB0_RD_VC7_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
#define DAGB0_RD_VC7_CNTL__MAX_OSD__SHIFT 0x19
#define DAGB0_RD_VC7_CNTL__STOR_CREDIT_MASK 0x0000001FL
#define DAGB0_RD_VC7_CNTL__EA_CREDIT_MASK 0x000007E0L
#define DAGB0_RD_VC7_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
#define DAGB0_RD_VC7_CNTL__MAX_BW_MASK 0x000FF000L
#define DAGB0_RD_VC7_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
#define DAGB0_RD_VC7_CNTL__MIN_BW_MASK 0x00E00000L
#define DAGB0_RD_VC7_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
#define DAGB0_RD_VC7_CNTL__MAX_OSD_MASK 0xFE000000L
//DAGB0_RD_CNTL_MISC
#define DAGB0_RD_CNTL_MISC__STOR_POOL_CREDIT__SHIFT 0x0
#define DAGB0_RD_CNTL_MISC__EA_POOL_CREDIT__SHIFT 0x6
#define DAGB0_RD_CNTL_MISC__IO_EA_CREDIT__SHIFT 0xd
#define DAGB0_RD_CNTL_MISC__STOR_CC_LEGACY_MODE__SHIFT 0x13
#define DAGB0_RD_CNTL_MISC__EA_CC_LEGACY_MODE__SHIFT 0x14
#define DAGB0_RD_CNTL_MISC__UTCL2_CID__SHIFT 0x15
#define DAGB0_RD_CNTL_MISC__HDP_CID__SHIFT 0x1a
#define DAGB0_RD_CNTL_MISC__STOR_POOL_CREDIT_MASK 0x0000003FL
#define DAGB0_RD_CNTL_MISC__EA_POOL_CREDIT_MASK 0x00001FC0L
#define DAGB0_RD_CNTL_MISC__IO_EA_CREDIT_MASK 0x0007E000L
#define DAGB0_RD_CNTL_MISC__STOR_CC_LEGACY_MODE_MASK 0x00080000L
#define DAGB0_RD_CNTL_MISC__EA_CC_LEGACY_MODE_MASK 0x00100000L
#define DAGB0_RD_CNTL_MISC__UTCL2_CID_MASK 0x03E00000L
#define DAGB0_RD_CNTL_MISC__HDP_CID_MASK 0x7C000000L
//DAGB0_RD_TLB_CREDIT
#define DAGB0_RD_TLB_CREDIT__TLB0__SHIFT 0x0
#define DAGB0_RD_TLB_CREDIT__TLB1__SHIFT 0x5
#define DAGB0_RD_TLB_CREDIT__TLB2__SHIFT 0xa
#define DAGB0_RD_TLB_CREDIT__TLB3__SHIFT 0xf
#define DAGB0_RD_TLB_CREDIT__TLB4__SHIFT 0x14
#define DAGB0_RD_TLB_CREDIT__TLB5__SHIFT 0x19
#define DAGB0_RD_TLB_CREDIT__TLB0_MASK 0x0000001FL
#define DAGB0_RD_TLB_CREDIT__TLB1_MASK 0x000003E0L
#define DAGB0_RD_TLB_CREDIT__TLB2_MASK 0x00007C00L
#define DAGB0_RD_TLB_CREDIT__TLB3_MASK 0x000F8000L
#define DAGB0_RD_TLB_CREDIT__TLB4_MASK 0x01F00000L
#define DAGB0_RD_TLB_CREDIT__TLB5_MASK 0x3E000000L
//DAGB0_RDCLI_ASK_PENDING
#define DAGB0_RDCLI_ASK_PENDING__BUSY__SHIFT 0x0
#define DAGB0_RDCLI_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL
//DAGB0_RDCLI_GO_PENDING
#define DAGB0_RDCLI_GO_PENDING__BUSY__SHIFT 0x0
#define DAGB0_RDCLI_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
//DAGB0_RDCLI_GBLSEND_PENDING
#define DAGB0_RDCLI_GBLSEND_PENDING__BUSY__SHIFT 0x0
#define DAGB0_RDCLI_GBLSEND_PENDING__BUSY_MASK 0xFFFFFFFFL
//DAGB0_RDCLI_TLB_PENDING
#define DAGB0_RDCLI_TLB_PENDING__BUSY__SHIFT 0x0
#define DAGB0_RDCLI_TLB_PENDING__BUSY_MASK 0xFFFFFFFFL
//DAGB0_RDCLI_OARB_PENDING
#define DAGB0_RDCLI_OARB_PENDING__BUSY__SHIFT 0x0
#define DAGB0_RDCLI_OARB_PENDING__BUSY_MASK 0xFFFFFFFFL
//DAGB0_RDCLI_OSD_PENDING
#define DAGB0_RDCLI_OSD_PENDING__BUSY__SHIFT 0x0
#define DAGB0_RDCLI_OSD_PENDING__BUSY_MASK 0xFFFFFFFFL
//DAGB0_WRCLI0
#define DAGB0_WRCLI0__VIRT_CHAN__SHIFT 0x0
#define DAGB0_WRCLI0__CHECK_TLB_CREDIT__SHIFT 0x3
#define DAGB0_WRCLI0__URG_HIGH__SHIFT 0x4
#define DAGB0_WRCLI0__URG_LOW__SHIFT 0x8
#define DAGB0_WRCLI0__MAX_BW_ENABLE__SHIFT 0xc
#define DAGB0_WRCLI0__MAX_BW__SHIFT 0xd
#define DAGB0_WRCLI0__MIN_BW_ENABLE__SHIFT 0x15
#define DAGB0_WRCLI0__MIN_BW__SHIFT 0x16
#define DAGB0_WRCLI0__OSD_LIMITER_ENABLE__SHIFT 0x19
#define DAGB0_WRCLI0__MAX_OSD__SHIFT 0x1a
#define DAGB0_WRCLI0__VIRT_CHAN_MASK 0x00000007L
#define DAGB0_WRCLI0__CHECK_TLB_CREDIT_MASK 0x00000008L
#define DAGB0_WRCLI0__URG_HIGH_MASK 0x000000F0L
#define DAGB0_WRCLI0__URG_LOW_MASK 0x00000F00L
#define DAGB0_WRCLI0__MAX_BW_ENABLE_MASK 0x00001000L
#define DAGB0_WRCLI0__MAX_BW_MASK 0x001FE000L
#define DAGB0_WRCLI0__MIN_BW_ENABLE_MASK 0x00200000L
#define DAGB0_WRCLI0__MIN_BW_MASK 0x01C00000L
#define DAGB0_WRCLI0__OSD_LIMITER_ENABLE_MASK 0x02000000L
#define DAGB0_WRCLI0__MAX_OSD_MASK 0xFC000000L
//DAGB0_WRCLI1
#define DAGB0_WRCLI1__VIRT_CHAN__SHIFT 0x0
#define DAGB0_WRCLI1__CHECK_TLB_CREDIT__SHIFT 0x3
#define DAGB0_WRCLI1__URG_HIGH__SHIFT 0x4
#define DAGB0_WRCLI1__URG_LOW__SHIFT 0x8
#define DAGB0_WRCLI1__MAX_BW_ENABLE__SHIFT 0xc
#define DAGB0_WRCLI1__MAX_BW__SHIFT 0xd
#define DAGB0_WRCLI1__MIN_BW_ENABLE__SHIFT 0x15
#define DAGB0_WRCLI1__MIN_BW__SHIFT 0x16
#define DAGB0_WRCLI1__OSD_LIMITER_ENABLE__SHIFT 0x19
#define DAGB0_WRCLI1__MAX_OSD__SHIFT 0x1a
#define DAGB0_WRCLI1__VIRT_CHAN_MASK 0x00000007L
#define DAGB0_WRCLI1__CHECK_TLB_CREDIT_MASK 0x00000008L
#define DAGB0_WRCLI1__URG_HIGH_MASK 0x000000F0L
#define DAGB0_WRCLI1__URG_LOW_MASK 0x00000F00L
#define DAGB0_WRCLI1__MAX_BW_ENABLE_MASK 0x00001000L
#define DAGB0_WRCLI1__MAX_BW_MASK 0x001FE000L
#define DAGB0_WRCLI1__MIN_BW_ENABLE_MASK 0x00200000L
#define DAGB0_WRCLI1__MIN_BW_MASK 0x01C00000L
#define DAGB0_WRCLI1__OSD_LIMITER_ENABLE_MASK 0x02000000L
#define DAGB0_WRCLI1__MAX_OSD_MASK 0xFC000000L
//DAGB0_WRCLI2
#define DAGB0_WRCLI2__VIRT_CHAN__SHIFT 0x0
#define DAGB0_WRCLI2__CHECK_TLB_CREDIT__SHIFT 0x3
#define DAGB0_WRCLI2__URG_HIGH__SHIFT 0x4
#define DAGB0_WRCLI2__URG_LOW__SHIFT 0x8
#define DAGB0_WRCLI2__MAX_BW_ENABLE__SHIFT 0xc
#define DAGB0_WRCLI2__MAX_BW__SHIFT 0xd
#define DAGB0_WRCLI2__MIN_BW_ENABLE__SHIFT 0x15
#define DAGB0_WRCLI2__MIN_BW__SHIFT 0x16
#define DAGB0_WRCLI2__OSD_LIMITER_ENABLE__SHIFT 0x19
#define DAGB0_WRCLI2__MAX_OSD__SHIFT 0x1a
#define DAGB0_WRCLI2__VIRT_CHAN_MASK 0x00000007L
#define DAGB0_WRCLI2__CHECK_TLB_CREDIT_MASK 0x00000008L
#define DAGB0_WRCLI2__URG_HIGH_MASK 0x000000F0L
#define DAGB0_WRCLI2__URG_LOW_MASK 0x00000F00L
#define DAGB0_WRCLI2__MAX_BW_ENABLE_MASK 0x00001000L
#define DAGB0_WRCLI2__MAX_BW_MASK 0x001FE000L
#define DAGB0_WRCLI2__MIN_BW_ENABLE_MASK 0x00200000L
#define DAGB0_WRCLI2__MIN_BW_MASK 0x01C00000L
#define DAGB0_WRCLI2__OSD_LIMITER_ENABLE_MASK 0x02000000L
#define DAGB0_WRCLI2__MAX_OSD_MASK 0xFC000000L
//DAGB0_WRCLI3
#define DAGB0_WRCLI3__VIRT_CHAN__SHIFT 0x0
#define DAGB0_WRCLI3__CHECK_TLB_CREDIT__SHIFT 0x3
#define DAGB0_WRCLI3__URG_HIGH__SHIFT 0x4
#define DAGB0_WRCLI3__URG_LOW__SHIFT 0x8
#define DAGB0_WRCLI3__MAX_BW_ENABLE__SHIFT 0xc
#define DAGB0_WRCLI3__MAX_BW__SHIFT 0xd
#define DAGB0_WRCLI3__MIN_BW_ENABLE__SHIFT 0x15
#define DAGB0_WRCLI3__MIN_BW__SHIFT 0x16
#define DAGB0_WRCLI3__OSD_LIMITER_ENABLE__SHIFT 0x19
#define DAGB0_WRCLI3__MAX_OSD__SHIFT 0x1a
#define DAGB0_WRCLI3__VIRT_CHAN_MASK 0x00000007L
#define DAGB0_WRCLI3__CHECK_TLB_CREDIT_MASK 0x00000008L
#define DAGB0_WRCLI3__URG_HIGH_MASK 0x000000F0L
#define DAGB0_WRCLI3__URG_LOW_MASK 0x00000F00L
#define DAGB0_WRCLI3__MAX_BW_ENABLE_MASK 0x00001000L
#define DAGB0_WRCLI3__MAX_BW_MASK 0x001FE000L
#define DAGB0_WRCLI3__MIN_BW_ENABLE_MASK 0x00200000L
#define DAGB0_WRCLI3__MIN_BW_MASK 0x01C00000L
#define DAGB0_WRCLI3__OSD_LIMITER_ENABLE_MASK 0x02000000L
#define DAGB0_WRCLI3__MAX_OSD_MASK 0xFC000000L
//DAGB0_WRCLI4
#define DAGB0_WRCLI4__VIRT_CHAN__SHIFT 0x0
#define DAGB0_WRCLI4__CHECK_TLB_CREDIT__SHIFT 0x3
#define DAGB0_WRCLI4__URG_HIGH__SHIFT 0x4
#define DAGB0_WRCLI4__URG_LOW__SHIFT 0x8
#define DAGB0_WRCLI4__MAX_BW_ENABLE__SHIFT 0xc
#define DAGB0_WRCLI4__MAX_BW__SHIFT 0xd
#define DAGB0_WRCLI4__MIN_BW_ENABLE__SHIFT 0x15
#define DAGB0_WRCLI4__MIN_BW__SHIFT 0x16
#define DAGB0_WRCLI4__OSD_LIMITER_ENABLE__SHIFT 0x19
#define DAGB0_WRCLI4__MAX_OSD__SHIFT 0x1a
#define DAGB0_WRCLI4__VIRT_CHAN_MASK 0x00000007L
#define DAGB0_WRCLI4__CHECK_TLB_CREDIT_MASK 0x00000008L
#define DAGB0_WRCLI4__URG_HIGH_MASK 0x000000F0L
#define DAGB0_WRCLI4__URG_LOW_MASK 0x00000F00L
#define DAGB0_WRCLI4__MAX_BW_ENABLE_MASK 0x00001000L
#define DAGB0_WRCLI4__MAX_BW_MASK 0x001FE000L
#define DAGB0_WRCLI4__MIN_BW_ENABLE_MASK 0x00200000L
#define DAGB0_WRCLI4__MIN_BW_MASK 0x01C00000L
#define DAGB0_WRCLI4__OSD_LIMITER_ENABLE_MASK 0x02000000L
#define DAGB0_WRCLI4__MAX_OSD_MASK 0xFC000000L
//DAGB0_WRCLI5
#define DAGB0_WRCLI5__VIRT_CHAN__SHIFT 0x0
#define DAGB0_WRCLI5__CHECK_TLB_CREDIT__SHIFT 0x3
#define DAGB0_WRCLI5__URG_HIGH__SHIFT 0x4
#define DAGB0_WRCLI5__URG_LOW__SHIFT 0x8
#define DAGB0_WRCLI5__MAX_BW_ENABLE__SHIFT 0xc
#define DAGB0_WRCLI5__MAX_BW__SHIFT 0xd
#define DAGB0_WRCLI5__MIN_BW_ENABLE__SHIFT 0x15
#define DAGB0_WRCLI5__MIN_BW__SHIFT 0x16
#define DAGB0_WRCLI5__OSD_LIMITER_ENABLE__SHIFT 0x19
#define DAGB0_WRCLI5__MAX_OSD__SHIFT 0x1a
#define DAGB0_WRCLI5__VIRT_CHAN_MASK 0x00000007L
#define DAGB0_WRCLI5__CHECK_TLB_CREDIT_MASK 0x00000008L
#define DAGB0_WRCLI5__URG_HIGH_MASK 0x000000F0L
#define DAGB0_WRCLI5__URG_LOW_MASK 0x00000F00L
#define DAGB0_WRCLI5__MAX_BW_ENABLE_MASK 0x00001000L
#define DAGB0_WRCLI5__MAX_BW_MASK 0x001FE000L
#define DAGB0_WRCLI5__MIN_BW_ENABLE_MASK 0x00200000L
#define DAGB0_WRCLI5__MIN_BW_MASK 0x01C00000L
#define DAGB0_WRCLI5__OSD_LIMITER_ENABLE_MASK 0x02000000L
#define DAGB0_WRCLI5__MAX_OSD_MASK 0xFC000000L
//DAGB0_WRCLI6
#define DAGB0_WRCLI6__VIRT_CHAN__SHIFT 0x0
#define DAGB0_WRCLI6__CHECK_TLB_CREDIT__SHIFT 0x3
#define DAGB0_WRCLI6__URG_HIGH__SHIFT 0x4
#define DAGB0_WRCLI6__URG_LOW__SHIFT 0x8
#define DAGB0_WRCLI6__MAX_BW_ENABLE__SHIFT 0xc
#define DAGB0_WRCLI6__MAX_BW__SHIFT 0xd
#define DAGB0_WRCLI6__MIN_BW_ENABLE__SHIFT 0x15
#define DAGB0_WRCLI6__MIN_BW__SHIFT 0x16
#define DAGB0_WRCLI6__OSD_LIMITER_ENABLE__SHIFT 0x19
#define DAGB0_WRCLI6__MAX_OSD__SHIFT 0x1a
#define DAGB0_WRCLI6__VIRT_CHAN_MASK 0x00000007L
#define DAGB0_WRCLI6__CHECK_TLB_CREDIT_MASK 0x00000008L
#define DAGB0_WRCLI6__URG_HIGH_MASK 0x000000F0L
#define DAGB0_WRCLI6__URG_LOW_MASK 0x00000F00L
#define DAGB0_WRCLI6__MAX_BW_ENABLE_MASK 0x00001000L
#define DAGB0_WRCLI6__MAX_BW_MASK 0x001FE000L
#define DAGB0_WRCLI6__MIN_BW_ENABLE_MASK 0x00200000L
#define DAGB0_WRCLI6__MIN_BW_MASK 0x01C00000L
#define DAGB0_WRCLI6__OSD_LIMITER_ENABLE_MASK 0x02000000L
#define DAGB0_WRCLI6__MAX_OSD_MASK 0xFC000000L
//DAGB0_WRCLI7
#define DAGB0_WRCLI7__VIRT_CHAN__SHIFT 0x0
#define DAGB0_WRCLI7__CHECK_TLB_CREDIT__SHIFT 0x3
#define DAGB0_WRCLI7__URG_HIGH__SHIFT 0x4
#define DAGB0_WRCLI7__URG_LOW__SHIFT 0x8
#define DAGB0_WRCLI7__MAX_BW_ENABLE__SHIFT 0xc
#define DAGB0_WRCLI7__MAX_BW__SHIFT 0xd
#define DAGB0_WRCLI7__MIN_BW_ENABLE__SHIFT 0x15
#define DAGB0_WRCLI7__MIN_BW__SHIFT 0x16
#define DAGB0_WRCLI7__OSD_LIMITER_ENABLE__SHIFT 0x19
#define DAGB0_WRCLI7__MAX_OSD__SHIFT 0x1a
#define DAGB0_WRCLI7__VIRT_CHAN_MASK 0x00000007L
#define DAGB0_WRCLI7__CHECK_TLB_CREDIT_MASK 0x00000008L
#define DAGB0_WRCLI7__URG_HIGH_MASK 0x000000F0L
#define DAGB0_WRCLI7__URG_LOW_MASK 0x00000F00L
#define DAGB0_WRCLI7__MAX_BW_ENABLE_MASK 0x00001000L
#define DAGB0_WRCLI7__MAX_BW_MASK 0x001FE000L
#define DAGB0_WRCLI7__MIN_BW_ENABLE_MASK 0x00200000L
#define DAGB0_WRCLI7__MIN_BW_MASK 0x01C00000L
#define DAGB0_WRCLI7__OSD_LIMITER_ENABLE_MASK 0x02000000L
#define DAGB0_WRCLI7__MAX_OSD_MASK 0xFC000000L
//DAGB0_WRCLI8
#define DAGB0_WRCLI8__VIRT_CHAN__SHIFT 0x0
#define DAGB0_WRCLI8__CHECK_TLB_CREDIT__SHIFT 0x3
#define DAGB0_WRCLI8__URG_HIGH__SHIFT 0x4
#define DAGB0_WRCLI8__URG_LOW__SHIFT 0x8
#define DAGB0_WRCLI8__MAX_BW_ENABLE__SHIFT 0xc
#define DAGB0_WRCLI8__MAX_BW__SHIFT 0xd
#define DAGB0_WRCLI8__MIN_BW_ENABLE__SHIFT 0x15
#define DAGB0_WRCLI8__MIN_BW__SHIFT 0x16
#define DAGB0_WRCLI8__OSD_LIMITER_ENABLE__SHIFT 0x19
#define DAGB0_WRCLI8__MAX_OSD__SHIFT 0x1a
#define DAGB0_WRCLI8__VIRT_CHAN_MASK 0x00000007L
#define DAGB0_WRCLI8__CHECK_TLB_CREDIT_MASK 0x00000008L
#define DAGB0_WRCLI8__URG_HIGH_MASK 0x000000F0L
#define DAGB0_WRCLI8__URG_LOW_MASK 0x00000F00L
#define DAGB0_WRCLI8__MAX_BW_ENABLE_MASK 0x00001000L
#define DAGB0_WRCLI8__MAX_BW_MASK 0x001FE000L
#define DAGB0_WRCLI8__MIN_BW_ENABLE_MASK 0x00200000L
#define DAGB0_WRCLI8__MIN_BW_MASK 0x01C00000L
#define DAGB0_WRCLI8__OSD_LIMITER_ENABLE_MASK 0x02000000L
#define DAGB0_WRCLI8__MAX_OSD_MASK 0xFC000000L
//DAGB0_WRCLI9
#define DAGB0_WRCLI9__VIRT_CHAN__SHIFT 0x0
#define DAGB0_WRCLI9__CHECK_TLB_CREDIT__SHIFT 0x3
#define DAGB0_WRCLI9__URG_HIGH__SHIFT 0x4
#define DAGB0_WRCLI9__URG_LOW__SHIFT 0x8
#define DAGB0_WRCLI9__MAX_BW_ENABLE__SHIFT 0xc
#define DAGB0_WRCLI9__MAX_BW__SHIFT 0xd
#define DAGB0_WRCLI9__MIN_BW_ENABLE__SHIFT 0x15
#define DAGB0_WRCLI9__MIN_BW__SHIFT 0x16
#define DAGB0_WRCLI9__OSD_LIMITER_ENABLE__SHIFT 0x19
#define DAGB0_WRCLI9__MAX_OSD__SHIFT 0x1a
#define DAGB0_WRCLI9__VIRT_CHAN_MASK 0x00000007L
#define DAGB0_WRCLI9__CHECK_TLB_CREDIT_MASK 0x00000008L
#define DAGB0_WRCLI9__URG_HIGH_MASK 0x000000F0L
#define DAGB0_WRCLI9__URG_LOW_MASK 0x00000F00L
#define DAGB0_WRCLI9__MAX_BW_ENABLE_MASK 0x00001000L
#define DAGB0_WRCLI9__MAX_BW_MASK 0x001FE000L
#define DAGB0_WRCLI9__MIN_BW_ENABLE_MASK 0x00200000L
#define DAGB0_WRCLI9__MIN_BW_MASK 0x01C00000L
#define DAGB0_WRCLI9__OSD_LIMITER_ENABLE_MASK 0x02000000L
#define DAGB0_WRCLI9__MAX_OSD_MASK 0xFC000000L
//DAGB0_WRCLI10
#define DAGB0_WRCLI10__VIRT_CHAN__SHIFT 0x0
#define DAGB0_WRCLI10__CHECK_TLB_CREDIT__SHIFT 0x3
#define DAGB0_WRCLI10__URG_HIGH__SHIFT 0x4
#define DAGB0_WRCLI10__URG_LOW__SHIFT 0x8
#define DAGB0_WRCLI10__MAX_BW_ENABLE__SHIFT 0xc
#define DAGB0_WRCLI10__MAX_BW__SHIFT 0xd
#define DAGB0_WRCLI10__MIN_BW_ENABLE__SHIFT 0x15
#define DAGB0_WRCLI10__MIN_BW__SHIFT 0x16
#define DAGB0_WRCLI10__OSD_LIMITER_ENABLE__SHIFT 0x19
#define DAGB0_WRCLI10__MAX_OSD__SHIFT 0x1a
#define DAGB0_WRCLI10__VIRT_CHAN_MASK 0x00000007L
#define DAGB0_WRCLI10__CHECK_TLB_CREDIT_MASK 0x00000008L
#define DAGB0_WRCLI10__URG_HIGH_MASK 0x000000F0L
#define DAGB0_WRCLI10__URG_LOW_MASK 0x00000F00L
#define DAGB0_WRCLI10__MAX_BW_ENABLE_MASK 0x00001000L
#define DAGB0_WRCLI10__MAX_BW_MASK 0x001FE000L
#define DAGB0_WRCLI10__MIN_BW_ENABLE_MASK 0x00200000L
#define DAGB0_WRCLI10__MIN_BW_MASK 0x01C00000L
#define DAGB0_WRCLI10__OSD_LIMITER_ENABLE_MASK 0x02000000L
#define DAGB0_WRCLI10__MAX_OSD_MASK 0xFC000000L
//DAGB0_WRCLI11
#define DAGB0_WRCLI11__VIRT_CHAN__SHIFT 0x0
#define DAGB0_WRCLI11__CHECK_TLB_CREDIT__SHIFT 0x3
#define DAGB0_WRCLI11__URG_HIGH__SHIFT 0x4
#define DAGB0_WRCLI11__URG_LOW__SHIFT 0x8
#define DAGB0_WRCLI11__MAX_BW_ENABLE__SHIFT 0xc
#define DAGB0_WRCLI11__MAX_BW__SHIFT 0xd
#define DAGB0_WRCLI11__MIN_BW_ENABLE__SHIFT 0x15
#define DAGB0_WRCLI11__MIN_BW__SHIFT 0x16
#define DAGB0_WRCLI11__OSD_LIMITER_ENABLE__SHIFT 0x19
#define DAGB0_WRCLI11__MAX_OSD__SHIFT 0x1a
#define DAGB0_WRCLI11__VIRT_CHAN_MASK 0x00000007L
#define DAGB0_WRCLI11__CHECK_TLB_CREDIT_MASK 0x00000008L
#define DAGB0_WRCLI11__URG_HIGH_MASK 0x000000F0L
#define DAGB0_WRCLI11__URG_LOW_MASK 0x00000F00L
#define DAGB0_WRCLI11__MAX_BW_ENABLE_MASK 0x00001000L
#define DAGB0_WRCLI11__MAX_BW_MASK 0x001FE000L
#define DAGB0_WRCLI11__MIN_BW_ENABLE_MASK 0x00200000L
#define DAGB0_WRCLI11__MIN_BW_MASK 0x01C00000L
#define DAGB0_WRCLI11__OSD_LIMITER_ENABLE_MASK 0x02000000L
#define DAGB0_WRCLI11__MAX_OSD_MASK 0xFC000000L
//DAGB0_WRCLI12
#define DAGB0_WRCLI12__VIRT_CHAN__SHIFT 0x0
#define DAGB0_WRCLI12__CHECK_TLB_CREDIT__SHIFT 0x3
#define DAGB0_WRCLI12__URG_HIGH__SHIFT 0x4
#define DAGB0_WRCLI12__URG_LOW__SHIFT 0x8
#define DAGB0_WRCLI12__MAX_BW_ENABLE__SHIFT 0xc
#define DAGB0_WRCLI12__MAX_BW__SHIFT 0xd
#define DAGB0_WRCLI12__MIN_BW_ENABLE__SHIFT 0x15
#define DAGB0_WRCLI12__MIN_BW__SHIFT 0x16
#define DAGB0_WRCLI12__OSD_LIMITER_ENABLE__SHIFT 0x19
#define DAGB0_WRCLI12__MAX_OSD__SHIFT 0x1a
#define DAGB0_WRCLI12__VIRT_CHAN_MASK 0x00000007L
#define DAGB0_WRCLI12__CHECK_TLB_CREDIT_MASK 0x00000008L
#define DAGB0_WRCLI12__URG_HIGH_MASK 0x000000F0L
#define DAGB0_WRCLI12__URG_LOW_MASK 0x00000F00L
#define DAGB0_WRCLI12__MAX_BW_ENABLE_MASK 0x00001000L
#define DAGB0_WRCLI12__MAX_BW_MASK 0x001FE000L
#define DAGB0_WRCLI12__MIN_BW_ENABLE_MASK 0x00200000L
#define DAGB0_WRCLI12__MIN_BW_MASK 0x01C00000L
#define DAGB0_WRCLI12__OSD_LIMITER_ENABLE_MASK 0x02000000L
#define DAGB0_WRCLI12__MAX_OSD_MASK 0xFC000000L
//DAGB0_WRCLI13
#define DAGB0_WRCLI13__VIRT_CHAN__SHIFT 0x0
#define DAGB0_WRCLI13__CHECK_TLB_CREDIT__SHIFT 0x3
#define DAGB0_WRCLI13__URG_HIGH__SHIFT 0x4
#define DAGB0_WRCLI13__URG_LOW__SHIFT 0x8
#define DAGB0_WRCLI13__MAX_BW_ENABLE__SHIFT 0xc
#define DAGB0_WRCLI13__MAX_BW__SHIFT 0xd
#define DAGB0_WRCLI13__MIN_BW_ENABLE__SHIFT 0x15
#define DAGB0_WRCLI13__MIN_BW__SHIFT 0x16
#define DAGB0_WRCLI13__OSD_LIMITER_ENABLE__SHIFT 0x19
#define DAGB0_WRCLI13__MAX_OSD__SHIFT 0x1a
#define DAGB0_WRCLI13__VIRT_CHAN_MASK 0x00000007L
#define DAGB0_WRCLI13__CHECK_TLB_CREDIT_MASK 0x00000008L
#define DAGB0_WRCLI13__URG_HIGH_MASK 0x000000F0L
#define DAGB0_WRCLI13__URG_LOW_MASK 0x00000F00L
#define DAGB0_WRCLI13__MAX_BW_ENABLE_MASK 0x00001000L
#define DAGB0_WRCLI13__MAX_BW_MASK 0x001FE000L
#define DAGB0_WRCLI13__MIN_BW_ENABLE_MASK 0x00200000L
#define DAGB0_WRCLI13__MIN_BW_MASK 0x01C00000L
#define DAGB0_WRCLI13__OSD_LIMITER_ENABLE_MASK 0x02000000L
#define DAGB0_WRCLI13__MAX_OSD_MASK 0xFC000000L
//DAGB0_WRCLI14
#define DAGB0_WRCLI14__VIRT_CHAN__SHIFT 0x0
#define DAGB0_WRCLI14__CHECK_TLB_CREDIT__SHIFT 0x3
#define DAGB0_WRCLI14__URG_HIGH__SHIFT 0x4
#define DAGB0_WRCLI14__URG_LOW__SHIFT 0x8
#define DAGB0_WRCLI14__MAX_BW_ENABLE__SHIFT 0xc
#define DAGB0_WRCLI14__MAX_BW__SHIFT 0xd
#define DAGB0_WRCLI14__MIN_BW_ENABLE__SHIFT 0x15
#define DAGB0_WRCLI14__MIN_BW__SHIFT 0x16
#define DAGB0_WRCLI14__OSD_LIMITER_ENABLE__SHIFT 0x19
#define DAGB0_WRCLI14__MAX_OSD__SHIFT 0x1a
#define DAGB0_WRCLI14__VIRT_CHAN_MASK 0x00000007L
#define DAGB0_WRCLI14__CHECK_TLB_CREDIT_MASK 0x00000008L
#define DAGB0_WRCLI14__URG_HIGH_MASK 0x000000F0L
#define DAGB0_WRCLI14__URG_LOW_MASK 0x00000F00L
#define DAGB0_WRCLI14__MAX_BW_ENABLE_MASK 0x00001000L
#define DAGB0_WRCLI14__MAX_BW_MASK 0x001FE000L
#define DAGB0_WRCLI14__MIN_BW_ENABLE_MASK 0x00200000L
#define DAGB0_WRCLI14__MIN_BW_MASK 0x01C00000L
#define DAGB0_WRCLI14__OSD_LIMITER_ENABLE_MASK 0x02000000L
#define DAGB0_WRCLI14__MAX_OSD_MASK 0xFC000000L
//DAGB0_WRCLI15
#define DAGB0_WRCLI15__VIRT_CHAN__SHIFT 0x0
#define DAGB0_WRCLI15__CHECK_TLB_CREDIT__SHIFT 0x3
#define DAGB0_WRCLI15__URG_HIGH__SHIFT 0x4
#define DAGB0_WRCLI15__URG_LOW__SHIFT 0x8
#define DAGB0_WRCLI15__MAX_BW_ENABLE__SHIFT 0xc
#define DAGB0_WRCLI15__MAX_BW__SHIFT 0xd
#define DAGB0_WRCLI15__MIN_BW_ENABLE__SHIFT 0x15
#define DAGB0_WRCLI15__MIN_BW__SHIFT 0x16
#define DAGB0_WRCLI15__OSD_LIMITER_ENABLE__SHIFT 0x19
#define DAGB0_WRCLI15__MAX_OSD__SHIFT 0x1a
#define DAGB0_WRCLI15__VIRT_CHAN_MASK 0x00000007L
#define DAGB0_WRCLI15__CHECK_TLB_CREDIT_MASK 0x00000008L
#define DAGB0_WRCLI15__URG_HIGH_MASK 0x000000F0L
#define DAGB0_WRCLI15__URG_LOW_MASK 0x00000F00L
#define DAGB0_WRCLI15__MAX_BW_ENABLE_MASK 0x00001000L
#define DAGB0_WRCLI15__MAX_BW_MASK 0x001FE000L
#define DAGB0_WRCLI15__MIN_BW_ENABLE_MASK 0x00200000L
#define DAGB0_WRCLI15__MIN_BW_MASK 0x01C00000L
#define DAGB0_WRCLI15__OSD_LIMITER_ENABLE_MASK 0x02000000L
#define DAGB0_WRCLI15__MAX_OSD_MASK 0xFC000000L
//DAGB0_WRCLI16
#define DAGB0_WRCLI16__VIRT_CHAN__SHIFT 0x0
#define DAGB0_WRCLI16__CHECK_TLB_CREDIT__SHIFT 0x3
#define DAGB0_WRCLI16__URG_HIGH__SHIFT 0x4
#define DAGB0_WRCLI16__URG_LOW__SHIFT 0x8
#define DAGB0_WRCLI16__MAX_BW_ENABLE__SHIFT 0xc
#define DAGB0_WRCLI16__MAX_BW__SHIFT 0xd
#define DAGB0_WRCLI16__MIN_BW_ENABLE__SHIFT 0x15
#define DAGB0_WRCLI16__MIN_BW__SHIFT 0x16
#define DAGB0_WRCLI16__OSD_LIMITER_ENABLE__SHIFT 0x19
#define DAGB0_WRCLI16__MAX_OSD__SHIFT 0x1a
#define DAGB0_WRCLI16__VIRT_CHAN_MASK 0x00000007L
#define DAGB0_WRCLI16__CHECK_TLB_CREDIT_MASK 0x00000008L
#define DAGB0_WRCLI16__URG_HIGH_MASK 0x000000F0L
#define DAGB0_WRCLI16__URG_LOW_MASK 0x00000F00L
#define DAGB0_WRCLI16__MAX_BW_ENABLE_MASK 0x00001000L
#define DAGB0_WRCLI16__MAX_BW_MASK 0x001FE000L
#define DAGB0_WRCLI16__MIN_BW_ENABLE_MASK 0x00200000L
#define DAGB0_WRCLI16__MIN_BW_MASK 0x01C00000L
#define DAGB0_WRCLI16__OSD_LIMITER_ENABLE_MASK 0x02000000L
#define DAGB0_WRCLI16__MAX_OSD_MASK 0xFC000000L
//DAGB0_WRCLI17
#define DAGB0_WRCLI17__VIRT_CHAN__SHIFT 0x0
#define DAGB0_WRCLI17__CHECK_TLB_CREDIT__SHIFT 0x3
#define DAGB0_WRCLI17__URG_HIGH__SHIFT 0x4
#define DAGB0_WRCLI17__URG_LOW__SHIFT 0x8
#define DAGB0_WRCLI17__MAX_BW_ENABLE__SHIFT 0xc
#define DAGB0_WRCLI17__MAX_BW__SHIFT 0xd
#define DAGB0_WRCLI17__MIN_BW_ENABLE__SHIFT 0x15
#define DAGB0_WRCLI17__MIN_BW__SHIFT 0x16
#define DAGB0_WRCLI17__OSD_LIMITER_ENABLE__SHIFT 0x19
#define DAGB0_WRCLI17__MAX_OSD__SHIFT 0x1a
#define DAGB0_WRCLI17__VIRT_CHAN_MASK 0x00000007L
#define DAGB0_WRCLI17__CHECK_TLB_CREDIT_MASK 0x00000008L
#define DAGB0_WRCLI17__URG_HIGH_MASK 0x000000F0L
#define DAGB0_WRCLI17__URG_LOW_MASK 0x00000F00L
#define DAGB0_WRCLI17__MAX_BW_ENABLE_MASK 0x00001000L
#define DAGB0_WRCLI17__MAX_BW_MASK 0x001FE000L
#define DAGB0_WRCLI17__MIN_BW_ENABLE_MASK 0x00200000L
#define DAGB0_WRCLI17__MIN_BW_MASK 0x01C00000L
#define DAGB0_WRCLI17__OSD_LIMITER_ENABLE_MASK 0x02000000L
#define DAGB0_WRCLI17__MAX_OSD_MASK 0xFC000000L
//DAGB0_WRCLI18
#define DAGB0_WRCLI18__VIRT_CHAN__SHIFT 0x0
#define DAGB0_WRCLI18__CHECK_TLB_CREDIT__SHIFT 0x3
#define DAGB0_WRCLI18__URG_HIGH__SHIFT 0x4
#define DAGB0_WRCLI18__URG_LOW__SHIFT 0x8
#define DAGB0_WRCLI18__MAX_BW_ENABLE__SHIFT 0xc
#define DAGB0_WRCLI18__MAX_BW__SHIFT 0xd
#define DAGB0_WRCLI18__MIN_BW_ENABLE__SHIFT 0x15
#define DAGB0_WRCLI18__MIN_BW__SHIFT 0x16
#define DAGB0_WRCLI18__OSD_LIMITER_ENABLE__SHIFT 0x19
#define DAGB0_WRCLI18__MAX_OSD__SHIFT 0x1a
#define DAGB0_WRCLI18__VIRT_CHAN_MASK 0x00000007L
#define DAGB0_WRCLI18__CHECK_TLB_CREDIT_MASK 0x00000008L
#define DAGB0_WRCLI18__URG_HIGH_MASK 0x000000F0L
#define DAGB0_WRCLI18__URG_LOW_MASK 0x00000F00L
#define DAGB0_WRCLI18__MAX_BW_ENABLE_MASK 0x00001000L
#define DAGB0_WRCLI18__MAX_BW_MASK 0x001FE000L
#define DAGB0_WRCLI18__MIN_BW_ENABLE_MASK 0x00200000L
#define DAGB0_WRCLI18__MIN_BW_MASK 0x01C00000L
#define DAGB0_WRCLI18__OSD_LIMITER_ENABLE_MASK 0x02000000L
#define DAGB0_WRCLI18__MAX_OSD_MASK 0xFC000000L
//DAGB0_WR_CNTL
#define DAGB0_WR_CNTL__SCLK_FREQ__SHIFT 0x0
#define DAGB0_WR_CNTL__CLI_MAX_BW_WINDOW__SHIFT 0x4
#define DAGB0_WR_CNTL__VC_MAX_BW_WINDOW__SHIFT 0xa
#define DAGB0_WR_CNTL__IO_LEVEL_OVERRIDE_ENABLE__SHIFT 0x10
#define DAGB0_WR_CNTL__IO_LEVEL__SHIFT 0x11
#define DAGB0_WR_CNTL__IO_LEVEL_COMPLY_VC__SHIFT 0x14
#define DAGB0_WR_CNTL__SHARE_VC_NUM__SHIFT 0x17
#define DAGB0_WR_CNTL__SCLK_FREQ_MASK 0x0000000FL
#define DAGB0_WR_CNTL__CLI_MAX_BW_WINDOW_MASK 0x000003F0L
#define DAGB0_WR_CNTL__VC_MAX_BW_WINDOW_MASK 0x0000FC00L
#define DAGB0_WR_CNTL__IO_LEVEL_OVERRIDE_ENABLE_MASK 0x00010000L
#define DAGB0_WR_CNTL__IO_LEVEL_MASK 0x000E0000L
#define DAGB0_WR_CNTL__IO_LEVEL_COMPLY_VC_MASK 0x00700000L
#define DAGB0_WR_CNTL__SHARE_VC_NUM_MASK 0x03800000L
//DAGB0_WR_GMI_CNTL
#define DAGB0_WR_GMI_CNTL__EA_CREDIT__SHIFT 0x0
#define DAGB0_WR_GMI_CNTL__LEVEL__SHIFT 0x6
#define DAGB0_WR_GMI_CNTL__MAX_BURST__SHIFT 0x9
#define DAGB0_WR_GMI_CNTL__LAZY_TIMER__SHIFT 0xd
#define DAGB0_WR_GMI_CNTL__EA_CREDIT_MASK 0x0000003FL
#define DAGB0_WR_GMI_CNTL__LEVEL_MASK 0x000001C0L
#define DAGB0_WR_GMI_CNTL__MAX_BURST_MASK 0x00001E00L
#define DAGB0_WR_GMI_CNTL__LAZY_TIMER_MASK 0x0001E000L
//DAGB0_WR_ADDR_DAGB
#define DAGB0_WR_ADDR_DAGB__DAGB_ENABLE__SHIFT 0x0
#define DAGB0_WR_ADDR_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3
#define DAGB0_WR_ADDR_DAGB__DISABLE_SELF_INIT__SHIFT 0x6
#define DAGB0_WR_ADDR_DAGB__WHOAMI__SHIFT 0x7
#define DAGB0_WR_ADDR_DAGB__DAGB_ENABLE_MASK 0x00000007L
#define DAGB0_WR_ADDR_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L
#define DAGB0_WR_ADDR_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L
#define DAGB0_WR_ADDR_DAGB__WHOAMI_MASK 0x00001F80L
//DAGB0_WR_OUTPUT_DAGB_MAX_BURST
#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC0__SHIFT 0x0
#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC1__SHIFT 0x4
#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC2__SHIFT 0x8
#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC3__SHIFT 0xc
#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC4__SHIFT 0x10
#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC5__SHIFT 0x14
#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC6__SHIFT 0x18
#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC7__SHIFT 0x1c
#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC0_MASK 0x0000000FL
#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC1_MASK 0x000000F0L
#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC2_MASK 0x00000F00L
#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC3_MASK 0x0000F000L
#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC4_MASK 0x000F0000L
#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC5_MASK 0x00F00000L
#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC6_MASK 0x0F000000L
#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC7_MASK 0xF0000000L
//DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER
#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC0__SHIFT 0x0
#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC1__SHIFT 0x4
#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC2__SHIFT 0x8
#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC3__SHIFT 0xc
#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC4__SHIFT 0x10
#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC5__SHIFT 0x14
#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC6__SHIFT 0x18
#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC7__SHIFT 0x1c
#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC0_MASK 0x0000000FL
#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC1_MASK 0x000000F0L
#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC2_MASK 0x00000F00L
#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC3_MASK 0x0000F000L
#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC4_MASK 0x000F0000L
#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC5_MASK 0x00F00000L
#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC6_MASK 0x0F000000L
#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC7_MASK 0xF0000000L
//DAGB0_WR_CGTT_CLK_CTRL
#define DAGB0_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
#define DAGB0_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
#define DAGB0_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
#define DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
#define DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
#define DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
#define DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
#define DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
#define DAGB0_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
#define DAGB0_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
#define DAGB0_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
#define DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
#define DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
#define DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
#define DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
#define DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
//DAGB0_L1TLB_WR_CGTT_CLK_CTRL
#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
//DAGB0_ATCVM_WR_CGTT_CLK_CTRL
#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
//DAGB0_WR_ADDR_DAGB_MAX_BURST0
#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0
#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4
#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8
#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc
#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10
#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14
#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18
#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c
#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL
#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L
#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L
#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L
#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L
#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L
#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L
#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L
//DAGB0_WR_ADDR_DAGB_LAZY_TIMER0
#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0
#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4
#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8
#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc
#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10
#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14
#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18
#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c
#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL
#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L
#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L
#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L
#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L
#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L
#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L
#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L
//DAGB0_WR_ADDR_DAGB_MAX_BURST1
#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0
#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4
#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8
#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc
#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10
#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14
#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18
#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c
#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL
#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L
#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L
#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L
#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L
#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L
#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L
#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L
//DAGB0_WR_ADDR_DAGB_LAZY_TIMER1
#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0
#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4
#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8
#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc
#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10
#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14
#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18
#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c
#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL
#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L
#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L
#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L
#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L
#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L
#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L
#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L
//DAGB0_WR_ADDR_DAGB_MAX_BURST2
#define DAGB0_WR_ADDR_DAGB_MAX_BURST2__CLIENT16__SHIFT 0x0
#define DAGB0_WR_ADDR_DAGB_MAX_BURST2__CLIENT17__SHIFT 0x4
#define DAGB0_WR_ADDR_DAGB_MAX_BURST2__CLIENT18__SHIFT 0x8
#define DAGB0_WR_ADDR_DAGB_MAX_BURST2__CLIENT19__SHIFT 0xc
#define DAGB0_WR_ADDR_DAGB_MAX_BURST2__CLIENT20__SHIFT 0x10
#define DAGB0_WR_ADDR_DAGB_MAX_BURST2__CLIENT21__SHIFT 0x14
#define DAGB0_WR_ADDR_DAGB_MAX_BURST2__CLIENT22__SHIFT 0x18
#define DAGB0_WR_ADDR_DAGB_MAX_BURST2__CLIENT23__SHIFT 0x1c
#define DAGB0_WR_ADDR_DAGB_MAX_BURST2__CLIENT16_MASK 0x0000000FL
#define DAGB0_WR_ADDR_DAGB_MAX_BURST2__CLIENT17_MASK 0x000000F0L
#define DAGB0_WR_ADDR_DAGB_MAX_BURST2__CLIENT18_MASK 0x00000F00L
#define DAGB0_WR_ADDR_DAGB_MAX_BURST2__CLIENT19_MASK 0x0000F000L
#define DAGB0_WR_ADDR_DAGB_MAX_BURST2__CLIENT20_MASK 0x000F0000L
#define DAGB0_WR_ADDR_DAGB_MAX_BURST2__CLIENT21_MASK 0x00F00000L
#define DAGB0_WR_ADDR_DAGB_MAX_BURST2__CLIENT22_MASK 0x0F000000L
#define DAGB0_WR_ADDR_DAGB_MAX_BURST2__CLIENT23_MASK 0xF0000000L
//DAGB0_WR_ADDR_DAGB_LAZY_TIMER2
#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER2__CLIENT16__SHIFT 0x0
#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER2__CLIENT17__SHIFT 0x4
#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER2__CLIENT18__SHIFT 0x8
#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER2__CLIENT19__SHIFT 0xc
#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER2__CLIENT20__SHIFT 0x10
#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER2__CLIENT21__SHIFT 0x14
#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER2__CLIENT22__SHIFT 0x18
#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER2__CLIENT23__SHIFT 0x1c
#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER2__CLIENT16_MASK 0x0000000FL
#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER2__CLIENT17_MASK 0x000000F0L
#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER2__CLIENT18_MASK 0x00000F00L
#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER2__CLIENT19_MASK 0x0000F000L
#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER2__CLIENT20_MASK 0x000F0000L
#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER2__CLIENT21_MASK 0x00F00000L
#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER2__CLIENT22_MASK 0x0F000000L
#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER2__CLIENT23_MASK 0xF0000000L
//DAGB0_WR_DATA_DAGB
#define DAGB0_WR_DATA_DAGB__DAGB_ENABLE__SHIFT 0x0
#define DAGB0_WR_DATA_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3
#define DAGB0_WR_DATA_DAGB__DISABLE_SELF_INIT__SHIFT 0x6
#define DAGB0_WR_DATA_DAGB__WHOAMI__SHIFT 0x7
#define DAGB0_WR_DATA_DAGB__DAGB_ENABLE_MASK 0x00000007L
#define DAGB0_WR_DATA_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L
#define DAGB0_WR_DATA_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L
#define DAGB0_WR_DATA_DAGB__WHOAMI_MASK 0x00001F80L
//DAGB0_WR_DATA_DAGB_MAX_BURST0
#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0
#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4
#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8
#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc
#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10
#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14
#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18
#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c
#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL
#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L
#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L
#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L
#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L
#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L
#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L
#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L
//DAGB0_WR_DATA_DAGB_LAZY_TIMER0
#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0
#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4
#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8
#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc
#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10
#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14
#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18
#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c
#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL
#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L
#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L
#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L
#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L
#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L
#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L
#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L
//DAGB0_WR_DATA_DAGB_MAX_BURST1
#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0
#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4
#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8
#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc
#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10
#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14
#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18
#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c
#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL
#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L
#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L
#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L
#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L
#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L
#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L
#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L
//DAGB0_WR_DATA_DAGB_LAZY_TIMER1
#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0
#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4
#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8
#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc
#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10
#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14
#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18
#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c
#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL
#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L
#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L
#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L
#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L
#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L
#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L
#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L
//DAGB0_WR_DATA_DAGB_MAX_BURST2
#define DAGB0_WR_DATA_DAGB_MAX_BURST2__CLIENT16__SHIFT 0x0
#define DAGB0_WR_DATA_DAGB_MAX_BURST2__CLIENT17__SHIFT 0x4
#define DAGB0_WR_DATA_DAGB_MAX_BURST2__CLIENT18__SHIFT 0x8
#define DAGB0_WR_DATA_DAGB_MAX_BURST2__CLIENT19__SHIFT 0xc
#define DAGB0_WR_DATA_DAGB_MAX_BURST2__CLIENT20__SHIFT 0x10
#define DAGB0_WR_DATA_DAGB_MAX_BURST2__CLIENT21__SHIFT 0x14
#define DAGB0_WR_DATA_DAGB_MAX_BURST2__CLIENT22__SHIFT 0x18
#define DAGB0_WR_DATA_DAGB_MAX_BURST2__CLIENT23__SHIFT 0x1c
#define DAGB0_WR_DATA_DAGB_MAX_BURST2__CLIENT16_MASK 0x0000000FL
#define DAGB0_WR_DATA_DAGB_MAX_BURST2__CLIENT17_MASK 0x000000F0L
#define DAGB0_WR_DATA_DAGB_MAX_BURST2__CLIENT18_MASK 0x00000F00L
#define DAGB0_WR_DATA_DAGB_MAX_BURST2__CLIENT19_MASK 0x0000F000L
#define DAGB0_WR_DATA_DAGB_MAX_BURST2__CLIENT20_MASK 0x000F0000L
#define DAGB0_WR_DATA_DAGB_MAX_BURST2__CLIENT21_MASK 0x00F00000L
#define DAGB0_WR_DATA_DAGB_MAX_BURST2__CLIENT22_MASK 0x0F000000L
#define DAGB0_WR_DATA_DAGB_MAX_BURST2__CLIENT23_MASK 0xF0000000L
//DAGB0_WR_DATA_DAGB_LAZY_TIMER2
#define DAGB0_WR_DATA_DAGB_LAZY_TIMER2__CLIENT16__SHIFT 0x0
#define DAGB0_WR_DATA_DAGB_LAZY_TIMER2__CLIENT17__SHIFT 0x4
#define DAGB0_WR_DATA_DAGB_LAZY_TIMER2__CLIENT18__SHIFT 0x8
#define DAGB0_WR_DATA_DAGB_LAZY_TIMER2__CLIENT19__SHIFT 0xc
#define DAGB0_WR_DATA_DAGB_LAZY_TIMER2__CLIENT20__SHIFT 0x10
#define DAGB0_WR_DATA_DAGB_LAZY_TIMER2__CLIENT21__SHIFT 0x14
#define DAGB0_WR_DATA_DAGB_LAZY_TIMER2__CLIENT22__SHIFT 0x18
#define DAGB0_WR_DATA_DAGB_LAZY_TIMER2__CLIENT23__SHIFT 0x1c
#define DAGB0_WR_DATA_DAGB_LAZY_TIMER2__CLIENT16_MASK 0x0000000FL
#define DAGB0_WR_DATA_DAGB_LAZY_TIMER2__CLIENT17_MASK 0x000000F0L
#define DAGB0_WR_DATA_DAGB_LAZY_TIMER2__CLIENT18_MASK 0x00000F00L
#define DAGB0_WR_DATA_DAGB_LAZY_TIMER2__CLIENT19_MASK 0x0000F000L
#define DAGB0_WR_DATA_DAGB_LAZY_TIMER2__CLIENT20_MASK 0x000F0000L
#define DAGB0_WR_DATA_DAGB_LAZY_TIMER2__CLIENT21_MASK 0x00F00000L
#define DAGB0_WR_DATA_DAGB_LAZY_TIMER2__CLIENT22_MASK 0x0F000000L
#define DAGB0_WR_DATA_DAGB_LAZY_TIMER2__CLIENT23_MASK 0xF0000000L
//DAGB0_WR_VC0_CNTL
#define DAGB0_WR_VC0_CNTL__STOR_CREDIT__SHIFT 0x0
#define DAGB0_WR_VC0_CNTL__EA_CREDIT__SHIFT 0x5
#define DAGB0_WR_VC0_CNTL__MAX_BW_ENABLE__SHIFT 0xb
#define DAGB0_WR_VC0_CNTL__MAX_BW__SHIFT 0xc
#define DAGB0_WR_VC0_CNTL__MIN_BW_ENABLE__SHIFT 0x14
#define DAGB0_WR_VC0_CNTL__MIN_BW__SHIFT 0x15
#define DAGB0_WR_VC0_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
#define DAGB0_WR_VC0_CNTL__MAX_OSD__SHIFT 0x19
#define DAGB0_WR_VC0_CNTL__STOR_CREDIT_MASK 0x0000001FL
#define DAGB0_WR_VC0_CNTL__EA_CREDIT_MASK 0x000007E0L
#define DAGB0_WR_VC0_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
#define DAGB0_WR_VC0_CNTL__MAX_BW_MASK 0x000FF000L
#define DAGB0_WR_VC0_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
#define DAGB0_WR_VC0_CNTL__MIN_BW_MASK 0x00E00000L
#define DAGB0_WR_VC0_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
#define DAGB0_WR_VC0_CNTL__MAX_OSD_MASK 0xFE000000L
//DAGB0_WR_VC1_CNTL
#define DAGB0_WR_VC1_CNTL__STOR_CREDIT__SHIFT 0x0
#define DAGB0_WR_VC1_CNTL__EA_CREDIT__SHIFT 0x5
#define DAGB0_WR_VC1_CNTL__MAX_BW_ENABLE__SHIFT 0xb
#define DAGB0_WR_VC1_CNTL__MAX_BW__SHIFT 0xc
#define DAGB0_WR_VC1_CNTL__MIN_BW_ENABLE__SHIFT 0x14
#define DAGB0_WR_VC1_CNTL__MIN_BW__SHIFT 0x15
#define DAGB0_WR_VC1_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
#define DAGB0_WR_VC1_CNTL__MAX_OSD__SHIFT 0x19
#define DAGB0_WR_VC1_CNTL__STOR_CREDIT_MASK 0x0000001FL
#define DAGB0_WR_VC1_CNTL__EA_CREDIT_MASK 0x000007E0L
#define DAGB0_WR_VC1_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
#define DAGB0_WR_VC1_CNTL__MAX_BW_MASK 0x000FF000L
#define DAGB0_WR_VC1_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
#define DAGB0_WR_VC1_CNTL__MIN_BW_MASK 0x00E00000L
#define DAGB0_WR_VC1_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
#define DAGB0_WR_VC1_CNTL__MAX_OSD_MASK 0xFE000000L
//DAGB0_WR_VC2_CNTL
#define DAGB0_WR_VC2_CNTL__STOR_CREDIT__SHIFT 0x0
#define DAGB0_WR_VC2_CNTL__EA_CREDIT__SHIFT 0x5
#define DAGB0_WR_VC2_CNTL__MAX_BW_ENABLE__SHIFT 0xb
#define DAGB0_WR_VC2_CNTL__MAX_BW__SHIFT 0xc
#define DAGB0_WR_VC2_CNTL__MIN_BW_ENABLE__SHIFT 0x14
#define DAGB0_WR_VC2_CNTL__MIN_BW__SHIFT 0x15
#define DAGB0_WR_VC2_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
#define DAGB0_WR_VC2_CNTL__MAX_OSD__SHIFT 0x19
#define DAGB0_WR_VC2_CNTL__STOR_CREDIT_MASK 0x0000001FL
#define DAGB0_WR_VC2_CNTL__EA_CREDIT_MASK 0x000007E0L
#define DAGB0_WR_VC2_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
#define DAGB0_WR_VC2_CNTL__MAX_BW_MASK 0x000FF000L
#define DAGB0_WR_VC2_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
#define DAGB0_WR_VC2_CNTL__MIN_BW_MASK 0x00E00000L
#define DAGB0_WR_VC2_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
#define DAGB0_WR_VC2_CNTL__MAX_OSD_MASK 0xFE000000L
//DAGB0_WR_VC3_CNTL
#define DAGB0_WR_VC3_CNTL__STOR_CREDIT__SHIFT 0x0
#define DAGB0_WR_VC3_CNTL__EA_CREDIT__SHIFT 0x5
#define DAGB0_WR_VC3_CNTL__MAX_BW_ENABLE__SHIFT 0xb
#define DAGB0_WR_VC3_CNTL__MAX_BW__SHIFT 0xc
#define DAGB0_WR_VC3_CNTL__MIN_BW_ENABLE__SHIFT 0x14
#define DAGB0_WR_VC3_CNTL__MIN_BW__SHIFT 0x15
#define DAGB0_WR_VC3_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
#define DAGB0_WR_VC3_CNTL__MAX_OSD__SHIFT 0x19
#define DAGB0_WR_VC3_CNTL__STOR_CREDIT_MASK 0x0000001FL
#define DAGB0_WR_VC3_CNTL__EA_CREDIT_MASK 0x000007E0L
#define DAGB0_WR_VC3_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
#define DAGB0_WR_VC3_CNTL__MAX_BW_MASK 0x000FF000L
#define DAGB0_WR_VC3_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
#define DAGB0_WR_VC3_CNTL__MIN_BW_MASK 0x00E00000L
#define DAGB0_WR_VC3_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
#define DAGB0_WR_VC3_CNTL__MAX_OSD_MASK 0xFE000000L
//DAGB0_WR_VC4_CNTL
#define DAGB0_WR_VC4_CNTL__STOR_CREDIT__SHIFT 0x0
#define DAGB0_WR_VC4_CNTL__EA_CREDIT__SHIFT 0x5
#define DAGB0_WR_VC4_CNTL__MAX_BW_ENABLE__SHIFT 0xb
#define DAGB0_WR_VC4_CNTL__MAX_BW__SHIFT 0xc
#define DAGB0_WR_VC4_CNTL__MIN_BW_ENABLE__SHIFT 0x14
#define DAGB0_WR_VC4_CNTL__MIN_BW__SHIFT 0x15
#define DAGB0_WR_VC4_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
#define DAGB0_WR_VC4_CNTL__MAX_OSD__SHIFT 0x19
#define DAGB0_WR_VC4_CNTL__STOR_CREDIT_MASK 0x0000001FL
#define DAGB0_WR_VC4_CNTL__EA_CREDIT_MASK 0x000007E0L
#define DAGB0_WR_VC4_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
#define DAGB0_WR_VC4_CNTL__MAX_BW_MASK 0x000FF000L
#define DAGB0_WR_VC4_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
#define DAGB0_WR_VC4_CNTL__MIN_BW_MASK 0x00E00000L
#define DAGB0_WR_VC4_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
#define DAGB0_WR_VC4_CNTL__MAX_OSD_MASK 0xFE000000L
//DAGB0_WR_VC5_CNTL
#define DAGB0_WR_VC5_CNTL__STOR_CREDIT__SHIFT 0x0
#define DAGB0_WR_VC5_CNTL__EA_CREDIT__SHIFT 0x5
#define DAGB0_WR_VC5_CNTL__MAX_BW_ENABLE__SHIFT 0xb
#define DAGB0_WR_VC5_CNTL__MAX_BW__SHIFT 0xc
#define DAGB0_WR_VC5_CNTL__MIN_BW_ENABLE__SHIFT 0x14
#define DAGB0_WR_VC5_CNTL__MIN_BW__SHIFT 0x15
#define DAGB0_WR_VC5_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
#define DAGB0_WR_VC5_CNTL__MAX_OSD__SHIFT 0x19
#define DAGB0_WR_VC5_CNTL__STOR_CREDIT_MASK 0x0000001FL
#define DAGB0_WR_VC5_CNTL__EA_CREDIT_MASK 0x000007E0L
#define DAGB0_WR_VC5_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
#define DAGB0_WR_VC5_CNTL__MAX_BW_MASK 0x000FF000L
#define DAGB0_WR_VC5_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
#define DAGB0_WR_VC5_CNTL__MIN_BW_MASK 0x00E00000L
#define DAGB0_WR_VC5_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
#define DAGB0_WR_VC5_CNTL__MAX_OSD_MASK 0xFE000000L
//DAGB0_WR_VC6_CNTL
#define DAGB0_WR_VC6_CNTL__STOR_CREDIT__SHIFT 0x0
#define DAGB0_WR_VC6_CNTL__EA_CREDIT__SHIFT 0x5
#define DAGB0_WR_VC6_CNTL__MAX_BW_ENABLE__SHIFT 0xb
#define DAGB0_WR_VC6_CNTL__MAX_BW__SHIFT 0xc
#define DAGB0_WR_VC6_CNTL__MIN_BW_ENABLE__SHIFT 0x14
#define DAGB0_WR_VC6_CNTL__MIN_BW__SHIFT 0x15
#define DAGB0_WR_VC6_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
#define DAGB0_WR_VC6_CNTL__MAX_OSD__SHIFT 0x19
#define DAGB0_WR_VC6_CNTL__STOR_CREDIT_MASK 0x0000001FL
#define DAGB0_WR_VC6_CNTL__EA_CREDIT_MASK 0x000007E0L
#define DAGB0_WR_VC6_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
#define DAGB0_WR_VC6_CNTL__MAX_BW_MASK 0x000FF000L
#define DAGB0_WR_VC6_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
#define DAGB0_WR_VC6_CNTL__MIN_BW_MASK 0x00E00000L
#define DAGB0_WR_VC6_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
#define DAGB0_WR_VC6_CNTL__MAX_OSD_MASK 0xFE000000L
//DAGB0_WR_VC7_CNTL
#define DAGB0_WR_VC7_CNTL__STOR_CREDIT__SHIFT 0x0
#define DAGB0_WR_VC7_CNTL__EA_CREDIT__SHIFT 0x5
#define DAGB0_WR_VC7_CNTL__MAX_BW_ENABLE__SHIFT 0xb
#define DAGB0_WR_VC7_CNTL__MAX_BW__SHIFT 0xc
#define DAGB0_WR_VC7_CNTL__MIN_BW_ENABLE__SHIFT 0x14
#define DAGB0_WR_VC7_CNTL__MIN_BW__SHIFT 0x15
#define DAGB0_WR_VC7_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
#define DAGB0_WR_VC7_CNTL__MAX_OSD__SHIFT 0x19
#define DAGB0_WR_VC7_CNTL__STOR_CREDIT_MASK 0x0000001FL
#define DAGB0_WR_VC7_CNTL__EA_CREDIT_MASK 0x000007E0L
#define DAGB0_WR_VC7_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
#define DAGB0_WR_VC7_CNTL__MAX_BW_MASK 0x000FF000L
#define DAGB0_WR_VC7_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
#define DAGB0_WR_VC7_CNTL__MIN_BW_MASK 0x00E00000L
#define DAGB0_WR_VC7_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
#define DAGB0_WR_VC7_CNTL__MAX_OSD_MASK 0xFE000000L
//DAGB0_WR_CNTL_MISC
#define DAGB0_WR_CNTL_MISC__STOR_POOL_CREDIT__SHIFT 0x0
#define DAGB0_WR_CNTL_MISC__EA_POOL_CREDIT__SHIFT 0x6
#define DAGB0_WR_CNTL_MISC__IO_EA_CREDIT__SHIFT 0xd
#define DAGB0_WR_CNTL_MISC__STOR_CC_LEGACY_MODE__SHIFT 0x13
#define DAGB0_WR_CNTL_MISC__EA_CC_LEGACY_MODE__SHIFT 0x14
#define DAGB0_WR_CNTL_MISC__UTCL2_CID__SHIFT 0x15
#define DAGB0_WR_CNTL_MISC__HDP_CID__SHIFT 0x1a
#define DAGB0_WR_CNTL_MISC__STOR_POOL_CREDIT_MASK 0x0000003FL
#define DAGB0_WR_CNTL_MISC__EA_POOL_CREDIT_MASK 0x00001FC0L
#define DAGB0_WR_CNTL_MISC__IO_EA_CREDIT_MASK 0x0007E000L
#define DAGB0_WR_CNTL_MISC__STOR_CC_LEGACY_MODE_MASK 0x00080000L
#define DAGB0_WR_CNTL_MISC__EA_CC_LEGACY_MODE_MASK 0x00100000L
#define DAGB0_WR_CNTL_MISC__UTCL2_CID_MASK 0x03E00000L
#define DAGB0_WR_CNTL_MISC__HDP_CID_MASK 0x7C000000L
//DAGB0_WR_TLB_CREDIT
#define DAGB0_WR_TLB_CREDIT__TLB0__SHIFT 0x0
#define DAGB0_WR_TLB_CREDIT__TLB1__SHIFT 0x5
#define DAGB0_WR_TLB_CREDIT__TLB2__SHIFT 0xa
#define DAGB0_WR_TLB_CREDIT__TLB3__SHIFT 0xf
#define DAGB0_WR_TLB_CREDIT__TLB4__SHIFT 0x14
#define DAGB0_WR_TLB_CREDIT__TLB5__SHIFT 0x19
#define DAGB0_WR_TLB_CREDIT__TLB0_MASK 0x0000001FL
#define DAGB0_WR_TLB_CREDIT__TLB1_MASK 0x000003E0L
#define DAGB0_WR_TLB_CREDIT__TLB2_MASK 0x00007C00L
#define DAGB0_WR_TLB_CREDIT__TLB3_MASK 0x000F8000L
#define DAGB0_WR_TLB_CREDIT__TLB4_MASK 0x01F00000L
#define DAGB0_WR_TLB_CREDIT__TLB5_MASK 0x3E000000L
//DAGB0_WR_DATA_CREDIT
#define DAGB0_WR_DATA_CREDIT__DLOCK_VC_CREDITS__SHIFT 0x0
#define DAGB0_WR_DATA_CREDIT__LARGE_BURST_CREDITS__SHIFT 0x8
#define DAGB0_WR_DATA_CREDIT__MIDDLE_BURST_CREDITS__SHIFT 0x10
#define DAGB0_WR_DATA_CREDIT__SMALL_BURST_CREDITS__SHIFT 0x18
#define DAGB0_WR_DATA_CREDIT__DLOCK_VC_CREDITS_MASK 0x000000FFL
#define DAGB0_WR_DATA_CREDIT__LARGE_BURST_CREDITS_MASK 0x0000FF00L
#define DAGB0_WR_DATA_CREDIT__MIDDLE_BURST_CREDITS_MASK 0x00FF0000L
#define DAGB0_WR_DATA_CREDIT__SMALL_BURST_CREDITS_MASK 0xFF000000L
//DAGB0_WR_MISC_CREDIT
#define DAGB0_WR_MISC_CREDIT__ATOMIC_CREDIT__SHIFT 0x0
#define DAGB0_WR_MISC_CREDIT__DLOCK_VC_NUM__SHIFT 0x6
#define DAGB0_WR_MISC_CREDIT__OSD_CREDIT__SHIFT 0x9
#define DAGB0_WR_MISC_CREDIT__OSD_DLOCK_CREDIT__SHIFT 0x10
#define DAGB0_WR_MISC_CREDIT__ATOMIC_CREDIT_MASK 0x0000003FL
#define DAGB0_WR_MISC_CREDIT__DLOCK_VC_NUM_MASK 0x000001C0L
#define DAGB0_WR_MISC_CREDIT__OSD_CREDIT_MASK 0x0000FE00L
#define DAGB0_WR_MISC_CREDIT__OSD_DLOCK_CREDIT_MASK 0x007F0000L
//DAGB0_WRCLI_ASK_PENDING
#define DAGB0_WRCLI_ASK_PENDING__BUSY__SHIFT 0x0
#define DAGB0_WRCLI_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL
//DAGB0_WRCLI_GO_PENDING
#define DAGB0_WRCLI_GO_PENDING__BUSY__SHIFT 0x0
#define DAGB0_WRCLI_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
//DAGB0_WRCLI_GBLSEND_PENDING
#define DAGB0_WRCLI_GBLSEND_PENDING__BUSY__SHIFT 0x0
#define DAGB0_WRCLI_GBLSEND_PENDING__BUSY_MASK 0xFFFFFFFFL
//DAGB0_WRCLI_TLB_PENDING
#define DAGB0_WRCLI_TLB_PENDING__BUSY__SHIFT 0x0
#define DAGB0_WRCLI_TLB_PENDING__BUSY_MASK 0xFFFFFFFFL
//DAGB0_WRCLI_OARB_PENDING
#define DAGB0_WRCLI_OARB_PENDING__BUSY__SHIFT 0x0
#define DAGB0_WRCLI_OARB_PENDING__BUSY_MASK 0xFFFFFFFFL
//DAGB0_WRCLI_OSD_PENDING
#define DAGB0_WRCLI_OSD_PENDING__BUSY__SHIFT 0x0
#define DAGB0_WRCLI_OSD_PENDING__BUSY_MASK 0xFFFFFFFFL
//DAGB0_WRCLI_DBUS_ASK_PENDING
#define DAGB0_WRCLI_DBUS_ASK_PENDING__BUSY__SHIFT 0x0
#define DAGB0_WRCLI_DBUS_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL
//DAGB0_WRCLI_DBUS_GO_PENDING
#define DAGB0_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0
#define DAGB0_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
//DAGB0_WRCLI_GPU_SNOOP_OVERRIDE
#define DAGB0_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE__SHIFT 0x0
#define DAGB0_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE_MASK 0xFFFFFFFFL
//DAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE
#define DAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE__SHIFT 0x0
#define DAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE_MASK 0xFFFFFFFFL
//DAGB0_DAGB_DLY
#define DAGB0_DAGB_DLY__DLY__SHIFT 0x0
#define DAGB0_DAGB_DLY__CLI__SHIFT 0x8
#define DAGB0_DAGB_DLY__POS__SHIFT 0x10
#define DAGB0_DAGB_DLY__DLY_MASK 0x000000FFL
#define DAGB0_DAGB_DLY__CLI_MASK 0x0000FF00L
#define DAGB0_DAGB_DLY__POS_MASK 0x000F0000L
//DAGB0_CNTL_MISC
#define DAGB0_CNTL_MISC__EA_VC0_REMAP__SHIFT 0x0
#define DAGB0_CNTL_MISC__EA_VC1_REMAP__SHIFT 0x3
#define DAGB0_CNTL_MISC__EA_VC2_REMAP__SHIFT 0x6
#define DAGB0_CNTL_MISC__EA_VC3_REMAP__SHIFT 0x9
#define DAGB0_CNTL_MISC__EA_VC4_REMAP__SHIFT 0xc
#define DAGB0_CNTL_MISC__EA_VC5_REMAP__SHIFT 0xf
#define DAGB0_CNTL_MISC__EA_VC6_REMAP__SHIFT 0x12
#define DAGB0_CNTL_MISC__EA_VC7_REMAP__SHIFT 0x15
#define DAGB0_CNTL_MISC__BW_INIT_CYCLE__SHIFT 0x18
#define DAGB0_CNTL_MISC__BW_RW_GAP_CYCLE__SHIFT 0x1e
#define DAGB0_CNTL_MISC__EA_VC0_REMAP_MASK 0x00000007L
#define DAGB0_CNTL_MISC__EA_VC1_REMAP_MASK 0x00000038L
#define DAGB0_CNTL_MISC__EA_VC2_REMAP_MASK 0x000001C0L
#define DAGB0_CNTL_MISC__EA_VC3_REMAP_MASK 0x00000E00L
#define DAGB0_CNTL_MISC__EA_VC4_REMAP_MASK 0x00007000L
#define DAGB0_CNTL_MISC__EA_VC5_REMAP_MASK 0x00038000L
#define DAGB0_CNTL_MISC__EA_VC6_REMAP_MASK 0x001C0000L
#define DAGB0_CNTL_MISC__EA_VC7_REMAP_MASK 0x00E00000L
#define DAGB0_CNTL_MISC__BW_INIT_CYCLE_MASK 0x3F000000L
#define DAGB0_CNTL_MISC__BW_RW_GAP_CYCLE_MASK 0xC0000000L
//DAGB0_CNTL_MISC2
#define DAGB0_CNTL_MISC2__URG_BOOST_ENABLE__SHIFT 0x0
#define DAGB0_CNTL_MISC2__URG_HALT_ENABLE__SHIFT 0x1
#define DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG__SHIFT 0x2
#define DAGB0_CNTL_MISC2__DISABLE_WRRET_CG__SHIFT 0x3
#define DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG__SHIFT 0x4
#define DAGB0_CNTL_MISC2__DISABLE_RDRET_CG__SHIFT 0x5
#define DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG__SHIFT 0x6
#define DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG__SHIFT 0x7
#define DAGB0_CNTL_MISC2__DISABLE_EAWRREQ_BUSY__SHIFT 0x8
#define DAGB0_CNTL_MISC2__DISABLE_EARDREQ_BUSY__SHIFT 0x9
#define DAGB0_CNTL_MISC2__SWAP_CTL__SHIFT 0xa
#define DAGB0_CNTL_MISC2__ENABLE_PARITY_CHECK__SHIFT 0xb
#define DAGB0_CNTL_MISC2__URG_BOOST_ENABLE_MASK 0x00000001L
#define DAGB0_CNTL_MISC2__URG_HALT_ENABLE_MASK 0x00000002L
#define DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK 0x00000004L
#define DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK 0x00000008L
#define DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK 0x00000010L
#define DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK 0x00000020L
#define DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK 0x00000040L
#define DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK 0x00000080L
#define DAGB0_CNTL_MISC2__DISABLE_EAWRREQ_BUSY_MASK 0x00000100L
#define DAGB0_CNTL_MISC2__DISABLE_EARDREQ_BUSY_MASK 0x00000200L
#define DAGB0_CNTL_MISC2__SWAP_CTL_MASK 0x00000400L
#define DAGB0_CNTL_MISC2__ENABLE_PARITY_CHECK_MASK 0x00000800L
//DAGB0_FIFO_EMPTY
#define DAGB0_FIFO_EMPTY__EMPTY__SHIFT 0x0
#define DAGB0_FIFO_EMPTY__EMPTY_MASK 0x00FFFFFFL
//DAGB0_FIFO_FULL
#define DAGB0_FIFO_FULL__FULL__SHIFT 0x0
#define DAGB0_FIFO_FULL__FULL_MASK 0x007FFFFFL
//DAGB0_WR_CREDITS_FULL
#define DAGB0_WR_CREDITS_FULL__FULL__SHIFT 0x0
#define DAGB0_WR_CREDITS_FULL__FULL_MASK 0x0007FFFFL
//DAGB0_RD_CREDITS_FULL
#define DAGB0_RD_CREDITS_FULL__FULL__SHIFT 0x0
#define DAGB0_RD_CREDITS_FULL__FULL_MASK 0x0003FFFFL
//DAGB0_PERFCOUNTER_LO
#define DAGB0_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
#define DAGB0_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
//DAGB0_PERFCOUNTER_HI
#define DAGB0_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
#define DAGB0_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
#define DAGB0_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
#define DAGB0_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
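/*
 * Illustrative sketch, not part of the generated register list: the
 * DAGB0 performance counter spans two registers, with COUNTER_LO
 * holding bits 31:0 and COUNTER_HI holding bits 47:32 (COUNTER_HI
 * shares its register with COMPARE_VALUE, so it must be masked out).
 * Assuming raw 32-bit reads of both registers, the full 48-bit value
 * can be reassembled as below; dagb0_perfcounter_value() is a
 * hypothetical helper named here only for illustration.
 */
static inline unsigned long long
dagb0_perfcounter_value(unsigned int lo, unsigned int hi)
{
	/* Extract COUNTER_HI (bits 15:0 of the HI register). */
	unsigned long long hi_part =
		(hi & DAGB0_PERFCOUNTER_HI__COUNTER_HI_MASK) >>
		DAGB0_PERFCOUNTER_HI__COUNTER_HI__SHIFT;

	/* Concatenate into the 48-bit counter value. */
	return (hi_part << 32) | lo;
}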
//DAGB0_PERFCOUNTER0_CFG
#define DAGB0_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
#define DAGB0_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
#define DAGB0_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
#define DAGB0_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
#define DAGB0_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
#define DAGB0_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
#define DAGB0_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
#define DAGB0_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
#define DAGB0_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
#define DAGB0_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
//DAGB0_PERFCOUNTER1_CFG
#define DAGB0_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
#define DAGB0_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
#define DAGB0_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
#define DAGB0_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
#define DAGB0_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
#define DAGB0_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
#define DAGB0_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
#define DAGB0_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
#define DAGB0_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
#define DAGB0_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
//DAGB0_PERFCOUNTER2_CFG
#define DAGB0_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0
#define DAGB0_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8
#define DAGB0_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18
#define DAGB0_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c
#define DAGB0_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d
#define DAGB0_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL
#define DAGB0_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L
#define DAGB0_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L
#define DAGB0_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L
#define DAGB0_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L
//DAGB0_PERFCOUNTER_RSLT_CNTL
#define DAGB0_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
#define DAGB0_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
#define DAGB0_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
#define DAGB0_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
#define DAGB0_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
#define DAGB0_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
#define DAGB0_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
#define DAGB0_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
#define DAGB0_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
#define DAGB0_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
#define DAGB0_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
#define DAGB0_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
//DAGB0_RESERVE0
#define DAGB0_RESERVE0__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE0__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE1
#define DAGB0_RESERVE1__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE1__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE2
#define DAGB0_RESERVE2__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE2__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE3
#define DAGB0_RESERVE3__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE3__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE4
#define DAGB0_RESERVE4__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE4__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE5
#define DAGB0_RESERVE5__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE5__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE6
#define DAGB0_RESERVE6__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE6__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE7
#define DAGB0_RESERVE7__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE7__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE8
#define DAGB0_RESERVE8__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE8__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE9
#define DAGB0_RESERVE9__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE9__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE10
#define DAGB0_RESERVE10__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE10__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE11
#define DAGB0_RESERVE11__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE11__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE12
#define DAGB0_RESERVE12__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE12__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE13
#define DAGB0_RESERVE13__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE13__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE14
#define DAGB0_RESERVE14__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE14__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE15
#define DAGB0_RESERVE15__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE15__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE16
#define DAGB0_RESERVE16__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE16__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE17
#define DAGB0_RESERVE17__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE17__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE18
#define DAGB0_RESERVE18__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE18__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE19
#define DAGB0_RESERVE19__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE19__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE20
#define DAGB0_RESERVE20__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE20__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE21
#define DAGB0_RESERVE21__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE21__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE22
#define DAGB0_RESERVE22__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE22__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE23
#define DAGB0_RESERVE23__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE23__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE24
#define DAGB0_RESERVE24__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE24__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE25
#define DAGB0_RESERVE25__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE25__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE26
#define DAGB0_RESERVE26__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE26__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE27
#define DAGB0_RESERVE27__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE27__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE28
#define DAGB0_RESERVE28__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE28__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE29
#define DAGB0_RESERVE29__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE29__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE30
#define DAGB0_RESERVE30__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE30__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE31
#define DAGB0_RESERVE31__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE31__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE32
#define DAGB0_RESERVE32__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE32__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE33
#define DAGB0_RESERVE33__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE33__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE34
#define DAGB0_RESERVE34__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE34__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE35
#define DAGB0_RESERVE35__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE35__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE36
#define DAGB0_RESERVE36__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE36__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE37
#define DAGB0_RESERVE37__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE37__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE38
#define DAGB0_RESERVE38__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE38__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE39
#define DAGB0_RESERVE39__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE39__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE40
#define DAGB0_RESERVE40__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE40__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE41
#define DAGB0_RESERVE41__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE41__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE42
#define DAGB0_RESERVE42__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE42__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE43
#define DAGB0_RESERVE43__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE43__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE44
#define DAGB0_RESERVE44__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE44__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE45
#define DAGB0_RESERVE45__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE45__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE46
#define DAGB0_RESERVE46__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE46__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE47
#define DAGB0_RESERVE47__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE47__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE48
#define DAGB0_RESERVE48__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE48__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE49
#define DAGB0_RESERVE49__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE49__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE50
#define DAGB0_RESERVE50__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE50__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE51
#define DAGB0_RESERVE51__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE51__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE52
#define DAGB0_RESERVE52__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE52__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE53
#define DAGB0_RESERVE53__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE53__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE54
#define DAGB0_RESERVE54__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE54__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE55
#define DAGB0_RESERVE55__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE55__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE56
#define DAGB0_RESERVE56__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE56__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE57
#define DAGB0_RESERVE57__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE57__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE58
#define DAGB0_RESERVE58__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE58__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE59
#define DAGB0_RESERVE59__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE59__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE60
#define DAGB0_RESERVE60__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE60__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE61
#define DAGB0_RESERVE61__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE61__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE62
#define DAGB0_RESERVE62__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE62__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE63
#define DAGB0_RESERVE63__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE63__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE64
#define DAGB0_RESERVE64__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE64__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE65
#define DAGB0_RESERVE65__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE65__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE66
#define DAGB0_RESERVE66__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE66__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE67
#define DAGB0_RESERVE67__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE67__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE68
#define DAGB0_RESERVE68__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE68__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE69
#define DAGB0_RESERVE69__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE69__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE70
#define DAGB0_RESERVE70__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE70__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE71
#define DAGB0_RESERVE71__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE71__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE72
#define DAGB0_RESERVE72__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE72__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE73
#define DAGB0_RESERVE73__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE73__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE74
#define DAGB0_RESERVE74__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE74__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE75
#define DAGB0_RESERVE75__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE75__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE76
#define DAGB0_RESERVE76__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE76__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE77
#define DAGB0_RESERVE77__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE77__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE78
#define DAGB0_RESERVE78__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE78__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE79
#define DAGB0_RESERVE79__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE79__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE80
#define DAGB0_RESERVE80__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE80__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE81
#define DAGB0_RESERVE81__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE81__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE82
#define DAGB0_RESERVE82__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE82__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE83
#define DAGB0_RESERVE83__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE83__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE84
#define DAGB0_RESERVE84__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE84__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE85
#define DAGB0_RESERVE85__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE85__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE86
#define DAGB0_RESERVE86__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE86__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE87
#define DAGB0_RESERVE87__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE87__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE88
#define DAGB0_RESERVE88__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE88__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE89
#define DAGB0_RESERVE89__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE89__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE90
#define DAGB0_RESERVE90__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE90__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE91
#define DAGB0_RESERVE91__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE91__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE92
#define DAGB0_RESERVE92__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE92__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE93
#define DAGB0_RESERVE93__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE93__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE94
#define DAGB0_RESERVE94__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE94__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE95
#define DAGB0_RESERVE95__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE95__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE96
#define DAGB0_RESERVE96__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE96__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE97
#define DAGB0_RESERVE97__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE97__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE98
#define DAGB0_RESERVE98__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE98__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE99
#define DAGB0_RESERVE99__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE99__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE100
#define DAGB0_RESERVE100__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE100__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE101
#define DAGB0_RESERVE101__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE101__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE102
#define DAGB0_RESERVE102__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE102__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE103
#define DAGB0_RESERVE103__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE103__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE104
#define DAGB0_RESERVE104__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE104__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE105
#define DAGB0_RESERVE105__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE105__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE106
#define DAGB0_RESERVE106__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE106__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE107
#define DAGB0_RESERVE107__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE107__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE108
#define DAGB0_RESERVE108__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE108__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE109
#define DAGB0_RESERVE109__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE109__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE110
#define DAGB0_RESERVE110__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE110__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE111
#define DAGB0_RESERVE111__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE111__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE112
#define DAGB0_RESERVE112__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE112__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE113
#define DAGB0_RESERVE113__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE113__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE114
#define DAGB0_RESERVE114__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE114__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE115
#define DAGB0_RESERVE115__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE115__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE116
#define DAGB0_RESERVE116__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE116__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE117
#define DAGB0_RESERVE117__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE117__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE118
#define DAGB0_RESERVE118__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE118__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE119
#define DAGB0_RESERVE119__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE119__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE120
#define DAGB0_RESERVE120__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE120__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE121
#define DAGB0_RESERVE121__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE121__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE122
#define DAGB0_RESERVE122__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE122__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE123
#define DAGB0_RESERVE123__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE123__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE124
#define DAGB0_RESERVE124__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE124__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE125
#define DAGB0_RESERVE125__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE125__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE126
#define DAGB0_RESERVE126__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE126__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE127
#define DAGB0_RESERVE127__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE127__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE128
#define DAGB0_RESERVE128__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE128__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE129
#define DAGB0_RESERVE129__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE129__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE130
#define DAGB0_RESERVE130__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE130__RESERVE_MASK 0xFFFFFFFFL
//DAGB0_RESERVE131
#define DAGB0_RESERVE131__RESERVE__SHIFT 0x0
#define DAGB0_RESERVE131__RESERVE_MASK 0xFFFFFFFFL

// addressBlock: mmhub_mmea_mmeadec
//MMEA0_DRAM_RD_CLI2GRP_MAP0
#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
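// Illustrative sketch, not part of the generated register map: every field in
// this file is described by a ...__SHIFT/..._MASK pair, so a field is read by
// masking then shifting down, and written by the inverse. The helpers below are
// hypothetical; the amdgpu driver expresses the same pattern through its
// REG_GET_FIELD()/REG_SET_FIELD() macros.
static inline unsigned int mmea0_get_cid0_group(unsigned int regval)
{
	return (regval & MMEA0_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP_MASK) >>
	       MMEA0_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT;
}

static inline unsigned int mmea0_set_cid0_group(unsigned int regval,
						unsigned int group)
{
	regval &= ~MMEA0_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP_MASK;
	regval |= (group << MMEA0_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT) &
		  MMEA0_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP_MASK;
	return regval;
}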
//MMEA0_DRAM_RD_CLI2GRP_MAP1
#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
//MMEA0_DRAM_WR_CLI2GRP_MAP0
#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
//MMEA0_DRAM_WR_CLI2GRP_MAP1
#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
//MMEA0_DRAM_RD_GRP2VC_MAP
#define MMEA0_DRAM_RD_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
#define MMEA0_DRAM_RD_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
#define MMEA0_DRAM_RD_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
#define MMEA0_DRAM_RD_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
#define MMEA0_DRAM_RD_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
#define MMEA0_DRAM_RD_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
#define MMEA0_DRAM_RD_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
#define MMEA0_DRAM_RD_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
//MMEA0_DRAM_WR_GRP2VC_MAP
#define MMEA0_DRAM_WR_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
#define MMEA0_DRAM_WR_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
#define MMEA0_DRAM_WR_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
#define MMEA0_DRAM_WR_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
#define MMEA0_DRAM_WR_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
#define MMEA0_DRAM_WR_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
#define MMEA0_DRAM_WR_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
#define MMEA0_DRAM_WR_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
//MMEA0_DRAM_RD_LAZY
#define MMEA0_DRAM_RD_LAZY__GROUP0_DELAY__SHIFT 0x0
#define MMEA0_DRAM_RD_LAZY__GROUP1_DELAY__SHIFT 0x3
#define MMEA0_DRAM_RD_LAZY__GROUP2_DELAY__SHIFT 0x6
#define MMEA0_DRAM_RD_LAZY__GROUP3_DELAY__SHIFT 0x9
#define MMEA0_DRAM_RD_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
#define MMEA0_DRAM_RD_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
#define MMEA0_DRAM_RD_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b
#define MMEA0_DRAM_RD_LAZY__GROUP0_DELAY_MASK 0x00000007L
#define MMEA0_DRAM_RD_LAZY__GROUP1_DELAY_MASK 0x00000038L
#define MMEA0_DRAM_RD_LAZY__GROUP2_DELAY_MASK 0x000001C0L
#define MMEA0_DRAM_RD_LAZY__GROUP3_DELAY_MASK 0x00000E00L
#define MMEA0_DRAM_RD_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
#define MMEA0_DRAM_RD_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
#define MMEA0_DRAM_RD_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L
//MMEA0_DRAM_WR_LAZY
#define MMEA0_DRAM_WR_LAZY__GROUP0_DELAY__SHIFT 0x0
#define MMEA0_DRAM_WR_LAZY__GROUP1_DELAY__SHIFT 0x3
#define MMEA0_DRAM_WR_LAZY__GROUP2_DELAY__SHIFT 0x6
#define MMEA0_DRAM_WR_LAZY__GROUP3_DELAY__SHIFT 0x9
#define MMEA0_DRAM_WR_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
#define MMEA0_DRAM_WR_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
#define MMEA0_DRAM_WR_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b
#define MMEA0_DRAM_WR_LAZY__GROUP0_DELAY_MASK 0x00000007L
#define MMEA0_DRAM_WR_LAZY__GROUP1_DELAY_MASK 0x00000038L
#define MMEA0_DRAM_WR_LAZY__GROUP2_DELAY_MASK 0x000001C0L
#define MMEA0_DRAM_WR_LAZY__GROUP3_DELAY_MASK 0x00000E00L
#define MMEA0_DRAM_WR_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
#define MMEA0_DRAM_WR_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
#define MMEA0_DRAM_WR_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L
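// Illustrative sketch (hypothetical helper, not generated content): a complete
// MMEA0_DRAM_RD_LAZY value can be composed by OR-ing each field, shifted into
// place and clipped by its mask so out-of-range inputs cannot corrupt
// neighbouring fields. The units of the delays and the semantics of the
// REQ_ACCUM_* fields are not stated in this header.
static inline unsigned int mmea0_pack_dram_rd_lazy(unsigned int g0,
						   unsigned int g1,
						   unsigned int g2,
						   unsigned int g3,
						   unsigned int thresh,
						   unsigned int timeout,
						   unsigned int idlemax)
{
	return ((g0 << MMEA0_DRAM_RD_LAZY__GROUP0_DELAY__SHIFT) &
		MMEA0_DRAM_RD_LAZY__GROUP0_DELAY_MASK) |
	       ((g1 << MMEA0_DRAM_RD_LAZY__GROUP1_DELAY__SHIFT) &
		MMEA0_DRAM_RD_LAZY__GROUP1_DELAY_MASK) |
	       ((g2 << MMEA0_DRAM_RD_LAZY__GROUP2_DELAY__SHIFT) &
		MMEA0_DRAM_RD_LAZY__GROUP2_DELAY_MASK) |
	       ((g3 << MMEA0_DRAM_RD_LAZY__GROUP3_DELAY__SHIFT) &
		MMEA0_DRAM_RD_LAZY__GROUP3_DELAY_MASK) |
	       ((thresh << MMEA0_DRAM_RD_LAZY__REQ_ACCUM_THRESH__SHIFT) &
		MMEA0_DRAM_RD_LAZY__REQ_ACCUM_THRESH_MASK) |
	       ((timeout << MMEA0_DRAM_RD_LAZY__REQ_ACCUM_TIMEOUT__SHIFT) &
		MMEA0_DRAM_RD_LAZY__REQ_ACCUM_TIMEOUT_MASK) |
	       ((idlemax << MMEA0_DRAM_RD_LAZY__REQ_ACCUM_IDLEMAX__SHIFT) &
		MMEA0_DRAM_RD_LAZY__REQ_ACCUM_IDLEMAX_MASK);
}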
//MMEA0_DRAM_RD_CAM_CNTL
#define MMEA0_DRAM_RD_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
#define MMEA0_DRAM_RD_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
#define MMEA0_DRAM_RD_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
#define MMEA0_DRAM_RD_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
#define MMEA0_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
#define MMEA0_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
#define MMEA0_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
#define MMEA0_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
#define MMEA0_DRAM_RD_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c
#define MMEA0_DRAM_RD_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
#define MMEA0_DRAM_RD_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
#define MMEA0_DRAM_RD_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
#define MMEA0_DRAM_RD_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
#define MMEA0_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
#define MMEA0_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
#define MMEA0_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
#define MMEA0_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
#define MMEA0_DRAM_RD_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L
//MMEA0_DRAM_WR_CAM_CNTL
#define MMEA0_DRAM_WR_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
#define MMEA0_DRAM_WR_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
#define MMEA0_DRAM_WR_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
#define MMEA0_DRAM_WR_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
#define MMEA0_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
#define MMEA0_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
#define MMEA0_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
#define MMEA0_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
#define MMEA0_DRAM_WR_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c
#define MMEA0_DRAM_WR_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
#define MMEA0_DRAM_WR_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
#define MMEA0_DRAM_WR_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
#define MMEA0_DRAM_WR_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
#define MMEA0_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
#define MMEA0_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
#define MMEA0_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
#define MMEA0_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
#define MMEA0_DRAM_WR_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L
//MMEA0_DRAM_PAGE_BURST
#define MMEA0_DRAM_PAGE_BURST__RD_LIMIT_LO__SHIFT 0x0
#define MMEA0_DRAM_PAGE_BURST__RD_LIMIT_HI__SHIFT 0x8
#define MMEA0_DRAM_PAGE_BURST__WR_LIMIT_LO__SHIFT 0x10
#define MMEA0_DRAM_PAGE_BURST__WR_LIMIT_HI__SHIFT 0x18
#define MMEA0_DRAM_PAGE_BURST__RD_LIMIT_LO_MASK 0x000000FFL
#define MMEA0_DRAM_PAGE_BURST__RD_LIMIT_HI_MASK 0x0000FF00L
#define MMEA0_DRAM_PAGE_BURST__WR_LIMIT_LO_MASK 0x00FF0000L
#define MMEA0_DRAM_PAGE_BURST__WR_LIMIT_HI_MASK 0xFF000000L
//MMEA0_DRAM_RD_PRI_AGE
#define MMEA0_DRAM_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
#define MMEA0_DRAM_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
#define MMEA0_DRAM_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
#define MMEA0_DRAM_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
#define MMEA0_DRAM_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
#define MMEA0_DRAM_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
#define MMEA0_DRAM_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
#define MMEA0_DRAM_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
#define MMEA0_DRAM_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
#define MMEA0_DRAM_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
#define MMEA0_DRAM_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
#define MMEA0_DRAM_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
#define MMEA0_DRAM_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
#define MMEA0_DRAM_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
#define MMEA0_DRAM_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
#define MMEA0_DRAM_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
//MMEA0_DRAM_WR_PRI_AGE
#define MMEA0_DRAM_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
#define MMEA0_DRAM_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
#define MMEA0_DRAM_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
#define MMEA0_DRAM_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
#define MMEA0_DRAM_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
#define MMEA0_DRAM_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
#define MMEA0_DRAM_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
#define MMEA0_DRAM_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
#define MMEA0_DRAM_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
#define MMEA0_DRAM_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
#define MMEA0_DRAM_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
#define MMEA0_DRAM_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
#define MMEA0_DRAM_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
#define MMEA0_DRAM_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
#define MMEA0_DRAM_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
#define MMEA0_DRAM_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
//MMEA0_DRAM_RD_PRI_QUEUING
#define MMEA0_DRAM_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
#define MMEA0_DRAM_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
#define MMEA0_DRAM_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
#define MMEA0_DRAM_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
#define MMEA0_DRAM_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
#define MMEA0_DRAM_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
#define MMEA0_DRAM_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
#define MMEA0_DRAM_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
//MMEA0_DRAM_WR_PRI_QUEUING
#define MMEA0_DRAM_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
#define MMEA0_DRAM_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
#define MMEA0_DRAM_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
#define MMEA0_DRAM_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
#define MMEA0_DRAM_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
#define MMEA0_DRAM_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
#define MMEA0_DRAM_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
#define MMEA0_DRAM_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
//MMEA0_DRAM_RD_PRI_FIXED
#define MMEA0_DRAM_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
#define MMEA0_DRAM_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
#define MMEA0_DRAM_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
#define MMEA0_DRAM_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
#define MMEA0_DRAM_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
#define MMEA0_DRAM_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
#define MMEA0_DRAM_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
#define MMEA0_DRAM_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
//MMEA0_DRAM_WR_PRI_FIXED
#define MMEA0_DRAM_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
#define MMEA0_DRAM_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
#define MMEA0_DRAM_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
#define MMEA0_DRAM_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
#define MMEA0_DRAM_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
#define MMEA0_DRAM_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
#define MMEA0_DRAM_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
#define MMEA0_DRAM_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
//MMEA0_DRAM_RD_PRI_URGENCY
#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
//MMEA0_DRAM_WR_PRI_URGENCY
#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
//MMEA0_DRAM_RD_PRI_QUANT_PRI1
#define MMEA0_DRAM_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
#define MMEA0_DRAM_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
#define MMEA0_DRAM_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
#define MMEA0_DRAM_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
#define MMEA0_DRAM_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
#define MMEA0_DRAM_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
#define MMEA0_DRAM_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
#define MMEA0_DRAM_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
//MMEA0_DRAM_RD_PRI_QUANT_PRI2
#define MMEA0_DRAM_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
#define MMEA0_DRAM_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
#define MMEA0_DRAM_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
#define MMEA0_DRAM_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
#define MMEA0_DRAM_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
#define MMEA0_DRAM_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
#define MMEA0_DRAM_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
#define MMEA0_DRAM_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
//MMEA0_DRAM_RD_PRI_QUANT_PRI3
#define MMEA0_DRAM_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
#define MMEA0_DRAM_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
#define MMEA0_DRAM_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
#define MMEA0_DRAM_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
#define MMEA0_DRAM_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
#define MMEA0_DRAM_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
#define MMEA0_DRAM_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
#define MMEA0_DRAM_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
//MMEA0_DRAM_WR_PRI_QUANT_PRI1
#define MMEA0_DRAM_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
#define MMEA0_DRAM_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
#define MMEA0_DRAM_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
#define MMEA0_DRAM_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
#define MMEA0_DRAM_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
#define MMEA0_DRAM_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
#define MMEA0_DRAM_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
#define MMEA0_DRAM_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
//MMEA0_DRAM_WR_PRI_QUANT_PRI2
#define MMEA0_DRAM_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
#define MMEA0_DRAM_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
#define MMEA0_DRAM_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
#define MMEA0_DRAM_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
#define MMEA0_DRAM_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
#define MMEA0_DRAM_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
#define MMEA0_DRAM_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
#define MMEA0_DRAM_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
//MMEA0_DRAM_WR_PRI_QUANT_PRI3
#define MMEA0_DRAM_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
#define MMEA0_DRAM_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
#define MMEA0_DRAM_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
#define MMEA0_DRAM_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
#define MMEA0_DRAM_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
#define MMEA0_DRAM_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
#define MMEA0_DRAM_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
#define MMEA0_DRAM_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
//MMEA0_ADDRNORM_BASE_ADDR0
#define MMEA0_ADDRNORM_BASE_ADDR0__ADDR_RNG_VAL__SHIFT 0x0
#define MMEA0_ADDRNORM_BASE_ADDR0__LGCY_MMIO_HOLE_EN__SHIFT 0x1
#define MMEA0_ADDRNORM_BASE_ADDR0__INTLV_NUM_CHAN__SHIFT 0x2
#define MMEA0_ADDRNORM_BASE_ADDR0__INTLV_NUM_DIES__SHIFT 0x6
#define MMEA0_ADDRNORM_BASE_ADDR0__INTLV_NUM_SOCKETS__SHIFT 0x8
#define MMEA0_ADDRNORM_BASE_ADDR0__INTLV_ADDR_SEL__SHIFT 0x9
#define MMEA0_ADDRNORM_BASE_ADDR0__BASE_ADDR__SHIFT 0xc
#define MMEA0_ADDRNORM_BASE_ADDR0__ADDR_RNG_VAL_MASK 0x00000001L
#define MMEA0_ADDRNORM_BASE_ADDR0__LGCY_MMIO_HOLE_EN_MASK 0x00000002L
#define MMEA0_ADDRNORM_BASE_ADDR0__INTLV_NUM_CHAN_MASK 0x0000003CL
#define MMEA0_ADDRNORM_BASE_ADDR0__INTLV_NUM_DIES_MASK 0x000000C0L
#define MMEA0_ADDRNORM_BASE_ADDR0__INTLV_NUM_SOCKETS_MASK 0x00000100L
#define MMEA0_ADDRNORM_BASE_ADDR0__INTLV_ADDR_SEL_MASK 0x00000E00L
#define MMEA0_ADDRNORM_BASE_ADDR0__BASE_ADDR_MASK 0xFFFFF000L
//MMEA0_ADDRNORM_LIMIT_ADDR0
#define MMEA0_ADDRNORM_LIMIT_ADDR0__DST_FABRIC_ID__SHIFT 0x0
#define MMEA0_ADDRNORM_LIMIT_ADDR0__LIMIT_ADDR__SHIFT 0xc
#define MMEA0_ADDRNORM_LIMIT_ADDR0__DST_FABRIC_ID_MASK 0x0000001FL
#define MMEA0_ADDRNORM_LIMIT_ADDR0__LIMIT_ADDR_MASK 0xFFFFF000L
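// Illustrative sketch (hypothetical helper): each ADDRNORM BASE/LIMIT register
// pair describes one normalized address range. The helper only extracts the
// raw 20-bit BASE_ADDR/LIMIT_ADDR field values; the physical-address
// granularity those fields encode is not stated in this header, so no unit
// conversion is attempted.
static inline void mmea0_addrnorm_range0(unsigned int base_reg,
					 unsigned int limit_reg,
					 unsigned int *base_field,
					 unsigned int *limit_field)
{
	*base_field = (base_reg & MMEA0_ADDRNORM_BASE_ADDR0__BASE_ADDR_MASK) >>
		      MMEA0_ADDRNORM_BASE_ADDR0__BASE_ADDR__SHIFT;
	*limit_field = (limit_reg & MMEA0_ADDRNORM_LIMIT_ADDR0__LIMIT_ADDR_MASK) >>
		       MMEA0_ADDRNORM_LIMIT_ADDR0__LIMIT_ADDR__SHIFT;
}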
//MMEA0_ADDRNORM_BASE_ADDR1
#define MMEA0_ADDRNORM_BASE_ADDR1__ADDR_RNG_VAL__SHIFT 0x0
#define MMEA0_ADDRNORM_BASE_ADDR1__LGCY_MMIO_HOLE_EN__SHIFT 0x1
#define MMEA0_ADDRNORM_BASE_ADDR1__INTLV_NUM_CHAN__SHIFT 0x2
#define MMEA0_ADDRNORM_BASE_ADDR1__INTLV_NUM_DIES__SHIFT 0x6
#define MMEA0_ADDRNORM_BASE_ADDR1__INTLV_NUM_SOCKETS__SHIFT 0x8
#define MMEA0_ADDRNORM_BASE_ADDR1__INTLV_ADDR_SEL__SHIFT 0x9
#define MMEA0_ADDRNORM_BASE_ADDR1__BASE_ADDR__SHIFT 0xc
#define MMEA0_ADDRNORM_BASE_ADDR1__ADDR_RNG_VAL_MASK 0x00000001L
#define MMEA0_ADDRNORM_BASE_ADDR1__LGCY_MMIO_HOLE_EN_MASK 0x00000002L
#define MMEA0_ADDRNORM_BASE_ADDR1__INTLV_NUM_CHAN_MASK 0x0000003CL
#define MMEA0_ADDRNORM_BASE_ADDR1__INTLV_NUM_DIES_MASK 0x000000C0L
#define MMEA0_ADDRNORM_BASE_ADDR1__INTLV_NUM_SOCKETS_MASK 0x00000100L
#define MMEA0_ADDRNORM_BASE_ADDR1__INTLV_ADDR_SEL_MASK 0x00000E00L
#define MMEA0_ADDRNORM_BASE_ADDR1__BASE_ADDR_MASK 0xFFFFF000L
//MMEA0_ADDRNORM_LIMIT_ADDR1
#define MMEA0_ADDRNORM_LIMIT_ADDR1__DST_FABRIC_ID__SHIFT 0x0
#define MMEA0_ADDRNORM_LIMIT_ADDR1__LIMIT_ADDR__SHIFT 0xc
#define MMEA0_ADDRNORM_LIMIT_ADDR1__DST_FABRIC_ID_MASK 0x0000001FL
#define MMEA0_ADDRNORM_LIMIT_ADDR1__LIMIT_ADDR_MASK 0xFFFFF000L
//MMEA0_ADDRNORM_OFFSET_ADDR1
#define MMEA0_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_EN__SHIFT 0x0
#define MMEA0_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET__SHIFT 0x14
#define MMEA0_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_EN_MASK 0x00000001L
#define MMEA0_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_MASK 0xFFF00000L
//MMEA0_ADDRNORMDRAM_HOLE_CNTL
#define MMEA0_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_VALID__SHIFT 0x0
#define MMEA0_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_OFFSET__SHIFT 0x7
#define MMEA0_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_VALID_MASK 0x00000001L
#define MMEA0_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_OFFSET_MASK 0x0000FF80L
//MMEA0_ADDRNORMDRAM_NP2_CHANNEL_CFG
#define MMEA0_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE0__SHIFT 0x0
#define MMEA0_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE1__SHIFT 0x6
#define MMEA0_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE0_MASK 0x0000003FL
#define MMEA0_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE1_MASK 0x00000FC0L
//MMEA0_ADDRDEC_BANK_CFG
#define MMEA0_ADDRDEC_BANK_CFG__BANK_MASK_DRAM__SHIFT 0x0
#define MMEA0_ADDRDEC_BANK_CFG__BANK_MASK_GMI__SHIFT 0x5
#define MMEA0_ADDRDEC_BANK_CFG__BANKGROUP_SEL_DRAM__SHIFT 0xa
#define MMEA0_ADDRDEC_BANK_CFG__BANKGROUP_SEL_GMI__SHIFT 0xd
#define MMEA0_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_DRAM__SHIFT 0x10
#define MMEA0_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_GMI__SHIFT 0x11
#define MMEA0_ADDRDEC_BANK_CFG__BANK_MASK_DRAM_MASK 0x0000001FL
#define MMEA0_ADDRDEC_BANK_CFG__BANK_MASK_GMI_MASK 0x000003E0L
#define MMEA0_ADDRDEC_BANK_CFG__BANKGROUP_SEL_DRAM_MASK 0x00001C00L
#define MMEA0_ADDRDEC_BANK_CFG__BANKGROUP_SEL_GMI_MASK 0x0000E000L
#define MMEA0_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_DRAM_MASK 0x00010000L
#define MMEA0_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_GMI_MASK 0x00020000L
//MMEA0_ADDRDEC_MISC_CFG
#define MMEA0_ADDRDEC_MISC_CFG__VCM_EN0__SHIFT 0x0
#define MMEA0_ADDRDEC_MISC_CFG__VCM_EN1__SHIFT 0x1
#define MMEA0_ADDRDEC_MISC_CFG__VCM_EN2__SHIFT 0x2
#define MMEA0_ADDRDEC_MISC_CFG__VCM_EN3__SHIFT 0x3
#define MMEA0_ADDRDEC_MISC_CFG__VCM_EN4__SHIFT 0x4
#define MMEA0_ADDRDEC_MISC_CFG__PCH_MASK_DRAM__SHIFT 0x8
#define MMEA0_ADDRDEC_MISC_CFG__PCH_MASK_GMI__SHIFT 0x9
#define MMEA0_ADDRDEC_MISC_CFG__CH_MASK_DRAM__SHIFT 0xc
#define MMEA0_ADDRDEC_MISC_CFG__CH_MASK_GMI__SHIFT 0x11
#define MMEA0_ADDRDEC_MISC_CFG__CS_MASK_DRAM__SHIFT 0x16
#define MMEA0_ADDRDEC_MISC_CFG__CS_MASK_GMI__SHIFT 0x18
#define MMEA0_ADDRDEC_MISC_CFG__RM_MASK_DRAM__SHIFT 0x1a
#define MMEA0_ADDRDEC_MISC_CFG__RM_MASK_GMI__SHIFT 0x1d
#define MMEA0_ADDRDEC_MISC_CFG__VCM_EN0_MASK 0x00000001L
#define MMEA0_ADDRDEC_MISC_CFG__VCM_EN1_MASK 0x00000002L
#define MMEA0_ADDRDEC_MISC_CFG__VCM_EN2_MASK 0x00000004L
#define MMEA0_ADDRDEC_MISC_CFG__VCM_EN3_MASK 0x00000008L
#define MMEA0_ADDRDEC_MISC_CFG__VCM_EN4_MASK 0x00000010L
#define MMEA0_ADDRDEC_MISC_CFG__PCH_MASK_DRAM_MASK 0x00000100L
#define MMEA0_ADDRDEC_MISC_CFG__PCH_MASK_GMI_MASK 0x00000200L
#define MMEA0_ADDRDEC_MISC_CFG__CH_MASK_DRAM_MASK 0x0001F000L
#define MMEA0_ADDRDEC_MISC_CFG__CH_MASK_GMI_MASK 0x003E0000L
#define MMEA0_ADDRDEC_MISC_CFG__CS_MASK_DRAM_MASK 0x00C00000L
#define MMEA0_ADDRDEC_MISC_CFG__CS_MASK_GMI_MASK 0x03000000L
#define MMEA0_ADDRDEC_MISC_CFG__RM_MASK_DRAM_MASK 0x1C000000L
#define MMEA0_ADDRDEC_MISC_CFG__RM_MASK_GMI_MASK 0xE0000000L
//MMEA0_ADDRDECDRAM_ADDR_HASH_BANK0
#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK0__XOR_ENABLE__SHIFT 0x0
#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK0__COL_XOR__SHIFT 0x1
#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK0__ROW_XOR__SHIFT 0xe
#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK0__XOR_ENABLE_MASK 0x00000001L
#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK0__COL_XOR_MASK 0x00003FFEL
#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK0__ROW_XOR_MASK 0xFFFFC000L
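// Illustrative sketch (assumptions labeled): the field names XOR_ENABLE,
// COL_XOR and ROW_XOR suggest an XOR (parity) address hash, where a hashed
// bank bit is the parity of the column/row address bits selected by the two
// selector fields. How column/row address bits line up with the selector bit
// positions is an assumption made here purely for illustration.
static inline unsigned int parity32(unsigned int v)
{
	v ^= v >> 16;
	v ^= v >> 8;
	v ^= v >> 4;
	v ^= v >> 2;
	v ^= v >> 1;
	return v & 1;
}

static inline unsigned int mmea0_hash_bank0_bit(unsigned int col_bits,
						unsigned int row_bits,
						unsigned int hash_reg)
{
	unsigned int col_sel, row_sel;

	if (!(hash_reg & MMEA0_ADDRDECDRAM_ADDR_HASH_BANK0__XOR_ENABLE_MASK))
		return 0;
	col_sel = (hash_reg & MMEA0_ADDRDECDRAM_ADDR_HASH_BANK0__COL_XOR_MASK) >>
		  MMEA0_ADDRDECDRAM_ADDR_HASH_BANK0__COL_XOR__SHIFT;
	row_sel = (hash_reg & MMEA0_ADDRDECDRAM_ADDR_HASH_BANK0__ROW_XOR_MASK) >>
		  MMEA0_ADDRDECDRAM_ADDR_HASH_BANK0__ROW_XOR__SHIFT;
	return parity32(col_bits & col_sel) ^ parity32(row_bits & row_sel);
}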
//MMEA0_ADDRDECDRAM_ADDR_HASH_BANK1
#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK1__XOR_ENABLE__SHIFT 0x0
#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK1__COL_XOR__SHIFT 0x1
#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK1__ROW_XOR__SHIFT 0xe
#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK1__XOR_ENABLE_MASK 0x00000001L
#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK1__COL_XOR_MASK 0x00003FFEL
#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK1__ROW_XOR_MASK 0xFFFFC000L
//MMEA0_ADDRDECDRAM_ADDR_HASH_BANK2
#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK2__XOR_ENABLE__SHIFT 0x0
#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK2__COL_XOR__SHIFT 0x1
#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK2__ROW_XOR__SHIFT 0xe
#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK2__XOR_ENABLE_MASK 0x00000001L
#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK2__COL_XOR_MASK 0x00003FFEL
#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK2__ROW_XOR_MASK 0xFFFFC000L
//MMEA0_ADDRDECDRAM_ADDR_HASH_BANK3
#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK3__XOR_ENABLE__SHIFT 0x0
#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK3__COL_XOR__SHIFT 0x1
#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK3__ROW_XOR__SHIFT 0xe
#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK3__XOR_ENABLE_MASK 0x00000001L
#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK3__COL_XOR_MASK 0x00003FFEL
#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK3__ROW_XOR_MASK 0xFFFFC000L
//MMEA0_ADDRDECDRAM_ADDR_HASH_BANK4
#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK4__XOR_ENABLE__SHIFT 0x0
#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK4__COL_XOR__SHIFT 0x1
#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK4__ROW_XOR__SHIFT 0xe
#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK4__XOR_ENABLE_MASK 0x00000001L
#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK4__COL_XOR_MASK 0x00003FFEL
#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK4__ROW_XOR_MASK 0xFFFFC000L
//MMEA0_ADDRDECDRAM_ADDR_HASH_PC
#define MMEA0_ADDRDECDRAM_ADDR_HASH_PC__XOR_ENABLE__SHIFT 0x0
#define MMEA0_ADDRDECDRAM_ADDR_HASH_PC__COL_XOR__SHIFT 0x1
#define MMEA0_ADDRDECDRAM_ADDR_HASH_PC__ROW_XOR__SHIFT 0xe
#define MMEA0_ADDRDECDRAM_ADDR_HASH_PC__XOR_ENABLE_MASK 0x00000001L
#define MMEA0_ADDRDECDRAM_ADDR_HASH_PC__COL_XOR_MASK 0x00003FFEL
#define MMEA0_ADDRDECDRAM_ADDR_HASH_PC__ROW_XOR_MASK 0xFFFFC000L
//MMEA0_ADDRDECDRAM_ADDR_HASH_PC2
#define MMEA0_ADDRDECDRAM_ADDR_HASH_PC2__BANK_XOR__SHIFT 0x0
#define MMEA0_ADDRDECDRAM_ADDR_HASH_PC2__BANK_XOR_MASK 0x0000001FL
//MMEA0_ADDRDECDRAM_ADDR_HASH_CS0
#define MMEA0_ADDRDECDRAM_ADDR_HASH_CS0__XOR_ENABLE__SHIFT 0x0
#define MMEA0_ADDRDECDRAM_ADDR_HASH_CS0__NA_XOR__SHIFT 0x1
#define MMEA0_ADDRDECDRAM_ADDR_HASH_CS0__XOR_ENABLE_MASK 0x00000001L
#define MMEA0_ADDRDECDRAM_ADDR_HASH_CS0__NA_XOR_MASK 0xFFFFFFFEL
//MMEA0_ADDRDECDRAM_ADDR_HASH_CS1
#define MMEA0_ADDRDECDRAM_ADDR_HASH_CS1__XOR_ENABLE__SHIFT 0x0
#define MMEA0_ADDRDECDRAM_ADDR_HASH_CS1__NA_XOR__SHIFT 0x1
#define MMEA0_ADDRDECDRAM_ADDR_HASH_CS1__XOR_ENABLE_MASK 0x00000001L
#define MMEA0_ADDRDECDRAM_ADDR_HASH_CS1__NA_XOR_MASK 0xFFFFFFFEL
//MMEA0_ADDRDECDRAM_HARVEST_ENABLE
#define MMEA0_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_EN__SHIFT 0x0
#define MMEA0_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_VAL__SHIFT 0x1
#define MMEA0_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_EN__SHIFT 0x2
#define MMEA0_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_VAL__SHIFT 0x3
#define MMEA0_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_EN_MASK 0x00000001L
#define MMEA0_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_VAL_MASK 0x00000002L
#define MMEA0_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_EN_MASK 0x00000004L
#define MMEA0_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_VAL_MASK 0x00000008L
//MMEA0_ADDRDECDRAM_HARVNA_ADDR_START0
#define MMEA0_ADDRDECDRAM_HARVNA_ADDR_START0__START__SHIFT 0x0
#define MMEA0_ADDRDECDRAM_HARVNA_ADDR_START0__BANK_XOR__SHIFT 0x1c
#define MMEA0_ADDRDECDRAM_HARVNA_ADDR_START0__START_MASK 0x000FFFFFL
#define MMEA0_ADDRDECDRAM_HARVNA_ADDR_START0__BANK_XOR_MASK 0xF0000000L
//MMEA0_ADDRDECDRAM_HARVNA_ADDR_END0
#define MMEA0_ADDRDECDRAM_HARVNA_ADDR_END0__END__SHIFT 0x0
#define MMEA0_ADDRDECDRAM_HARVNA_ADDR_END0__END_MASK 0x000FFFFFL
//MMEA0_ADDRDECDRAM_HARVNA_ADDR_START1
#define MMEA0_ADDRDECDRAM_HARVNA_ADDR_START1__START__SHIFT 0x0
#define MMEA0_ADDRDECDRAM_HARVNA_ADDR_START1__BANK_XOR__SHIFT 0x1c
#define MMEA0_ADDRDECDRAM_HARVNA_ADDR_START1__START_MASK 0x000FFFFFL
#define MMEA0_ADDRDECDRAM_HARVNA_ADDR_START1__BANK_XOR_MASK 0xF0000000L
//MMEA0_ADDRDECDRAM_HARVNA_ADDR_END1
#define MMEA0_ADDRDECDRAM_HARVNA_ADDR_END1__END__SHIFT 0x0
#define MMEA0_ADDRDECDRAM_HARVNA_ADDR_END1__END_MASK 0x000FFFFFL
//MMEA0_ADDRDEC0_BASE_ADDR_CS0
#define MMEA0_ADDRDEC0_BASE_ADDR_CS0__CS_EN__SHIFT 0x0
#define MMEA0_ADDRDEC0_BASE_ADDR_CS0__BASE_ADDR__SHIFT 0x1
#define MMEA0_ADDRDEC0_BASE_ADDR_CS0__CS_EN_MASK 0x00000001L
#define MMEA0_ADDRDEC0_BASE_ADDR_CS0__BASE_ADDR_MASK 0xFFFFFFFEL
//MMEA0_ADDRDEC0_BASE_ADDR_CS1
#define MMEA0_ADDRDEC0_BASE_ADDR_CS1__CS_EN__SHIFT 0x0
#define MMEA0_ADDRDEC0_BASE_ADDR_CS1__BASE_ADDR__SHIFT 0x1
#define MMEA0_ADDRDEC0_BASE_ADDR_CS1__CS_EN_MASK 0x00000001L
#define MMEA0_ADDRDEC0_BASE_ADDR_CS1__BASE_ADDR_MASK 0xFFFFFFFEL
//MMEA0_ADDRDEC0_BASE_ADDR_CS2
#define MMEA0_ADDRDEC0_BASE_ADDR_CS2__CS_EN__SHIFT 0x0
#define MMEA0_ADDRDEC0_BASE_ADDR_CS2__BASE_ADDR__SHIFT 0x1
#define MMEA0_ADDRDEC0_BASE_ADDR_CS2__CS_EN_MASK 0x00000001L
#define MMEA0_ADDRDEC0_BASE_ADDR_CS2__BASE_ADDR_MASK 0xFFFFFFFEL
//MMEA0_ADDRDEC0_BASE_ADDR_CS3
#define MMEA0_ADDRDEC0_BASE_ADDR_CS3__CS_EN__SHIFT 0x0
#define MMEA0_ADDRDEC0_BASE_ADDR_CS3__BASE_ADDR__SHIFT 0x1
#define MMEA0_ADDRDEC0_BASE_ADDR_CS3__CS_EN_MASK 0x00000001L
#define MMEA0_ADDRDEC0_BASE_ADDR_CS3__BASE_ADDR_MASK 0xFFFFFFFEL
//MMEA0_ADDRDEC0_BASE_ADDR_SECCS0
#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS0__CS_EN__SHIFT 0x0
#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS0__BASE_ADDR__SHIFT 0x1
#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS0__CS_EN_MASK 0x00000001L
#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS0__BASE_ADDR_MASK 0xFFFFFFFEL
//MMEA0_ADDRDEC0_BASE_ADDR_SECCS1
#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS1__CS_EN__SHIFT 0x0
#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS1__BASE_ADDR__SHIFT 0x1
#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS1__CS_EN_MASK 0x00000001L
#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS1__BASE_ADDR_MASK 0xFFFFFFFEL
//MMEA0_ADDRDEC0_BASE_ADDR_SECCS2
#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS2__CS_EN__SHIFT 0x0
#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS2__BASE_ADDR__SHIFT 0x1
#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS2__CS_EN_MASK 0x00000001L
#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS2__BASE_ADDR_MASK 0xFFFFFFFEL
//MMEA0_ADDRDEC0_BASE_ADDR_SECCS3
#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS3__CS_EN__SHIFT 0x0
#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS3__BASE_ADDR__SHIFT 0x1
#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS3__CS_EN_MASK 0x00000001L
#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS3__BASE_ADDR_MASK 0xFFFFFFFEL
//MMEA0_ADDRDEC0_ADDR_MASK_CS01
#define MMEA0_ADDRDEC0_ADDR_MASK_CS01__ADDR_MASK__SHIFT 0x1
#define MMEA0_ADDRDEC0_ADDR_MASK_CS01__ADDR_MASK_MASK 0xFFFFFFFEL
//MMEA0_ADDRDEC0_ADDR_MASK_CS23
#define MMEA0_ADDRDEC0_ADDR_MASK_CS23__ADDR_MASK__SHIFT 0x1
#define MMEA0_ADDRDEC0_ADDR_MASK_CS23__ADDR_MASK_MASK 0xFFFFFFFEL
//MMEA0_ADDRDEC0_ADDR_MASK_SECCS01
#define MMEA0_ADDRDEC0_ADDR_MASK_SECCS01__ADDR_MASK__SHIFT 0x1
#define MMEA0_ADDRDEC0_ADDR_MASK_SECCS01__ADDR_MASK_MASK 0xFFFFFFFEL
//MMEA0_ADDRDEC0_ADDR_MASK_SECCS23
#define MMEA0_ADDRDEC0_ADDR_MASK_SECCS23__ADDR_MASK__SHIFT 0x1
#define MMEA0_ADDRDEC0_ADDR_MASK_SECCS23__ADDR_MASK_MASK 0xFFFFFFFEL
//MMEA0_ADDRDEC0_ADDR_CFG_CS01
#define MMEA0_ADDRDEC0_ADDR_CFG_CS01__NUM_BANK_GROUPS__SHIFT 0x2
#define MMEA0_ADDRDEC0_ADDR_CFG_CS01__NUM_RM__SHIFT 0x4
#define MMEA0_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_LO__SHIFT 0x8
#define MMEA0_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_HI__SHIFT 0xc
#define MMEA0_ADDRDEC0_ADDR_CFG_CS01__NUM_COL__SHIFT 0x10
#define MMEA0_ADDRDEC0_ADDR_CFG_CS01__NUM_BANKS__SHIFT 0x14
#define MMEA0_ADDRDEC0_ADDR_CFG_CS01__HI_COL_EN__SHIFT 0x1f
#define MMEA0_ADDRDEC0_ADDR_CFG_CS01__NUM_BANK_GROUPS_MASK 0x0000000CL
#define MMEA0_ADDRDEC0_ADDR_CFG_CS01__NUM_RM_MASK 0x00000030L
#define MMEA0_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_LO_MASK 0x00000F00L
#define MMEA0_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_HI_MASK 0x0000F000L
#define MMEA0_ADDRDEC0_ADDR_CFG_CS01__NUM_COL_MASK 0x000F0000L
#define MMEA0_ADDRDEC0_ADDR_CFG_CS01__NUM_BANKS_MASK 0x00300000L
#define MMEA0_ADDRDEC0_ADDR_CFG_CS01__HI_COL_EN_MASK 0x80000000L
//MMEA0_ADDRDEC0_ADDR_CFG_CS23
#define MMEA0_ADDRDEC0_ADDR_CFG_CS23__NUM_BANK_GROUPS__SHIFT 0x2
#define MMEA0_ADDRDEC0_ADDR_CFG_CS23__NUM_RM__SHIFT 0x4
#define MMEA0_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_LO__SHIFT 0x8
#define MMEA0_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_HI__SHIFT 0xc
#define MMEA0_ADDRDEC0_ADDR_CFG_CS23__NUM_COL__SHIFT 0x10
#define MMEA0_ADDRDEC0_ADDR_CFG_CS23__NUM_BANKS__SHIFT 0x14
#define MMEA0_ADDRDEC0_ADDR_CFG_CS23__HI_COL_EN__SHIFT 0x1f
#define MMEA0_ADDRDEC0_ADDR_CFG_CS23__NUM_BANK_GROUPS_MASK 0x0000000CL
#define MMEA0_ADDRDEC0_ADDR_CFG_CS23__NUM_RM_MASK 0x00000030L
#define MMEA0_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_LO_MASK 0x00000F00L
#define MMEA0_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_HI_MASK 0x0000F000L
#define MMEA0_ADDRDEC0_ADDR_CFG_CS23__NUM_COL_MASK 0x000F0000L
#define MMEA0_ADDRDEC0_ADDR_CFG_CS23__NUM_BANKS_MASK 0x00300000L
#define MMEA0_ADDRDEC0_ADDR_CFG_CS23__HI_COL_EN_MASK 0x80000000L
//MMEA0_ADDRDEC0_ADDR_SEL_CS01
#define MMEA0_ADDRDEC0_ADDR_SEL_CS01__BANK0__SHIFT 0x0
#define MMEA0_ADDRDEC0_ADDR_SEL_CS01__BANK1__SHIFT 0x4
#define MMEA0_ADDRDEC0_ADDR_SEL_CS01__BANK2__SHIFT 0x8
#define MMEA0_ADDRDEC0_ADDR_SEL_CS01__BANK3__SHIFT 0xc
#define MMEA0_ADDRDEC0_ADDR_SEL_CS01__BANK4__SHIFT 0x10
#define MMEA0_ADDRDEC0_ADDR_SEL_CS01__ROW_LO__SHIFT 0x18
#define MMEA0_ADDRDEC0_ADDR_SEL_CS01__ROW_HI__SHIFT 0x1c
#define MMEA0_ADDRDEC0_ADDR_SEL_CS01__BANK0_MASK 0x0000000FL
#define MMEA0_ADDRDEC0_ADDR_SEL_CS01__BANK1_MASK 0x000000F0L
#define MMEA0_ADDRDEC0_ADDR_SEL_CS01__BANK2_MASK 0x00000F00L
#define MMEA0_ADDRDEC0_ADDR_SEL_CS01__BANK3_MASK 0x0000F000L
#define MMEA0_ADDRDEC0_ADDR_SEL_CS01__BANK4_MASK 0x001F0000L
#define MMEA0_ADDRDEC0_ADDR_SEL_CS01__ROW_LO_MASK 0x0F000000L
#define MMEA0_ADDRDEC0_ADDR_SEL_CS01__ROW_HI_MASK 0xF0000000L
//MMEA0_ADDRDEC0_ADDR_SEL_CS23
#define MMEA0_ADDRDEC0_ADDR_SEL_CS23__BANK0__SHIFT 0x0
#define MMEA0_ADDRDEC0_ADDR_SEL_CS23__BANK1__SHIFT 0x4
#define MMEA0_ADDRDEC0_ADDR_SEL_CS23__BANK2__SHIFT 0x8
#define MMEA0_ADDRDEC0_ADDR_SEL_CS23__BANK3__SHIFT 0xc
#define MMEA0_ADDRDEC0_ADDR_SEL_CS23__BANK4__SHIFT 0x10
#define MMEA0_ADDRDEC0_ADDR_SEL_CS23__ROW_LO__SHIFT 0x18
#define MMEA0_ADDRDEC0_ADDR_SEL_CS23__ROW_HI__SHIFT 0x1c
#define MMEA0_ADDRDEC0_ADDR_SEL_CS23__BANK0_MASK 0x0000000FL
#define MMEA0_ADDRDEC0_ADDR_SEL_CS23__BANK1_MASK 0x000000F0L
#define MMEA0_ADDRDEC0_ADDR_SEL_CS23__BANK2_MASK 0x00000F00L
#define MMEA0_ADDRDEC0_ADDR_SEL_CS23__BANK3_MASK 0x0000F000L
#define MMEA0_ADDRDEC0_ADDR_SEL_CS23__BANK4_MASK 0x001F0000L
#define MMEA0_ADDRDEC0_ADDR_SEL_CS23__ROW_LO_MASK 0x0F000000L
#define MMEA0_ADDRDEC0_ADDR_SEL_CS23__ROW_HI_MASK 0xF0000000L
//MMEA0_ADDRDEC0_COL_SEL_LO_CS01
#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL0__SHIFT 0x0
#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL1__SHIFT 0x4
#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL2__SHIFT 0x8
#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL3__SHIFT 0xc
#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL4__SHIFT 0x10
#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL5__SHIFT 0x14
#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL6__SHIFT 0x18
#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL7__SHIFT 0x1c
#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL0_MASK 0x0000000FL
#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL1_MASK 0x000000F0L
#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL2_MASK 0x00000F00L
#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL3_MASK 0x0000F000L
#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL4_MASK 0x000F0000L
#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL5_MASK 0x00F00000L
#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL6_MASK 0x0F000000L
#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL7_MASK 0xF0000000L
//MMEA0_ADDRDEC0_COL_SEL_LO_CS23
#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL0__SHIFT 0x0
#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL1__SHIFT 0x4
#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL2__SHIFT 0x8
#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL3__SHIFT 0xc
#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL4__SHIFT 0x10
#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL5__SHIFT 0x14
#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL6__SHIFT 0x18
#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL7__SHIFT 0x1c
#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL0_MASK 0x0000000FL
#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL1_MASK 0x000000F0L
#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL2_MASK 0x00000F00L
#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL3_MASK 0x0000F000L
#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL4_MASK 0x000F0000L
#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL5_MASK 0x00F00000L
#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL6_MASK 0x0F000000L
#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL7_MASK 0xF0000000L
//MMEA0_ADDRDEC0_COL_SEL_HI_CS01
#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL8__SHIFT 0x0
#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL9__SHIFT 0x4
#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL10__SHIFT 0x8
#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL11__SHIFT 0xc
#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL12__SHIFT 0x10
#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL13__SHIFT 0x14
#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL14__SHIFT 0x18
#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL15__SHIFT 0x1c
#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL8_MASK 0x0000000FL
#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL9_MASK 0x000000F0L
#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL10_MASK 0x00000F00L
#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL11_MASK 0x0000F000L
#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL12_MASK 0x000F0000L
#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL13_MASK 0x00F00000L
#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL14_MASK 0x0F000000L
#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL15_MASK 0xF0000000L
//MMEA0_ADDRDEC0_COL_SEL_HI_CS23
#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL8__SHIFT 0x0
#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL9__SHIFT 0x4
#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL10__SHIFT 0x8
#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL11__SHIFT 0xc
#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL12__SHIFT 0x10
#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL13__SHIFT 0x14
#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL14__SHIFT 0x18
#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL15__SHIFT 0x1c
#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL8_MASK 0x0000000FL
#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL9_MASK 0x000000F0L
#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL10_MASK 0x00000F00L
#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL11_MASK 0x0000F000L
#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL12_MASK 0x000F0000L
#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL13_MASK 0x00F00000L
#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL14_MASK 0x0F000000L
#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL15_MASK 0xF0000000L
//MMEA0_ADDRDEC0_RM_SEL_CS01
#define MMEA0_ADDRDEC0_RM_SEL_CS01__RM0__SHIFT 0x0
#define MMEA0_ADDRDEC0_RM_SEL_CS01__RM1__SHIFT 0x4
#define MMEA0_ADDRDEC0_RM_SEL_CS01__RM2__SHIFT 0x8
#define MMEA0_ADDRDEC0_RM_SEL_CS01__CHAN_BIT__SHIFT 0xc
#define MMEA0_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
#define MMEA0_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
#define MMEA0_ADDRDEC0_RM_SEL_CS01__RM0_MASK 0x0000000FL
#define MMEA0_ADDRDEC0_RM_SEL_CS01__RM1_MASK 0x000000F0L
#define MMEA0_ADDRDEC0_RM_SEL_CS01__RM2_MASK 0x00000F00L
#define MMEA0_ADDRDEC0_RM_SEL_CS01__CHAN_BIT_MASK 0x0000F000L
#define MMEA0_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
#define MMEA0_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
//MMEA0_ADDRDEC0_RM_SEL_CS23
#define MMEA0_ADDRDEC0_RM_SEL_CS23__RM0__SHIFT 0x0
#define MMEA0_ADDRDEC0_RM_SEL_CS23__RM1__SHIFT 0x4
#define MMEA0_ADDRDEC0_RM_SEL_CS23__RM2__SHIFT 0x8
#define MMEA0_ADDRDEC0_RM_SEL_CS23__CHAN_BIT__SHIFT 0xc
#define MMEA0_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
#define MMEA0_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
#define MMEA0_ADDRDEC0_RM_SEL_CS23__RM0_MASK 0x0000000FL
#define MMEA0_ADDRDEC0_RM_SEL_CS23__RM1_MASK 0x000000F0L
#define MMEA0_ADDRDEC0_RM_SEL_CS23__RM2_MASK 0x00000F00L
#define MMEA0_ADDRDEC0_RM_SEL_CS23__CHAN_BIT_MASK 0x0000F000L
#define MMEA0_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
#define MMEA0_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
//MMEA0_ADDRDEC0_RM_SEL_SECCS01
#define MMEA0_ADDRDEC0_RM_SEL_SECCS01__RM0__SHIFT 0x0
#define MMEA0_ADDRDEC0_RM_SEL_SECCS01__RM1__SHIFT 0x4
#define MMEA0_ADDRDEC0_RM_SEL_SECCS01__RM2__SHIFT 0x8
#define MMEA0_ADDRDEC0_RM_SEL_SECCS01__CHAN_BIT__SHIFT 0xc
#define MMEA0_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
#define MMEA0_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
#define MMEA0_ADDRDEC0_RM_SEL_SECCS01__RM0_MASK 0x0000000FL
#define MMEA0_ADDRDEC0_RM_SEL_SECCS01__RM1_MASK 0x000000F0L
#define MMEA0_ADDRDEC0_RM_SEL_SECCS01__RM2_MASK 0x00000F00L
#define MMEA0_ADDRDEC0_RM_SEL_SECCS01__CHAN_BIT_MASK 0x0000F000L
#define MMEA0_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
#define MMEA0_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
//MMEA0_ADDRDEC0_RM_SEL_SECCS23
#define MMEA0_ADDRDEC0_RM_SEL_SECCS23__RM0__SHIFT 0x0
#define MMEA0_ADDRDEC0_RM_SEL_SECCS23__RM1__SHIFT 0x4
#define MMEA0_ADDRDEC0_RM_SEL_SECCS23__RM2__SHIFT 0x8
#define MMEA0_ADDRDEC0_RM_SEL_SECCS23__CHAN_BIT__SHIFT 0xc
#define MMEA0_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
#define MMEA0_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
#define MMEA0_ADDRDEC0_RM_SEL_SECCS23__RM0_MASK 0x0000000FL
#define MMEA0_ADDRDEC0_RM_SEL_SECCS23__RM1_MASK 0x000000F0L
#define MMEA0_ADDRDEC0_RM_SEL_SECCS23__RM2_MASK 0x00000F00L
#define MMEA0_ADDRDEC0_RM_SEL_SECCS23__CHAN_BIT_MASK 0x0000F000L
#define MMEA0_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
#define MMEA0_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
//MMEA0_ADDRDEC1_BASE_ADDR_CS0
#define MMEA0_ADDRDEC1_BASE_ADDR_CS0__CS_EN__SHIFT 0x0
#define MMEA0_ADDRDEC1_BASE_ADDR_CS0__BASE_ADDR__SHIFT 0x1
#define MMEA0_ADDRDEC1_BASE_ADDR_CS0__CS_EN_MASK 0x00000001L
#define MMEA0_ADDRDEC1_BASE_ADDR_CS0__BASE_ADDR_MASK 0xFFFFFFFEL
//MMEA0_ADDRDEC1_BASE_ADDR_CS1
#define MMEA0_ADDRDEC1_BASE_ADDR_CS1__CS_EN__SHIFT 0x0
#define MMEA0_ADDRDEC1_BASE_ADDR_CS1__BASE_ADDR__SHIFT 0x1
#define MMEA0_ADDRDEC1_BASE_ADDR_CS1__CS_EN_MASK 0x00000001L
#define MMEA0_ADDRDEC1_BASE_ADDR_CS1__BASE_ADDR_MASK 0xFFFFFFFEL
//MMEA0_ADDRDEC1_BASE_ADDR_CS2
#define MMEA0_ADDRDEC1_BASE_ADDR_CS2__CS_EN__SHIFT 0x0
#define MMEA0_ADDRDEC1_BASE_ADDR_CS2__BASE_ADDR__SHIFT 0x1
#define MMEA0_ADDRDEC1_BASE_ADDR_CS2__CS_EN_MASK 0x00000001L
#define MMEA0_ADDRDEC1_BASE_ADDR_CS2__BASE_ADDR_MASK 0xFFFFFFFEL
//MMEA0_ADDRDEC1_BASE_ADDR_CS3
#define MMEA0_ADDRDEC1_BASE_ADDR_CS3__CS_EN__SHIFT 0x0
#define MMEA0_ADDRDEC1_BASE_ADDR_CS3__BASE_ADDR__SHIFT 0x1
#define MMEA0_ADDRDEC1_BASE_ADDR_CS3__CS_EN_MASK 0x00000001L
#define MMEA0_ADDRDEC1_BASE_ADDR_CS3__BASE_ADDR_MASK 0xFFFFFFFEL
//MMEA0_ADDRDEC1_BASE_ADDR_SECCS0
#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS0__CS_EN__SHIFT 0x0
#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS0__BASE_ADDR__SHIFT 0x1
#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS0__CS_EN_MASK 0x00000001L
#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS0__BASE_ADDR_MASK 0xFFFFFFFEL
//MMEA0_ADDRDEC1_BASE_ADDR_SECCS1
#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS1__CS_EN__SHIFT 0x0
#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS1__BASE_ADDR__SHIFT 0x1
#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS1__CS_EN_MASK 0x00000001L
#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS1__BASE_ADDR_MASK 0xFFFFFFFEL
//MMEA0_ADDRDEC1_BASE_ADDR_SECCS2
#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS2__CS_EN__SHIFT 0x0
#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS2__BASE_ADDR__SHIFT 0x1
#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS2__CS_EN_MASK 0x00000001L
#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS2__BASE_ADDR_MASK 0xFFFFFFFEL
//MMEA0_ADDRDEC1_BASE_ADDR_SECCS3
#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS3__CS_EN__SHIFT 0x0
#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS3__BASE_ADDR__SHIFT 0x1
#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS3__CS_EN_MASK 0x00000001L
#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS3__BASE_ADDR_MASK 0xFFFFFFFEL
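/*
 * Illustrative sketch only (not part of the generated register list):
 * the __SHIFT/_MASK pairs above are typically combined like this to
 * compose a register value from its fields. The helper name and the
 * u32 type (from linux/types.h) are assumptions for the example; only
 * the macros themselves come from this header.
 */
static inline u32 mmea0_addrdec1_base_addr_cs0_pack(u32 cs_en, u32 base_addr)
{
	u32 val = 0;

	/* CS_EN occupies bit 0, BASE_ADDR occupies bits 31:1 */
	val |= (cs_en << MMEA0_ADDRDEC1_BASE_ADDR_CS0__CS_EN__SHIFT) &
	       MMEA0_ADDRDEC1_BASE_ADDR_CS0__CS_EN_MASK;
	val |= (base_addr << MMEA0_ADDRDEC1_BASE_ADDR_CS0__BASE_ADDR__SHIFT) &
	       MMEA0_ADDRDEC1_BASE_ADDR_CS0__BASE_ADDR_MASK;
	return val;
}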
//MMEA0_ADDRDEC1_ADDR_MASK_CS01
#define MMEA0_ADDRDEC1_ADDR_MASK_CS01__ADDR_MASK__SHIFT 0x1
#define MMEA0_ADDRDEC1_ADDR_MASK_CS01__ADDR_MASK_MASK 0xFFFFFFFEL
//MMEA0_ADDRDEC1_ADDR_MASK_CS23
#define MMEA0_ADDRDEC1_ADDR_MASK_CS23__ADDR_MASK__SHIFT 0x1
#define MMEA0_ADDRDEC1_ADDR_MASK_CS23__ADDR_MASK_MASK 0xFFFFFFFEL
//MMEA0_ADDRDEC1_ADDR_MASK_SECCS01
#define MMEA0_ADDRDEC1_ADDR_MASK_SECCS01__ADDR_MASK__SHIFT 0x1
#define MMEA0_ADDRDEC1_ADDR_MASK_SECCS01__ADDR_MASK_MASK 0xFFFFFFFEL
//MMEA0_ADDRDEC1_ADDR_MASK_SECCS23
#define MMEA0_ADDRDEC1_ADDR_MASK_SECCS23__ADDR_MASK__SHIFT 0x1
#define MMEA0_ADDRDEC1_ADDR_MASK_SECCS23__ADDR_MASK_MASK 0xFFFFFFFEL
//MMEA0_ADDRDEC1_ADDR_CFG_CS01
#define MMEA0_ADDRDEC1_ADDR_CFG_CS01__NUM_BANK_GROUPS__SHIFT 0x2
#define MMEA0_ADDRDEC1_ADDR_CFG_CS01__NUM_RM__SHIFT 0x4
#define MMEA0_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_LO__SHIFT 0x8
#define MMEA0_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_HI__SHIFT 0xc
#define MMEA0_ADDRDEC1_ADDR_CFG_CS01__NUM_COL__SHIFT 0x10
#define MMEA0_ADDRDEC1_ADDR_CFG_CS01__NUM_BANKS__SHIFT 0x14
#define MMEA0_ADDRDEC1_ADDR_CFG_CS01__HI_COL_EN__SHIFT 0x1f
#define MMEA0_ADDRDEC1_ADDR_CFG_CS01__NUM_BANK_GROUPS_MASK 0x0000000CL
#define MMEA0_ADDRDEC1_ADDR_CFG_CS01__NUM_RM_MASK 0x00000030L
#define MMEA0_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_LO_MASK 0x00000F00L
#define MMEA0_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_HI_MASK 0x0000F000L
#define MMEA0_ADDRDEC1_ADDR_CFG_CS01__NUM_COL_MASK 0x000F0000L
#define MMEA0_ADDRDEC1_ADDR_CFG_CS01__NUM_BANKS_MASK 0x00300000L
#define MMEA0_ADDRDEC1_ADDR_CFG_CS01__HI_COL_EN_MASK 0x80000000L
//MMEA0_ADDRDEC1_ADDR_CFG_CS23
#define MMEA0_ADDRDEC1_ADDR_CFG_CS23__NUM_BANK_GROUPS__SHIFT 0x2
#define MMEA0_ADDRDEC1_ADDR_CFG_CS23__NUM_RM__SHIFT 0x4
#define MMEA0_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_LO__SHIFT 0x8
#define MMEA0_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_HI__SHIFT 0xc
#define MMEA0_ADDRDEC1_ADDR_CFG_CS23__NUM_COL__SHIFT 0x10
#define MMEA0_ADDRDEC1_ADDR_CFG_CS23__NUM_BANKS__SHIFT 0x14
#define MMEA0_ADDRDEC1_ADDR_CFG_CS23__HI_COL_EN__SHIFT 0x1f
#define MMEA0_ADDRDEC1_ADDR_CFG_CS23__NUM_BANK_GROUPS_MASK 0x0000000CL
#define MMEA0_ADDRDEC1_ADDR_CFG_CS23__NUM_RM_MASK 0x00000030L
#define MMEA0_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_LO_MASK 0x00000F00L
#define MMEA0_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_HI_MASK 0x0000F000L
#define MMEA0_ADDRDEC1_ADDR_CFG_CS23__NUM_COL_MASK 0x000F0000L
#define MMEA0_ADDRDEC1_ADDR_CFG_CS23__NUM_BANKS_MASK 0x00300000L
#define MMEA0_ADDRDEC1_ADDR_CFG_CS23__HI_COL_EN_MASK 0x80000000L
//MMEA0_ADDRDEC1_ADDR_SEL_CS01
#define MMEA0_ADDRDEC1_ADDR_SEL_CS01__BANK0__SHIFT 0x0
#define MMEA0_ADDRDEC1_ADDR_SEL_CS01__BANK1__SHIFT 0x4
#define MMEA0_ADDRDEC1_ADDR_SEL_CS01__BANK2__SHIFT 0x8
#define MMEA0_ADDRDEC1_ADDR_SEL_CS01__BANK3__SHIFT 0xc
#define MMEA0_ADDRDEC1_ADDR_SEL_CS01__BANK4__SHIFT 0x10
#define MMEA0_ADDRDEC1_ADDR_SEL_CS01__ROW_LO__SHIFT 0x18
#define MMEA0_ADDRDEC1_ADDR_SEL_CS01__ROW_HI__SHIFT 0x1c
#define MMEA0_ADDRDEC1_ADDR_SEL_CS01__BANK0_MASK 0x0000000FL
#define MMEA0_ADDRDEC1_ADDR_SEL_CS01__BANK1_MASK 0x000000F0L
#define MMEA0_ADDRDEC1_ADDR_SEL_CS01__BANK2_MASK 0x00000F00L
#define MMEA0_ADDRDEC1_ADDR_SEL_CS01__BANK3_MASK 0x0000F000L
#define MMEA0_ADDRDEC1_ADDR_SEL_CS01__BANK4_MASK 0x001F0000L
#define MMEA0_ADDRDEC1_ADDR_SEL_CS01__ROW_LO_MASK 0x0F000000L
#define MMEA0_ADDRDEC1_ADDR_SEL_CS01__ROW_HI_MASK 0xF0000000L
//MMEA0_ADDRDEC1_ADDR_SEL_CS23
#define MMEA0_ADDRDEC1_ADDR_SEL_CS23__BANK0__SHIFT 0x0
#define MMEA0_ADDRDEC1_ADDR_SEL_CS23__BANK1__SHIFT 0x4
#define MMEA0_ADDRDEC1_ADDR_SEL_CS23__BANK2__SHIFT 0x8
#define MMEA0_ADDRDEC1_ADDR_SEL_CS23__BANK3__SHIFT 0xc
#define MMEA0_ADDRDEC1_ADDR_SEL_CS23__BANK4__SHIFT 0x10
#define MMEA0_ADDRDEC1_ADDR_SEL_CS23__ROW_LO__SHIFT 0x18
#define MMEA0_ADDRDEC1_ADDR_SEL_CS23__ROW_HI__SHIFT 0x1c
#define MMEA0_ADDRDEC1_ADDR_SEL_CS23__BANK0_MASK 0x0000000FL
#define MMEA0_ADDRDEC1_ADDR_SEL_CS23__BANK1_MASK 0x000000F0L
#define MMEA0_ADDRDEC1_ADDR_SEL_CS23__BANK2_MASK 0x00000F00L
#define MMEA0_ADDRDEC1_ADDR_SEL_CS23__BANK3_MASK 0x0000F000L
#define MMEA0_ADDRDEC1_ADDR_SEL_CS23__BANK4_MASK 0x001F0000L
#define MMEA0_ADDRDEC1_ADDR_SEL_CS23__ROW_LO_MASK 0x0F000000L
#define MMEA0_ADDRDEC1_ADDR_SEL_CS23__ROW_HI_MASK 0xF0000000L
//MMEA0_ADDRDEC1_COL_SEL_LO_CS01
#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL0__SHIFT 0x0
#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL1__SHIFT 0x4
#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL2__SHIFT 0x8
#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL3__SHIFT 0xc
#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL4__SHIFT 0x10
#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL5__SHIFT 0x14
#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL6__SHIFT 0x18
#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL7__SHIFT 0x1c
#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL0_MASK 0x0000000FL
#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL1_MASK 0x000000F0L
#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL2_MASK 0x00000F00L
#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL3_MASK 0x0000F000L
#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL4_MASK 0x000F0000L
#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL5_MASK 0x00F00000L
#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL6_MASK 0x0F000000L
#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL7_MASK 0xF0000000L
//MMEA0_ADDRDEC1_COL_SEL_LO_CS23
#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL0__SHIFT 0x0
#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL1__SHIFT 0x4
#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL2__SHIFT 0x8
#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL3__SHIFT 0xc
#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL4__SHIFT 0x10
#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL5__SHIFT 0x14
#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL6__SHIFT 0x18
#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL7__SHIFT 0x1c
#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL0_MASK 0x0000000FL
#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL1_MASK 0x000000F0L
#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL2_MASK 0x00000F00L
#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL3_MASK 0x0000F000L
#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL4_MASK 0x000F0000L
#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL5_MASK 0x00F00000L
#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL6_MASK 0x0F000000L
#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL7_MASK 0xF0000000L
//MMEA0_ADDRDEC1_COL_SEL_HI_CS01
#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL8__SHIFT 0x0
#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL9__SHIFT 0x4
#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL10__SHIFT 0x8
#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL11__SHIFT 0xc
#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL12__SHIFT 0x10
#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL13__SHIFT 0x14
#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL14__SHIFT 0x18
#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL15__SHIFT 0x1c
#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL8_MASK 0x0000000FL
#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL9_MASK 0x000000F0L
#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL10_MASK 0x00000F00L
#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL11_MASK 0x0000F000L
#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL12_MASK 0x000F0000L
#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL13_MASK 0x00F00000L
#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL14_MASK 0x0F000000L
#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL15_MASK 0xF0000000L
//MMEA0_ADDRDEC1_COL_SEL_HI_CS23
#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL8__SHIFT 0x0
#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL9__SHIFT 0x4
#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL10__SHIFT 0x8
#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL11__SHIFT 0xc
#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL12__SHIFT 0x10
#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL13__SHIFT 0x14
#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL14__SHIFT 0x18
#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL15__SHIFT 0x1c
#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL8_MASK 0x0000000FL
#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL9_MASK 0x000000F0L
#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL10_MASK 0x00000F00L
#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL11_MASK 0x0000F000L
#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL12_MASK 0x000F0000L
#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL13_MASK 0x00F00000L
#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL14_MASK 0x0F000000L
#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL15_MASK 0xF0000000L
//MMEA0_ADDRDEC1_RM_SEL_CS01
#define MMEA0_ADDRDEC1_RM_SEL_CS01__RM0__SHIFT 0x0
#define MMEA0_ADDRDEC1_RM_SEL_CS01__RM1__SHIFT 0x4
#define MMEA0_ADDRDEC1_RM_SEL_CS01__RM2__SHIFT 0x8
#define MMEA0_ADDRDEC1_RM_SEL_CS01__CHAN_BIT__SHIFT 0xc
#define MMEA0_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
#define MMEA0_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
#define MMEA0_ADDRDEC1_RM_SEL_CS01__RM0_MASK 0x0000000FL
#define MMEA0_ADDRDEC1_RM_SEL_CS01__RM1_MASK 0x000000F0L
#define MMEA0_ADDRDEC1_RM_SEL_CS01__RM2_MASK 0x00000F00L
#define MMEA0_ADDRDEC1_RM_SEL_CS01__CHAN_BIT_MASK 0x0000F000L
#define MMEA0_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
#define MMEA0_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
//MMEA0_ADDRDEC1_RM_SEL_CS23
#define MMEA0_ADDRDEC1_RM_SEL_CS23__RM0__SHIFT 0x0
#define MMEA0_ADDRDEC1_RM_SEL_CS23__RM1__SHIFT 0x4
#define MMEA0_ADDRDEC1_RM_SEL_CS23__RM2__SHIFT 0x8
#define MMEA0_ADDRDEC1_RM_SEL_CS23__CHAN_BIT__SHIFT 0xc
#define MMEA0_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
#define MMEA0_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
#define MMEA0_ADDRDEC1_RM_SEL_CS23__RM0_MASK 0x0000000FL
#define MMEA0_ADDRDEC1_RM_SEL_CS23__RM1_MASK 0x000000F0L
#define MMEA0_ADDRDEC1_RM_SEL_CS23__RM2_MASK 0x00000F00L
#define MMEA0_ADDRDEC1_RM_SEL_CS23__CHAN_BIT_MASK 0x0000F000L
#define MMEA0_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
#define MMEA0_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
//MMEA0_ADDRDEC1_RM_SEL_SECCS01
#define MMEA0_ADDRDEC1_RM_SEL_SECCS01__RM0__SHIFT 0x0
#define MMEA0_ADDRDEC1_RM_SEL_SECCS01__RM1__SHIFT 0x4
#define MMEA0_ADDRDEC1_RM_SEL_SECCS01__RM2__SHIFT 0x8
#define MMEA0_ADDRDEC1_RM_SEL_SECCS01__CHAN_BIT__SHIFT 0xc
#define MMEA0_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
#define MMEA0_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
#define MMEA0_ADDRDEC1_RM_SEL_SECCS01__RM0_MASK 0x0000000FL
#define MMEA0_ADDRDEC1_RM_SEL_SECCS01__RM1_MASK 0x000000F0L
#define MMEA0_ADDRDEC1_RM_SEL_SECCS01__RM2_MASK 0x00000F00L
#define MMEA0_ADDRDEC1_RM_SEL_SECCS01__CHAN_BIT_MASK 0x0000F000L
#define MMEA0_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
#define MMEA0_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
//MMEA0_ADDRDEC1_RM_SEL_SECCS23
#define MMEA0_ADDRDEC1_RM_SEL_SECCS23__RM0__SHIFT 0x0
#define MMEA0_ADDRDEC1_RM_SEL_SECCS23__RM1__SHIFT 0x4
#define MMEA0_ADDRDEC1_RM_SEL_SECCS23__RM2__SHIFT 0x8
#define MMEA0_ADDRDEC1_RM_SEL_SECCS23__CHAN_BIT__SHIFT 0xc
#define MMEA0_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
#define MMEA0_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
#define MMEA0_ADDRDEC1_RM_SEL_SECCS23__RM0_MASK 0x0000000FL
#define MMEA0_ADDRDEC1_RM_SEL_SECCS23__RM1_MASK 0x000000F0L
#define MMEA0_ADDRDEC1_RM_SEL_SECCS23__RM2_MASK 0x00000F00L
#define MMEA0_ADDRDEC1_RM_SEL_SECCS23__CHAN_BIT_MASK 0x0000F000L
#define MMEA0_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
#define MMEA0_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
//MMEA0_IO_RD_CLI2GRP_MAP0
#define MMEA0_IO_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
#define MMEA0_IO_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
#define MMEA0_IO_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
#define MMEA0_IO_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
#define MMEA0_IO_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
#define MMEA0_IO_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
#define MMEA0_IO_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
#define MMEA0_IO_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
#define MMEA0_IO_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
#define MMEA0_IO_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
#define MMEA0_IO_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
#define MMEA0_IO_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
#define MMEA0_IO_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
#define MMEA0_IO_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
#define MMEA0_IO_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
#define MMEA0_IO_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
#define MMEA0_IO_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
#define MMEA0_IO_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
#define MMEA0_IO_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
#define MMEA0_IO_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
#define MMEA0_IO_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
#define MMEA0_IO_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
#define MMEA0_IO_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
#define MMEA0_IO_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
#define MMEA0_IO_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
#define MMEA0_IO_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
#define MMEA0_IO_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
#define MMEA0_IO_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
#define MMEA0_IO_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
#define MMEA0_IO_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
#define MMEA0_IO_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
#define MMEA0_IO_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
//MMEA0_IO_RD_CLI2GRP_MAP1
#define MMEA0_IO_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
#define MMEA0_IO_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
#define MMEA0_IO_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
#define MMEA0_IO_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
#define MMEA0_IO_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
#define MMEA0_IO_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
#define MMEA0_IO_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
#define MMEA0_IO_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
#define MMEA0_IO_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
#define MMEA0_IO_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
#define MMEA0_IO_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
#define MMEA0_IO_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
#define MMEA0_IO_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
#define MMEA0_IO_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
#define MMEA0_IO_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
#define MMEA0_IO_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
#define MMEA0_IO_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
#define MMEA0_IO_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
#define MMEA0_IO_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
#define MMEA0_IO_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
#define MMEA0_IO_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
#define MMEA0_IO_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
#define MMEA0_IO_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
#define MMEA0_IO_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
#define MMEA0_IO_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
#define MMEA0_IO_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
#define MMEA0_IO_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
#define MMEA0_IO_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
#define MMEA0_IO_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
#define MMEA0_IO_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
#define MMEA0_IO_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
#define MMEA0_IO_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
//MMEA0_IO_WR_CLI2GRP_MAP0
#define MMEA0_IO_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
#define MMEA0_IO_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
#define MMEA0_IO_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
#define MMEA0_IO_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
#define MMEA0_IO_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
#define MMEA0_IO_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
#define MMEA0_IO_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
#define MMEA0_IO_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
#define MMEA0_IO_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
#define MMEA0_IO_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
#define MMEA0_IO_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
#define MMEA0_IO_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
#define MMEA0_IO_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
#define MMEA0_IO_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
#define MMEA0_IO_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
#define MMEA0_IO_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
#define MMEA0_IO_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
#define MMEA0_IO_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
#define MMEA0_IO_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
#define MMEA0_IO_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
#define MMEA0_IO_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
#define MMEA0_IO_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
#define MMEA0_IO_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
#define MMEA0_IO_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
#define MMEA0_IO_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
#define MMEA0_IO_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
#define MMEA0_IO_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
#define MMEA0_IO_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
#define MMEA0_IO_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
#define MMEA0_IO_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
#define MMEA0_IO_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
#define MMEA0_IO_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
//MMEA0_IO_WR_CLI2GRP_MAP1
#define MMEA0_IO_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
#define MMEA0_IO_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
#define MMEA0_IO_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
#define MMEA0_IO_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
#define MMEA0_IO_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
#define MMEA0_IO_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
#define MMEA0_IO_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
#define MMEA0_IO_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
#define MMEA0_IO_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
#define MMEA0_IO_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
#define MMEA0_IO_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
#define MMEA0_IO_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
#define MMEA0_IO_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
#define MMEA0_IO_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
#define MMEA0_IO_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
#define MMEA0_IO_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
#define MMEA0_IO_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
#define MMEA0_IO_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
#define MMEA0_IO_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
#define MMEA0_IO_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
#define MMEA0_IO_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
#define MMEA0_IO_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
#define MMEA0_IO_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
#define MMEA0_IO_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
#define MMEA0_IO_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
#define MMEA0_IO_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
#define MMEA0_IO_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
#define MMEA0_IO_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
#define MMEA0_IO_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
#define MMEA0_IO_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
#define MMEA0_IO_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
#define MMEA0_IO_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
//MMEA0_IO_RD_COMBINE_FLUSH
#define MMEA0_IO_RD_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0
#define MMEA0_IO_RD_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4
#define MMEA0_IO_RD_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8
#define MMEA0_IO_RD_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc
#define MMEA0_IO_RD_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL
#define MMEA0_IO_RD_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L
#define MMEA0_IO_RD_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L
#define MMEA0_IO_RD_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L
//MMEA0_IO_WR_COMBINE_FLUSH
#define MMEA0_IO_WR_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0
#define MMEA0_IO_WR_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4
#define MMEA0_IO_WR_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8
#define MMEA0_IO_WR_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc
#define MMEA0_IO_WR_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL
#define MMEA0_IO_WR_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L
#define MMEA0_IO_WR_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L
#define MMEA0_IO_WR_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L
//MMEA0_IO_GROUP_BURST
#define MMEA0_IO_GROUP_BURST__RD_LIMIT_LO__SHIFT 0x0
#define MMEA0_IO_GROUP_BURST__RD_LIMIT_HI__SHIFT 0x8
#define MMEA0_IO_GROUP_BURST__WR_LIMIT_LO__SHIFT 0x10
#define MMEA0_IO_GROUP_BURST__WR_LIMIT_HI__SHIFT 0x18
#define MMEA0_IO_GROUP_BURST__RD_LIMIT_LO_MASK 0x000000FFL
#define MMEA0_IO_GROUP_BURST__RD_LIMIT_HI_MASK 0x0000FF00L
#define MMEA0_IO_GROUP_BURST__WR_LIMIT_LO_MASK 0x00FF0000L
#define MMEA0_IO_GROUP_BURST__WR_LIMIT_HI_MASK 0xFF000000L
//MMEA0_IO_RD_PRI_AGE
#define MMEA0_IO_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
#define MMEA0_IO_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
#define MMEA0_IO_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
#define MMEA0_IO_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
#define MMEA0_IO_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
#define MMEA0_IO_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
#define MMEA0_IO_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
#define MMEA0_IO_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
#define MMEA0_IO_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
#define MMEA0_IO_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
#define MMEA0_IO_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
#define MMEA0_IO_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
#define MMEA0_IO_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
#define MMEA0_IO_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
#define MMEA0_IO_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
#define MMEA0_IO_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
//MMEA0_IO_WR_PRI_AGE
#define MMEA0_IO_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
#define MMEA0_IO_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
#define MMEA0_IO_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
#define MMEA0_IO_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
#define MMEA0_IO_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
#define MMEA0_IO_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
#define MMEA0_IO_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
#define MMEA0_IO_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
#define MMEA0_IO_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
#define MMEA0_IO_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
#define MMEA0_IO_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
#define MMEA0_IO_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
#define MMEA0_IO_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
#define MMEA0_IO_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
#define MMEA0_IO_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
#define MMEA0_IO_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
//MMEA0_IO_RD_PRI_QUEUING
#define MMEA0_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
#define MMEA0_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
#define MMEA0_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
#define MMEA0_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
#define MMEA0_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
#define MMEA0_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
#define MMEA0_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
#define MMEA0_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
//MMEA0_IO_WR_PRI_QUEUING
#define MMEA0_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
#define MMEA0_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
#define MMEA0_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
#define MMEA0_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
#define MMEA0_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
#define MMEA0_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
#define MMEA0_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
#define MMEA0_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
//MMEA0_IO_RD_PRI_FIXED
#define MMEA0_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
#define MMEA0_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
#define MMEA0_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
#define MMEA0_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
#define MMEA0_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
#define MMEA0_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
#define MMEA0_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
#define MMEA0_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
//MMEA0_IO_WR_PRI_FIXED
#define MMEA0_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
#define MMEA0_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
#define MMEA0_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
#define MMEA0_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
#define MMEA0_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
#define MMEA0_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
#define MMEA0_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
#define MMEA0_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
//MMEA0_IO_RD_PRI_URGENCY
#define MMEA0_IO_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
#define MMEA0_IO_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
#define MMEA0_IO_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
#define MMEA0_IO_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
#define MMEA0_IO_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
#define MMEA0_IO_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
#define MMEA0_IO_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
#define MMEA0_IO_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
#define MMEA0_IO_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
#define MMEA0_IO_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
#define MMEA0_IO_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
#define MMEA0_IO_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
#define MMEA0_IO_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
#define MMEA0_IO_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
#define MMEA0_IO_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
#define MMEA0_IO_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
//MMEA0_IO_WR_PRI_URGENCY
#define MMEA0_IO_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
#define MMEA0_IO_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
#define MMEA0_IO_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
#define MMEA0_IO_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
#define MMEA0_IO_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
#define MMEA0_IO_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
#define MMEA0_IO_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
#define MMEA0_IO_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
#define MMEA0_IO_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
#define MMEA0_IO_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
#define MMEA0_IO_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
#define MMEA0_IO_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
#define MMEA0_IO_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
#define MMEA0_IO_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
#define MMEA0_IO_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
#define MMEA0_IO_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
//MMEA0_IO_RD_PRI_URGENCY_MASKING
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L
#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L
//MMEA0_IO_WR_PRI_URGENCY_MASKING
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L
#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L
//MMEA0_IO_RD_PRI_QUANT_PRI1
#define MMEA0_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
#define MMEA0_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
#define MMEA0_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
#define MMEA0_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
#define MMEA0_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
#define MMEA0_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
#define MMEA0_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
#define MMEA0_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
//MMEA0_IO_RD_PRI_QUANT_PRI2
#define MMEA0_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
#define MMEA0_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
#define MMEA0_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
#define MMEA0_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
#define MMEA0_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
#define MMEA0_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
#define MMEA0_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
#define MMEA0_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
//MMEA0_IO_RD_PRI_QUANT_PRI3
#define MMEA0_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
#define MMEA0_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
#define MMEA0_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
#define MMEA0_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
#define MMEA0_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
#define MMEA0_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
#define MMEA0_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
#define MMEA0_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
//MMEA0_IO_WR_PRI_QUANT_PRI1
#define MMEA0_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
#define MMEA0_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
#define MMEA0_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
#define MMEA0_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
#define MMEA0_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
#define MMEA0_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
#define MMEA0_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
#define MMEA0_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
//MMEA0_IO_WR_PRI_QUANT_PRI2
#define MMEA0_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
#define MMEA0_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
#define MMEA0_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
#define MMEA0_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
#define MMEA0_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
#define MMEA0_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
#define MMEA0_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
#define MMEA0_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
//MMEA0_IO_WR_PRI_QUANT_PRI3
#define MMEA0_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
#define MMEA0_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
#define MMEA0_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
#define MMEA0_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
#define MMEA0_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
#define MMEA0_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
#define MMEA0_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
#define MMEA0_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
//MMEA0_SDP_ARB_DRAM
#define MMEA0_SDP_ARB_DRAM__RDWR_BURST_LIMIT_CYCL__SHIFT 0x0
#define MMEA0_SDP_ARB_DRAM__RDWR_BURST_LIMIT_DATA__SHIFT 0x8
#define MMEA0_SDP_ARB_DRAM__EARLY_SW2RD_ON_PRI__SHIFT 0x10
#define MMEA0_SDP_ARB_DRAM__EARLY_SW2WR_ON_PRI__SHIFT 0x11
#define MMEA0_SDP_ARB_DRAM__EARLY_SW2RD_ON_RES__SHIFT 0x12
#define MMEA0_SDP_ARB_DRAM__EARLY_SW2WR_ON_RES__SHIFT 0x13
#define MMEA0_SDP_ARB_DRAM__EOB_ON_EXPIRE__SHIFT 0x14
#define MMEA0_SDP_ARB_DRAM__DECOUPLE_RDWR_BNKSTATE__SHIFT 0x15
#define MMEA0_SDP_ARB_DRAM__RDWR_BURST_LIMIT_CYCL_MASK 0x0000007FL
#define MMEA0_SDP_ARB_DRAM__RDWR_BURST_LIMIT_DATA_MASK 0x00007F00L
#define MMEA0_SDP_ARB_DRAM__EARLY_SW2RD_ON_PRI_MASK 0x00010000L
#define MMEA0_SDP_ARB_DRAM__EARLY_SW2WR_ON_PRI_MASK 0x00020000L
#define MMEA0_SDP_ARB_DRAM__EARLY_SW2RD_ON_RES_MASK 0x00040000L
#define MMEA0_SDP_ARB_DRAM__EARLY_SW2WR_ON_RES_MASK 0x00080000L
#define MMEA0_SDP_ARB_DRAM__EOB_ON_EXPIRE_MASK 0x00100000L
#define MMEA0_SDP_ARB_DRAM__DECOUPLE_RDWR_BNKSTATE_MASK 0x00200000L
//MMEA0_SDP_ARB_FINAL
#define MMEA0_SDP_ARB_FINAL__DRAM_BURST_LIMIT__SHIFT 0x0
#define MMEA0_SDP_ARB_FINAL__GMI_BURST_LIMIT__SHIFT 0x5
#define MMEA0_SDP_ARB_FINAL__IO_BURST_LIMIT__SHIFT 0xa
#define MMEA0_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER__SHIFT 0xf
#define MMEA0_SDP_ARB_FINAL__RDONLY_VC0__SHIFT 0x11
#define MMEA0_SDP_ARB_FINAL__RDONLY_VC1__SHIFT 0x12
#define MMEA0_SDP_ARB_FINAL__RDONLY_VC2__SHIFT 0x13
#define MMEA0_SDP_ARB_FINAL__RDONLY_VC3__SHIFT 0x14
#define MMEA0_SDP_ARB_FINAL__RDONLY_VC4__SHIFT 0x15
#define MMEA0_SDP_ARB_FINAL__RDONLY_VC5__SHIFT 0x16
#define MMEA0_SDP_ARB_FINAL__RDONLY_VC6__SHIFT 0x17
#define MMEA0_SDP_ARB_FINAL__RDONLY_VC7__SHIFT 0x18
#define MMEA0_SDP_ARB_FINAL__ERREVENT_ON_ERROR__SHIFT 0x19
#define MMEA0_SDP_ARB_FINAL__HALTREQ_ON_ERROR__SHIFT 0x1a
#define MMEA0_SDP_ARB_FINAL__DRAM_BURST_LIMIT_MASK 0x0000001FL
#define MMEA0_SDP_ARB_FINAL__GMI_BURST_LIMIT_MASK 0x000003E0L
#define MMEA0_SDP_ARB_FINAL__IO_BURST_LIMIT_MASK 0x00007C00L
#define MMEA0_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER_MASK 0x00018000L
#define MMEA0_SDP_ARB_FINAL__RDONLY_VC0_MASK 0x00020000L
#define MMEA0_SDP_ARB_FINAL__RDONLY_VC1_MASK 0x00040000L
#define MMEA0_SDP_ARB_FINAL__RDONLY_VC2_MASK 0x00080000L
#define MMEA0_SDP_ARB_FINAL__RDONLY_VC3_MASK 0x00100000L
#define MMEA0_SDP_ARB_FINAL__RDONLY_VC4_MASK 0x00200000L
#define MMEA0_SDP_ARB_FINAL__RDONLY_VC5_MASK 0x00400000L
#define MMEA0_SDP_ARB_FINAL__RDONLY_VC6_MASK 0x00800000L
#define MMEA0_SDP_ARB_FINAL__RDONLY_VC7_MASK 0x01000000L
#define MMEA0_SDP_ARB_FINAL__ERREVENT_ON_ERROR_MASK 0x02000000L
#define MMEA0_SDP_ARB_FINAL__HALTREQ_ON_ERROR_MASK 0x04000000L
//MMEA0_SDP_DRAM_PRIORITY
#define MMEA0_SDP_DRAM_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0
#define MMEA0_SDP_DRAM_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4
#define MMEA0_SDP_DRAM_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8
#define MMEA0_SDP_DRAM_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc
#define MMEA0_SDP_DRAM_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10
#define MMEA0_SDP_DRAM_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14
#define MMEA0_SDP_DRAM_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18
#define MMEA0_SDP_DRAM_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c
#define MMEA0_SDP_DRAM_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL
#define MMEA0_SDP_DRAM_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L
#define MMEA0_SDP_DRAM_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L
#define MMEA0_SDP_DRAM_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L
#define MMEA0_SDP_DRAM_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L
#define MMEA0_SDP_DRAM_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L
#define MMEA0_SDP_DRAM_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L
#define MMEA0_SDP_DRAM_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L
//MMEA0_SDP_IO_PRIORITY
#define MMEA0_SDP_IO_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0
#define MMEA0_SDP_IO_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4
#define MMEA0_SDP_IO_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8
#define MMEA0_SDP_IO_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc
#define MMEA0_SDP_IO_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10
#define MMEA0_SDP_IO_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14
#define MMEA0_SDP_IO_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18
#define MMEA0_SDP_IO_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c
#define MMEA0_SDP_IO_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL
#define MMEA0_SDP_IO_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L
#define MMEA0_SDP_IO_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L
#define MMEA0_SDP_IO_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L
#define MMEA0_SDP_IO_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L
#define MMEA0_SDP_IO_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L
#define MMEA0_SDP_IO_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L
#define MMEA0_SDP_IO_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L
//MMEA0_SDP_CREDITS
#define MMEA0_SDP_CREDITS__TAG_LIMIT__SHIFT 0x0
#define MMEA0_SDP_CREDITS__WR_RESP_CREDITS__SHIFT 0x8
#define MMEA0_SDP_CREDITS__RD_RESP_CREDITS__SHIFT 0x10
#define MMEA0_SDP_CREDITS__TAG_LIMIT_MASK 0x000000FFL
#define MMEA0_SDP_CREDITS__WR_RESP_CREDITS_MASK 0x00007F00L
#define MMEA0_SDP_CREDITS__RD_RESP_CREDITS_MASK 0x007F0000L
//MMEA0_SDP_TAG_RESERVE0
#define MMEA0_SDP_TAG_RESERVE0__VC0__SHIFT 0x0
#define MMEA0_SDP_TAG_RESERVE0__VC1__SHIFT 0x8
#define MMEA0_SDP_TAG_RESERVE0__VC2__SHIFT 0x10
#define MMEA0_SDP_TAG_RESERVE0__VC3__SHIFT 0x18
#define MMEA0_SDP_TAG_RESERVE0__VC0_MASK 0x000000FFL
#define MMEA0_SDP_TAG_RESERVE0__VC1_MASK 0x0000FF00L
#define MMEA0_SDP_TAG_RESERVE0__VC2_MASK 0x00FF0000L
#define MMEA0_SDP_TAG_RESERVE0__VC3_MASK 0xFF000000L
//MMEA0_SDP_TAG_RESERVE1
#define MMEA0_SDP_TAG_RESERVE1__VC4__SHIFT 0x0
#define MMEA0_SDP_TAG_RESERVE1__VC5__SHIFT 0x8
#define MMEA0_SDP_TAG_RESERVE1__VC6__SHIFT 0x10
#define MMEA0_SDP_TAG_RESERVE1__VC7__SHIFT 0x18
#define MMEA0_SDP_TAG_RESERVE1__VC4_MASK 0x000000FFL
#define MMEA0_SDP_TAG_RESERVE1__VC5_MASK 0x0000FF00L
#define MMEA0_SDP_TAG_RESERVE1__VC6_MASK 0x00FF0000L
#define MMEA0_SDP_TAG_RESERVE1__VC7_MASK 0xFF000000L
//MMEA0_SDP_VCC_RESERVE0
#define MMEA0_SDP_VCC_RESERVE0__VC0_CREDITS__SHIFT 0x0
#define MMEA0_SDP_VCC_RESERVE0__VC1_CREDITS__SHIFT 0x6
#define MMEA0_SDP_VCC_RESERVE0__VC2_CREDITS__SHIFT 0xc
#define MMEA0_SDP_VCC_RESERVE0__VC3_CREDITS__SHIFT 0x12
#define MMEA0_SDP_VCC_RESERVE0__VC4_CREDITS__SHIFT 0x18
#define MMEA0_SDP_VCC_RESERVE0__VC0_CREDITS_MASK 0x0000003FL
#define MMEA0_SDP_VCC_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L
#define MMEA0_SDP_VCC_RESERVE0__VC2_CREDITS_MASK 0x0003F000L
#define MMEA0_SDP_VCC_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L
#define MMEA0_SDP_VCC_RESERVE0__VC4_CREDITS_MASK 0x3F000000L
//MMEA0_SDP_VCC_RESERVE1
#define MMEA0_SDP_VCC_RESERVE1__VC5_CREDITS__SHIFT 0x0
#define MMEA0_SDP_VCC_RESERVE1__VC6_CREDITS__SHIFT 0x6
#define MMEA0_SDP_VCC_RESERVE1__VC7_CREDITS__SHIFT 0xc
#define MMEA0_SDP_VCC_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f
#define MMEA0_SDP_VCC_RESERVE1__VC5_CREDITS_MASK 0x0000003FL
#define MMEA0_SDP_VCC_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L
#define MMEA0_SDP_VCC_RESERVE1__VC7_CREDITS_MASK 0x0003F000L
#define MMEA0_SDP_VCC_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L
//MMEA0_SDP_VCD_RESERVE0
#define MMEA0_SDP_VCD_RESERVE0__VC0_CREDITS__SHIFT 0x0
#define MMEA0_SDP_VCD_RESERVE0__VC1_CREDITS__SHIFT 0x6
#define MMEA0_SDP_VCD_RESERVE0__VC2_CREDITS__SHIFT 0xc
#define MMEA0_SDP_VCD_RESERVE0__VC3_CREDITS__SHIFT 0x12
#define MMEA0_SDP_VCD_RESERVE0__VC4_CREDITS__SHIFT 0x18
#define MMEA0_SDP_VCD_RESERVE0__VC0_CREDITS_MASK 0x0000003FL
#define MMEA0_SDP_VCD_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L
#define MMEA0_SDP_VCD_RESERVE0__VC2_CREDITS_MASK 0x0003F000L
#define MMEA0_SDP_VCD_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L
#define MMEA0_SDP_VCD_RESERVE0__VC4_CREDITS_MASK 0x3F000000L
//MMEA0_SDP_VCD_RESERVE1
#define MMEA0_SDP_VCD_RESERVE1__VC5_CREDITS__SHIFT 0x0
#define MMEA0_SDP_VCD_RESERVE1__VC6_CREDITS__SHIFT 0x6
#define MMEA0_SDP_VCD_RESERVE1__VC7_CREDITS__SHIFT 0xc
#define MMEA0_SDP_VCD_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f
#define MMEA0_SDP_VCD_RESERVE1__VC5_CREDITS_MASK 0x0000003FL
#define MMEA0_SDP_VCD_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L
#define MMEA0_SDP_VCD_RESERVE1__VC7_CREDITS_MASK 0x0003F000L
#define MMEA0_SDP_VCD_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L
//MMEA0_SDP_REQ_CNTL
#define MMEA0_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ__SHIFT 0x0
#define MMEA0_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE__SHIFT 0x1
#define MMEA0_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC__SHIFT 0x2
#define MMEA0_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM__SHIFT 0x3
#define MMEA0_SDP_REQ_CNTL__INNER_DOMAIN_MODE__SHIFT 0x4
#define MMEA0_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ_MASK 0x00000001L
#define MMEA0_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE_MASK 0x00000002L
#define MMEA0_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC_MASK 0x00000004L
#define MMEA0_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM_MASK 0x00000008L
#define MMEA0_SDP_REQ_CNTL__INNER_DOMAIN_MODE_MASK 0x00000010L
//MMEA0_MISC
#define MMEA0_MISC__RELATIVE_PRI_IN_DRAM_RD_ARB__SHIFT 0x0
#define MMEA0_MISC__RELATIVE_PRI_IN_DRAM_WR_ARB__SHIFT 0x1
#define MMEA0_MISC__RELATIVE_PRI_IN_GMI_RD_ARB__SHIFT 0x2
#define MMEA0_MISC__RELATIVE_PRI_IN_GMI_WR_ARB__SHIFT 0x3
#define MMEA0_MISC__RELATIVE_PRI_IN_IO_RD_ARB__SHIFT 0x4
#define MMEA0_MISC__RELATIVE_PRI_IN_IO_WR_ARB__SHIFT 0x5
#define MMEA0_MISC__EARLYWRRET_ENABLE_VC0__SHIFT 0x6
#define MMEA0_MISC__EARLYWRRET_ENABLE_VC1__SHIFT 0x7
#define MMEA0_MISC__EARLYWRRET_ENABLE_VC2__SHIFT 0x8
#define MMEA0_MISC__EARLYWRRET_ENABLE_VC3__SHIFT 0x9
#define MMEA0_MISC__EARLYWRRET_ENABLE_VC4__SHIFT 0xa
#define MMEA0_MISC__EARLYWRRET_ENABLE_VC5__SHIFT 0xb
#define MMEA0_MISC__EARLYWRRET_ENABLE_VC6__SHIFT 0xc
#define MMEA0_MISC__EARLYWRRET_ENABLE_VC7__SHIFT 0xd
#define MMEA0_MISC__EARLY_SDP_ORIGDATA__SHIFT 0xe
#define MMEA0_MISC__LINKMGR_DYNAMIC_MODE__SHIFT 0xf
#define MMEA0_MISC__LINKMGR_HALT_THRESHOLD__SHIFT 0x11
#define MMEA0_MISC__LINKMGR_RECONNECT_DELAY__SHIFT 0x13
#define MMEA0_MISC__LINKMGR_IDLE_THRESHOLD__SHIFT 0x15
#define MMEA0_MISC__FAVOUR_MIDCHAIN_CS_IN_DRAM_ARB__SHIFT 0x1a
#define MMEA0_MISC__FAVOUR_MIDCHAIN_CS_IN_GMI_ARB__SHIFT 0x1b
#define MMEA0_MISC__FAVOUR_LAST_CS_IN_DRAM_ARB__SHIFT 0x1c
#define MMEA0_MISC__FAVOUR_LAST_CS_IN_GMI_ARB__SHIFT 0x1d
#define MMEA0_MISC__SWITCH_CS_ON_W2R_IN_DRAM_ARB__SHIFT 0x1e
#define MMEA0_MISC__SWITCH_CS_ON_W2R_IN_GMI_ARB__SHIFT 0x1f
#define MMEA0_MISC__RELATIVE_PRI_IN_DRAM_RD_ARB_MASK 0x00000001L
#define MMEA0_MISC__RELATIVE_PRI_IN_DRAM_WR_ARB_MASK 0x00000002L
#define MMEA0_MISC__RELATIVE_PRI_IN_GMI_RD_ARB_MASK 0x00000004L
#define MMEA0_MISC__RELATIVE_PRI_IN_GMI_WR_ARB_MASK 0x00000008L
#define MMEA0_MISC__RELATIVE_PRI_IN_IO_RD_ARB_MASK 0x00000010L
#define MMEA0_MISC__RELATIVE_PRI_IN_IO_WR_ARB_MASK 0x00000020L
#define MMEA0_MISC__EARLYWRRET_ENABLE_VC0_MASK 0x00000040L
#define MMEA0_MISC__EARLYWRRET_ENABLE_VC1_MASK 0x00000080L
#define MMEA0_MISC__EARLYWRRET_ENABLE_VC2_MASK 0x00000100L
#define MMEA0_MISC__EARLYWRRET_ENABLE_VC3_MASK 0x00000200L
#define MMEA0_MISC__EARLYWRRET_ENABLE_VC4_MASK 0x00000400L
#define MMEA0_MISC__EARLYWRRET_ENABLE_VC5_MASK 0x00000800L
#define MMEA0_MISC__EARLYWRRET_ENABLE_VC6_MASK 0x00001000L
#define MMEA0_MISC__EARLYWRRET_ENABLE_VC7_MASK 0x00002000L
#define MMEA0_MISC__EARLY_SDP_ORIGDATA_MASK 0x00004000L
#define MMEA0_MISC__LINKMGR_DYNAMIC_MODE_MASK 0x00018000L
#define MMEA0_MISC__LINKMGR_HALT_THRESHOLD_MASK 0x00060000L
#define MMEA0_MISC__LINKMGR_RECONNECT_DELAY_MASK 0x00180000L
#define MMEA0_MISC__LINKMGR_IDLE_THRESHOLD_MASK 0x03E00000L
#define MMEA0_MISC__FAVOUR_MIDCHAIN_CS_IN_DRAM_ARB_MASK 0x04000000L
#define MMEA0_MISC__FAVOUR_MIDCHAIN_CS_IN_GMI_ARB_MASK 0x08000000L
#define MMEA0_MISC__FAVOUR_LAST_CS_IN_DRAM_ARB_MASK 0x10000000L
#define MMEA0_MISC__FAVOUR_LAST_CS_IN_GMI_ARB_MASK 0x20000000L
#define MMEA0_MISC__SWITCH_CS_ON_W2R_IN_DRAM_ARB_MASK 0x40000000L
#define MMEA0_MISC__SWITCH_CS_ON_W2R_IN_GMI_ARB_MASK 0x80000000L
//MMEA0_LATENCY_SAMPLING
#define MMEA0_LATENCY_SAMPLING__SAMPLER0_DRAM__SHIFT 0x0
#define MMEA0_LATENCY_SAMPLING__SAMPLER1_DRAM__SHIFT 0x1
#define MMEA0_LATENCY_SAMPLING__SAMPLER0_GMI__SHIFT 0x2
#define MMEA0_LATENCY_SAMPLING__SAMPLER1_GMI__SHIFT 0x3
#define MMEA0_LATENCY_SAMPLING__SAMPLER0_IO__SHIFT 0x4
#define MMEA0_LATENCY_SAMPLING__SAMPLER1_IO__SHIFT 0x5
#define MMEA0_LATENCY_SAMPLING__SAMPLER0_READ__SHIFT 0x6
#define MMEA0_LATENCY_SAMPLING__SAMPLER1_READ__SHIFT 0x7
#define MMEA0_LATENCY_SAMPLING__SAMPLER0_WRITE__SHIFT 0x8
#define MMEA0_LATENCY_SAMPLING__SAMPLER1_WRITE__SHIFT 0x9
#define MMEA0_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET__SHIFT 0xa
#define MMEA0_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET__SHIFT 0xb
#define MMEA0_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET__SHIFT 0xc
#define MMEA0_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET__SHIFT 0xd
#define MMEA0_LATENCY_SAMPLING__SAMPLER0_VC__SHIFT 0xe
#define MMEA0_LATENCY_SAMPLING__SAMPLER1_VC__SHIFT 0x16
#define MMEA0_LATENCY_SAMPLING__SAMPLER0_DRAM_MASK 0x00000001L
#define MMEA0_LATENCY_SAMPLING__SAMPLER1_DRAM_MASK 0x00000002L
#define MMEA0_LATENCY_SAMPLING__SAMPLER0_GMI_MASK 0x00000004L
#define MMEA0_LATENCY_SAMPLING__SAMPLER1_GMI_MASK 0x00000008L
#define MMEA0_LATENCY_SAMPLING__SAMPLER0_IO_MASK 0x00000010L
#define MMEA0_LATENCY_SAMPLING__SAMPLER1_IO_MASK 0x00000020L
#define MMEA0_LATENCY_SAMPLING__SAMPLER0_READ_MASK 0x00000040L
#define MMEA0_LATENCY_SAMPLING__SAMPLER1_READ_MASK 0x00000080L
#define MMEA0_LATENCY_SAMPLING__SAMPLER0_WRITE_MASK 0x00000100L
#define MMEA0_LATENCY_SAMPLING__SAMPLER1_WRITE_MASK 0x00000200L
#define MMEA0_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET_MASK 0x00000400L
#define MMEA0_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET_MASK 0x00000800L
#define MMEA0_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET_MASK 0x00001000L
#define MMEA0_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET_MASK 0x00002000L
#define MMEA0_LATENCY_SAMPLING__SAMPLER0_VC_MASK 0x003FC000L
#define MMEA0_LATENCY_SAMPLING__SAMPLER1_VC_MASK 0x3FC00000L
//MMEA0_PERFCOUNTER_LO
#define MMEA0_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
#define MMEA0_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
//MMEA0_PERFCOUNTER_HI
#define MMEA0_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
#define MMEA0_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
#define MMEA0_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
#define MMEA0_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
//MMEA0_PERFCOUNTER0_CFG
#define MMEA0_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
#define MMEA0_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
#define MMEA0_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
#define MMEA0_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
#define MMEA0_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
#define MMEA0_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
#define MMEA0_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
#define MMEA0_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
#define MMEA0_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
#define MMEA0_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
//MMEA0_PERFCOUNTER1_CFG
#define MMEA0_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
#define MMEA0_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
#define MMEA0_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
#define MMEA0_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
#define MMEA0_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
#define MMEA0_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
#define MMEA0_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
#define MMEA0_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
#define MMEA0_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
#define MMEA0_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
//MMEA0_PERFCOUNTER_RSLT_CNTL
#define MMEA0_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
#define MMEA0_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
#define MMEA0_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
#define MMEA0_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
#define MMEA0_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
#define MMEA0_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
#define MMEA0_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
#define MMEA0_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
#define MMEA0_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
#define MMEA0_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
#define MMEA0_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
#define MMEA0_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
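/*
 * Illustrative sketch, not generated register content: every register in
 * this file pairs a field's __SHIFT with its _MASK so callers can pack or
 * extract that bit-field. A hypothetical packing helper for the
 * perf-counter config above, guarded out because this header deliberately
 * carries only #defines; driver code would normally go through the
 * REG_SET_FIELD()/REG_GET_FIELD() wrappers amdgpu layers on this naming
 * convention.
 */
#if 0
static inline u32 mmea0_perfcounter0_cfg_pack(u32 val, u32 perf_sel,
					      bool enable)
{
	/* Clear both fields, then insert the new values. */
	val &= ~(MMEA0_PERFCOUNTER0_CFG__PERF_SEL_MASK |
		 MMEA0_PERFCOUNTER0_CFG__ENABLE_MASK);
	val |= (perf_sel << MMEA0_PERFCOUNTER0_CFG__PERF_SEL__SHIFT) &
	       MMEA0_PERFCOUNTER0_CFG__PERF_SEL_MASK;
	if (enable)
		val |= MMEA0_PERFCOUNTER0_CFG__ENABLE_MASK;
	return val;
}
#endif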
//MMEA0_EDC_CNT
#define MMEA0_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT__SHIFT 0x0
#define MMEA0_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT__SHIFT 0x2
#define MMEA0_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT__SHIFT 0x4
#define MMEA0_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT__SHIFT 0x6
#define MMEA0_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT__SHIFT 0x8
#define MMEA0_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT__SHIFT 0xa
#define MMEA0_EDC_CNT__RRET_TAGMEM_SEC_COUNT__SHIFT 0xc
#define MMEA0_EDC_CNT__RRET_TAGMEM_DED_COUNT__SHIFT 0xe
#define MMEA0_EDC_CNT__WRET_TAGMEM_SEC_COUNT__SHIFT 0x10
#define MMEA0_EDC_CNT__WRET_TAGMEM_DED_COUNT__SHIFT 0x12
#define MMEA0_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT__SHIFT 0x14
#define MMEA0_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT__SHIFT 0x16
#define MMEA0_EDC_CNT__IORD_CMDMEM_SED_COUNT__SHIFT 0x18
#define MMEA0_EDC_CNT__IOWR_CMDMEM_SED_COUNT__SHIFT 0x1a
#define MMEA0_EDC_CNT__IOWR_DATAMEM_SED_COUNT__SHIFT 0x1c
#define MMEA0_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT_MASK 0x00000003L
#define MMEA0_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT_MASK 0x0000000CL
#define MMEA0_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT_MASK 0x00000030L
#define MMEA0_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
#define MMEA0_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT_MASK 0x00000300L
#define MMEA0_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT_MASK 0x00000C00L
#define MMEA0_EDC_CNT__RRET_TAGMEM_SEC_COUNT_MASK 0x00003000L
#define MMEA0_EDC_CNT__RRET_TAGMEM_DED_COUNT_MASK 0x0000C000L
#define MMEA0_EDC_CNT__WRET_TAGMEM_SEC_COUNT_MASK 0x00030000L
#define MMEA0_EDC_CNT__WRET_TAGMEM_DED_COUNT_MASK 0x000C0000L
#define MMEA0_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT_MASK 0x00300000L
#define MMEA0_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT_MASK 0x00C00000L
#define MMEA0_EDC_CNT__IORD_CMDMEM_SED_COUNT_MASK 0x03000000L
#define MMEA0_EDC_CNT__IOWR_CMDMEM_SED_COUNT_MASK 0x0C000000L
#define MMEA0_EDC_CNT__IOWR_DATAMEM_SED_COUNT_MASK 0x30000000L
//MMEA0_EDC_CNT2
#define MMEA0_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT__SHIFT 0x0
#define MMEA0_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT__SHIFT 0x2
#define MMEA0_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT__SHIFT 0x4
#define MMEA0_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT__SHIFT 0x6
#define MMEA0_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT__SHIFT 0x8
#define MMEA0_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa
#define MMEA0_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc
#define MMEA0_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe
#define MMEA0_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L
#define MMEA0_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL
#define MMEA0_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L
#define MMEA0_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
#define MMEA0_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT_MASK 0x00000300L
#define MMEA0_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L
#define MMEA0_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L
#define MMEA0_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L
//MMEA0_DSM_CNTL
#define MMEA0_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0
#define MMEA0_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
#define MMEA0_DSM_CNTL__DRAMWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x3
#define MMEA0_DSM_CNTL__DRAMWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x5
#define MMEA0_DSM_CNTL__DRAMWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0x6
#define MMEA0_DSM_CNTL__DRAMWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0x8
#define MMEA0_DSM_CNTL__RRET_TAGMEM_DSM_IRRITATOR_DATA__SHIFT 0x9
#define MMEA0_DSM_CNTL__RRET_TAGMEM_ENABLE_SINGLE_WRITE__SHIFT 0xb
#define MMEA0_DSM_CNTL__WRET_TAGMEM_DSM_IRRITATOR_DATA__SHIFT 0xc
#define MMEA0_DSM_CNTL__WRET_TAGMEM_ENABLE_SINGLE_WRITE__SHIFT 0xe
#define MMEA0_DSM_CNTL__GMIRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0xf
#define MMEA0_DSM_CNTL__GMIRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x11
#define MMEA0_DSM_CNTL__GMIWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x12
#define MMEA0_DSM_CNTL__GMIWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x14
#define MMEA0_DSM_CNTL__GMIWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0x15
#define MMEA0_DSM_CNTL__GMIWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0x17
#define MMEA0_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
#define MMEA0_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
#define MMEA0_DSM_CNTL__DRAMWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000018L
#define MMEA0_DSM_CNTL__DRAMWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L
#define MMEA0_DSM_CNTL__DRAMWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L
#define MMEA0_DSM_CNTL__DRAMWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
#define MMEA0_DSM_CNTL__RRET_TAGMEM_DSM_IRRITATOR_DATA_MASK 0x00000600L
#define MMEA0_DSM_CNTL__RRET_TAGMEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
#define MMEA0_DSM_CNTL__WRET_TAGMEM_DSM_IRRITATOR_DATA_MASK 0x00003000L
#define MMEA0_DSM_CNTL__WRET_TAGMEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
#define MMEA0_DSM_CNTL__GMIRD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00018000L
#define MMEA0_DSM_CNTL__GMIRD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L
#define MMEA0_DSM_CNTL__GMIWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L
#define MMEA0_DSM_CNTL__GMIWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L
#define MMEA0_DSM_CNTL__GMIWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x00600000L
#define MMEA0_DSM_CNTL__GMIWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00800000L
//MMEA0_DSM_CNTLA
#define MMEA0_DSM_CNTLA__DRAMRD_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x0
#define MMEA0_DSM_CNTLA__DRAMRD_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
#define MMEA0_DSM_CNTLA__DRAMWR_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x3
#define MMEA0_DSM_CNTLA__DRAMWR_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x5
#define MMEA0_DSM_CNTLA__IORD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x6
#define MMEA0_DSM_CNTLA__IORD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x8
#define MMEA0_DSM_CNTLA__IOWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x9
#define MMEA0_DSM_CNTLA__IOWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0xb
#define MMEA0_DSM_CNTLA__IOWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0xc
#define MMEA0_DSM_CNTLA__IOWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0xe
#define MMEA0_DSM_CNTLA__GMIRD_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0xf
#define MMEA0_DSM_CNTLA__GMIRD_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x11
#define MMEA0_DSM_CNTLA__GMIWR_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x12
#define MMEA0_DSM_CNTLA__GMIWR_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x14
#define MMEA0_DSM_CNTLA__DRAMRD_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
#define MMEA0_DSM_CNTLA__DRAMRD_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
#define MMEA0_DSM_CNTLA__DRAMWR_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00000018L
#define MMEA0_DSM_CNTLA__DRAMWR_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L
#define MMEA0_DSM_CNTLA__IORD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L
#define MMEA0_DSM_CNTLA__IORD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
#define MMEA0_DSM_CNTLA__IOWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000600L
#define MMEA0_DSM_CNTLA__IOWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
#define MMEA0_DSM_CNTLA__IOWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x00003000L
#define MMEA0_DSM_CNTLA__IOWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
#define MMEA0_DSM_CNTLA__GMIRD_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00018000L
#define MMEA0_DSM_CNTLA__GMIRD_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L
#define MMEA0_DSM_CNTLA__GMIWR_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L
#define MMEA0_DSM_CNTLA__GMIWR_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L
//MMEA0_DSM_CNTL2
#define MMEA0_DSM_CNTL2__DRAMRD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x0
#define MMEA0_DSM_CNTL2__DRAMRD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x2
#define MMEA0_DSM_CNTL2__DRAMWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x3
#define MMEA0_DSM_CNTL2__DRAMWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x5
#define MMEA0_DSM_CNTL2__DRAMWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0x6
#define MMEA0_DSM_CNTL2__DRAMWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0x8
#define MMEA0_DSM_CNTL2__RRET_TAGMEM_ENABLE_ERROR_INJECT__SHIFT 0x9
#define MMEA0_DSM_CNTL2__RRET_TAGMEM_SELECT_INJECT_DELAY__SHIFT 0xb
#define MMEA0_DSM_CNTL2__WRET_TAGMEM_ENABLE_ERROR_INJECT__SHIFT 0xc
#define MMEA0_DSM_CNTL2__WRET_TAGMEM_SELECT_INJECT_DELAY__SHIFT 0xe
#define MMEA0_DSM_CNTL2__GMIRD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0xf
#define MMEA0_DSM_CNTL2__GMIRD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x11
#define MMEA0_DSM_CNTL2__GMIWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x12
#define MMEA0_DSM_CNTL2__GMIWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x14
#define MMEA0_DSM_CNTL2__GMIWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0x15
#define MMEA0_DSM_CNTL2__GMIWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0x17
#define MMEA0_DSM_CNTL2__INJECT_DELAY__SHIFT 0x1a
#define MMEA0_DSM_CNTL2__DRAMRD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
#define MMEA0_DSM_CNTL2__DRAMRD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000004L
#define MMEA0_DSM_CNTL2__DRAMWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000018L
#define MMEA0_DSM_CNTL2__DRAMWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000020L
#define MMEA0_DSM_CNTL2__DRAMWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
#define MMEA0_DSM_CNTL2__DRAMWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00000100L
#define MMEA0_DSM_CNTL2__RRET_TAGMEM_ENABLE_ERROR_INJECT_MASK 0x00000600L
#define MMEA0_DSM_CNTL2__RRET_TAGMEM_SELECT_INJECT_DELAY_MASK 0x00000800L
#define MMEA0_DSM_CNTL2__WRET_TAGMEM_ENABLE_ERROR_INJECT_MASK 0x00003000L
#define MMEA0_DSM_CNTL2__WRET_TAGMEM_SELECT_INJECT_DELAY_MASK 0x00004000L
#define MMEA0_DSM_CNTL2__GMIRD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00018000L
#define MMEA0_DSM_CNTL2__GMIRD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00020000L
#define MMEA0_DSM_CNTL2__GMIWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L
#define MMEA0_DSM_CNTL2__GMIWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00100000L
#define MMEA0_DSM_CNTL2__GMIWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x00600000L
#define MMEA0_DSM_CNTL2__GMIWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00800000L
#define MMEA0_DSM_CNTL2__INJECT_DELAY_MASK 0xFC000000L
//MMEA0_DSM_CNTL2A
#define MMEA0_DSM_CNTL2A__DRAMRD_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x0
#define MMEA0_DSM_CNTL2A__DRAMRD_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x2
#define MMEA0_DSM_CNTL2A__DRAMWR_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x3
#define MMEA0_DSM_CNTL2A__DRAMWR_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x5
#define MMEA0_DSM_CNTL2A__IORD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x6
#define MMEA0_DSM_CNTL2A__IORD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x8
#define MMEA0_DSM_CNTL2A__IOWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x9
#define MMEA0_DSM_CNTL2A__IOWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0xb
#define MMEA0_DSM_CNTL2A__IOWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0xc
#define MMEA0_DSM_CNTL2A__IOWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0xe
#define MMEA0_DSM_CNTL2A__GMIRD_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0xf
#define MMEA0_DSM_CNTL2A__GMIRD_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x11
#define MMEA0_DSM_CNTL2A__GMIWR_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x12
#define MMEA0_DSM_CNTL2A__GMIWR_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x14
#define MMEA0_DSM_CNTL2A__DRAMRD_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
#define MMEA0_DSM_CNTL2A__DRAMRD_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00000004L
#define MMEA0_DSM_CNTL2A__DRAMWR_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00000018L
#define MMEA0_DSM_CNTL2A__DRAMWR_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00000020L
#define MMEA0_DSM_CNTL2A__IORD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
#define MMEA0_DSM_CNTL2A__IORD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000100L
#define MMEA0_DSM_CNTL2A__IOWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000600L
#define MMEA0_DSM_CNTL2A__IOWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000800L
#define MMEA0_DSM_CNTL2A__IOWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x00003000L
#define MMEA0_DSM_CNTL2A__IOWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00004000L
#define MMEA0_DSM_CNTL2A__GMIRD_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00018000L
#define MMEA0_DSM_CNTL2A__GMIRD_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00020000L
#define MMEA0_DSM_CNTL2A__GMIWR_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L
#define MMEA0_DSM_CNTL2A__GMIWR_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00100000L
//MMEA0_CGTT_CLK_CTRL
#define MMEA0_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
#define MMEA0_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
#define MMEA0_CGTT_CLK_CTRL__SPARE0__SHIFT 0xc
#define MMEA0_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_WRITE__SHIFT 0x14
#define MMEA0_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_READ__SHIFT 0x15
#define MMEA0_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_RETURN__SHIFT 0x16
#define MMEA0_CGTT_CLK_CTRL__SPARE1__SHIFT 0x17
#define MMEA0_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
#define MMEA0_CGTT_CLK_CTRL__SOFT_OVERRIDE_WRITE__SHIFT 0x1c
#define MMEA0_CGTT_CLK_CTRL__SOFT_OVERRIDE_READ__SHIFT 0x1d
#define MMEA0_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN__SHIFT 0x1e
#define MMEA0_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER__SHIFT 0x1f
#define MMEA0_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
#define MMEA0_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
#define MMEA0_CGTT_CLK_CTRL__SPARE0_MASK 0x000FF000L
#define MMEA0_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_WRITE_MASK 0x00100000L
#define MMEA0_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_READ_MASK 0x00200000L
#define MMEA0_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_RETURN_MASK 0x00400000L
#define MMEA0_CGTT_CLK_CTRL__SPARE1_MASK 0x07800000L
#define MMEA0_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
#define MMEA0_CGTT_CLK_CTRL__SOFT_OVERRIDE_WRITE_MASK 0x10000000L
#define MMEA0_CGTT_CLK_CTRL__SOFT_OVERRIDE_READ_MASK 0x20000000L
#define MMEA0_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN_MASK 0x40000000L
#define MMEA0_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER_MASK 0x80000000L
//MMEA0_EDC_MODE
#define MMEA0_EDC_MODE__COUNT_FED_OUT__SHIFT 0x10
#define MMEA0_EDC_MODE__GATE_FUE__SHIFT 0x11
#define MMEA0_EDC_MODE__DED_MODE__SHIFT 0x14
#define MMEA0_EDC_MODE__PROP_FED__SHIFT 0x1d
#define MMEA0_EDC_MODE__BYPASS__SHIFT 0x1f
#define MMEA0_EDC_MODE__COUNT_FED_OUT_MASK 0x00010000L
#define MMEA0_EDC_MODE__GATE_FUE_MASK 0x00020000L
#define MMEA0_EDC_MODE__DED_MODE_MASK 0x00300000L
#define MMEA0_EDC_MODE__PROP_FED_MASK 0x20000000L
#define MMEA0_EDC_MODE__BYPASS_MASK 0x80000000L
//MMEA0_ERR_STATUS
#define MMEA0_ERR_STATUS__SDP_RDRSP_STATUS__SHIFT 0x0
#define MMEA0_ERR_STATUS__SDP_WRRSP_STATUS__SHIFT 0x4
#define MMEA0_ERR_STATUS__SDP_RDRSP_DATASTATUS__SHIFT 0x8
#define MMEA0_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR__SHIFT 0xa
#define MMEA0_ERR_STATUS__CLEAR_ERROR_STATUS__SHIFT 0xb
#define MMEA0_ERR_STATUS__BUSY_ON_ERROR__SHIFT 0xc
#define MMEA0_ERR_STATUS__FUE_FLAG__SHIFT 0xd
#define MMEA0_ERR_STATUS__SDP_RDRSP_STATUS_MASK 0x0000000FL
#define MMEA0_ERR_STATUS__SDP_WRRSP_STATUS_MASK 0x000000F0L
#define MMEA0_ERR_STATUS__SDP_RDRSP_DATASTATUS_MASK 0x00000300L
#define MMEA0_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR_MASK 0x00000400L
#define MMEA0_ERR_STATUS__CLEAR_ERROR_STATUS_MASK 0x00000800L
#define MMEA0_ERR_STATUS__BUSY_ON_ERROR_MASK 0x00001000L
#define MMEA0_ERR_STATUS__FUE_FLAG_MASK 0x00002000L
//MMEA0_MISC2
#define MMEA0_MISC2__CSGROUP_SWAP_IN_DRAM_ARB__SHIFT 0x0
#define MMEA0_MISC2__CSGROUP_SWAP_IN_GMI_ARB__SHIFT 0x1
#define MMEA0_MISC2__CSGRP_BURST_LIMIT_DATA_DRAM__SHIFT 0x2
#define MMEA0_MISC2__CSGRP_BURST_LIMIT_DATA_GMI__SHIFT 0x7
#define MMEA0_MISC2__IO_RDWR_PRIORITY_ENABLE__SHIFT 0xc
#define MMEA0_MISC2__RRET_SWAP_MODE__SHIFT 0xd
#define MMEA0_MISC2__CSGROUP_SWAP_IN_DRAM_ARB_MASK 0x00000001L
#define MMEA0_MISC2__CSGROUP_SWAP_IN_GMI_ARB_MASK 0x00000002L
#define MMEA0_MISC2__CSGRP_BURST_LIMIT_DATA_DRAM_MASK 0x0000007CL
#define MMEA0_MISC2__CSGRP_BURST_LIMIT_DATA_GMI_MASK 0x00000F80L
#define MMEA0_MISC2__IO_RDWR_PRIORITY_ENABLE_MASK 0x00001000L
#define MMEA0_MISC2__RRET_SWAP_MODE_MASK 0x00002000L
//MMEA0_ADDRDEC_SELECT
#define MMEA0_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_START__SHIFT 0x0
#define MMEA0_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_END__SHIFT 0x5
#define MMEA0_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_START__SHIFT 0xa
#define MMEA0_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_END__SHIFT 0xf
#define MMEA0_ADDRDEC_SELECT__DRAM_GECC_ENABLE__SHIFT 0x14
#define MMEA0_ADDRDEC_SELECT__GMI_GECC_ENABLE__SHIFT 0x15
#define MMEA0_ADDRDEC_SELECT__DRAM_SKIP_MSB__SHIFT 0x16
#define MMEA0_ADDRDEC_SELECT__GMI_SKIP_MSB__SHIFT 0x17
#define MMEA0_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_START_MASK 0x0000001FL
#define MMEA0_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_END_MASK 0x000003E0L
#define MMEA0_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_START_MASK 0x00007C00L
#define MMEA0_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_END_MASK 0x000F8000L
#define MMEA0_ADDRDEC_SELECT__DRAM_GECC_ENABLE_MASK 0x00100000L
#define MMEA0_ADDRDEC_SELECT__GMI_GECC_ENABLE_MASK 0x00200000L
#define MMEA0_ADDRDEC_SELECT__DRAM_SKIP_MSB_MASK 0x00400000L
#define MMEA0_ADDRDEC_SELECT__GMI_SKIP_MSB_MASK 0x00800000L
// addressBlock: mmhub_pctldec
//PCTL_MISC
#define PCTL_MISC__ALLOW_DEEP_SLEEP_MODE__SHIFT 0x0
#define PCTL_MISC__STCTRL_RSMU_IDLE_THRESHOLD__SHIFT 0x3
#define PCTL_MISC__STCTRL_DAGB_IDLE_THRESHOLD__SHIFT 0x6
#define PCTL_MISC__STCTRL_IGNORE_PROTECTION_FAULT__SHIFT 0xb
#define PCTL_MISC__OVR_EA0_SDP_PARTACK__SHIFT 0xc
#define PCTL_MISC__OVR_EA1_SDP_PARTACK__SHIFT 0xd
#define PCTL_MISC__OVR_EA0_SDP_FULLACK__SHIFT 0xe
#define PCTL_MISC__OVR_EA1_SDP_FULLACK__SHIFT 0xf
#define PCTL_MISC__PGFSM_CMD_STATUS__SHIFT 0x10
#define PCTL_MISC__ALLOW_DEEP_SLEEP_MODE_MASK 0x00000007L
#define PCTL_MISC__STCTRL_RSMU_IDLE_THRESHOLD_MASK 0x00000038L
#define PCTL_MISC__STCTRL_DAGB_IDLE_THRESHOLD_MASK 0x000007C0L
#define PCTL_MISC__STCTRL_IGNORE_PROTECTION_FAULT_MASK 0x00000800L
#define PCTL_MISC__OVR_EA0_SDP_PARTACK_MASK 0x00001000L
#define PCTL_MISC__OVR_EA1_SDP_PARTACK_MASK 0x00002000L
#define PCTL_MISC__OVR_EA0_SDP_FULLACK_MASK 0x00004000L
#define PCTL_MISC__OVR_EA1_SDP_FULLACK_MASK 0x00008000L
#define PCTL_MISC__PGFSM_CMD_STATUS_MASK 0x00030000L
//PCTL_MMHUB_DEEPSLEEP
#define PCTL_MMHUB_DEEPSLEEP__DS0__SHIFT 0x0
#define PCTL_MMHUB_DEEPSLEEP__DS1__SHIFT 0x1
#define PCTL_MMHUB_DEEPSLEEP__DS2__SHIFT 0x2
#define PCTL_MMHUB_DEEPSLEEP__DS3__SHIFT 0x3
#define PCTL_MMHUB_DEEPSLEEP__DS4__SHIFT 0x4
#define PCTL_MMHUB_DEEPSLEEP__DS5__SHIFT 0x5
#define PCTL_MMHUB_DEEPSLEEP__DS6__SHIFT 0x6
#define PCTL_MMHUB_DEEPSLEEP__DS7__SHIFT 0x7
#define PCTL_MMHUB_DEEPSLEEP__DS8__SHIFT 0x8
#define PCTL_MMHUB_DEEPSLEEP__DS9__SHIFT 0x9
#define PCTL_MMHUB_DEEPSLEEP__DS10__SHIFT 0xa
#define PCTL_MMHUB_DEEPSLEEP__DS11__SHIFT 0xb
#define PCTL_MMHUB_DEEPSLEEP__DS12__SHIFT 0xc
#define PCTL_MMHUB_DEEPSLEEP__DS13__SHIFT 0xd
#define PCTL_MMHUB_DEEPSLEEP__DS14__SHIFT 0xe
#define PCTL_MMHUB_DEEPSLEEP__DS15__SHIFT 0xf
#define PCTL_MMHUB_DEEPSLEEP__DS16__SHIFT 0x10
#define PCTL_MMHUB_DEEPSLEEP__SETCLEAR__SHIFT 0x1f
#define PCTL_MMHUB_DEEPSLEEP__DS0_MASK 0x00000001L
#define PCTL_MMHUB_DEEPSLEEP__DS1_MASK 0x00000002L
#define PCTL_MMHUB_DEEPSLEEP__DS2_MASK 0x00000004L
#define PCTL_MMHUB_DEEPSLEEP__DS3_MASK 0x00000008L
#define PCTL_MMHUB_DEEPSLEEP__DS4_MASK 0x00000010L
#define PCTL_MMHUB_DEEPSLEEP__DS5_MASK 0x00000020L
#define PCTL_MMHUB_DEEPSLEEP__DS6_MASK 0x00000040L
#define PCTL_MMHUB_DEEPSLEEP__DS7_MASK 0x00000080L
#define PCTL_MMHUB_DEEPSLEEP__DS8_MASK 0x00000100L
#define PCTL_MMHUB_DEEPSLEEP__DS9_MASK 0x00000200L
#define PCTL_MMHUB_DEEPSLEEP__DS10_MASK 0x00000400L
#define PCTL_MMHUB_DEEPSLEEP__DS11_MASK 0x00000800L
#define PCTL_MMHUB_DEEPSLEEP__DS12_MASK 0x00001000L
#define PCTL_MMHUB_DEEPSLEEP__DS13_MASK 0x00002000L
#define PCTL_MMHUB_DEEPSLEEP__DS14_MASK 0x00004000L
#define PCTL_MMHUB_DEEPSLEEP__DS15_MASK 0x00008000L
#define PCTL_MMHUB_DEEPSLEEP__DS16_MASK 0x00010000L
#define PCTL_MMHUB_DEEPSLEEP__SETCLEAR_MASK 0x80000000L
//PCTL_MMHUB_DEEPSLEEP_OVERRIDE
#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS0__SHIFT 0x0
#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS1__SHIFT 0x1
#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS2__SHIFT 0x2
#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS3__SHIFT 0x3
#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS4__SHIFT 0x4
#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS5__SHIFT 0x5
#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS6__SHIFT 0x6
#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS7__SHIFT 0x7
#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS8__SHIFT 0x8
#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS9__SHIFT 0x9
#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS10__SHIFT 0xa
#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS11__SHIFT 0xb
#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS12__SHIFT 0xc
#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS13__SHIFT 0xd
#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS14__SHIFT 0xe
#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS15__SHIFT 0xf
#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS16__SHIFT 0x10
#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS0_MASK 0x00000001L
#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS1_MASK 0x00000002L
#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS2_MASK 0x00000004L
#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS3_MASK 0x00000008L
#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS4_MASK 0x00000010L
#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS5_MASK 0x00000020L
#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS6_MASK 0x00000040L
#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS7_MASK 0x00000080L
#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS8_MASK 0x00000100L
#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS9_MASK 0x00000200L
#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS10_MASK 0x00000400L
#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS11_MASK 0x00000800L
#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS12_MASK 0x00001000L
#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS13_MASK 0x00002000L
#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS14_MASK 0x00004000L
#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS15_MASK 0x00008000L
#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS16_MASK 0x00010000L
//PCTL_PG_IGNORE_DEEPSLEEP
#define PCTL_PG_IGNORE_DEEPSLEEP__ALLIPS__SHIFT 0x0
#define PCTL_PG_IGNORE_DEEPSLEEP__DS0__SHIFT 0x1
#define PCTL_PG_IGNORE_DEEPSLEEP__DS1__SHIFT 0x2
#define PCTL_PG_IGNORE_DEEPSLEEP__DS2__SHIFT 0x3
#define PCTL_PG_IGNORE_DEEPSLEEP__DS3__SHIFT 0x4
#define PCTL_PG_IGNORE_DEEPSLEEP__DS4__SHIFT 0x5
#define PCTL_PG_IGNORE_DEEPSLEEP__DS5__SHIFT 0x6
#define PCTL_PG_IGNORE_DEEPSLEEP__DS6__SHIFT 0x7
#define PCTL_PG_IGNORE_DEEPSLEEP__DS7__SHIFT 0x8
#define PCTL_PG_IGNORE_DEEPSLEEP__DS8__SHIFT 0x9
#define PCTL_PG_IGNORE_DEEPSLEEP__DS9__SHIFT 0xa
#define PCTL_PG_IGNORE_DEEPSLEEP__DS10__SHIFT 0xb
#define PCTL_PG_IGNORE_DEEPSLEEP__DS11__SHIFT 0xc
#define PCTL_PG_IGNORE_DEEPSLEEP__DS12__SHIFT 0xd
#define PCTL_PG_IGNORE_DEEPSLEEP__DS13__SHIFT 0xe
#define PCTL_PG_IGNORE_DEEPSLEEP__DS14__SHIFT 0xf
#define PCTL_PG_IGNORE_DEEPSLEEP__DS15__SHIFT 0x10
#define PCTL_PG_IGNORE_DEEPSLEEP__DS16__SHIFT 0x11
#define PCTL_PG_IGNORE_DEEPSLEEP__ALLIPS_MASK 0x00000001L
#define PCTL_PG_IGNORE_DEEPSLEEP__DS0_MASK 0x00000002L
#define PCTL_PG_IGNORE_DEEPSLEEP__DS1_MASK 0x00000004L
#define PCTL_PG_IGNORE_DEEPSLEEP__DS2_MASK 0x00000008L
#define PCTL_PG_IGNORE_DEEPSLEEP__DS3_MASK 0x00000010L
#define PCTL_PG_IGNORE_DEEPSLEEP__DS4_MASK 0x00000020L
#define PCTL_PG_IGNORE_DEEPSLEEP__DS5_MASK 0x00000040L
#define PCTL_PG_IGNORE_DEEPSLEEP__DS6_MASK 0x00000080L
#define PCTL_PG_IGNORE_DEEPSLEEP__DS7_MASK 0x00000100L
#define PCTL_PG_IGNORE_DEEPSLEEP__DS8_MASK 0x00000200L
#define PCTL_PG_IGNORE_DEEPSLEEP__DS9_MASK 0x00000400L
#define PCTL_PG_IGNORE_DEEPSLEEP__DS10_MASK 0x00000800L
#define PCTL_PG_IGNORE_DEEPSLEEP__DS11_MASK 0x00001000L
#define PCTL_PG_IGNORE_DEEPSLEEP__DS12_MASK 0x00002000L
#define PCTL_PG_IGNORE_DEEPSLEEP__DS13_MASK 0x00004000L
#define PCTL_PG_IGNORE_DEEPSLEEP__DS14_MASK 0x00008000L
#define PCTL_PG_IGNORE_DEEPSLEEP__DS15_MASK 0x00010000L
#define PCTL_PG_IGNORE_DEEPSLEEP__DS16_MASK 0x00020000L
//PCTL_PG_DAGB
#define PCTL_PG_DAGB__DS0__SHIFT 0x0
#define PCTL_PG_DAGB__DS1__SHIFT 0x1
#define PCTL_PG_DAGB__DS2__SHIFT 0x2
#define PCTL_PG_DAGB__DS3__SHIFT 0x3
#define PCTL_PG_DAGB__DS4__SHIFT 0x4
#define PCTL_PG_DAGB__DS5__SHIFT 0x5
#define PCTL_PG_DAGB__DS6__SHIFT 0x6
#define PCTL_PG_DAGB__DS7__SHIFT 0x7
#define PCTL_PG_DAGB__DS8__SHIFT 0x8
#define PCTL_PG_DAGB__DS9__SHIFT 0x9
#define PCTL_PG_DAGB__DS10__SHIFT 0xa
#define PCTL_PG_DAGB__DS11__SHIFT 0xb
#define PCTL_PG_DAGB__DS12__SHIFT 0xc
#define PCTL_PG_DAGB__DS13__SHIFT 0xd
#define PCTL_PG_DAGB__DS14__SHIFT 0xe
#define PCTL_PG_DAGB__DS15__SHIFT 0xf
#define PCTL_PG_DAGB__DS16__SHIFT 0x10
#define PCTL_PG_DAGB__DS0_MASK 0x00000001L
#define PCTL_PG_DAGB__DS1_MASK 0x00000002L
#define PCTL_PG_DAGB__DS2_MASK 0x00000004L
#define PCTL_PG_DAGB__DS3_MASK 0x00000008L
#define PCTL_PG_DAGB__DS4_MASK 0x00000010L
#define PCTL_PG_DAGB__DS5_MASK 0x00000020L
#define PCTL_PG_DAGB__DS6_MASK 0x00000040L
#define PCTL_PG_DAGB__DS7_MASK 0x00000080L
#define PCTL_PG_DAGB__DS8_MASK 0x00000100L
#define PCTL_PG_DAGB__DS9_MASK 0x00000200L
#define PCTL_PG_DAGB__DS10_MASK 0x00000400L
#define PCTL_PG_DAGB__DS11_MASK 0x00000800L
#define PCTL_PG_DAGB__DS12_MASK 0x00001000L
#define PCTL_PG_DAGB__DS13_MASK 0x00002000L
#define PCTL_PG_DAGB__DS14_MASK 0x00004000L
#define PCTL_PG_DAGB__DS15_MASK 0x00008000L
#define PCTL_PG_DAGB__DS16_MASK 0x00010000L
//PCTL0_RENG_RAM_INDEX
#define PCTL0_RENG_RAM_INDEX__RENG_RAM_INDEX__SHIFT 0x0
#define PCTL0_RENG_RAM_INDEX__RENG_RAM_INDEX_MASK 0x000007FFL
//PCTL0_RENG_RAM_DATA
#define PCTL0_RENG_RAM_DATA__RENG_RAM_DATA__SHIFT 0x0
#define PCTL0_RENG_RAM_DATA__RENG_RAM_DATA_MASK 0xFFFFFFFFL
//PCTL0_RENG_EXECUTE
#define PCTL0_RENG_EXECUTE__RENG_EXECUTE_NOW__SHIFT 0x0
#define PCTL0_RENG_EXECUTE__RENG_EXECUTE_NOW_MODE__SHIFT 0x1
#define PCTL0_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR__SHIFT 0x2
#define PCTL0_RENG_EXECUTE__RENG_EXECUTE_END_PTR__SHIFT 0xd
#define PCTL0_RENG_EXECUTE__RENG_EXECUTE_NOW_MASK 0x00000001L
#define PCTL0_RENG_EXECUTE__RENG_EXECUTE_NOW_MODE_MASK 0x00000002L
#define PCTL0_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR_MASK 0x00001FFCL
#define PCTL0_RENG_EXECUTE__RENG_EXECUTE_END_PTR_MASK 0x00FFE000L
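/*
 * Illustrative sketch, not generated register content: PCTL0 encodes an
 * 11-bit start/end pointer pair into its RENG RAM (index mask 0x7FF
 * above), while PCTL1/PCTL2 below use 10-bit pointers. A hypothetical,
 * purely mechanical packing helper, guarded out of compilation:
 */
#if 0
static inline u32 pctl0_reng_execute_pack(u32 val, u32 start_ptr,
					  u32 end_ptr)
{
	val &= ~(PCTL0_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR_MASK |
		 PCTL0_RENG_EXECUTE__RENG_EXECUTE_END_PTR_MASK);
	val |= (start_ptr << PCTL0_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR__SHIFT) &
	       PCTL0_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR_MASK;
	val |= (end_ptr << PCTL0_RENG_EXECUTE__RENG_EXECUTE_END_PTR__SHIFT) &
	       PCTL0_RENG_EXECUTE__RENG_EXECUTE_END_PTR_MASK;
	return val;
}
#endif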
//PCTL1_RENG_RAM_INDEX
#define PCTL1_RENG_RAM_INDEX__RENG_RAM_INDEX__SHIFT 0x0
#define PCTL1_RENG_RAM_INDEX__RENG_RAM_INDEX_MASK 0x000003FFL
//PCTL1_RENG_RAM_DATA
#define PCTL1_RENG_RAM_DATA__RENG_RAM_DATA__SHIFT 0x0
#define PCTL1_RENG_RAM_DATA__RENG_RAM_DATA_MASK 0xFFFFFFFFL
//PCTL1_RENG_EXECUTE
#define PCTL1_RENG_EXECUTE__RENG_EXECUTE_NOW__SHIFT 0x0
#define PCTL1_RENG_EXECUTE__RENG_EXECUTE_NOW_MODE__SHIFT 0x1
#define PCTL1_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR__SHIFT 0x2
#define PCTL1_RENG_EXECUTE__RENG_EXECUTE_END_PTR__SHIFT 0xc
#define PCTL1_RENG_EXECUTE__RENG_EXECUTE_NOW_MASK 0x00000001L
#define PCTL1_RENG_EXECUTE__RENG_EXECUTE_NOW_MODE_MASK 0x00000002L
#define PCTL1_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR_MASK 0x00000FFCL
#define PCTL1_RENG_EXECUTE__RENG_EXECUTE_END_PTR_MASK 0x003FF000L
//PCTL2_RENG_RAM_INDEX
#define PCTL2_RENG_RAM_INDEX__RENG_RAM_INDEX__SHIFT 0x0
#define PCTL2_RENG_RAM_INDEX__RENG_RAM_INDEX_MASK 0x000003FFL
//PCTL2_RENG_RAM_DATA
#define PCTL2_RENG_RAM_DATA__RENG_RAM_DATA__SHIFT 0x0
#define PCTL2_RENG_RAM_DATA__RENG_RAM_DATA_MASK 0xFFFFFFFFL
//PCTL2_RENG_EXECUTE
#define PCTL2_RENG_EXECUTE__RENG_EXECUTE_NOW__SHIFT 0x0
#define PCTL2_RENG_EXECUTE__RENG_EXECUTE_NOW_MODE__SHIFT 0x1
#define PCTL2_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR__SHIFT 0x2
#define PCTL2_RENG_EXECUTE__RENG_EXECUTE_END_PTR__SHIFT 0xc
#define PCTL2_RENG_EXECUTE__RENG_EXECUTE_NOW_MASK 0x00000001L
#define PCTL2_RENG_EXECUTE__RENG_EXECUTE_NOW_MODE_MASK 0x00000002L
#define PCTL2_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR_MASK 0x00000FFCL
#define PCTL2_RENG_EXECUTE__RENG_EXECUTE_END_PTR_MASK 0x003FF000L
//PCTL0_STCTRL_REGISTER_SAVE_RANGE0
#define PCTL0_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
#define PCTL0_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
#define PCTL0_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
#define PCTL0_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
//PCTL0_STCTRL_REGISTER_SAVE_RANGE1
#define PCTL0_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
#define PCTL0_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
#define PCTL0_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
#define PCTL0_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
//PCTL0_STCTRL_REGISTER_SAVE_RANGE2
#define PCTL0_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
#define PCTL0_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
#define PCTL0_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
#define PCTL0_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
//PCTL0_STCTRL_REGISTER_SAVE_RANGE3
#define PCTL0_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
#define PCTL0_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
#define PCTL0_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
#define PCTL0_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
//PCTL0_STCTRL_REGISTER_SAVE_RANGE4
#define PCTL0_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
#define PCTL0_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
#define PCTL0_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
#define PCTL0_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
//PCTL0_STCTRL_REGISTER_SAVE_EXCL_SET
#define PCTL0_STCTRL_REGISTER_SAVE_EXCL_SET__STCTRL_REGISTER_SAVE_EXCL0__SHIFT 0x0
#define PCTL0_STCTRL_REGISTER_SAVE_EXCL_SET__STCTRL_REGISTER_SAVE_EXCL1__SHIFT 0x10
#define PCTL0_STCTRL_REGISTER_SAVE_EXCL_SET__STCTRL_REGISTER_SAVE_EXCL0_MASK 0x0000FFFFL
#define PCTL0_STCTRL_REGISTER_SAVE_EXCL_SET__STCTRL_REGISTER_SAVE_EXCL1_MASK 0xFFFF0000L
//PCTL0_STCTRL_REGISTER_SAVE_EXCL_SET1
#define PCTL0_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL2__SHIFT 0x0
#define PCTL0_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL3__SHIFT 0x10
#define PCTL0_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL2_MASK 0x0000FFFFL
#define PCTL0_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL3_MASK 0xFFFF0000L
//PCTL1_STCTRL_REGISTER_SAVE_RANGE0
#define PCTL1_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
#define PCTL1_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
#define PCTL1_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
#define PCTL1_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
//PCTL1_STCTRL_REGISTER_SAVE_RANGE1
#define PCTL1_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
#define PCTL1_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
#define PCTL1_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
#define PCTL1_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
//PCTL1_STCTRL_REGISTER_SAVE_RANGE2
#define PCTL1_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
#define PCTL1_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
#define PCTL1_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
#define PCTL1_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
//PCTL1_STCTRL_REGISTER_SAVE_RANGE3
#define PCTL1_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
#define PCTL1_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
#define PCTL1_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
#define PCTL1_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
//PCTL1_STCTRL_REGISTER_SAVE_RANGE4
#define PCTL1_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
#define PCTL1_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
#define PCTL1_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
#define PCTL1_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
//PCTL1_STCTRL_REGISTER_SAVE_EXCL_SET
#define PCTL1_STCTRL_REGISTER_SAVE_EXCL_SET__STCTRL_REGISTER_SAVE_EXCL0__SHIFT 0x0
#define PCTL1_STCTRL_REGISTER_SAVE_EXCL_SET__STCTRL_REGISTER_SAVE_EXCL1__SHIFT 0x10
#define PCTL1_STCTRL_REGISTER_SAVE_EXCL_SET__STCTRL_REGISTER_SAVE_EXCL0_MASK 0x0000FFFFL
#define PCTL1_STCTRL_REGISTER_SAVE_EXCL_SET__STCTRL_REGISTER_SAVE_EXCL1_MASK 0xFFFF0000L
//PCTL1_STCTRL_REGISTER_SAVE_EXCL_SET1
#define PCTL1_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL2__SHIFT 0x0
#define PCTL1_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL3__SHIFT 0x10
#define PCTL1_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL2_MASK 0x0000FFFFL
#define PCTL1_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL3_MASK 0xFFFF0000L
//PCTL2_STCTRL_REGISTER_SAVE_RANGE0
#define PCTL2_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
#define PCTL2_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
#define PCTL2_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
#define PCTL2_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
//PCTL2_STCTRL_REGISTER_SAVE_RANGE1
#define PCTL2_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
#define PCTL2_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
#define PCTL2_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
#define PCTL2_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
//PCTL2_STCTRL_REGISTER_SAVE_RANGE2
#define PCTL2_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
#define PCTL2_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
#define PCTL2_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
#define PCTL2_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
//PCTL2_STCTRL_REGISTER_SAVE_RANGE3
#define PCTL2_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
#define PCTL2_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
#define PCTL2_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
#define PCTL2_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
//PCTL2_STCTRL_REGISTER_SAVE_RANGE4
#define PCTL2_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
#define PCTL2_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
#define PCTL2_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
#define PCTL2_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
//PCTL2_STCTRL_REGISTER_SAVE_EXCL_SET
#define PCTL2_STCTRL_REGISTER_SAVE_EXCL_SET__STCTRL_REGISTER_SAVE_EXCL0__SHIFT 0x0
#define PCTL2_STCTRL_REGISTER_SAVE_EXCL_SET__STCTRL_REGISTER_SAVE_EXCL1__SHIFT 0x10
#define PCTL2_STCTRL_REGISTER_SAVE_EXCL_SET__STCTRL_REGISTER_SAVE_EXCL0_MASK 0x0000FFFFL
#define PCTL2_STCTRL_REGISTER_SAVE_EXCL_SET__STCTRL_REGISTER_SAVE_EXCL1_MASK 0xFFFF0000L
//PCTL2_STCTRL_REGISTER_SAVE_EXCL_SET1
#define PCTL2_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL2__SHIFT 0x0
#define PCTL2_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL3__SHIFT 0x10
#define PCTL2_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL2_MASK 0x0000FFFFL
#define PCTL2_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL3_MASK 0xFFFF0000L
//PCTL0_MISC
#define PCTL0_MISC__CRITICAL_REGS_LOCK__SHIFT 0xb
#define PCTL0_MISC__TILE_IDLE_THRESHOLD__SHIFT 0xc
#define PCTL0_MISC__RENG_MEM_LS_ENABLE__SHIFT 0xf
#define PCTL0_MISC__STCTRL_FORCE_PGFSM_CMD_DONE__SHIFT 0x10
#define PCTL0_MISC__RENG_EXECUTE_ON_PWR_UP__SHIFT 0x11
#define PCTL0_MISC__RENG_EXECUTE_ON_REG_UPDATE__SHIFT 0x12
#define PCTL0_MISC__RD_TIMER_ENABLE__SHIFT 0x13
#define PCTL0_MISC__CRITICAL_REGS_LOCK_MASK 0x00000800L
#define PCTL0_MISC__TILE_IDLE_THRESHOLD_MASK 0x00007000L
#define PCTL0_MISC__RENG_MEM_LS_ENABLE_MASK 0x00008000L
#define PCTL0_MISC__STCTRL_FORCE_PGFSM_CMD_DONE_MASK 0x00010000L
#define PCTL0_MISC__RENG_EXECUTE_ON_PWR_UP_MASK 0x00020000L
#define PCTL0_MISC__RENG_EXECUTE_ON_REG_UPDATE_MASK 0x00040000L
#define PCTL0_MISC__RD_TIMER_ENABLE_MASK 0x00080000L
//PCTL1_MISC
#define PCTL1_MISC__CRITICAL_REGS_LOCK__SHIFT 0xa
#define PCTL1_MISC__TILE_IDLE_THRESHOLD__SHIFT 0xb
#define PCTL1_MISC__RENG_MEM_LS_ENABLE__SHIFT 0xe
#define PCTL1_MISC__STCTRL_FORCE_PGFSM_CMD_DONE__SHIFT 0xf
#define PCTL1_MISC__DEEPSLEEP_DISCSDP__SHIFT 0x10
#define PCTL1_MISC__RENG_EXECUTE_ON_PWR_UP__SHIFT 0x11
#define PCTL1_MISC__RENG_EXECUTE_ON_REG_UPDATE__SHIFT 0x12
#define PCTL1_MISC__RD_TIMER_ENABLE__SHIFT 0x13
#define PCTL1_MISC__CRITICAL_REGS_LOCK_MASK 0x00000400L
#define PCTL1_MISC__TILE_IDLE_THRESHOLD_MASK 0x00003800L
#define PCTL1_MISC__RENG_MEM_LS_ENABLE_MASK 0x00004000L
#define PCTL1_MISC__STCTRL_FORCE_PGFSM_CMD_DONE_MASK 0x00008000L
#define PCTL1_MISC__DEEPSLEEP_DISCSDP_MASK 0x00010000L
#define PCTL1_MISC__RENG_EXECUTE_ON_PWR_UP_MASK 0x00020000L
#define PCTL1_MISC__RENG_EXECUTE_ON_REG_UPDATE_MASK 0x00040000L
#define PCTL1_MISC__RD_TIMER_ENABLE_MASK 0x00080000L
//PCTL2_MISC
#define PCTL2_MISC__CRITICAL_REGS_LOCK__SHIFT 0xa
#define PCTL2_MISC__TILE_IDLE_THRESHOLD__SHIFT 0xb
#define PCTL2_MISC__RENG_MEM_LS_ENABLE__SHIFT 0xe
#define PCTL2_MISC__STCTRL_FORCE_PGFSM_CMD_DONE__SHIFT 0xf
#define PCTL2_MISC__DEEPSLEEP_DISCSDP__SHIFT 0x10
#define PCTL2_MISC__RENG_EXECUTE_ON_PWR_UP__SHIFT 0x11
#define PCTL2_MISC__RENG_EXECUTE_ON_REG_UPDATE__SHIFT 0x12
#define PCTL2_MISC__RD_TIMER_ENABLE__SHIFT 0x13
#define PCTL2_MISC__CRITICAL_REGS_LOCK_MASK 0x00000400L
#define PCTL2_MISC__TILE_IDLE_THRESHOLD_MASK 0x00003800L
#define PCTL2_MISC__RENG_MEM_LS_ENABLE_MASK 0x00004000L
#define PCTL2_MISC__STCTRL_FORCE_PGFSM_CMD_DONE_MASK 0x00008000L
#define PCTL2_MISC__DEEPSLEEP_DISCSDP_MASK 0x00010000L
#define PCTL2_MISC__RENG_EXECUTE_ON_PWR_UP_MASK 0x00020000L
#define PCTL2_MISC__RENG_EXECUTE_ON_REG_UPDATE_MASK 0x00040000L
#define PCTL2_MISC__RD_TIMER_ENABLE_MASK 0x00080000L
//PCTL_PERFCOUNTER_LO
#define PCTL_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
#define PCTL_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
//PCTL_PERFCOUNTER_HI
#define PCTL_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
#define PCTL_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
#define PCTL_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
#define PCTL_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
//PCTL_PERFCOUNTER0_CFG
#define PCTL_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
#define PCTL_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
#define PCTL_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
#define PCTL_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
#define PCTL_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
#define PCTL_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
#define PCTL_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
#define PCTL_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
#define PCTL_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
#define PCTL_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
//PCTL_PERFCOUNTER1_CFG
#define PCTL_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
#define PCTL_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
#define PCTL_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
#define PCTL_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
#define PCTL_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
#define PCTL_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
#define PCTL_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
#define PCTL_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
#define PCTL_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
#define PCTL_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
//PCTL_PERFCOUNTER_RSLT_CNTL
#define PCTL_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
#define PCTL_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
#define PCTL_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
#define PCTL_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
#define PCTL_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
#define PCTL_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
#define PCTL_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
#define PCTL_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
#define PCTL_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
#define PCTL_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
#define PCTL_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
#define PCTL_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
// addressBlock: mmhub_l1tlb_mmvml1pfdec
//MMMC_VM_MX_L1_TLB0_STATUS
#define MMMC_VM_MX_L1_TLB0_STATUS__BUSY__SHIFT 0x0
#define MMMC_VM_MX_L1_TLB0_STATUS__FOUND_PARITY_ERRORS__SHIFT 0x1
#define MMMC_VM_MX_L1_TLB0_STATUS__BUSY_MASK 0x00000001L
#define MMMC_VM_MX_L1_TLB0_STATUS__FOUND_PARITY_ERRORS_MASK 0x00000002L
//MMMC_VM_MX_L1_TLB1_STATUS
#define MMMC_VM_MX_L1_TLB1_STATUS__BUSY__SHIFT 0x0
#define MMMC_VM_MX_L1_TLB1_STATUS__FOUND_PARITY_ERRORS__SHIFT 0x1
#define MMMC_VM_MX_L1_TLB1_STATUS__BUSY_MASK 0x00000001L
#define MMMC_VM_MX_L1_TLB1_STATUS__FOUND_PARITY_ERRORS_MASK 0x00000002L
//MMMC_VM_MX_L1_TLB2_STATUS
#define MMMC_VM_MX_L1_TLB2_STATUS__BUSY__SHIFT 0x0
#define MMMC_VM_MX_L1_TLB2_STATUS__FOUND_PARITY_ERRORS__SHIFT 0x1
#define MMMC_VM_MX_L1_TLB2_STATUS__BUSY_MASK 0x00000001L
#define MMMC_VM_MX_L1_TLB2_STATUS__FOUND_PARITY_ERRORS_MASK 0x00000002L
//MMMC_VM_MX_L1_TLB3_STATUS
#define MMMC_VM_MX_L1_TLB3_STATUS__BUSY__SHIFT 0x0
#define MMMC_VM_MX_L1_TLB3_STATUS__FOUND_PARITY_ERRORS__SHIFT 0x1
#define MMMC_VM_MX_L1_TLB3_STATUS__BUSY_MASK 0x00000001L
#define MMMC_VM_MX_L1_TLB3_STATUS__FOUND_PARITY_ERRORS_MASK 0x00000002L
//MMMC_VM_MX_L1_TLB4_STATUS
#define MMMC_VM_MX_L1_TLB4_STATUS__BUSY__SHIFT 0x0
#define MMMC_VM_MX_L1_TLB4_STATUS__FOUND_PARITY_ERRORS__SHIFT 0x1
#define MMMC_VM_MX_L1_TLB4_STATUS__BUSY_MASK 0x00000001L
#define MMMC_VM_MX_L1_TLB4_STATUS__FOUND_PARITY_ERRORS_MASK 0x00000002L
//MMMC_VM_MX_L1_TLB5_STATUS
#define MMMC_VM_MX_L1_TLB5_STATUS__BUSY__SHIFT 0x0
#define MMMC_VM_MX_L1_TLB5_STATUS__FOUND_PARITY_ERRORS__SHIFT 0x1
#define MMMC_VM_MX_L1_TLB5_STATUS__BUSY_MASK 0x00000001L
#define MMMC_VM_MX_L1_TLB5_STATUS__FOUND_PARITY_ERRORS_MASK 0x00000002L
//MMMC_VM_MX_L1_TLB6_STATUS
#define MMMC_VM_MX_L1_TLB6_STATUS__BUSY__SHIFT 0x0
#define MMMC_VM_MX_L1_TLB6_STATUS__FOUND_PARITY_ERRORS__SHIFT 0x1
#define MMMC_VM_MX_L1_TLB6_STATUS__BUSY_MASK 0x00000001L
#define MMMC_VM_MX_L1_TLB6_STATUS__FOUND_PARITY_ERRORS_MASK 0x00000002L
//MMMC_VM_MX_L1_TLB7_STATUS
#define MMMC_VM_MX_L1_TLB7_STATUS__BUSY__SHIFT 0x0
#define MMMC_VM_MX_L1_TLB7_STATUS__FOUND_PARITY_ERRORS__SHIFT 0x1
#define MMMC_VM_MX_L1_TLB7_STATUS__BUSY_MASK 0x00000001L
#define MMMC_VM_MX_L1_TLB7_STATUS__FOUND_PARITY_ERRORS_MASK 0x00000002L
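/*
 * Illustrative sketch, not generated register content: all eight
 * MMMC_VM_MX_L1_TLBn_STATUS registers above share one layout, so a single
 * hypothetical helper (guarded out) covers the BUSY test for any of them
 * once the caller has read the status word:
 */
#if 0
static inline bool mmmc_vm_mx_l1_tlb_busy(u32 status)
{
	return (status & MMMC_VM_MX_L1_TLB0_STATUS__BUSY_MASK) >>
	       MMMC_VM_MX_L1_TLB0_STATUS__BUSY__SHIFT;
}
#endif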
// addressBlock: mmhub_l1tlb_mmvml1pldec
//MMMC_VM_MX_L1_PERFCOUNTER0_CFG
#define MMMC_VM_MX_L1_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
#define MMMC_VM_MX_L1_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
#define MMMC_VM_MX_L1_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
#define MMMC_VM_MX_L1_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
#define MMMC_VM_MX_L1_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
#define MMMC_VM_MX_L1_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
#define MMMC_VM_MX_L1_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
#define MMMC_VM_MX_L1_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
#define MMMC_VM_MX_L1_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
#define MMMC_VM_MX_L1_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
//MMMC_VM_MX_L1_PERFCOUNTER1_CFG
#define MMMC_VM_MX_L1_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
#define MMMC_VM_MX_L1_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
#define MMMC_VM_MX_L1_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
#define MMMC_VM_MX_L1_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
#define MMMC_VM_MX_L1_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
#define MMMC_VM_MX_L1_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
#define MMMC_VM_MX_L1_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
#define MMMC_VM_MX_L1_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
#define MMMC_VM_MX_L1_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
#define MMMC_VM_MX_L1_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
//MMMC_VM_MX_L1_PERFCOUNTER2_CFG
#define MMMC_VM_MX_L1_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0
#define MMMC_VM_MX_L1_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8
#define MMMC_VM_MX_L1_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18
#define MMMC_VM_MX_L1_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c
#define MMMC_VM_MX_L1_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d
#define MMMC_VM_MX_L1_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL
#define MMMC_VM_MX_L1_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L
#define MMMC_VM_MX_L1_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L
#define MMMC_VM_MX_L1_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L
#define MMMC_VM_MX_L1_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L
//MMMC_VM_MX_L1_PERFCOUNTER3_CFG
#define MMMC_VM_MX_L1_PERFCOUNTER3_CFG__PERF_SEL__SHIFT 0x0
#define MMMC_VM_MX_L1_PERFCOUNTER3_CFG__PERF_SEL_END__SHIFT 0x8
#define MMMC_VM_MX_L1_PERFCOUNTER3_CFG__PERF_MODE__SHIFT 0x18
#define MMMC_VM_MX_L1_PERFCOUNTER3_CFG__ENABLE__SHIFT 0x1c
#define MMMC_VM_MX_L1_PERFCOUNTER3_CFG__CLEAR__SHIFT 0x1d
#define MMMC_VM_MX_L1_PERFCOUNTER3_CFG__PERF_SEL_MASK 0x000000FFL
#define MMMC_VM_MX_L1_PERFCOUNTER3_CFG__PERF_SEL_END_MASK 0x0000FF00L
#define MMMC_VM_MX_L1_PERFCOUNTER3_CFG__PERF_MODE_MASK 0x0F000000L
#define MMMC_VM_MX_L1_PERFCOUNTER3_CFG__ENABLE_MASK 0x10000000L
#define MMMC_VM_MX_L1_PERFCOUNTER3_CFG__CLEAR_MASK 0x20000000L
//MMMC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL
#define MMMC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
#define MMMC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
#define MMMC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
#define MMMC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
#define MMMC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
#define MMMC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
#define MMMC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
#define MMMC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
#define MMMC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
#define MMMC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
#define MMMC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
#define MMMC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
// addressBlock: mmhub_l1tlb_mmvml1prdec
//MMMC_VM_MX_L1_PERFCOUNTER_LO
#define MMMC_VM_MX_L1_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
#define MMMC_VM_MX_L1_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
//MMMC_VM_MX_L1_PERFCOUNTER_HI
#define MMMC_VM_MX_L1_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
#define MMMC_VM_MX_L1_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
#define MMMC_VM_MX_L1_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
#define MMMC_VM_MX_L1_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
// addressBlock: mmhub_mmutcl2_mmatcl2dec
//MM_ATC_L2_CNTL
#define MM_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READ_REQUESTS__SHIFT 0x0
#define MM_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITE_REQUESTS__SHIFT 0x3
#define MM_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READS_DEPENDS_ON_ADDR_MOD__SHIFT 0x6
#define MM_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITES_DEPENDS_ON_ADDR_MOD__SHIFT 0x7
#define MM_ATC_L2_CNTL__CACHE_INVALIDATE_MODE__SHIFT 0x8
#define MM_ATC_L2_CNTL__ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY__SHIFT 0xb
#define MM_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READ_REQUESTS_MASK 0x00000003L
#define MM_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITE_REQUESTS_MASK 0x00000018L
#define MM_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READS_DEPENDS_ON_ADDR_MOD_MASK 0x00000040L
#define MM_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITES_DEPENDS_ON_ADDR_MOD_MASK 0x00000080L
#define MM_ATC_L2_CNTL__CACHE_INVALIDATE_MODE_MASK 0x00000700L
#define MM_ATC_L2_CNTL__ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY_MASK 0x00000800L
//MM_ATC_L2_CNTL2
#define MM_ATC_L2_CNTL2__BANK_SELECT__SHIFT 0x0
#define MM_ATC_L2_CNTL2__L2_CACHE_UPDATE_MODE__SHIFT 0x6
#define MM_ATC_L2_CNTL2__ENABLE_L2_CACHE_LRU_UPDATE_BY_WRITE__SHIFT 0x8
#define MM_ATC_L2_CNTL2__L2_CACHE_SWAP_TAG_INDEX_LSBS__SHIFT 0x9
#define MM_ATC_L2_CNTL2__L2_CACHE_VMID_MODE__SHIFT 0xc
#define MM_ATC_L2_CNTL2__L2_CACHE_UPDATE_WILDCARD_REFERENCE_VALUE__SHIFT 0xf
#define MM_ATC_L2_CNTL2__BANK_SELECT_MASK 0x0000003FL
#define MM_ATC_L2_CNTL2__L2_CACHE_UPDATE_MODE_MASK 0x000000C0L
#define MM_ATC_L2_CNTL2__ENABLE_L2_CACHE_LRU_UPDATE_BY_WRITE_MASK 0x00000100L
#define MM_ATC_L2_CNTL2__L2_CACHE_SWAP_TAG_INDEX_LSBS_MASK 0x00000E00L
#define MM_ATC_L2_CNTL2__L2_CACHE_VMID_MODE_MASK 0x00007000L
#define MM_ATC_L2_CNTL2__L2_CACHE_UPDATE_WILDCARD_REFERENCE_VALUE_MASK 0x001F8000L
//MM_ATC_L2_CACHE_DATA0
#define MM_ATC_L2_CACHE_DATA0__DATA_REGISTER_VALID__SHIFT 0x0
#define MM_ATC_L2_CACHE_DATA0__CACHE_ENTRY_VALID__SHIFT 0x1
#define MM_ATC_L2_CACHE_DATA0__CACHED_ATTRIBUTES__SHIFT 0x2
#define MM_ATC_L2_CACHE_DATA0__VIRTUAL_PAGE_ADDRESS_HIGH__SHIFT 0x18
#define MM_ATC_L2_CACHE_DATA0__DATA_REGISTER_VALID_MASK 0x00000001L
#define MM_ATC_L2_CACHE_DATA0__CACHE_ENTRY_VALID_MASK 0x00000002L
#define MM_ATC_L2_CACHE_DATA0__CACHED_ATTRIBUTES_MASK 0x00FFFFFCL
#define MM_ATC_L2_CACHE_DATA0__VIRTUAL_PAGE_ADDRESS_HIGH_MASK 0x0F000000L
//MM_ATC_L2_CACHE_DATA1
#define MM_ATC_L2_CACHE_DATA1__VIRTUAL_PAGE_ADDRESS_LOW__SHIFT 0x0
#define MM_ATC_L2_CACHE_DATA1__VIRTUAL_PAGE_ADDRESS_LOW_MASK 0xFFFFFFFFL
//MM_ATC_L2_CACHE_DATA2
#define MM_ATC_L2_CACHE_DATA2__PHYSICAL_PAGE_ADDRESS__SHIFT 0x0
#define MM_ATC_L2_CACHE_DATA2__PHYSICAL_PAGE_ADDRESS_MASK 0xFFFFFFFFL
//MM_ATC_L2_CNTL3
#define MM_ATC_L2_CNTL3__DELAY_SEND_INVALIDATION_REQUEST__SHIFT 0x0
#define MM_ATC_L2_CNTL3__ATS_REQUEST_CREDIT_MINUS1__SHIFT 0x3
#define MM_ATC_L2_CNTL3__COMPCLKREQ_OFF_HYSTERESIS__SHIFT 0x9
#define MM_ATC_L2_CNTL3__DELAY_SEND_INVALIDATION_REQUEST_MASK 0x00000007L
#define MM_ATC_L2_CNTL3__ATS_REQUEST_CREDIT_MINUS1_MASK 0x000001F8L
#define MM_ATC_L2_CNTL3__COMPCLKREQ_OFF_HYSTERESIS_MASK 0x00000E00L
//MM_ATC_L2_STATUS
#define MM_ATC_L2_STATUS__BUSY__SHIFT 0x0
#define MM_ATC_L2_STATUS__PARITY_ERROR_INFO__SHIFT 0x1
#define MM_ATC_L2_STATUS__BUSY_MASK 0x00000001L
#define MM_ATC_L2_STATUS__PARITY_ERROR_INFO_MASK 0x3FFFFFFEL
//MM_ATC_L2_STATUS2
#define MM_ATC_L2_STATUS2__IFIFO_NON_FATAL_PARITY_ERROR_INFO__SHIFT 0x0
#define MM_ATC_L2_STATUS2__IFIFO_FATAL_PARITY_ERROR_INFO__SHIFT 0x8
#define MM_ATC_L2_STATUS2__IFIFO_NON_FATAL_PARITY_ERROR_INFO_MASK 0x000000FFL
#define MM_ATC_L2_STATUS2__IFIFO_FATAL_PARITY_ERROR_INFO_MASK 0x0000FF00L
//MM_ATC_L2_MISC_CG
#define MM_ATC_L2_MISC_CG__OFFDLY__SHIFT 0x6
#define MM_ATC_L2_MISC_CG__ENABLE__SHIFT 0x12
#define MM_ATC_L2_MISC_CG__MEM_LS_ENABLE__SHIFT 0x13
#define MM_ATC_L2_MISC_CG__OFFDLY_MASK 0x00000FC0L
#define MM_ATC_L2_MISC_CG__ENABLE_MASK 0x00040000L
#define MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK 0x00080000L
//MM_ATC_L2_MEM_POWER_LS
#define MM_ATC_L2_MEM_POWER_LS__LS_SETUP__SHIFT 0x0
#define MM_ATC_L2_MEM_POWER_LS__LS_HOLD__SHIFT 0x6
#define MM_ATC_L2_MEM_POWER_LS__LS_SETUP_MASK 0x0000003FL
#define MM_ATC_L2_MEM_POWER_LS__LS_HOLD_MASK 0x00000FC0L
//MM_ATC_L2_CGTT_CLK_CTRL
#define MM_ATC_L2_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
#define MM_ATC_L2_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
#define MM_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE__SHIFT 0xf
#define MM_ATC_L2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x10
#define MM_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x18
#define MM_ATC_L2_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
#define MM_ATC_L2_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
#define MM_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L
#define MM_ATC_L2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00FF0000L
#define MM_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0xFF000000L
//MM_ATC_L2_SDPPORT_CTRL
#define MM_ATC_L2_SDPPORT_CTRL__SDPVDCI_RDRSPCKEN__SHIFT 0x0
#define MM_ATC_L2_SDPPORT_CTRL__SDPVDCI_RDRSPCKENRCV__SHIFT 0x1
#define MM_ATC_L2_SDPPORT_CTRL__SDPVDCI_RDRSPDATACKEN__SHIFT 0x2
#define MM_ATC_L2_SDPPORT_CTRL__SDPVDCI_RDRSPDATACKENRCV__SHIFT 0x3
#define MM_ATC_L2_SDPPORT_CTRL__SDPVDCI_WRRSPCKEN__SHIFT 0x4
#define MM_ATC_L2_SDPPORT_CTRL__SDPVDCI_WRRSPCKENRCV__SHIFT 0x5
#define MM_ATC_L2_SDPPORT_CTRL__SDPVDCI_REQCKEN__SHIFT 0x6
#define MM_ATC_L2_SDPPORT_CTRL__SDPVDCI_REQCKENRCV__SHIFT 0x7
#define MM_ATC_L2_SDPPORT_CTRL__SDPVDCI_ORIGDATACKEN__SHIFT 0x8
#define MM_ATC_L2_SDPPORT_CTRL__SDPVDCI_ORIGDATACKENRCV__SHIFT 0x9
#define MM_ATC_L2_SDPPORT_CTRL__SDPVDCI_RDRSPCKEN_MASK 0x00000001L
#define MM_ATC_L2_SDPPORT_CTRL__SDPVDCI_RDRSPCKENRCV_MASK 0x00000002L
#define MM_ATC_L2_SDPPORT_CTRL__SDPVDCI_RDRSPDATACKEN_MASK 0x00000004L
#define MM_ATC_L2_SDPPORT_CTRL__SDPVDCI_RDRSPDATACKENRCV_MASK 0x00000008L
#define MM_ATC_L2_SDPPORT_CTRL__SDPVDCI_WRRSPCKEN_MASK 0x00000010L
#define MM_ATC_L2_SDPPORT_CTRL__SDPVDCI_WRRSPCKENRCV_MASK 0x00000020L
#define MM_ATC_L2_SDPPORT_CTRL__SDPVDCI_REQCKEN_MASK 0x00000040L
#define MM_ATC_L2_SDPPORT_CTRL__SDPVDCI_REQCKENRCV_MASK 0x00000080L
#define MM_ATC_L2_SDPPORT_CTRL__SDPVDCI_ORIGDATACKEN_MASK 0x00000100L
#define MM_ATC_L2_SDPPORT_CTRL__SDPVDCI_ORIGDATACKENRCV_MASK 0x00000200L
// addressBlock: mmhub_mmutcl2_mmvml2pfdec
//MMVM_L2_CNTL
#define MMVM_L2_CNTL__ENABLE_L2_CACHE__SHIFT 0x0
#define MMVM_L2_CNTL__ENABLE_L2_FRAGMENT_PROCESSING__SHIFT 0x1
#define MMVM_L2_CNTL__L2_CACHE_PTE_ENDIAN_SWAP_MODE__SHIFT 0x2
#define MMVM_L2_CNTL__L2_CACHE_PDE_ENDIAN_SWAP_MODE__SHIFT 0x4
#define MMVM_L2_CNTL__L2_PDE0_CACHE_TAG_GENERATION_MODE__SHIFT 0x8
#define MMVM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE__SHIFT 0x9
#define MMVM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE__SHIFT 0xa
#define MMVM_L2_CNTL__ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY__SHIFT 0xb
#define MMVM_L2_CNTL__L2_PDE0_CACHE_SPLIT_MODE__SHIFT 0xc
#define MMVM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE__SHIFT 0xf
#define MMVM_L2_CNTL__PDE_FAULT_CLASSIFICATION__SHIFT 0x12
#define MMVM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE__SHIFT 0x13
#define MMVM_L2_CNTL__IDENTITY_MODE_FRAGMENT_SIZE__SHIFT 0x15
#define MMVM_L2_CNTL__L2_PTE_CACHE_ADDR_MODE__SHIFT 0x1a
#define MMVM_L2_CNTL__ENABLE_L2_CACHE_MASK 0x00000001L
#define MMVM_L2_CNTL__ENABLE_L2_FRAGMENT_PROCESSING_MASK 0x00000002L
#define MMVM_L2_CNTL__L2_CACHE_PTE_ENDIAN_SWAP_MODE_MASK 0x0000000CL
#define MMVM_L2_CNTL__L2_CACHE_PDE_ENDIAN_SWAP_MODE_MASK 0x00000030L
#define MMVM_L2_CNTL__L2_PDE0_CACHE_TAG_GENERATION_MODE_MASK 0x00000100L
#define MMVM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE_MASK 0x00000200L
#define MMVM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE_MASK 0x00000400L
#define MMVM_L2_CNTL__ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY_MASK 0x00000800L
#define MMVM_L2_CNTL__L2_PDE0_CACHE_SPLIT_MODE_MASK 0x00007000L
#define MMVM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE_MASK 0x00038000L
#define MMVM_L2_CNTL__PDE_FAULT_CLASSIFICATION_MASK 0x00040000L
#define MMVM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE_MASK 0x00180000L
#define MMVM_L2_CNTL__IDENTITY_MODE_FRAGMENT_SIZE_MASK 0x03E00000L
#define MMVM_L2_CNTL__L2_PTE_CACHE_ADDR_MODE_MASK 0x0C000000L
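/*
 * Illustrative sketch, not generated register content: a generic
 * get-field in the style of amdgpu's REG_GET_FIELD(), which token-pastes
 * the reg##__##field##__SHIFT / reg##__##field##_MASK names defined in
 * this file. Guarded out; shown only to document the naming contract:
 */
#if 0
#define EXAMPLE_GET_FIELD(value, reg, field) \
	(((value) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)

/* e.g. EXAMPLE_GET_FIELD(v, MMVM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE) */
#endif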
//MMVM_L2_CNTL2
#define MMVM_L2_CNTL2__INVALIDATE_ALL_L1_TLBS__SHIFT 0x0
#define MMVM_L2_CNTL2__INVALIDATE_L2_CACHE__SHIFT 0x1
#define MMVM_L2_CNTL2__DISABLE_INVALIDATE_PER_DOMAIN__SHIFT 0x15
#define MMVM_L2_CNTL2__DISABLE_BIGK_CACHE_OPTIMIZATION__SHIFT 0x16
#define MMVM_L2_CNTL2__L2_PTE_CACHE_VMID_MODE__SHIFT 0x17
#define MMVM_L2_CNTL2__INVALIDATE_CACHE_MODE__SHIFT 0x1a
#define MMVM_L2_CNTL2__PDE_CACHE_EFFECTIVE_SIZE__SHIFT 0x1c
#define MMVM_L2_CNTL2__INVALIDATE_ALL_L1_TLBS_MASK 0x00000001L
#define MMVM_L2_CNTL2__INVALIDATE_L2_CACHE_MASK 0x00000002L
#define MMVM_L2_CNTL2__DISABLE_INVALIDATE_PER_DOMAIN_MASK 0x00200000L
#define MMVM_L2_CNTL2__DISABLE_BIGK_CACHE_OPTIMIZATION_MASK 0x00400000L
#define MMVM_L2_CNTL2__L2_PTE_CACHE_VMID_MODE_MASK 0x03800000L
#define MMVM_L2_CNTL2__INVALIDATE_CACHE_MODE_MASK 0x0C000000L
#define MMVM_L2_CNTL2__PDE_CACHE_EFFECTIVE_SIZE_MASK 0x70000000L
//MMVM_L2_CNTL3
#define MMVM_L2_CNTL3__BANK_SELECT__SHIFT 0x0
#define MMVM_L2_CNTL3__L2_CACHE_UPDATE_MODE__SHIFT 0x6
#define MMVM_L2_CNTL3__L2_CACHE_UPDATE_WILDCARD_REFERENCE_VALUE__SHIFT 0x8
#define MMVM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0xf
#define MMVM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY__SHIFT 0x14
#define MMVM_L2_CNTL3__L2_CACHE_4K_EFFECTIVE_SIZE__SHIFT 0x15
#define MMVM_L2_CNTL3__L2_CACHE_BIGK_EFFECTIVE_SIZE__SHIFT 0x18
#define MMVM_L2_CNTL3__L2_CACHE_4K_FORCE_MISS__SHIFT 0x1c
#define MMVM_L2_CNTL3__L2_CACHE_BIGK_FORCE_MISS__SHIFT 0x1d
#define MMVM_L2_CNTL3__PDE_CACHE_FORCE_MISS__SHIFT 0x1e
#define MMVM_L2_CNTL3__L2_CACHE_4K_ASSOCIATIVITY__SHIFT 0x1f
#define MMVM_L2_CNTL3__BANK_SELECT_MASK 0x0000003FL
#define MMVM_L2_CNTL3__L2_CACHE_UPDATE_MODE_MASK 0x000000C0L
#define MMVM_L2_CNTL3__L2_CACHE_UPDATE_WILDCARD_REFERENCE_VALUE_MASK 0x00001F00L
#define MMVM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000F8000L
#define MMVM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK 0x00100000L
#define MMVM_L2_CNTL3__L2_CACHE_4K_EFFECTIVE_SIZE_MASK 0x00E00000L
#define MMVM_L2_CNTL3__L2_CACHE_BIGK_EFFECTIVE_SIZE_MASK 0x0F000000L
#define MMVM_L2_CNTL3__L2_CACHE_4K_FORCE_MISS_MASK 0x10000000L
#define MMVM_L2_CNTL3__L2_CACHE_BIGK_FORCE_MISS_MASK 0x20000000L
#define MMVM_L2_CNTL3__PDE_CACHE_FORCE_MISS_MASK 0x40000000L
#define MMVM_L2_CNTL3__L2_CACHE_4K_ASSOCIATIVITY_MASK 0x80000000L
//MMVM_L2_STATUS
#define MMVM_L2_STATUS__L2_BUSY__SHIFT 0x0
#define MMVM_L2_STATUS__CONTEXT_DOMAIN_BUSY__SHIFT 0x1
#define MMVM_L2_STATUS__FOUND_4K_PTE_CACHE_PARITY_ERRORS__SHIFT 0x11
#define MMVM_L2_STATUS__FOUND_BIGK_PTE_CACHE_PARITY_ERRORS__SHIFT 0x12
#define MMVM_L2_STATUS__FOUND_PDE0_CACHE_PARITY_ERRORS__SHIFT 0x13
#define MMVM_L2_STATUS__FOUND_PDE1_CACHE_PARITY_ERRORS__SHIFT 0x14
#define MMVM_L2_STATUS__FOUND_PDE2_CACHE_PARITY_ERRORS__SHIFT 0x15
#define MMVM_L2_STATUS__L2_BUSY_MASK 0x00000001L
#define MMVM_L2_STATUS__CONTEXT_DOMAIN_BUSY_MASK 0x0001FFFEL
#define MMVM_L2_STATUS__FOUND_4K_PTE_CACHE_PARITY_ERRORS_MASK 0x00020000L
#define MMVM_L2_STATUS__FOUND_BIGK_PTE_CACHE_PARITY_ERRORS_MASK 0x00040000L
#define MMVM_L2_STATUS__FOUND_PDE0_CACHE_PARITY_ERRORS_MASK 0x00080000L
#define MMVM_L2_STATUS__FOUND_PDE1_CACHE_PARITY_ERRORS_MASK 0x00100000L
#define MMVM_L2_STATUS__FOUND_PDE2_CACHE_PARITY_ERRORS_MASK 0x00200000L
//MMVM_DUMMY_PAGE_FAULT_CNTL
#define MMVM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_FAULT_ENABLE__SHIFT 0x0
#define MMVM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_ADDRESS_LOGICAL__SHIFT 0x1
#define MMVM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_COMPARE_MSBS__SHIFT 0x2
#define MMVM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_FAULT_ENABLE_MASK 0x00000001L
#define MMVM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_ADDRESS_LOGICAL_MASK 0x00000002L
#define MMVM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_COMPARE_MSBS_MASK 0x000000FCL
//MMVM_DUMMY_PAGE_FAULT_ADDR_LO32
#define MMVM_DUMMY_PAGE_FAULT_ADDR_LO32__DUMMY_PAGE_ADDR_LO32__SHIFT 0x0
#define MMVM_DUMMY_PAGE_FAULT_ADDR_LO32__DUMMY_PAGE_ADDR_LO32_MASK 0xFFFFFFFFL
//MMVM_DUMMY_PAGE_FAULT_ADDR_HI32
#define MMVM_DUMMY_PAGE_FAULT_ADDR_HI32__DUMMY_PAGE_ADDR_HI4__SHIFT 0x0
#define MMVM_DUMMY_PAGE_FAULT_ADDR_HI32__DUMMY_PAGE_ADDR_HI4_MASK 0x0000000FL
//MMVM_INVALIDATE_CNTL
#define MMVM_INVALIDATE_CNTL__PRI_REG_ALTERNATING__SHIFT 0x0
#define MMVM_INVALIDATE_CNTL__MAX_REG_OUTSTANDING__SHIFT 0x8
#define MMVM_INVALIDATE_CNTL__PRI_REG_ALTERNATING_MASK 0x000000FFL
#define MMVM_INVALIDATE_CNTL__MAX_REG_OUTSTANDING_MASK 0x0000FF00L
//MMVM_L2_PROTECTION_FAULT_CNTL
#define MMVM_L2_PROTECTION_FAULT_CNTL__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x0
#define MMVM_L2_PROTECTION_FAULT_CNTL__ALLOW_SUBSEQUENT_PROTECTION_FAULT_STATUS_ADDR_UPDATES__SHIFT 0x1
#define MMVM_L2_PROTECTION_FAULT_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x2
#define MMVM_L2_PROTECTION_FAULT_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x3
#define MMVM_L2_PROTECTION_FAULT_CNTL__PDE1_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x4
#define MMVM_L2_PROTECTION_FAULT_CNTL__PDE2_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x5
#define MMVM_L2_PROTECTION_FAULT_CNTL__TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x6
#define MMVM_L2_PROTECTION_FAULT_CNTL__NACK_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x7
#define MMVM_L2_PROTECTION_FAULT_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x8
#define MMVM_L2_PROTECTION_FAULT_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x9
#define MMVM_L2_PROTECTION_FAULT_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
#define MMVM_L2_PROTECTION_FAULT_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xb
#define MMVM_L2_PROTECTION_FAULT_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
#define MMVM_L2_PROTECTION_FAULT_CNTL__CLIENT_ID_NO_RETRY_FAULT_INTERRUPT__SHIFT 0xd
#define MMVM_L2_PROTECTION_FAULT_CNTL__OTHER_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT__SHIFT 0x1d
#define MMVM_L2_PROTECTION_FAULT_CNTL__CRASH_ON_NO_RETRY_FAULT__SHIFT 0x1e
#define MMVM_L2_PROTECTION_FAULT_CNTL__CRASH_ON_RETRY_FAULT__SHIFT 0x1f
#define MMVM_L2_PROTECTION_FAULT_CNTL__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00000001L
#define MMVM_L2_PROTECTION_FAULT_CNTL__ALLOW_SUBSEQUENT_PROTECTION_FAULT_STATUS_ADDR_UPDATES_MASK 0x00000002L
#define MMVM_L2_PROTECTION_FAULT_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000004L
#define MMVM_L2_PROTECTION_FAULT_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000008L
#define MMVM_L2_PROTECTION_FAULT_CNTL__PDE1_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000010L
#define MMVM_L2_PROTECTION_FAULT_CNTL__PDE2_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000020L
#define MMVM_L2_PROTECTION_FAULT_CNTL__TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000040L
#define MMVM_L2_PROTECTION_FAULT_CNTL__NACK_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000080L
#define MMVM_L2_PROTECTION_FAULT_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000100L
#define MMVM_L2_PROTECTION_FAULT_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000200L
#define MMVM_L2_PROTECTION_FAULT_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
#define MMVM_L2_PROTECTION_FAULT_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000800L
#define MMVM_L2_PROTECTION_FAULT_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
#define MMVM_L2_PROTECTION_FAULT_CNTL__CLIENT_ID_NO_RETRY_FAULT_INTERRUPT_MASK 0x1FFFE000L
#define MMVM_L2_PROTECTION_FAULT_CNTL__OTHER_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT_MASK 0x20000000L
#define MMVM_L2_PROTECTION_FAULT_CNTL__CRASH_ON_NO_RETRY_FAULT_MASK 0x40000000L
#define MMVM_L2_PROTECTION_FAULT_CNTL__CRASH_ON_RETRY_FAULT_MASK 0x80000000L
//MMVM_L2_PROTECTION_FAULT_CNTL2
#define MMVM_L2_PROTECTION_FAULT_CNTL2__CLIENT_ID_PRT_FAULT_INTERRUPT__SHIFT 0x0
#define MMVM_L2_PROTECTION_FAULT_CNTL2__OTHER_CLIENT_ID_PRT_FAULT_INTERRUPT__SHIFT 0x10
#define MMVM_L2_PROTECTION_FAULT_CNTL2__ACTIVE_PAGE_MIGRATION_PTE__SHIFT 0x11
#define MMVM_L2_PROTECTION_FAULT_CNTL2__ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY__SHIFT 0x12
#define MMVM_L2_PROTECTION_FAULT_CNTL2__ENABLE_RETRY_FAULT_INTERRUPT__SHIFT 0x13
#define MMVM_L2_PROTECTION_FAULT_CNTL2__CLIENT_ID_PRT_FAULT_INTERRUPT_MASK 0x0000FFFFL
#define MMVM_L2_PROTECTION_FAULT_CNTL2__OTHER_CLIENT_ID_PRT_FAULT_INTERRUPT_MASK 0x00010000L
#define MMVM_L2_PROTECTION_FAULT_CNTL2__ACTIVE_PAGE_MIGRATION_PTE_MASK 0x00020000L
#define MMVM_L2_PROTECTION_FAULT_CNTL2__ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY_MASK 0x00040000L
#define MMVM_L2_PROTECTION_FAULT_CNTL2__ENABLE_RETRY_FAULT_INTERRUPT_MASK 0x00080000L
//MMVM_L2_PROTECTION_FAULT_MM_CNTL3
#define MMVM_L2_PROTECTION_FAULT_MM_CNTL3__VML1_READ_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT__SHIFT 0x0
#define MMVM_L2_PROTECTION_FAULT_MM_CNTL3__VML1_READ_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT_MASK 0xFFFFFFFFL
//MMVM_L2_PROTECTION_FAULT_MM_CNTL4
#define MMVM_L2_PROTECTION_FAULT_MM_CNTL4__VML1_WRITE_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT__SHIFT 0x0
#define MMVM_L2_PROTECTION_FAULT_MM_CNTL4__VML1_WRITE_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT_MASK 0xFFFFFFFFL
//MMVM_L2_PROTECTION_FAULT_STATUS
#define MMVM_L2_PROTECTION_FAULT_STATUS__MORE_FAULTS__SHIFT 0x0
#define MMVM_L2_PROTECTION_FAULT_STATUS__WALKER_ERROR__SHIFT 0x1
#define MMVM_L2_PROTECTION_FAULT_STATUS__PERMISSION_FAULTS__SHIFT 0x4
#define MMVM_L2_PROTECTION_FAULT_STATUS__MAPPING_ERROR__SHIFT 0x8
#define MMVM_L2_PROTECTION_FAULT_STATUS__CID__SHIFT 0x9
#define MMVM_L2_PROTECTION_FAULT_STATUS__RW__SHIFT 0x12
#define MMVM_L2_PROTECTION_FAULT_STATUS__ATOMIC__SHIFT 0x13
#define MMVM_L2_PROTECTION_FAULT_STATUS__VMID__SHIFT 0x14
#define MMVM_L2_PROTECTION_FAULT_STATUS__VF__SHIFT 0x18
#define MMVM_L2_PROTECTION_FAULT_STATUS__VFID__SHIFT 0x19
#define MMVM_L2_PROTECTION_FAULT_STATUS__MORE_FAULTS_MASK 0x00000001L
#define MMVM_L2_PROTECTION_FAULT_STATUS__WALKER_ERROR_MASK 0x0000000EL
#define MMVM_L2_PROTECTION_FAULT_STATUS__PERMISSION_FAULTS_MASK 0x000000F0L
#define MMVM_L2_PROTECTION_FAULT_STATUS__MAPPING_ERROR_MASK 0x00000100L
#define MMVM_L2_PROTECTION_FAULT_STATUS__CID_MASK 0x0003FE00L
#define MMVM_L2_PROTECTION_FAULT_STATUS__RW_MASK 0x00040000L
#define MMVM_L2_PROTECTION_FAULT_STATUS__ATOMIC_MASK 0x00080000L
#define MMVM_L2_PROTECTION_FAULT_STATUS__VMID_MASK 0x00F00000L
#define MMVM_L2_PROTECTION_FAULT_STATUS__VF_MASK 0x01000000L
#define MMVM_L2_PROTECTION_FAULT_STATUS__VFID_MASK 0x3E000000L
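/*
 * Decoding sketch (illustrative only, not part of the generated register
 * list): a fault handler reads MMVM_L2_PROTECTION_FAULT_STATUS once and
 * extracts individual fields as (status & MASK) >> SHIFT, which is what
 * amdgpu's REG_GET_FIELD() macro expands to. The hypothetical helper below
 * pulls out the VMID of the faulting client.
 */
static inline unsigned int
mmvm_l2_fault_status_vmid(unsigned int status)
{
	return (status & MMVM_L2_PROTECTION_FAULT_STATUS__VMID_MASK) >>
	       MMVM_L2_PROTECTION_FAULT_STATUS__VMID__SHIFT;
}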
//MMVM_L2_PROTECTION_FAULT_ADDR_LO32
#define MMVM_L2_PROTECTION_FAULT_ADDR_LO32__LOGICAL_PAGE_ADDR_LO32__SHIFT 0x0
#define MMVM_L2_PROTECTION_FAULT_ADDR_LO32__LOGICAL_PAGE_ADDR_LO32_MASK 0xFFFFFFFFL
//MMVM_L2_PROTECTION_FAULT_ADDR_HI32
#define MMVM_L2_PROTECTION_FAULT_ADDR_HI32__LOGICAL_PAGE_ADDR_HI4__SHIFT 0x0
#define MMVM_L2_PROTECTION_FAULT_ADDR_HI32__LOGICAL_PAGE_ADDR_HI4_MASK 0x0000000FL
//MMVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32
#define MMVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32__PHYSICAL_PAGE_ADDR_LO32__SHIFT 0x0
#define MMVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32__PHYSICAL_PAGE_ADDR_LO32_MASK 0xFFFFFFFFL
//MMVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32
#define MMVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32__PHYSICAL_PAGE_ADDR_HI4__SHIFT 0x0
#define MMVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32__PHYSICAL_PAGE_ADDR_HI4_MASK 0x0000000FL
//MMVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32
#define MMVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
#define MMVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
//MMVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32
#define MMVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
#define MMVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
//MMVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32
#define MMVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
#define MMVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
//MMVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32
#define MMVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
#define MMVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
//MMVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32
#define MMVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32__PHYSICAL_PAGE_OFFSET_LO32__SHIFT 0x0
#define MMVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32__PHYSICAL_PAGE_OFFSET_LO32_MASK 0xFFFFFFFFL
//MMVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32
#define MMVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32__PHYSICAL_PAGE_OFFSET_HI4__SHIFT 0x0
#define MMVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32__PHYSICAL_PAGE_OFFSET_HI4_MASK 0x0000000FL
//MMVM_L2_CNTL4
#define MMVM_L2_CNTL4__L2_CACHE_4K_PARTITION_COUNT__SHIFT 0x0
#define MMVM_L2_CNTL4__VMC_TAP_PDE_REQUEST_PHYSICAL__SHIFT 0x6
#define MMVM_L2_CNTL4__VMC_TAP_PTE_REQUEST_PHYSICAL__SHIFT 0x7
#define MMVM_L2_CNTL4__MM_NONRT_IFIFO_ACTIVE_TRANSACTION_LIMIT__SHIFT 0x8
#define MMVM_L2_CNTL4__MM_SOFTRT_IFIFO_ACTIVE_TRANSACTION_LIMIT__SHIFT 0x12
#define MMVM_L2_CNTL4__BPM_CGCGLS_OVERRIDE__SHIFT 0x1c
#define MMVM_L2_CNTL4__GC_CH_FGCG_OFF__SHIFT 0x1d
#define MMVM_L2_CNTL4__L2_CACHE_4K_PARTITION_COUNT_MASK 0x0000003FL
#define MMVM_L2_CNTL4__VMC_TAP_PDE_REQUEST_PHYSICAL_MASK 0x00000040L
#define MMVM_L2_CNTL4__VMC_TAP_PTE_REQUEST_PHYSICAL_MASK 0x00000080L
#define MMVM_L2_CNTL4__MM_NONRT_IFIFO_ACTIVE_TRANSACTION_LIMIT_MASK 0x0003FF00L
#define MMVM_L2_CNTL4__MM_SOFTRT_IFIFO_ACTIVE_TRANSACTION_LIMIT_MASK 0x0FFC0000L
#define MMVM_L2_CNTL4__BPM_CGCGLS_OVERRIDE_MASK 0x10000000L
#define MMVM_L2_CNTL4__GC_CH_FGCG_OFF_MASK 0x20000000L
//MMVM_L2_MM_GROUP_RT_CLASSES
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_0_RT_CLASS__SHIFT 0x0
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_1_RT_CLASS__SHIFT 0x1
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_2_RT_CLASS__SHIFT 0x2
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_3_RT_CLASS__SHIFT 0x3
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_4_RT_CLASS__SHIFT 0x4
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_5_RT_CLASS__SHIFT 0x5
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_6_RT_CLASS__SHIFT 0x6
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_7_RT_CLASS__SHIFT 0x7
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_8_RT_CLASS__SHIFT 0x8
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_9_RT_CLASS__SHIFT 0x9
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_10_RT_CLASS__SHIFT 0xa
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_11_RT_CLASS__SHIFT 0xb
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_12_RT_CLASS__SHIFT 0xc
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_13_RT_CLASS__SHIFT 0xd
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_14_RT_CLASS__SHIFT 0xe
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_15_RT_CLASS__SHIFT 0xf
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_16_RT_CLASS__SHIFT 0x10
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_17_RT_CLASS__SHIFT 0x11
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_18_RT_CLASS__SHIFT 0x12
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_19_RT_CLASS__SHIFT 0x13
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_20_RT_CLASS__SHIFT 0x14
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_21_RT_CLASS__SHIFT 0x15
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_22_RT_CLASS__SHIFT 0x16
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_23_RT_CLASS__SHIFT 0x17
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_24_RT_CLASS__SHIFT 0x18
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_25_RT_CLASS__SHIFT 0x19
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_26_RT_CLASS__SHIFT 0x1a
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_27_RT_CLASS__SHIFT 0x1b
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_28_RT_CLASS__SHIFT 0x1c
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_29_RT_CLASS__SHIFT 0x1d
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_30_RT_CLASS__SHIFT 0x1e
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_31_RT_CLASS__SHIFT 0x1f
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_0_RT_CLASS_MASK 0x00000001L
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_1_RT_CLASS_MASK 0x00000002L
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_2_RT_CLASS_MASK 0x00000004L
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_3_RT_CLASS_MASK 0x00000008L
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_4_RT_CLASS_MASK 0x00000010L
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_5_RT_CLASS_MASK 0x00000020L
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_6_RT_CLASS_MASK 0x00000040L
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_7_RT_CLASS_MASK 0x00000080L
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_8_RT_CLASS_MASK 0x00000100L
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_9_RT_CLASS_MASK 0x00000200L
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_10_RT_CLASS_MASK 0x00000400L
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_11_RT_CLASS_MASK 0x00000800L
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_12_RT_CLASS_MASK 0x00001000L
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_13_RT_CLASS_MASK 0x00002000L
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_14_RT_CLASS_MASK 0x00004000L
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_15_RT_CLASS_MASK 0x00008000L
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_16_RT_CLASS_MASK 0x00010000L
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_17_RT_CLASS_MASK 0x00020000L
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_18_RT_CLASS_MASK 0x00040000L
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_19_RT_CLASS_MASK 0x00080000L
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_20_RT_CLASS_MASK 0x00100000L
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_21_RT_CLASS_MASK 0x00200000L
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_22_RT_CLASS_MASK 0x00400000L
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_23_RT_CLASS_MASK 0x00800000L
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_24_RT_CLASS_MASK 0x01000000L
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_25_RT_CLASS_MASK 0x02000000L
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_26_RT_CLASS_MASK 0x04000000L
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_27_RT_CLASS_MASK 0x08000000L
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_28_RT_CLASS_MASK 0x10000000L
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_29_RT_CLASS_MASK 0x20000000L
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_30_RT_CLASS_MASK 0x40000000L
#define MMVM_L2_MM_GROUP_RT_CLASSES__GROUP_31_RT_CLASS_MASK 0x80000000L
//MMVM_L2_BANK_SELECT_RESERVED_CID
#define MMVM_L2_BANK_SELECT_RESERVED_CID__RESERVED_READ_CLIENT_ID__SHIFT 0x0
#define MMVM_L2_BANK_SELECT_RESERVED_CID__RESERVED_WRITE_CLIENT_ID__SHIFT 0xa
#define MMVM_L2_BANK_SELECT_RESERVED_CID__ENABLE__SHIFT 0x14
#define MMVM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_INVALIDATION_MODE__SHIFT 0x18
#define MMVM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_PRIVATE_INVALIDATION__SHIFT 0x19
#define MMVM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_FRAGMENT_SIZE__SHIFT 0x1a
#define MMVM_L2_BANK_SELECT_RESERVED_CID__RESERVED_READ_CLIENT_ID_MASK 0x000001FFL
#define MMVM_L2_BANK_SELECT_RESERVED_CID__RESERVED_WRITE_CLIENT_ID_MASK 0x0007FC00L
#define MMVM_L2_BANK_SELECT_RESERVED_CID__ENABLE_MASK 0x00100000L
#define MMVM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_INVALIDATION_MODE_MASK 0x01000000L
#define MMVM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_PRIVATE_INVALIDATION_MASK 0x02000000L
#define MMVM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_FRAGMENT_SIZE_MASK 0x7C000000L
//MMVM_L2_BANK_SELECT_RESERVED_CID2
#define MMVM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_READ_CLIENT_ID__SHIFT 0x0
#define MMVM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_WRITE_CLIENT_ID__SHIFT 0xa
#define MMVM_L2_BANK_SELECT_RESERVED_CID2__ENABLE__SHIFT 0x14
#define MMVM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_INVALIDATION_MODE__SHIFT 0x18
#define MMVM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_PRIVATE_INVALIDATION__SHIFT 0x19
#define MMVM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_FRAGMENT_SIZE__SHIFT 0x1a
#define MMVM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_READ_CLIENT_ID_MASK 0x000001FFL
#define MMVM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_WRITE_CLIENT_ID_MASK 0x0007FC00L
#define MMVM_L2_BANK_SELECT_RESERVED_CID2__ENABLE_MASK 0x00100000L
#define MMVM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_INVALIDATION_MODE_MASK 0x01000000L
#define MMVM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_PRIVATE_INVALIDATION_MASK 0x02000000L
#define MMVM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_FRAGMENT_SIZE_MASK 0x7C000000L
//MMVM_L2_CACHE_PARITY_CNTL
#define MMVM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_4K_PTE_CACHES__SHIFT 0x0
#define MMVM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_BIGK_PTE_CACHES__SHIFT 0x1
#define MMVM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_PDE_CACHES__SHIFT 0x2
#define MMVM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_4K_PTE_CACHE__SHIFT 0x3
#define MMVM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_BIGK_PTE_CACHE__SHIFT 0x4
#define MMVM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_PDE_CACHE__SHIFT 0x5
#define MMVM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_BANK__SHIFT 0x6
#define MMVM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_NUMBER__SHIFT 0x9
#define MMVM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_ASSOC__SHIFT 0xc
#define MMVM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_4K_PTE_CACHES_MASK 0x00000001L
#define MMVM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_BIGK_PTE_CACHES_MASK 0x00000002L
#define MMVM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_PDE_CACHES_MASK 0x00000004L
#define MMVM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_4K_PTE_CACHE_MASK 0x00000008L
#define MMVM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_BIGK_PTE_CACHE_MASK 0x00000010L
#define MMVM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_PDE_CACHE_MASK 0x00000020L
#define MMVM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_BANK_MASK 0x000001C0L
#define MMVM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_NUMBER_MASK 0x00000E00L
#define MMVM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_ASSOC_MASK 0x0000F000L
//MMVM_L2_IH_LOG_CNTL
#define MMVM_L2_IH_LOG_CNTL__ENABLE_LOGGING__SHIFT 0x0
#define MMVM_L2_IH_LOG_CNTL__USE_L_BIT__SHIFT 0x1
#define MMVM_L2_IH_LOG_CNTL__REGISTER_ADDRESS__SHIFT 0x2
#define MMVM_L2_IH_LOG_CNTL__LOG_ALL_TRANSLATIONS__SHIFT 0x14
#define MMVM_L2_IH_LOG_CNTL__ENABLE_LOGGING_MASK 0x00000001L
#define MMVM_L2_IH_LOG_CNTL__USE_L_BIT_MASK 0x00000002L
#define MMVM_L2_IH_LOG_CNTL__REGISTER_ADDRESS_MASK 0x000FFFFCL
#define MMVM_L2_IH_LOG_CNTL__LOG_ALL_TRANSLATIONS_MASK 0x00100000L
//MMVM_L2_IH_LOG_BUSY
#define MMVM_L2_IH_LOG_BUSY__PER_VMID_TRANSLATION_BUSY__SHIFT 0x0
#define MMVM_L2_IH_LOG_BUSY__PER_VMID_INVALIDATION_BUSY__SHIFT 0x10
#define MMVM_L2_IH_LOG_BUSY__PER_VMID_TRANSLATION_BUSY_MASK 0x0000FFFFL
#define MMVM_L2_IH_LOG_BUSY__PER_VMID_INVALIDATION_BUSY_MASK 0xFFFF0000L
//MMVM_L2_CGTT_CLK_CTRL
#define MMVM_L2_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
#define MMVM_L2_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
#define MMVM_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE__SHIFT 0xf
#define MMVM_L2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x10
#define MMVM_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x18
#define MMVM_L2_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
#define MMVM_L2_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
#define MMVM_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L
#define MMVM_L2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00FF0000L
#define MMVM_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0xFF000000L
//MMVM_L2_CNTL5
#define MMVM_L2_CNTL5__L2_CACHE_SMALLK_FRAGMENT_SIZE__SHIFT 0x0
#define MMVM_L2_CNTL5__WALKER_PRIORITY_CLIENT_ID__SHIFT 0x5
#define MMVM_L2_CNTL5__L2_CACHE_SMALLK_FRAGMENT_SIZE_MASK 0x0000001FL
#define MMVM_L2_CNTL5__WALKER_PRIORITY_CLIENT_ID_MASK 0x00003FE0L
//MMVM_L2_GCR_CNTL
#define MMVM_L2_GCR_CNTL__GCR_ENABLE__SHIFT 0x0
#define MMVM_L2_GCR_CNTL__GCR_CLIENT_ID__SHIFT 0x1
#define MMVM_L2_GCR_CNTL__GCR_ENABLE_MASK 0x00000001L
#define MMVM_L2_GCR_CNTL__GCR_CLIENT_ID_MASK 0x000003FEL
//MMVML2_WALKER_MACRO_THROTTLE_TIME
#define MMVML2_WALKER_MACRO_THROTTLE_TIME__TIME__SHIFT 0x0
#define MMVML2_WALKER_MACRO_THROTTLE_TIME__TIME_MASK 0x00FFFFFFL
//MMVML2_WALKER_MACRO_THROTTLE_FETCH_LIMIT
#define MMVML2_WALKER_MACRO_THROTTLE_FETCH_LIMIT__LIMIT__SHIFT 0x1
#define MMVML2_WALKER_MACRO_THROTTLE_FETCH_LIMIT__LIMIT_MASK 0x0000FFFEL
//MMVML2_WALKER_MICRO_THROTTLE_TIME
#define MMVML2_WALKER_MICRO_THROTTLE_TIME__TIME__SHIFT 0x0
#define MMVML2_WALKER_MICRO_THROTTLE_TIME__TIME_MASK 0x00FFFFFFL
//MMVML2_WALKER_MICRO_THROTTLE_FETCH_LIMIT
#define MMVML2_WALKER_MICRO_THROTTLE_FETCH_LIMIT__LIMIT__SHIFT 0x1
#define MMVML2_WALKER_MICRO_THROTTLE_FETCH_LIMIT__LIMIT_MASK 0x0000FFFEL

// addressBlock: mmhub_mmutcl2_mmvml2vcdec
//MMVM_CONTEXT0_CNTL
#define MMVM_CONTEXT0_CNTL__ENABLE_CONTEXT__SHIFT 0x0
#define MMVM_CONTEXT0_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
#define MMVM_CONTEXT0_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
#define MMVM_CONTEXT0_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
#define MMVM_CONTEXT0_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
#define MMVM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
#define MMVM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
#define MMVM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
#define MMVM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
#define MMVM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
#define MMVM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
#define MMVM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
#define MMVM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
#define MMVM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
#define MMVM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
#define MMVM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
#define MMVM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
#define MMVM_CONTEXT0_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
#define MMVM_CONTEXT0_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
#define MMVM_CONTEXT0_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
#define MMVM_CONTEXT0_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
#define MMVM_CONTEXT0_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
#define MMVM_CONTEXT0_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
#define MMVM_CONTEXT0_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
#define MMVM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
#define MMVM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
#define MMVM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
#define MMVM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
#define MMVM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
#define MMVM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
#define MMVM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
#define MMVM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
#define MMVM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
#define MMVM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
#define MMVM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
#define MMVM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
#define MMVM_CONTEXT0_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
#define MMVM_CONTEXT0_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
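/*
 * Composition sketch (illustrative only, not part of the generated register
 * list): the MMVM_CONTEXT*_CNTL registers share one field layout, so a VM
 * context is typically enabled by OR-ing several shifted fields into a
 * single value before writing it out. The hypothetical helper below builds
 * a minimal MMVM_CONTEXT0_CNTL value for a page table of the given depth.
 */
static inline unsigned int
mmvm_context0_cntl_enable(unsigned int depth)
{
	unsigned int v = 0;

	/* ENABLE_CONTEXT is a single bit at position 0 */
	v |= (1u << MMVM_CONTEXT0_CNTL__ENABLE_CONTEXT__SHIFT) &
	     MMVM_CONTEXT0_CNTL__ENABLE_CONTEXT_MASK;
	/* PAGE_TABLE_DEPTH occupies the two bits above it */
	v |= (depth << MMVM_CONTEXT0_CNTL__PAGE_TABLE_DEPTH__SHIFT) &
	     MMVM_CONTEXT0_CNTL__PAGE_TABLE_DEPTH_MASK;
	return v;
}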
//MMVM_CONTEXT1_CNTL
#define MMVM_CONTEXT1_CNTL__ENABLE_CONTEXT__SHIFT 0x0
#define MMVM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
#define MMVM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
#define MMVM_CONTEXT1_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
#define MMVM_CONTEXT1_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
#define MMVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
#define MMVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
#define MMVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
#define MMVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
#define MMVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
#define MMVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
#define MMVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
#define MMVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
#define MMVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
#define MMVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
#define MMVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
#define MMVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
#define MMVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
#define MMVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
#define MMVM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
#define MMVM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
#define MMVM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
#define MMVM_CONTEXT1_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
#define MMVM_CONTEXT1_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
#define MMVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
#define MMVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
#define MMVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
#define MMVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
#define MMVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
#define MMVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
#define MMVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
#define MMVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
#define MMVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
#define MMVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
#define MMVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
#define MMVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
#define MMVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
#define MMVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
//MMVM_CONTEXT2_CNTL
#define MMVM_CONTEXT2_CNTL__ENABLE_CONTEXT__SHIFT 0x0
#define MMVM_CONTEXT2_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
#define MMVM_CONTEXT2_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
#define MMVM_CONTEXT2_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
#define MMVM_CONTEXT2_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
#define MMVM_CONTEXT2_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
#define MMVM_CONTEXT2_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
#define MMVM_CONTEXT2_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
#define MMVM_CONTEXT2_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
#define MMVM_CONTEXT2_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
#define MMVM_CONTEXT2_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
#define MMVM_CONTEXT2_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
#define MMVM_CONTEXT2_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
#define MMVM_CONTEXT2_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
#define MMVM_CONTEXT2_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
#define MMVM_CONTEXT2_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
#define MMVM_CONTEXT2_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
#define MMVM_CONTEXT2_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
#define MMVM_CONTEXT2_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
#define MMVM_CONTEXT2_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
#define MMVM_CONTEXT2_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
#define MMVM_CONTEXT2_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
#define MMVM_CONTEXT2_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
#define MMVM_CONTEXT2_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
#define MMVM_CONTEXT2_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
#define MMVM_CONTEXT2_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
#define MMVM_CONTEXT2_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
#define MMVM_CONTEXT2_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
#define MMVM_CONTEXT2_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
#define MMVM_CONTEXT2_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
#define MMVM_CONTEXT2_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
#define MMVM_CONTEXT2_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
#define MMVM_CONTEXT2_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
#define MMVM_CONTEXT2_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
#define MMVM_CONTEXT2_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
#define MMVM_CONTEXT2_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
#define MMVM_CONTEXT2_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
#define MMVM_CONTEXT2_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
//MMVM_CONTEXT3_CNTL
#define MMVM_CONTEXT3_CNTL__ENABLE_CONTEXT__SHIFT 0x0
#define MMVM_CONTEXT3_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
#define MMVM_CONTEXT3_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
#define MMVM_CONTEXT3_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
#define MMVM_CONTEXT3_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
#define MMVM_CONTEXT3_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
#define MMVM_CONTEXT3_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
#define MMVM_CONTEXT3_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
#define MMVM_CONTEXT3_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
#define MMVM_CONTEXT3_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
#define MMVM_CONTEXT3_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
#define MMVM_CONTEXT3_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
#define MMVM_CONTEXT3_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
#define MMVM_CONTEXT3_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
#define MMVM_CONTEXT3_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
#define MMVM_CONTEXT3_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
#define MMVM_CONTEXT3_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
#define MMVM_CONTEXT3_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
#define MMVM_CONTEXT3_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
#define MMVM_CONTEXT3_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
#define MMVM_CONTEXT3_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
#define MMVM_CONTEXT3_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
#define MMVM_CONTEXT3_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
#define MMVM_CONTEXT3_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
#define MMVM_CONTEXT3_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
#define MMVM_CONTEXT3_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
#define MMVM_CONTEXT3_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
#define MMVM_CONTEXT3_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
#define MMVM_CONTEXT3_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
#define MMVM_CONTEXT3_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
#define MMVM_CONTEXT3_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
#define MMVM_CONTEXT3_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
#define MMVM_CONTEXT3_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
#define MMVM_CONTEXT3_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
#define MMVM_CONTEXT3_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
#define MMVM_CONTEXT3_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
#define MMVM_CONTEXT3_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
#define MMVM_CONTEXT3_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
//MMVM_CONTEXT4_CNTL
#define MMVM_CONTEXT4_CNTL__ENABLE_CONTEXT__SHIFT 0x0
#define MMVM_CONTEXT4_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
#define MMVM_CONTEXT4_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
#define MMVM_CONTEXT4_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
#define MMVM_CONTEXT4_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
#define MMVM_CONTEXT4_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
#define MMVM_CONTEXT4_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
#define MMVM_CONTEXT4_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
#define MMVM_CONTEXT4_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
#define MMVM_CONTEXT4_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
#define MMVM_CONTEXT4_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
#define MMVM_CONTEXT4_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
#define MMVM_CONTEXT4_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
#define MMVM_CONTEXT4_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
#define MMVM_CONTEXT4_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
#define MMVM_CONTEXT4_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
#define MMVM_CONTEXT4_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
#define MMVM_CONTEXT4_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
#define MMVM_CONTEXT4_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
#define MMVM_CONTEXT4_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
#define MMVM_CONTEXT4_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
#define MMVM_CONTEXT4_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
#define MMVM_CONTEXT4_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
#define MMVM_CONTEXT4_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
#define MMVM_CONTEXT4_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
#define MMVM_CONTEXT4_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
#define MMVM_CONTEXT4_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
#define MMVM_CONTEXT4_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
#define MMVM_CONTEXT4_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
#define MMVM_CONTEXT4_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
#define MMVM_CONTEXT4_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
#define MMVM_CONTEXT4_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
#define MMVM_CONTEXT4_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
#define MMVM_CONTEXT4_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
#define MMVM_CONTEXT4_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
#define MMVM_CONTEXT4_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
#define MMVM_CONTEXT4_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
#define MMVM_CONTEXT4_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
//MMVM_CONTEXT5_CNTL
#define MMVM_CONTEXT5_CNTL__ENABLE_CONTEXT__SHIFT 0x0
#define MMVM_CONTEXT5_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
#define MMVM_CONTEXT5_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
#define MMVM_CONTEXT5_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
#define MMVM_CONTEXT5_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
#define MMVM_CONTEXT5_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
#define MMVM_CONTEXT5_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
#define MMVM_CONTEXT5_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
#define MMVM_CONTEXT5_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
#define MMVM_CONTEXT5_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
#define MMVM_CONTEXT5_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
#define MMVM_CONTEXT5_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
#define MMVM_CONTEXT5_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
#define MMVM_CONTEXT5_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
#define MMVM_CONTEXT5_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
#define MMVM_CONTEXT5_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
#define MMVM_CONTEXT5_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
#define MMVM_CONTEXT5_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
#define MMVM_CONTEXT5_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
#define MMVM_CONTEXT5_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
#define MMVM_CONTEXT5_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
#define MMVM_CONTEXT5_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
#define MMVM_CONTEXT5_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
#define MMVM_CONTEXT5_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
#define MMVM_CONTEXT5_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
#define MMVM_CONTEXT5_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
#define MMVM_CONTEXT5_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
#define MMVM_CONTEXT5_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
#define MMVM_CONTEXT5_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
#define MMVM_CONTEXT5_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
#define MMVM_CONTEXT5_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
#define MMVM_CONTEXT5_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
#define MMVM_CONTEXT5_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
#define MMVM_CONTEXT5_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
#define MMVM_CONTEXT5_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
#define MMVM_CONTEXT5_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
#define MMVM_CONTEXT5_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
#define MMVM_CONTEXT5_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
//MMVM_CONTEXT6_CNTL
#define MMVM_CONTEXT6_CNTL__ENABLE_CONTEXT__SHIFT 0x0
#define MMVM_CONTEXT6_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
#define MMVM_CONTEXT6_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
#define MMVM_CONTEXT6_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
#define MMVM_CONTEXT6_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
#define MMVM_CONTEXT6_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
#define MMVM_CONTEXT6_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
#define MMVM_CONTEXT6_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
#define MMVM_CONTEXT6_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
#define MMVM_CONTEXT6_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
#define MMVM_CONTEXT6_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
#define MMVM_CONTEXT6_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
#define MMVM_CONTEXT6_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
#define MMVM_CONTEXT6_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
#define MMVM_CONTEXT6_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
#define MMVM_CONTEXT6_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
#define MMVM_CONTEXT6_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
#define MMVM_CONTEXT6_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
#define MMVM_CONTEXT6_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
#define MMVM_CONTEXT6_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
#define MMVM_CONTEXT6_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
#define MMVM_CONTEXT6_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
#define MMVM_CONTEXT6_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
#define MMVM_CONTEXT6_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
#define MMVM_CONTEXT6_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
#define MMVM_CONTEXT6_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
#define MMVM_CONTEXT6_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
#define MMVM_CONTEXT6_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
#define MMVM_CONTEXT6_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
#define MMVM_CONTEXT6_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
#define MMVM_CONTEXT6_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
#define MMVM_CONTEXT6_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
#define MMVM_CONTEXT6_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
#define MMVM_CONTEXT6_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
#define MMVM_CONTEXT6_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
#define MMVM_CONTEXT6_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
#define MMVM_CONTEXT6_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
#define MMVM_CONTEXT6_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
//MMVM_CONTEXT7_CNTL
#define MMVM_CONTEXT7_CNTL__ENABLE_CONTEXT__SHIFT 0x0
#define MMVM_CONTEXT7_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
#define MMVM_CONTEXT7_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
#define MMVM_CONTEXT7_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
#define MMVM_CONTEXT7_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
#define MMVM_CONTEXT7_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
#define MMVM_CONTEXT7_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
#define MMVM_CONTEXT7_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
#define MMVM_CONTEXT7_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
#define MMVM_CONTEXT7_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
#define MMVM_CONTEXT7_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
#define MMVM_CONTEXT7_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
#define MMVM_CONTEXT7_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
#define MMVM_CONTEXT7_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
#define MMVM_CONTEXT7_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
#define MMVM_CONTEXT7_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
#define MMVM_CONTEXT7_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
#define MMVM_CONTEXT7_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
#define MMVM_CONTEXT7_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
#define MMVM_CONTEXT7_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
#define MMVM_CONTEXT7_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
#define MMVM_CONTEXT7_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
#define MMVM_CONTEXT7_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
#define MMVM_CONTEXT7_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
#define MMVM_CONTEXT7_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
#define MMVM_CONTEXT7_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
#define MMVM_CONTEXT7_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
#define MMVM_CONTEXT7_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
#define MMVM_CONTEXT7_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
#define MMVM_CONTEXT7_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
#define MMVM_CONTEXT7_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
#define MMVM_CONTEXT7_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
#define MMVM_CONTEXT7_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
#define MMVM_CONTEXT7_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
#define MMVM_CONTEXT7_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
#define MMVM_CONTEXT7_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
#define MMVM_CONTEXT7_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
#define MMVM_CONTEXT7_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
//MMVM_CONTEXT8_CNTL
#define MMVM_CONTEXT8_CNTL__ENABLE_CONTEXT__SHIFT 0x0
#define MMVM_CONTEXT8_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
#define MMVM_CONTEXT8_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
#define MMVM_CONTEXT8_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
#define MMVM_CONTEXT8_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
#define MMVM_CONTEXT8_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
#define MMVM_CONTEXT8_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
#define MMVM_CONTEXT8_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
#define MMVM_CONTEXT8_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
#define MMVM_CONTEXT8_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
#define MMVM_CONTEXT8_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
#define MMVM_CONTEXT8_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
#define MMVM_CONTEXT8_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
#define MMVM_CONTEXT8_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
#define MMVM_CONTEXT8_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
#define MMVM_CONTEXT8_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
#define MMVM_CONTEXT8_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
#define MMVM_CONTEXT8_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
#define MMVM_CONTEXT8_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
#define MMVM_CONTEXT8_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
#define MMVM_CONTEXT8_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
#define MMVM_CONTEXT8_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
#define MMVM_CONTEXT8_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
#define MMVM_CONTEXT8_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
#define MMVM_CONTEXT8_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
#define MMVM_CONTEXT8_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
#define MMVM_CONTEXT8_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
#define MMVM_CONTEXT8_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
#define MMVM_CONTEXT8_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
#define MMVM_CONTEXT8_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
#define MMVM_CONTEXT8_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
#define MMVM_CONTEXT8_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
#define MMVM_CONTEXT8_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
#define MMVM_CONTEXT8_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
#define MMVM_CONTEXT8_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
#define MMVM_CONTEXT8_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
#define MMVM_CONTEXT8_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
#define MMVM_CONTEXT8_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
//MMVM_CONTEXT9_CNTL
#define MMVM_CONTEXT9_CNTL__ENABLE_CONTEXT__SHIFT 0x0
#define MMVM_CONTEXT9_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
#define MMVM_CONTEXT9_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
#define MMVM_CONTEXT9_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
#define MMVM_CONTEXT9_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
#define MMVM_CONTEXT9_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
#define MMVM_CONTEXT9_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
#define MMVM_CONTEXT9_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
#define MMVM_CONTEXT9_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
#define MMVM_CONTEXT9_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
#define MMVM_CONTEXT9_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
#define MMVM_CONTEXT9_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
#define MMVM_CONTEXT9_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
#define MMVM_CONTEXT9_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
#define MMVM_CONTEXT9_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
#define MMVM_CONTEXT9_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
#define MMVM_CONTEXT9_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
#define MMVM_CONTEXT9_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
#define MMVM_CONTEXT9_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
#define MMVM_CONTEXT9_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
#define MMVM_CONTEXT9_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
#define MMVM_CONTEXT9_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
#define MMVM_CONTEXT9_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
#define MMVM_CONTEXT9_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
#define MMVM_CONTEXT9_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
#define MMVM_CONTEXT9_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
#define MMVM_CONTEXT9_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
#define MMVM_CONTEXT9_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
#define MMVM_CONTEXT9_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
#define MMVM_CONTEXT9_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
#define MMVM_CONTEXT9_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
#define MMVM_CONTEXT9_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
#define MMVM_CONTEXT9_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
#define MMVM_CONTEXT9_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
#define MMVM_CONTEXT9_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
#define MMVM_CONTEXT9_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
#define MMVM_CONTEXT9_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
#define MMVM_CONTEXT9_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
//MMVM_CONTEXT10_CNTL
#define MMVM_CONTEXT10_CNTL__ENABLE_CONTEXT__SHIFT 0x0
#define MMVM_CONTEXT10_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
#define MMVM_CONTEXT10_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
#define MMVM_CONTEXT10_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
#define MMVM_CONTEXT10_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
#define MMVM_CONTEXT10_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
#define MMVM_CONTEXT10_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
#define MMVM_CONTEXT10_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
#define MMVM_CONTEXT10_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
#define MMVM_CONTEXT10_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
#define MMVM_CONTEXT10_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
#define MMVM_CONTEXT10_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
#define MMVM_CONTEXT10_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
#define MMVM_CONTEXT10_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
#define MMVM_CONTEXT10_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
#define MMVM_CONTEXT10_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
#define MMVM_CONTEXT10_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
#define MMVM_CONTEXT10_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
#define MMVM_CONTEXT10_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
#define MMVM_CONTEXT10_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
#define MMVM_CONTEXT10_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
#define MMVM_CONTEXT10_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
#define MMVM_CONTEXT10_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
#define MMVM_CONTEXT10_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
#define MMVM_CONTEXT10_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
#define MMVM_CONTEXT10_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
#define MMVM_CONTEXT10_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
#define MMVM_CONTEXT10_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
#define MMVM_CONTEXT10_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
#define MMVM_CONTEXT10_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
#define MMVM_CONTEXT10_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
#define MMVM_CONTEXT10_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
#define MMVM_CONTEXT10_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
#define MMVM_CONTEXT10_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
#define MMVM_CONTEXT10_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
#define MMVM_CONTEXT10_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
#define MMVM_CONTEXT10_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
#define MMVM_CONTEXT10_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
//MMVM_CONTEXT11_CNTL
#define MMVM_CONTEXT11_CNTL__ENABLE_CONTEXT__SHIFT 0x0
#define MMVM_CONTEXT11_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
#define MMVM_CONTEXT11_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
#define MMVM_CONTEXT11_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
#define MMVM_CONTEXT11_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
#define MMVM_CONTEXT11_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
#define MMVM_CONTEXT11_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
#define MMVM_CONTEXT11_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
#define MMVM_CONTEXT11_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
#define MMVM_CONTEXT11_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
#define MMVM_CONTEXT11_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
#define MMVM_CONTEXT11_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
#define MMVM_CONTEXT11_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
#define MMVM_CONTEXT11_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
#define MMVM_CONTEXT11_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
#define MMVM_CONTEXT11_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
#define MMVM_CONTEXT11_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
#define MMVM_CONTEXT11_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
#define MMVM_CONTEXT11_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
#define MMVM_CONTEXT11_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
#define MMVM_CONTEXT11_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
#define MMVM_CONTEXT11_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
#define MMVM_CONTEXT11_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
#define MMVM_CONTEXT11_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
#define MMVM_CONTEXT11_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
#define MMVM_CONTEXT11_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
#define MMVM_CONTEXT11_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
#define MMVM_CONTEXT11_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
#define MMVM_CONTEXT11_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
#define MMVM_CONTEXT11_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
#define MMVM_CONTEXT11_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
#define MMVM_CONTEXT11_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
#define MMVM_CONTEXT11_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
#define MMVM_CONTEXT11_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
#define MMVM_CONTEXT11_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
#define MMVM_CONTEXT11_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
#define MMVM_CONTEXT11_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
#define MMVM_CONTEXT11_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
//MMVM_CONTEXT12_CNTL
#define MMVM_CONTEXT12_CNTL__ENABLE_CONTEXT__SHIFT 0x0
#define MMVM_CONTEXT12_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
#define MMVM_CONTEXT12_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
#define MMVM_CONTEXT12_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
#define MMVM_CONTEXT12_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
#define MMVM_CONTEXT12_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
#define MMVM_CONTEXT12_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
#define MMVM_CONTEXT12_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
#define MMVM_CONTEXT12_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
#define MMVM_CONTEXT12_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
#define MMVM_CONTEXT12_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
#define MMVM_CONTEXT12_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
#define MMVM_CONTEXT12_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
#define MMVM_CONTEXT12_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
#define MMVM_CONTEXT12_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
#define MMVM_CONTEXT12_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
#define MMVM_CONTEXT12_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
#define MMVM_CONTEXT12_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
#define MMVM_CONTEXT12_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
#define MMVM_CONTEXT12_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
#define MMVM_CONTEXT12_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
#define MMVM_CONTEXT12_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
#define MMVM_CONTEXT12_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
#define MMVM_CONTEXT12_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
#define MMVM_CONTEXT12_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
#define MMVM_CONTEXT12_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
#define MMVM_CONTEXT12_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
#define MMVM_CONTEXT12_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
#define MMVM_CONTEXT12_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
#define MMVM_CONTEXT12_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
#define MMVM_CONTEXT12_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
#define MMVM_CONTEXT12_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
#define MMVM_CONTEXT12_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
#define MMVM_CONTEXT12_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
#define MMVM_CONTEXT12_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
#define MMVM_CONTEXT12_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
#define MMVM_CONTEXT12_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
#define MMVM_CONTEXT12_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
//MMVM_CONTEXT13_CNTL
#define MMVM_CONTEXT13_CNTL__ENABLE_CONTEXT__SHIFT 0x0
#define MMVM_CONTEXT13_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
#define MMVM_CONTEXT13_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
#define MMVM_CONTEXT13_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
#define MMVM_CONTEXT13_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
#define MMVM_CONTEXT13_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
#define MMVM_CONTEXT13_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
#define MMVM_CONTEXT13_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
#define MMVM_CONTEXT13_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
#define MMVM_CONTEXT13_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
#define MMVM_CONTEXT13_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
#define MMVM_CONTEXT13_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
#define MMVM_CONTEXT13_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
#define MMVM_CONTEXT13_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
#define MMVM_CONTEXT13_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
#define MMVM_CONTEXT13_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
#define MMVM_CONTEXT13_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
#define MMVM_CONTEXT13_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
#define MMVM_CONTEXT13_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
#define MMVM_CONTEXT13_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
#define MMVM_CONTEXT13_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
#define MMVM_CONTEXT13_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
#define MMVM_CONTEXT13_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
#define MMVM_CONTEXT13_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
#define MMVM_CONTEXT13_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
#define MMVM_CONTEXT13_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
#define MMVM_CONTEXT13_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
#define MMVM_CONTEXT13_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
#define MMVM_CONTEXT13_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
#define MMVM_CONTEXT13_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
#define MMVM_CONTEXT13_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
#define MMVM_CONTEXT13_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
#define MMVM_CONTEXT13_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
#define MMVM_CONTEXT13_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
#define MMVM_CONTEXT13_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
#define MMVM_CONTEXT13_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
#define MMVM_CONTEXT13_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
#define MMVM_CONTEXT13_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
//MMVM_CONTEXT14_CNTL
#define MMVM_CONTEXT14_CNTL__ENABLE_CONTEXT__SHIFT 0x0
#define MMVM_CONTEXT14_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
#define MMVM_CONTEXT14_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
#define MMVM_CONTEXT14_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
#define MMVM_CONTEXT14_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
#define MMVM_CONTEXT14_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
#define MMVM_CONTEXT14_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
#define MMVM_CONTEXT14_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
#define MMVM_CONTEXT14_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
#define MMVM_CONTEXT14_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
#define MMVM_CONTEXT14_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
#define MMVM_CONTEXT14_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
#define MMVM_CONTEXT14_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
#define MMVM_CONTEXT14_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
#define MMVM_CONTEXT14_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
#define MMVM_CONTEXT14_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
#define MMVM_CONTEXT14_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
#define MMVM_CONTEXT14_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
#define MMVM_CONTEXT14_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
#define MMVM_CONTEXT14_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
#define MMVM_CONTEXT14_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
#define MMVM_CONTEXT14_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
#define MMVM_CONTEXT14_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
#define MMVM_CONTEXT14_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
#define MMVM_CONTEXT14_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
#define MMVM_CONTEXT14_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
#define MMVM_CONTEXT14_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
#define MMVM_CONTEXT14_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
#define MMVM_CONTEXT14_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
#define MMVM_CONTEXT14_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
#define MMVM_CONTEXT14_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
#define MMVM_CONTEXT14_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
#define MMVM_CONTEXT14_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
#define MMVM_CONTEXT14_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
#define MMVM_CONTEXT14_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
#define MMVM_CONTEXT14_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
#define MMVM_CONTEXT14_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
#define MMVM_CONTEXT14_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
//MMVM_CONTEXT15_CNTL
#define MMVM_CONTEXT15_CNTL__ENABLE_CONTEXT__SHIFT 0x0
#define MMVM_CONTEXT15_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
#define MMVM_CONTEXT15_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
#define MMVM_CONTEXT15_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
#define MMVM_CONTEXT15_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
#define MMVM_CONTEXT15_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
#define MMVM_CONTEXT15_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
#define MMVM_CONTEXT15_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
#define MMVM_CONTEXT15_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
#define MMVM_CONTEXT15_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
#define MMVM_CONTEXT15_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
#define MMVM_CONTEXT15_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
#define MMVM_CONTEXT15_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
#define MMVM_CONTEXT15_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
#define MMVM_CONTEXT15_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
#define MMVM_CONTEXT15_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
#define MMVM_CONTEXT15_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
#define MMVM_CONTEXT15_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
#define MMVM_CONTEXT15_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
#define MMVM_CONTEXT15_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
#define MMVM_CONTEXT15_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
#define MMVM_CONTEXT15_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
#define MMVM_CONTEXT15_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
#define MMVM_CONTEXT15_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
#define MMVM_CONTEXT15_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
#define MMVM_CONTEXT15_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
#define MMVM_CONTEXT15_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
#define MMVM_CONTEXT15_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
#define MMVM_CONTEXT15_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
#define MMVM_CONTEXT15_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
#define MMVM_CONTEXT15_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
#define MMVM_CONTEXT15_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
#define MMVM_CONTEXT15_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
#define MMVM_CONTEXT15_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
#define MMVM_CONTEXT15_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
#define MMVM_CONTEXT15_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
#define MMVM_CONTEXT15_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
#define MMVM_CONTEXT15_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
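/*
 * Illustrative sketch (not part of the generated register list): the
 * per-context CNTL registers above are plain SHIFT/MASK bitfield pairs, so a
 * value is composed by shifting each field into place and masking it. The
 * helper below is hypothetical; the hardware meaning of a particular
 * PAGE_TABLE_DEPTH or PAGE_TABLE_BLOCK_SIZE encoding is not defined in this
 * file.
 */
#if 0	/* example only -- not compiled */
static inline unsigned int mmvm_context_cntl_compose(unsigned int depth,
						     unsigned int block_size)
{
	unsigned int cntl = 0;

	cntl |= (1u << MMVM_CONTEXT10_CNTL__ENABLE_CONTEXT__SHIFT) &
		MMVM_CONTEXT10_CNTL__ENABLE_CONTEXT_MASK;
	cntl |= (depth << MMVM_CONTEXT10_CNTL__PAGE_TABLE_DEPTH__SHIFT) &
		MMVM_CONTEXT10_CNTL__PAGE_TABLE_DEPTH_MASK;
	cntl |= (block_size << MMVM_CONTEXT10_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT) &
		MMVM_CONTEXT10_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK;
	/* raise an interrupt on valid-bit protection faults */
	cntl |= (1u << MMVM_CONTEXT10_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT) &
		MMVM_CONTEXT10_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
	return cntl;
}
#endif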
//MMVM_CONTEXTS_DISABLE
#define MMVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_0__SHIFT 0x0
#define MMVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_1__SHIFT 0x1
#define MMVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_2__SHIFT 0x2
#define MMVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_3__SHIFT 0x3
#define MMVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_4__SHIFT 0x4
#define MMVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_5__SHIFT 0x5
#define MMVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_6__SHIFT 0x6
#define MMVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_7__SHIFT 0x7
#define MMVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_8__SHIFT 0x8
#define MMVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_9__SHIFT 0x9
#define MMVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_10__SHIFT 0xa
#define MMVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_11__SHIFT 0xb
#define MMVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_12__SHIFT 0xc
#define MMVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_13__SHIFT 0xd
#define MMVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_14__SHIFT 0xe
#define MMVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_15__SHIFT 0xf
#define MMVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_0_MASK 0x00000001L
#define MMVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_1_MASK 0x00000002L
#define MMVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_2_MASK 0x00000004L
#define MMVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_3_MASK 0x00000008L
#define MMVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_4_MASK 0x00000010L
#define MMVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_5_MASK 0x00000020L
#define MMVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_6_MASK 0x00000040L
#define MMVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_7_MASK 0x00000080L
#define MMVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_8_MASK 0x00000100L
#define MMVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_9_MASK 0x00000200L
#define MMVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_10_MASK 0x00000400L
#define MMVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_11_MASK 0x00000800L
#define MMVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_12_MASK 0x00001000L
#define MMVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_13_MASK 0x00002000L
#define MMVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_14_MASK 0x00004000L
#define MMVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_15_MASK 0x00008000L
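/*
 * Illustrative note (example only): MMVM_CONTEXTS_DISABLE carries one disable
 * bit per VM context, so per the SHIFT values above context N maps directly
 * to bit N and a mask can be formed generically:
 */
#if 0	/* example only -- not compiled */
#define EXAMPLE_MMVM_CONTEXT_DISABLE_MASK(ctx)	(1u << (ctx))	/* hypothetical helper */
#endif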
//MMVM_INVALIDATE_ENG0_SEM
#define MMVM_INVALIDATE_ENG0_SEM__SEMAPHORE__SHIFT 0x0
#define MMVM_INVALIDATE_ENG0_SEM__SEMAPHORE_MASK 0x00000001L
//MMVM_INVALIDATE_ENG1_SEM
#define MMVM_INVALIDATE_ENG1_SEM__SEMAPHORE__SHIFT 0x0
#define MMVM_INVALIDATE_ENG1_SEM__SEMAPHORE_MASK 0x00000001L
//MMVM_INVALIDATE_ENG2_SEM
#define MMVM_INVALIDATE_ENG2_SEM__SEMAPHORE__SHIFT 0x0
#define MMVM_INVALIDATE_ENG2_SEM__SEMAPHORE_MASK 0x00000001L
//MMVM_INVALIDATE_ENG3_SEM
#define MMVM_INVALIDATE_ENG3_SEM__SEMAPHORE__SHIFT 0x0
#define MMVM_INVALIDATE_ENG3_SEM__SEMAPHORE_MASK 0x00000001L
//MMVM_INVALIDATE_ENG4_SEM
#define MMVM_INVALIDATE_ENG4_SEM__SEMAPHORE__SHIFT 0x0
#define MMVM_INVALIDATE_ENG4_SEM__SEMAPHORE_MASK 0x00000001L
//MMVM_INVALIDATE_ENG5_SEM
#define MMVM_INVALIDATE_ENG5_SEM__SEMAPHORE__SHIFT 0x0
#define MMVM_INVALIDATE_ENG5_SEM__SEMAPHORE_MASK 0x00000001L
//MMVM_INVALIDATE_ENG6_SEM
#define MMVM_INVALIDATE_ENG6_SEM__SEMAPHORE__SHIFT 0x0
#define MMVM_INVALIDATE_ENG6_SEM__SEMAPHORE_MASK 0x00000001L
//MMVM_INVALIDATE_ENG7_SEM
#define MMVM_INVALIDATE_ENG7_SEM__SEMAPHORE__SHIFT 0x0
#define MMVM_INVALIDATE_ENG7_SEM__SEMAPHORE_MASK 0x00000001L
//MMVM_INVALIDATE_ENG8_SEM
#define MMVM_INVALIDATE_ENG8_SEM__SEMAPHORE__SHIFT 0x0
#define MMVM_INVALIDATE_ENG8_SEM__SEMAPHORE_MASK 0x00000001L
//MMVM_INVALIDATE_ENG9_SEM
#define MMVM_INVALIDATE_ENG9_SEM__SEMAPHORE__SHIFT 0x0
#define MMVM_INVALIDATE_ENG9_SEM__SEMAPHORE_MASK 0x00000001L
//MMVM_INVALIDATE_ENG10_SEM
#define MMVM_INVALIDATE_ENG10_SEM__SEMAPHORE__SHIFT 0x0
#define MMVM_INVALIDATE_ENG10_SEM__SEMAPHORE_MASK 0x00000001L
//MMVM_INVALIDATE_ENG11_SEM
#define MMVM_INVALIDATE_ENG11_SEM__SEMAPHORE__SHIFT 0x0
#define MMVM_INVALIDATE_ENG11_SEM__SEMAPHORE_MASK 0x00000001L
//MMVM_INVALIDATE_ENG12_SEM
#define MMVM_INVALIDATE_ENG12_SEM__SEMAPHORE__SHIFT 0x0
#define MMVM_INVALIDATE_ENG12_SEM__SEMAPHORE_MASK 0x00000001L
//MMVM_INVALIDATE_ENG13_SEM
#define MMVM_INVALIDATE_ENG13_SEM__SEMAPHORE__SHIFT 0x0
#define MMVM_INVALIDATE_ENG13_SEM__SEMAPHORE_MASK 0x00000001L
//MMVM_INVALIDATE_ENG14_SEM
#define MMVM_INVALIDATE_ENG14_SEM__SEMAPHORE__SHIFT 0x0
#define MMVM_INVALIDATE_ENG14_SEM__SEMAPHORE_MASK 0x00000001L
//MMVM_INVALIDATE_ENG15_SEM
#define MMVM_INVALIDATE_ENG15_SEM__SEMAPHORE__SHIFT 0x0
#define MMVM_INVALIDATE_ENG15_SEM__SEMAPHORE_MASK 0x00000001L
//MMVM_INVALIDATE_ENG16_SEM
#define MMVM_INVALIDATE_ENG16_SEM__SEMAPHORE__SHIFT 0x0
#define MMVM_INVALIDATE_ENG16_SEM__SEMAPHORE_MASK 0x00000001L
//MMVM_INVALIDATE_ENG17_SEM
#define MMVM_INVALIDATE_ENG17_SEM__SEMAPHORE__SHIFT 0x0
#define MMVM_INVALIDATE_ENG17_SEM__SEMAPHORE_MASK 0x00000001L
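/*
 * Illustrative sketch (example only): each invalidation engine exposes a
 * one-bit SEMAPHORE register; a driver sharing an engine would typically
 * acquire it before issuing a request, e.g. by polling bit 0 until it reads
 * back set. example_rreg() is a hypothetical register accessor, not defined
 * here.
 */
#if 0	/* example only -- not compiled */
extern unsigned int example_rreg(unsigned int reg);	/* hypothetical */

static inline void mmvm_inv_sem_acquire(unsigned int sem_reg)
{
	/* spin until the semaphore reads back as acquired */
	while (!(example_rreg(sem_reg) & MMVM_INVALIDATE_ENG0_SEM__SEMAPHORE_MASK))
		;
}
#endif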
//MMVM_INVALIDATE_ENG0_REQ
#define MMVM_INVALIDATE_ENG0_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
#define MMVM_INVALIDATE_ENG0_REQ__FLUSH_TYPE__SHIFT 0x10
#define MMVM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
#define MMVM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
#define MMVM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
#define MMVM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
#define MMVM_INVALIDATE_ENG0_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
#define MMVM_INVALIDATE_ENG0_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
#define MMVM_INVALIDATE_ENG0_REQ__LOG_REQUEST__SHIFT 0x19
#define MMVM_INVALIDATE_ENG0_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
#define MMVM_INVALIDATE_ENG0_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
#define MMVM_INVALIDATE_ENG0_REQ__FLUSH_TYPE_MASK 0x00070000L
#define MMVM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
#define MMVM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
#define MMVM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
#define MMVM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
#define MMVM_INVALIDATE_ENG0_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
#define MMVM_INVALIDATE_ENG0_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
#define MMVM_INVALIDATE_ENG0_REQ__LOG_REQUEST_MASK 0x02000000L
#define MMVM_INVALIDATE_ENG0_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
//MMVM_INVALIDATE_ENG1_REQ
#define MMVM_INVALIDATE_ENG1_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
#define MMVM_INVALIDATE_ENG1_REQ__FLUSH_TYPE__SHIFT 0x10
#define MMVM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
#define MMVM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
#define MMVM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
#define MMVM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
#define MMVM_INVALIDATE_ENG1_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
#define MMVM_INVALIDATE_ENG1_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
#define MMVM_INVALIDATE_ENG1_REQ__LOG_REQUEST__SHIFT 0x19
#define MMVM_INVALIDATE_ENG1_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
#define MMVM_INVALIDATE_ENG1_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
#define MMVM_INVALIDATE_ENG1_REQ__FLUSH_TYPE_MASK 0x00070000L
#define MMVM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
#define MMVM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
#define MMVM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
#define MMVM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
#define MMVM_INVALIDATE_ENG1_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
#define MMVM_INVALIDATE_ENG1_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
#define MMVM_INVALIDATE_ENG1_REQ__LOG_REQUEST_MASK 0x02000000L
#define MMVM_INVALIDATE_ENG1_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
//MMVM_INVALIDATE_ENG2_REQ
#define MMVM_INVALIDATE_ENG2_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
#define MMVM_INVALIDATE_ENG2_REQ__FLUSH_TYPE__SHIFT 0x10
#define MMVM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
#define MMVM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
#define MMVM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
#define MMVM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
#define MMVM_INVALIDATE_ENG2_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
#define MMVM_INVALIDATE_ENG2_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
#define MMVM_INVALIDATE_ENG2_REQ__LOG_REQUEST__SHIFT 0x19
#define MMVM_INVALIDATE_ENG2_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
#define MMVM_INVALIDATE_ENG2_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
#define MMVM_INVALIDATE_ENG2_REQ__FLUSH_TYPE_MASK 0x00070000L
#define MMVM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
#define MMVM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
#define MMVM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
#define MMVM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
#define MMVM_INVALIDATE_ENG2_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
#define MMVM_INVALIDATE_ENG2_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
#define MMVM_INVALIDATE_ENG2_REQ__LOG_REQUEST_MASK 0x02000000L
#define MMVM_INVALIDATE_ENG2_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
//MMVM_INVALIDATE_ENG3_REQ
#define MMVM_INVALIDATE_ENG3_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
#define MMVM_INVALIDATE_ENG3_REQ__FLUSH_TYPE__SHIFT 0x10
#define MMVM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
#define MMVM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
#define MMVM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
#define MMVM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
#define MMVM_INVALIDATE_ENG3_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
#define MMVM_INVALIDATE_ENG3_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
#define MMVM_INVALIDATE_ENG3_REQ__LOG_REQUEST__SHIFT 0x19
#define MMVM_INVALIDATE_ENG3_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
#define MMVM_INVALIDATE_ENG3_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
#define MMVM_INVALIDATE_ENG3_REQ__FLUSH_TYPE_MASK 0x00070000L
#define MMVM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
#define MMVM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
#define MMVM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
#define MMVM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
#define MMVM_INVALIDATE_ENG3_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
#define MMVM_INVALIDATE_ENG3_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
#define MMVM_INVALIDATE_ENG3_REQ__LOG_REQUEST_MASK 0x02000000L
#define MMVM_INVALIDATE_ENG3_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
//MMVM_INVALIDATE_ENG4_REQ
#define MMVM_INVALIDATE_ENG4_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
#define MMVM_INVALIDATE_ENG4_REQ__FLUSH_TYPE__SHIFT 0x10
#define MMVM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
#define MMVM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
#define MMVM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
#define MMVM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
#define MMVM_INVALIDATE_ENG4_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
#define MMVM_INVALIDATE_ENG4_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
#define MMVM_INVALIDATE_ENG4_REQ__LOG_REQUEST__SHIFT 0x19
#define MMVM_INVALIDATE_ENG4_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
#define MMVM_INVALIDATE_ENG4_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
#define MMVM_INVALIDATE_ENG4_REQ__FLUSH_TYPE_MASK 0x00070000L
#define MMVM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
#define MMVM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
#define MMVM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
#define MMVM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
#define MMVM_INVALIDATE_ENG4_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
#define MMVM_INVALIDATE_ENG4_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
#define MMVM_INVALIDATE_ENG4_REQ__LOG_REQUEST_MASK 0x02000000L
#define MMVM_INVALIDATE_ENG4_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
//MMVM_INVALIDATE_ENG5_REQ
#define MMVM_INVALIDATE_ENG5_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
#define MMVM_INVALIDATE_ENG5_REQ__FLUSH_TYPE__SHIFT 0x10
#define MMVM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
#define MMVM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
#define MMVM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
#define MMVM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
#define MMVM_INVALIDATE_ENG5_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
#define MMVM_INVALIDATE_ENG5_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
#define MMVM_INVALIDATE_ENG5_REQ__LOG_REQUEST__SHIFT 0x19
#define MMVM_INVALIDATE_ENG5_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
#define MMVM_INVALIDATE_ENG5_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
#define MMVM_INVALIDATE_ENG5_REQ__FLUSH_TYPE_MASK 0x00070000L
#define MMVM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
#define MMVM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
#define MMVM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
#define MMVM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
#define MMVM_INVALIDATE_ENG5_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
#define MMVM_INVALIDATE_ENG5_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
#define MMVM_INVALIDATE_ENG5_REQ__LOG_REQUEST_MASK 0x02000000L
#define MMVM_INVALIDATE_ENG5_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
//MMVM_INVALIDATE_ENG6_REQ
#define MMVM_INVALIDATE_ENG6_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
#define MMVM_INVALIDATE_ENG6_REQ__FLUSH_TYPE__SHIFT 0x10
#define MMVM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
#define MMVM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
#define MMVM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
#define MMVM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
#define MMVM_INVALIDATE_ENG6_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
#define MMVM_INVALIDATE_ENG6_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
#define MMVM_INVALIDATE_ENG6_REQ__LOG_REQUEST__SHIFT 0x19
#define MMVM_INVALIDATE_ENG6_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
#define MMVM_INVALIDATE_ENG6_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
#define MMVM_INVALIDATE_ENG6_REQ__FLUSH_TYPE_MASK 0x00070000L
#define MMVM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
#define MMVM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
#define MMVM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
#define MMVM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
#define MMVM_INVALIDATE_ENG6_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
#define MMVM_INVALIDATE_ENG6_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
#define MMVM_INVALIDATE_ENG6_REQ__LOG_REQUEST_MASK 0x02000000L
#define MMVM_INVALIDATE_ENG6_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
//MMVM_INVALIDATE_ENG7_REQ
#define MMVM_INVALIDATE_ENG7_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
#define MMVM_INVALIDATE_ENG7_REQ__FLUSH_TYPE__SHIFT 0x10
#define MMVM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
#define MMVM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
#define MMVM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
#define MMVM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
#define MMVM_INVALIDATE_ENG7_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
#define MMVM_INVALIDATE_ENG7_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
#define MMVM_INVALIDATE_ENG7_REQ__LOG_REQUEST__SHIFT 0x19
#define MMVM_INVALIDATE_ENG7_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
#define MMVM_INVALIDATE_ENG7_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
#define MMVM_INVALIDATE_ENG7_REQ__FLUSH_TYPE_MASK 0x00070000L
#define MMVM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
#define MMVM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
#define MMVM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
#define MMVM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
#define MMVM_INVALIDATE_ENG7_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
#define MMVM_INVALIDATE_ENG7_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
#define MMVM_INVALIDATE_ENG7_REQ__LOG_REQUEST_MASK 0x02000000L
#define MMVM_INVALIDATE_ENG7_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
//MMVM_INVALIDATE_ENG8_REQ
#define MMVM_INVALIDATE_ENG8_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
#define MMVM_INVALIDATE_ENG8_REQ__FLUSH_TYPE__SHIFT 0x10
#define MMVM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
#define MMVM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
#define MMVM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
#define MMVM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
#define MMVM_INVALIDATE_ENG8_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
#define MMVM_INVALIDATE_ENG8_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
#define MMVM_INVALIDATE_ENG8_REQ__LOG_REQUEST__SHIFT 0x19
#define MMVM_INVALIDATE_ENG8_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
#define MMVM_INVALIDATE_ENG8_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
#define MMVM_INVALIDATE_ENG8_REQ__FLUSH_TYPE_MASK 0x00070000L
#define MMVM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
#define MMVM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
#define MMVM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
#define MMVM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
#define MMVM_INVALIDATE_ENG8_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
#define MMVM_INVALIDATE_ENG8_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
#define MMVM_INVALIDATE_ENG8_REQ__LOG_REQUEST_MASK 0x02000000L
#define MMVM_INVALIDATE_ENG8_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
//MMVM_INVALIDATE_ENG9_REQ
#define MMVM_INVALIDATE_ENG9_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
#define MMVM_INVALIDATE_ENG9_REQ__FLUSH_TYPE__SHIFT 0x10
#define MMVM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
#define MMVM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
#define MMVM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
#define MMVM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
#define MMVM_INVALIDATE_ENG9_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
#define MMVM_INVALIDATE_ENG9_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
#define MMVM_INVALIDATE_ENG9_REQ__LOG_REQUEST__SHIFT 0x19
#define MMVM_INVALIDATE_ENG9_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
#define MMVM_INVALIDATE_ENG9_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
#define MMVM_INVALIDATE_ENG9_REQ__FLUSH_TYPE_MASK 0x00070000L
#define MMVM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
#define MMVM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
#define MMVM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
#define MMVM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
#define MMVM_INVALIDATE_ENG9_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
#define MMVM_INVALIDATE_ENG9_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
#define MMVM_INVALIDATE_ENG9_REQ__LOG_REQUEST_MASK 0x02000000L
#define MMVM_INVALIDATE_ENG9_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
//MMVM_INVALIDATE_ENG10_REQ
#define MMVM_INVALIDATE_ENG10_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
#define MMVM_INVALIDATE_ENG10_REQ__FLUSH_TYPE__SHIFT 0x10
#define MMVM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
#define MMVM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
#define MMVM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
#define MMVM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
#define MMVM_INVALIDATE_ENG10_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
#define MMVM_INVALIDATE_ENG10_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
#define MMVM_INVALIDATE_ENG10_REQ__LOG_REQUEST__SHIFT 0x19
#define MMVM_INVALIDATE_ENG10_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
#define MMVM_INVALIDATE_ENG10_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
#define MMVM_INVALIDATE_ENG10_REQ__FLUSH_TYPE_MASK 0x00070000L
#define MMVM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
#define MMVM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
#define MMVM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
#define MMVM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
#define MMVM_INVALIDATE_ENG10_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
#define MMVM_INVALIDATE_ENG10_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
#define MMVM_INVALIDATE_ENG10_REQ__LOG_REQUEST_MASK 0x02000000L
#define MMVM_INVALIDATE_ENG10_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
//MMVM_INVALIDATE_ENG11_REQ
#define MMVM_INVALIDATE_ENG11_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
#define MMVM_INVALIDATE_ENG11_REQ__FLUSH_TYPE__SHIFT 0x10
#define MMVM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
#define MMVM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
#define MMVM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
#define MMVM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
#define MMVM_INVALIDATE_ENG11_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
#define MMVM_INVALIDATE_ENG11_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
#define MMVM_INVALIDATE_ENG11_REQ__LOG_REQUEST__SHIFT 0x19
#define MMVM_INVALIDATE_ENG11_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
#define MMVM_INVALIDATE_ENG11_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
#define MMVM_INVALIDATE_ENG11_REQ__FLUSH_TYPE_MASK 0x00070000L
#define MMVM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
#define MMVM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
#define MMVM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
#define MMVM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
#define MMVM_INVALIDATE_ENG11_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
#define MMVM_INVALIDATE_ENG11_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
#define MMVM_INVALIDATE_ENG11_REQ__LOG_REQUEST_MASK 0x02000000L
#define MMVM_INVALIDATE_ENG11_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
//MMVM_INVALIDATE_ENG12_REQ
#define MMVM_INVALIDATE_ENG12_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
#define MMVM_INVALIDATE_ENG12_REQ__FLUSH_TYPE__SHIFT 0x10
#define MMVM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
#define MMVM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
#define MMVM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
#define MMVM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
#define MMVM_INVALIDATE_ENG12_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
#define MMVM_INVALIDATE_ENG12_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
#define MMVM_INVALIDATE_ENG12_REQ__LOG_REQUEST__SHIFT 0x19
#define MMVM_INVALIDATE_ENG12_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
#define MMVM_INVALIDATE_ENG12_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
#define MMVM_INVALIDATE_ENG12_REQ__FLUSH_TYPE_MASK 0x00070000L
#define MMVM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
#define MMVM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
#define MMVM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
#define MMVM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
#define MMVM_INVALIDATE_ENG12_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
#define MMVM_INVALIDATE_ENG12_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
#define MMVM_INVALIDATE_ENG12_REQ__LOG_REQUEST_MASK 0x02000000L
#define MMVM_INVALIDATE_ENG12_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
//MMVM_INVALIDATE_ENG13_REQ
#define MMVM_INVALIDATE_ENG13_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
#define MMVM_INVALIDATE_ENG13_REQ__FLUSH_TYPE__SHIFT 0x10
#define MMVM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
#define MMVM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
#define MMVM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
#define MMVM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
#define MMVM_INVALIDATE_ENG13_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
#define MMVM_INVALIDATE_ENG13_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
#define MMVM_INVALIDATE_ENG13_REQ__LOG_REQUEST__SHIFT 0x19
#define MMVM_INVALIDATE_ENG13_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
#define MMVM_INVALIDATE_ENG13_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
#define MMVM_INVALIDATE_ENG13_REQ__FLUSH_TYPE_MASK 0x00070000L
#define MMVM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
#define MMVM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
#define MMVM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
#define MMVM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
#define MMVM_INVALIDATE_ENG13_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
#define MMVM_INVALIDATE_ENG13_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
#define MMVM_INVALIDATE_ENG13_REQ__LOG_REQUEST_MASK 0x02000000L
#define MMVM_INVALIDATE_ENG13_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
//MMVM_INVALIDATE_ENG14_REQ
#define MMVM_INVALIDATE_ENG14_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
#define MMVM_INVALIDATE_ENG14_REQ__FLUSH_TYPE__SHIFT 0x10
#define MMVM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
#define MMVM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
#define MMVM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
#define MMVM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
#define MMVM_INVALIDATE_ENG14_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
#define MMVM_INVALIDATE_ENG14_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
#define MMVM_INVALIDATE_ENG14_REQ__LOG_REQUEST__SHIFT 0x19
#define MMVM_INVALIDATE_ENG14_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
#define MMVM_INVALIDATE_ENG14_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
#define MMVM_INVALIDATE_ENG14_REQ__FLUSH_TYPE_MASK 0x00070000L
#define MMVM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
#define MMVM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
#define MMVM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
#define MMVM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
#define MMVM_INVALIDATE_ENG14_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
#define MMVM_INVALIDATE_ENG14_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
#define MMVM_INVALIDATE_ENG14_REQ__LOG_REQUEST_MASK 0x02000000L
#define MMVM_INVALIDATE_ENG14_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
//MMVM_INVALIDATE_ENG15_REQ
#define MMVM_INVALIDATE_ENG15_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
#define MMVM_INVALIDATE_ENG15_REQ__FLUSH_TYPE__SHIFT 0x10
#define MMVM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
#define MMVM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
#define MMVM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
#define MMVM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
#define MMVM_INVALIDATE_ENG15_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
#define MMVM_INVALIDATE_ENG15_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
#define MMVM_INVALIDATE_ENG15_REQ__LOG_REQUEST__SHIFT 0x19
#define MMVM_INVALIDATE_ENG15_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
#define MMVM_INVALIDATE_ENG15_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
#define MMVM_INVALIDATE_ENG15_REQ__FLUSH_TYPE_MASK 0x00070000L
#define MMVM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
#define MMVM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
#define MMVM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
#define MMVM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
#define MMVM_INVALIDATE_ENG15_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
#define MMVM_INVALIDATE_ENG15_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
#define MMVM_INVALIDATE_ENG15_REQ__LOG_REQUEST_MASK 0x02000000L
#define MMVM_INVALIDATE_ENG15_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
//MMVM_INVALIDATE_ENG16_REQ
#define MMVM_INVALIDATE_ENG16_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
#define MMVM_INVALIDATE_ENG16_REQ__FLUSH_TYPE__SHIFT 0x10
#define MMVM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
#define MMVM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
#define MMVM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
#define MMVM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
#define MMVM_INVALIDATE_ENG16_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
#define MMVM_INVALIDATE_ENG16_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
#define MMVM_INVALIDATE_ENG16_REQ__LOG_REQUEST__SHIFT 0x19
#define MMVM_INVALIDATE_ENG16_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
#define MMVM_INVALIDATE_ENG16_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
#define MMVM_INVALIDATE_ENG16_REQ__FLUSH_TYPE_MASK 0x00070000L
#define MMVM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
#define MMVM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
#define MMVM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
#define MMVM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
#define MMVM_INVALIDATE_ENG16_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
#define MMVM_INVALIDATE_ENG16_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
#define MMVM_INVALIDATE_ENG16_REQ__LOG_REQUEST_MASK 0x02000000L
#define MMVM_INVALIDATE_ENG16_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
//MMVM_INVALIDATE_ENG17_REQ
#define MMVM_INVALIDATE_ENG17_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
#define MMVM_INVALIDATE_ENG17_REQ__FLUSH_TYPE__SHIFT 0x10
#define MMVM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
#define MMVM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
#define MMVM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
#define MMVM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
#define MMVM_INVALIDATE_ENG17_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
#define MMVM_INVALIDATE_ENG17_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
#define MMVM_INVALIDATE_ENG17_REQ__LOG_REQUEST__SHIFT 0x19
#define MMVM_INVALIDATE_ENG17_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
#define MMVM_INVALIDATE_ENG17_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
#define MMVM_INVALIDATE_ENG17_REQ__FLUSH_TYPE_MASK 0x00070000L
#define MMVM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
#define MMVM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
#define MMVM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
#define MMVM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
#define MMVM_INVALIDATE_ENG17_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
#define MMVM_INVALIDATE_ENG17_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
#define MMVM_INVALIDATE_ENG17_REQ__LOG_REQUEST_MASK 0x02000000L
#define MMVM_INVALIDATE_ENG17_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
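/*
 * Illustrative sketch (example only): per the SHIFT/MASK pairs above, an
 * invalidation request packs one request bit per VMID into bits 15:0 plus
 * flush-control flags in the upper bits. The helper is hypothetical and the
 * FLUSH_TYPE encodings are not defined in this file.
 */
#if 0	/* example only -- not compiled */
static inline unsigned int mmvm_invalidate_req_compose(unsigned int vmid,
						       unsigned int flush_type)
{
	unsigned int req = 0;

	/* request invalidation for a single VMID (bit per VMID, 0..15) */
	req |= (1u << vmid) & MMVM_INVALIDATE_ENG0_REQ__PER_VMID_INVALIDATE_REQ_MASK;
	req |= (flush_type << MMVM_INVALIDATE_ENG0_REQ__FLUSH_TYPE__SHIFT) &
	       MMVM_INVALIDATE_ENG0_REQ__FLUSH_TYPE_MASK;
	/* also drop cached L2 and L1 PTEs for the flushed range */
	req |= MMVM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PTES_MASK;
	req |= MMVM_INVALIDATE_ENG0_REQ__INVALIDATE_L1_PTES_MASK;
	return req;
}
#endif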
//MMVM_INVALIDATE_ENG0_ACK
#define MMVM_INVALIDATE_ENG0_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
#define MMVM_INVALIDATE_ENG0_ACK__SEMAPHORE__SHIFT 0x10
#define MMVM_INVALIDATE_ENG0_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
#define MMVM_INVALIDATE_ENG0_ACK__SEMAPHORE_MASK 0x00010000L
//MMVM_INVALIDATE_ENG1_ACK
#define MMVM_INVALIDATE_ENG1_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
#define MMVM_INVALIDATE_ENG1_ACK__SEMAPHORE__SHIFT 0x10
#define MMVM_INVALIDATE_ENG1_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
#define MMVM_INVALIDATE_ENG1_ACK__SEMAPHORE_MASK 0x00010000L
//MMVM_INVALIDATE_ENG2_ACK
#define MMVM_INVALIDATE_ENG2_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
#define MMVM_INVALIDATE_ENG2_ACK__SEMAPHORE__SHIFT 0x10
#define MMVM_INVALIDATE_ENG2_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
#define MMVM_INVALIDATE_ENG2_ACK__SEMAPHORE_MASK 0x00010000L
//MMVM_INVALIDATE_ENG3_ACK
#define MMVM_INVALIDATE_ENG3_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
#define MMVM_INVALIDATE_ENG3_ACK__SEMAPHORE__SHIFT 0x10
#define MMVM_INVALIDATE_ENG3_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
#define MMVM_INVALIDATE_ENG3_ACK__SEMAPHORE_MASK 0x00010000L
//MMVM_INVALIDATE_ENG4_ACK
#define MMVM_INVALIDATE_ENG4_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
#define MMVM_INVALIDATE_ENG4_ACK__SEMAPHORE__SHIFT 0x10
#define MMVM_INVALIDATE_ENG4_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
#define MMVM_INVALIDATE_ENG4_ACK__SEMAPHORE_MASK 0x00010000L
//MMVM_INVALIDATE_ENG5_ACK
#define MMVM_INVALIDATE_ENG5_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
#define MMVM_INVALIDATE_ENG5_ACK__SEMAPHORE__SHIFT 0x10
#define MMVM_INVALIDATE_ENG5_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
#define MMVM_INVALIDATE_ENG5_ACK__SEMAPHORE_MASK 0x00010000L
//MMVM_INVALIDATE_ENG6_ACK
#define MMVM_INVALIDATE_ENG6_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
#define MMVM_INVALIDATE_ENG6_ACK__SEMAPHORE__SHIFT 0x10
#define MMVM_INVALIDATE_ENG6_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
#define MMVM_INVALIDATE_ENG6_ACK__SEMAPHORE_MASK 0x00010000L
//MMVM_INVALIDATE_ENG7_ACK
#define MMVM_INVALIDATE_ENG7_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
#define MMVM_INVALIDATE_ENG7_ACK__SEMAPHORE__SHIFT 0x10
#define MMVM_INVALIDATE_ENG7_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
#define MMVM_INVALIDATE_ENG7_ACK__SEMAPHORE_MASK 0x00010000L
//MMVM_INVALIDATE_ENG8_ACK
#define MMVM_INVALIDATE_ENG8_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
#define MMVM_INVALIDATE_ENG8_ACK__SEMAPHORE__SHIFT 0x10
#define MMVM_INVALIDATE_ENG8_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
#define MMVM_INVALIDATE_ENG8_ACK__SEMAPHORE_MASK 0x00010000L
//MMVM_INVALIDATE_ENG9_ACK
#define MMVM_INVALIDATE_ENG9_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
#define MMVM_INVALIDATE_ENG9_ACK__SEMAPHORE__SHIFT 0x10
#define MMVM_INVALIDATE_ENG9_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
#define MMVM_INVALIDATE_ENG9_ACK__SEMAPHORE_MASK 0x00010000L
//MMVM_INVALIDATE_ENG10_ACK
#define MMVM_INVALIDATE_ENG10_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
#define MMVM_INVALIDATE_ENG10_ACK__SEMAPHORE__SHIFT 0x10
#define MMVM_INVALIDATE_ENG10_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
#define MMVM_INVALIDATE_ENG10_ACK__SEMAPHORE_MASK 0x00010000L
//MMVM_INVALIDATE_ENG11_ACK
#define MMVM_INVALIDATE_ENG11_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
#define MMVM_INVALIDATE_ENG11_ACK__SEMAPHORE__SHIFT 0x10
#define MMVM_INVALIDATE_ENG11_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
#define MMVM_INVALIDATE_ENG11_ACK__SEMAPHORE_MASK 0x00010000L
//MMVM_INVALIDATE_ENG12_ACK
#define MMVM_INVALIDATE_ENG12_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
#define MMVM_INVALIDATE_ENG12_ACK__SEMAPHORE__SHIFT 0x10
#define MMVM_INVALIDATE_ENG12_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
#define MMVM_INVALIDATE_ENG12_ACK__SEMAPHORE_MASK 0x00010000L
//MMVM_INVALIDATE_ENG13_ACK
#define MMVM_INVALIDATE_ENG13_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
#define MMVM_INVALIDATE_ENG13_ACK__SEMAPHORE__SHIFT 0x10
#define MMVM_INVALIDATE_ENG13_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
#define MMVM_INVALIDATE_ENG13_ACK__SEMAPHORE_MASK 0x00010000L
//MMVM_INVALIDATE_ENG14_ACK
#define MMVM_INVALIDATE_ENG14_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
#define MMVM_INVALIDATE_ENG14_ACK__SEMAPHORE__SHIFT 0x10
#define MMVM_INVALIDATE_ENG14_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
#define MMVM_INVALIDATE_ENG14_ACK__SEMAPHORE_MASK 0x00010000L
//MMVM_INVALIDATE_ENG15_ACK
#define MMVM_INVALIDATE_ENG15_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
#define MMVM_INVALIDATE_ENG15_ACK__SEMAPHORE__SHIFT 0x10
#define MMVM_INVALIDATE_ENG15_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
#define MMVM_INVALIDATE_ENG15_ACK__SEMAPHORE_MASK 0x00010000L
//MMVM_INVALIDATE_ENG16_ACK
#define MMVM_INVALIDATE_ENG16_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
#define MMVM_INVALIDATE_ENG16_ACK__SEMAPHORE__SHIFT 0x10
#define MMVM_INVALIDATE_ENG16_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
#define MMVM_INVALIDATE_ENG16_ACK__SEMAPHORE_MASK 0x00010000L
//MMVM_INVALIDATE_ENG17_ACK
#define MMVM_INVALIDATE_ENG17_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
#define MMVM_INVALIDATE_ENG17_ACK__SEMAPHORE__SHIFT 0x10
#define MMVM_INVALIDATE_ENG17_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
#define MMVM_INVALIDATE_ENG17_ACK__SEMAPHORE_MASK 0x00010000L
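/*
 * Illustrative sketch (example only): the usual request/ack handshake writes
 * the engine's REQ register and then polls the matching ACK register until
 * the VMID's ack bit (bits 15:0) comes back. example_wreg()/example_rreg()
 * are hypothetical register accessors, and the register offsets live in the
 * companion _offset header; neither is defined here.
 */
#if 0	/* example only -- not compiled */
extern void example_wreg(unsigned int reg, unsigned int val);	/* hypothetical */
extern unsigned int example_rreg(unsigned int reg);		/* hypothetical */

static inline void mmvm_invalidate_vmid_example(unsigned int req_reg,
						unsigned int ack_reg,
						unsigned int vmid,
						unsigned int req)
{
	example_wreg(req_reg, req);
	/* busy-wait until the engine acknowledges this VMID */
	while (!(example_rreg(ack_reg) & (1u << vmid)))
		;
}
#endif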
//MMVM_INVALIDATE_ENG0_ADDR_RANGE_LO32
#define MMVM_INVALIDATE_ENG0_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
#define MMVM_INVALIDATE_ENG0_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
#define MMVM_INVALIDATE_ENG0_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
#define MMVM_INVALIDATE_ENG0_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
//MMVM_INVALIDATE_ENG0_ADDR_RANGE_HI32
#define MMVM_INVALIDATE_ENG0_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
#define MMVM_INVALIDATE_ENG0_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
//MMVM_INVALIDATE_ENG1_ADDR_RANGE_LO32
#define MMVM_INVALIDATE_ENG1_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
#define MMVM_INVALIDATE_ENG1_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
#define MMVM_INVALIDATE_ENG1_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
#define MMVM_INVALIDATE_ENG1_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
//MMVM_INVALIDATE_ENG1_ADDR_RANGE_HI32
#define MMVM_INVALIDATE_ENG1_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
#define MMVM_INVALIDATE_ENG1_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
//MMVM_INVALIDATE_ENG2_ADDR_RANGE_LO32
#define MMVM_INVALIDATE_ENG2_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
#define MMVM_INVALIDATE_ENG2_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
#define MMVM_INVALIDATE_ENG2_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
#define MMVM_INVALIDATE_ENG2_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
//MMVM_INVALIDATE_ENG2_ADDR_RANGE_HI32
#define MMVM_INVALIDATE_ENG2_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
#define MMVM_INVALIDATE_ENG2_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
//MMVM_INVALIDATE_ENG3_ADDR_RANGE_LO32
#define MMVM_INVALIDATE_ENG3_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
#define MMVM_INVALIDATE_ENG3_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
#define MMVM_INVALIDATE_ENG3_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
#define MMVM_INVALIDATE_ENG3_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
//MMVM_INVALIDATE_ENG3_ADDR_RANGE_HI32
#define MMVM_INVALIDATE_ENG3_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
#define MMVM_INVALIDATE_ENG3_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
//MMVM_INVALIDATE_ENG4_ADDR_RANGE_LO32
#define MMVM_INVALIDATE_ENG4_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
#define MMVM_INVALIDATE_ENG4_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
#define MMVM_INVALIDATE_ENG4_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
#define MMVM_INVALIDATE_ENG4_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
//MMVM_INVALIDATE_ENG4_ADDR_RANGE_HI32
#define MMVM_INVALIDATE_ENG4_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
#define MMVM_INVALIDATE_ENG4_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
//MMVM_INVALIDATE_ENG5_ADDR_RANGE_LO32
#define MMVM_INVALIDATE_ENG5_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
#define MMVM_INVALIDATE_ENG5_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
#define MMVM_INVALIDATE_ENG5_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
#define MMVM_INVALIDATE_ENG5_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
//MMVM_INVALIDATE_ENG5_ADDR_RANGE_HI32
#define MMVM_INVALIDATE_ENG5_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
#define MMVM_INVALIDATE_ENG5_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
//MMVM_INVALIDATE_ENG6_ADDR_RANGE_LO32
#define MMVM_INVALIDATE_ENG6_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
#define MMVM_INVALIDATE_ENG6_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
#define MMVM_INVALIDATE_ENG6_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
#define MMVM_INVALIDATE_ENG6_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
//MMVM_INVALIDATE_ENG6_ADDR_RANGE_HI32
#define MMVM_INVALIDATE_ENG6_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
#define MMVM_INVALIDATE_ENG6_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
//MMVM_INVALIDATE_ENG7_ADDR_RANGE_LO32
#define MMVM_INVALIDATE_ENG7_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
#define MMVM_INVALIDATE_ENG7_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
#define MMVM_INVALIDATE_ENG7_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
#define MMVM_INVALIDATE_ENG7_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
//MMVM_INVALIDATE_ENG7_ADDR_RANGE_HI32
#define MMVM_INVALIDATE_ENG7_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
#define MMVM_INVALIDATE_ENG7_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
//MMVM_INVALIDATE_ENG8_ADDR_RANGE_LO32
#define MMVM_INVALIDATE_ENG8_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
#define MMVM_INVALIDATE_ENG8_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
#define MMVM_INVALIDATE_ENG8_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
#define MMVM_INVALIDATE_ENG8_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
//MMVM_INVALIDATE_ENG8_ADDR_RANGE_HI32
#define MMVM_INVALIDATE_ENG8_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
#define MMVM_INVALIDATE_ENG8_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
//MMVM_INVALIDATE_ENG9_ADDR_RANGE_LO32
#define MMVM_INVALIDATE_ENG9_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
#define MMVM_INVALIDATE_ENG9_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
#define MMVM_INVALIDATE_ENG9_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
#define MMVM_INVALIDATE_ENG9_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
//MMVM_INVALIDATE_ENG9_ADDR_RANGE_HI32
#define MMVM_INVALIDATE_ENG9_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
#define MMVM_INVALIDATE_ENG9_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
//MMVM_INVALIDATE_ENG10_ADDR_RANGE_LO32
#define MMVM_INVALIDATE_ENG10_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
#define MMVM_INVALIDATE_ENG10_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
#define MMVM_INVALIDATE_ENG10_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
#define MMVM_INVALIDATE_ENG10_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
//MMVM_INVALIDATE_ENG10_ADDR_RANGE_HI32
#define MMVM_INVALIDATE_ENG10_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
#define MMVM_INVALIDATE_ENG10_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
//MMVM_INVALIDATE_ENG11_ADDR_RANGE_LO32
#define MMVM_INVALIDATE_ENG11_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
#define MMVM_INVALIDATE_ENG11_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
#define MMVM_INVALIDATE_ENG11_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
#define MMVM_INVALIDATE_ENG11_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
//MMVM_INVALIDATE_ENG11_ADDR_RANGE_HI32
#define MMVM_INVALIDATE_ENG11_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
#define MMVM_INVALIDATE_ENG11_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
//MMVM_INVALIDATE_ENG12_ADDR_RANGE_LO32
#define MMVM_INVALIDATE_ENG12_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
#define MMVM_INVALIDATE_ENG12_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
#define MMVM_INVALIDATE_ENG12_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
#define MMVM_INVALIDATE_ENG12_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
//MMVM_INVALIDATE_ENG12_ADDR_RANGE_HI32
#define MMVM_INVALIDATE_ENG12_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
#define MMVM_INVALIDATE_ENG12_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
//MMVM_INVALIDATE_ENG13_ADDR_RANGE_LO32
#define MMVM_INVALIDATE_ENG13_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
#define MMVM_INVALIDATE_ENG13_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
#define MMVM_INVALIDATE_ENG13_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
#define MMVM_INVALIDATE_ENG13_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
//MMVM_INVALIDATE_ENG13_ADDR_RANGE_HI32
#define MMVM_INVALIDATE_ENG13_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
#define MMVM_INVALIDATE_ENG13_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
//MMVM_INVALIDATE_ENG14_ADDR_RANGE_LO32
#define MMVM_INVALIDATE_ENG14_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
#define MMVM_INVALIDATE_ENG14_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
#define MMVM_INVALIDATE_ENG14_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
#define MMVM_INVALIDATE_ENG14_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
//MMVM_INVALIDATE_ENG14_ADDR_RANGE_HI32
#define MMVM_INVALIDATE_ENG14_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
#define MMVM_INVALIDATE_ENG14_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
//MMVM_INVALIDATE_ENG15_ADDR_RANGE_LO32
#define MMVM_INVALIDATE_ENG15_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
#define MMVM_INVALIDATE_ENG15_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
#define MMVM_INVALIDATE_ENG15_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
#define MMVM_INVALIDATE_ENG15_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
//MMVM_INVALIDATE_ENG15_ADDR_RANGE_HI32
#define MMVM_INVALIDATE_ENG15_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
#define MMVM_INVALIDATE_ENG15_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
//MMVM_INVALIDATE_ENG16_ADDR_RANGE_LO32
#define MMVM_INVALIDATE_ENG16_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
#define MMVM_INVALIDATE_ENG16_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
#define MMVM_INVALIDATE_ENG16_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
#define MMVM_INVALIDATE_ENG16_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
//MMVM_INVALIDATE_ENG16_ADDR_RANGE_HI32
#define MMVM_INVALIDATE_ENG16_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
#define MMVM_INVALIDATE_ENG16_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
//MMVM_INVALIDATE_ENG17_ADDR_RANGE_LO32
#define MMVM_INVALIDATE_ENG17_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
#define MMVM_INVALIDATE_ENG17_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
#define MMVM_INVALIDATE_ENG17_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
#define MMVM_INVALIDATE_ENG17_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
//MMVM_INVALIDATE_ENG17_ADDR_RANGE_HI32
#define MMVM_INVALIDATE_ENG17_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
#define MMVM_INVALIDATE_ENG17_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
//MMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32
#define MMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
#define MMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32
#define MMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
#define MMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32
#define MMVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
#define MMVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32
#define MMVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
#define MMVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32
#define MMVM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
#define MMVM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32
#define MMVM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
#define MMVM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32
#define MMVM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
#define MMVM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32
#define MMVM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
#define MMVM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32
#define MMVM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
#define MMVM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32
#define MMVM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
#define MMVM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32
#define MMVM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
#define MMVM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32
#define MMVM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
#define MMVM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32
#define MMVM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
#define MMVM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32
#define MMVM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
#define MMVM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32
#define MMVM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
#define MMVM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32
#define MMVM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
#define MMVM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32
#define MMVM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
#define MMVM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32
#define MMVM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
#define MMVM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32
#define MMVM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
#define MMVM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32
#define MMVM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
#define MMVM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32
#define MMVM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
#define MMVM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32
#define MMVM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
#define MMVM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32
#define MMVM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
#define MMVM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32
#define MMVM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
#define MMVM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32
#define MMVM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
#define MMVM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32
#define MMVM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
#define MMVM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32
#define MMVM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
#define MMVM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32
#define MMVM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
#define MMVM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32
#define MMVM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
#define MMVM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32
#define MMVM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
#define MMVM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32
#define MMVM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
#define MMVM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32
#define MMVM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
#define MMVM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32
#define MMVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
#define MMVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32
#define MMVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
#define MMVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
//MMVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32
#define MMVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
#define MMVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32
#define MMVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
#define MMVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
//MMVM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32
#define MMVM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
#define MMVM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32
#define MMVM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
#define MMVM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
//MMVM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32
#define MMVM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
#define MMVM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32
#define MMVM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
#define MMVM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
//MMVM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32
#define MMVM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
#define MMVM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32
#define MMVM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
#define MMVM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
//MMVM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32
#define MMVM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
#define MMVM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32
#define MMVM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
#define MMVM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
//MMVM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32
#define MMVM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
#define MMVM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32
#define MMVM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
#define MMVM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
//MMVM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32
#define MMVM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
#define MMVM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32
#define MMVM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
#define MMVM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
//MMVM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32
#define MMVM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
#define MMVM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32
#define MMVM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
#define MMVM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
//MMVM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32
#define MMVM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
#define MMVM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32
#define MMVM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
#define MMVM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
//MMVM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32
#define MMVM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
#define MMVM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32
#define MMVM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
#define MMVM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
//MMVM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32
#define MMVM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
#define MMVM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32
#define MMVM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
#define MMVM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
//MMVM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32
#define MMVM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
#define MMVM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32
#define MMVM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
#define MMVM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
//MMVM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32
#define MMVM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
#define MMVM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32
#define MMVM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
#define MMVM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
//MMVM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32
#define MMVM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
#define MMVM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32
#define MMVM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
#define MMVM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
//MMVM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32
#define MMVM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
#define MMVM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32
#define MMVM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
#define MMVM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
//MMVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32
#define MMVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
#define MMVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32
#define MMVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
#define MMVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
//MMVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32
#define MMVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
#define MMVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32
#define MMVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
#define MMVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
//MMVM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32
#define MMVM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
#define MMVM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32
#define MMVM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
#define MMVM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
//MMVM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32
#define MMVM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
#define MMVM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32
#define MMVM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
#define MMVM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
//MMVM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32
#define MMVM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
#define MMVM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32
#define MMVM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
#define MMVM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
//MMVM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32
#define MMVM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
#define MMVM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32
#define MMVM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
#define MMVM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
//MMVM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32
#define MMVM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
#define MMVM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32
#define MMVM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
#define MMVM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
//MMVM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32
#define MMVM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
#define MMVM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32
#define MMVM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
#define MMVM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
//MMVM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32
#define MMVM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
#define MMVM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32
#define MMVM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
#define MMVM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
//MMVM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32
#define MMVM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
#define MMVM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32
#define MMVM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
#define MMVM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
//MMVM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32
#define MMVM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
#define MMVM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32
#define MMVM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
#define MMVM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
//MMVM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32
#define MMVM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
#define MMVM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32
#define MMVM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
#define MMVM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
//MMVM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32
#define MMVM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
#define MMVM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32
#define MMVM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
#define MMVM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
//MMVM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32
#define MMVM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
#define MMVM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32
#define MMVM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
#define MMVM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
//MMVM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32
#define MMVM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
#define MMVM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32
#define MMVM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
#define MMVM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
//MMVM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32
#define MMVM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
#define MMVM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
//MMVM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32
#define MMVM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
#define MMVM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
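/*
 * Illustrative sketch (not part of the generated register list): each
 * page-table start/end address is split across a LO32 register and a
 * 4-bit HI field, so the full 36-bit logical page number is typically
 * reassembled as below. The helper name is hypothetical.
 */
static inline unsigned long long
mmvm_ctx0_pt_start_addr_example(unsigned int lo32, unsigned int hi32)
{
	unsigned long long hi = (hi32 &
		MMVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK) >>
		MMVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT;

	return (hi << 32) | lo32;	/* HI4 supplies bits 35:32 */
}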
// addressBlock: mmhub_mmutcl2_mmvml2pldec
//MMMC_VM_L2_PERFCOUNTER0_CFG
#define MMMC_VM_L2_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
#define MMMC_VM_L2_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
#define MMMC_VM_L2_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
#define MMMC_VM_L2_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
#define MMMC_VM_L2_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
#define MMMC_VM_L2_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
#define MMMC_VM_L2_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
#define MMMC_VM_L2_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
#define MMMC_VM_L2_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
#define MMMC_VM_L2_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
//MMMC_VM_L2_PERFCOUNTER1_CFG
#define MMMC_VM_L2_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
#define MMMC_VM_L2_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
#define MMMC_VM_L2_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
#define MMMC_VM_L2_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
#define MMMC_VM_L2_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
#define MMMC_VM_L2_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
#define MMMC_VM_L2_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
#define MMMC_VM_L2_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
#define MMMC_VM_L2_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
#define MMMC_VM_L2_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
//MMMC_VM_L2_PERFCOUNTER2_CFG
#define MMMC_VM_L2_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0
#define MMMC_VM_L2_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8
#define MMMC_VM_L2_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18
#define MMMC_VM_L2_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c
#define MMMC_VM_L2_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d
#define MMMC_VM_L2_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL
#define MMMC_VM_L2_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L
#define MMMC_VM_L2_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L
#define MMMC_VM_L2_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L
#define MMMC_VM_L2_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L
//MMMC_VM_L2_PERFCOUNTER3_CFG
#define MMMC_VM_L2_PERFCOUNTER3_CFG__PERF_SEL__SHIFT 0x0
#define MMMC_VM_L2_PERFCOUNTER3_CFG__PERF_SEL_END__SHIFT 0x8
#define MMMC_VM_L2_PERFCOUNTER3_CFG__PERF_MODE__SHIFT 0x18
#define MMMC_VM_L2_PERFCOUNTER3_CFG__ENABLE__SHIFT 0x1c
#define MMMC_VM_L2_PERFCOUNTER3_CFG__CLEAR__SHIFT 0x1d
#define MMMC_VM_L2_PERFCOUNTER3_CFG__PERF_SEL_MASK 0x000000FFL
#define MMMC_VM_L2_PERFCOUNTER3_CFG__PERF_SEL_END_MASK 0x0000FF00L
#define MMMC_VM_L2_PERFCOUNTER3_CFG__PERF_MODE_MASK 0x0F000000L
#define MMMC_VM_L2_PERFCOUNTER3_CFG__ENABLE_MASK 0x10000000L
#define MMMC_VM_L2_PERFCOUNTER3_CFG__CLEAR_MASK 0x20000000L
//MMMC_VM_L2_PERFCOUNTER4_CFG
#define MMMC_VM_L2_PERFCOUNTER4_CFG__PERF_SEL__SHIFT 0x0
#define MMMC_VM_L2_PERFCOUNTER4_CFG__PERF_SEL_END__SHIFT 0x8
#define MMMC_VM_L2_PERFCOUNTER4_CFG__PERF_MODE__SHIFT 0x18
#define MMMC_VM_L2_PERFCOUNTER4_CFG__ENABLE__SHIFT 0x1c
#define MMMC_VM_L2_PERFCOUNTER4_CFG__CLEAR__SHIFT 0x1d
#define MMMC_VM_L2_PERFCOUNTER4_CFG__PERF_SEL_MASK 0x000000FFL
#define MMMC_VM_L2_PERFCOUNTER4_CFG__PERF_SEL_END_MASK 0x0000FF00L
#define MMMC_VM_L2_PERFCOUNTER4_CFG__PERF_MODE_MASK 0x0F000000L
#define MMMC_VM_L2_PERFCOUNTER4_CFG__ENABLE_MASK 0x10000000L
#define MMMC_VM_L2_PERFCOUNTER4_CFG__CLEAR_MASK 0x20000000L
//MMMC_VM_L2_PERFCOUNTER5_CFG
#define MMMC_VM_L2_PERFCOUNTER5_CFG__PERF_SEL__SHIFT 0x0
#define MMMC_VM_L2_PERFCOUNTER5_CFG__PERF_SEL_END__SHIFT 0x8
#define MMMC_VM_L2_PERFCOUNTER5_CFG__PERF_MODE__SHIFT 0x18
#define MMMC_VM_L2_PERFCOUNTER5_CFG__ENABLE__SHIFT 0x1c
#define MMMC_VM_L2_PERFCOUNTER5_CFG__CLEAR__SHIFT 0x1d
#define MMMC_VM_L2_PERFCOUNTER5_CFG__PERF_SEL_MASK 0x000000FFL
#define MMMC_VM_L2_PERFCOUNTER5_CFG__PERF_SEL_END_MASK 0x0000FF00L
#define MMMC_VM_L2_PERFCOUNTER5_CFG__PERF_MODE_MASK 0x0F000000L
#define MMMC_VM_L2_PERFCOUNTER5_CFG__ENABLE_MASK 0x10000000L
#define MMMC_VM_L2_PERFCOUNTER5_CFG__CLEAR_MASK 0x20000000L
//MMMC_VM_L2_PERFCOUNTER6_CFG
#define MMMC_VM_L2_PERFCOUNTER6_CFG__PERF_SEL__SHIFT 0x0
#define MMMC_VM_L2_PERFCOUNTER6_CFG__PERF_SEL_END__SHIFT 0x8
#define MMMC_VM_L2_PERFCOUNTER6_CFG__PERF_MODE__SHIFT 0x18
#define MMMC_VM_L2_PERFCOUNTER6_CFG__ENABLE__SHIFT 0x1c
#define MMMC_VM_L2_PERFCOUNTER6_CFG__CLEAR__SHIFT 0x1d
#define MMMC_VM_L2_PERFCOUNTER6_CFG__PERF_SEL_MASK 0x000000FFL
#define MMMC_VM_L2_PERFCOUNTER6_CFG__PERF_SEL_END_MASK 0x0000FF00L
#define MMMC_VM_L2_PERFCOUNTER6_CFG__PERF_MODE_MASK 0x0F000000L
#define MMMC_VM_L2_PERFCOUNTER6_CFG__ENABLE_MASK 0x10000000L
#define MMMC_VM_L2_PERFCOUNTER6_CFG__CLEAR_MASK 0x20000000L
//MMMC_VM_L2_PERFCOUNTER7_CFG
#define MMMC_VM_L2_PERFCOUNTER7_CFG__PERF_SEL__SHIFT 0x0
#define MMMC_VM_L2_PERFCOUNTER7_CFG__PERF_SEL_END__SHIFT 0x8
#define MMMC_VM_L2_PERFCOUNTER7_CFG__PERF_MODE__SHIFT 0x18
#define MMMC_VM_L2_PERFCOUNTER7_CFG__ENABLE__SHIFT 0x1c
#define MMMC_VM_L2_PERFCOUNTER7_CFG__CLEAR__SHIFT 0x1d
#define MMMC_VM_L2_PERFCOUNTER7_CFG__PERF_SEL_MASK 0x000000FFL
#define MMMC_VM_L2_PERFCOUNTER7_CFG__PERF_SEL_END_MASK 0x0000FF00L
#define MMMC_VM_L2_PERFCOUNTER7_CFG__PERF_MODE_MASK 0x0F000000L
#define MMMC_VM_L2_PERFCOUNTER7_CFG__ENABLE_MASK 0x10000000L
#define MMMC_VM_L2_PERFCOUNTER7_CFG__CLEAR_MASK 0x20000000L
//MMMC_VM_L2_PERFCOUNTER_RSLT_CNTL
#define MMMC_VM_L2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
#define MMMC_VM_L2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
#define MMMC_VM_L2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
#define MMMC_VM_L2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
#define MMMC_VM_L2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
#define MMMC_VM_L2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
#define MMMC_VM_L2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
#define MMMC_VM_L2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
#define MMMC_VM_L2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
#define MMMC_VM_L2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
#define MMMC_VM_L2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
#define MMMC_VM_L2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
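/*
 * Illustrative sketch (not part of the generated register list): a
 * perf-counter CFG word is normally built by shifting each field into
 * place and masking it, as below. The helper name is hypothetical;
 * writing the result to the register is left to the caller.
 */
static inline unsigned int
mmmc_vm_l2_perfcounter0_cfg_example(unsigned int perf_sel)
{
	return ((perf_sel << MMMC_VM_L2_PERFCOUNTER0_CFG__PERF_SEL__SHIFT) &
		MMMC_VM_L2_PERFCOUNTER0_CFG__PERF_SEL_MASK) |
	       MMMC_VM_L2_PERFCOUNTER0_CFG__ENABLE_MASK;	/* select event, enable counter */
}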
// addressBlock: mmhub_mmutcl2_mmvml2prdec
//MMMC_VM_L2_PERFCOUNTER_LO
#define MMMC_VM_L2_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
#define MMMC_VM_L2_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
//MMMC_VM_L2_PERFCOUNTER_HI
#define MMMC_VM_L2_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
#define MMMC_VM_L2_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
#define MMMC_VM_L2_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
#define MMMC_VM_L2_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
// addressBlock: mmhub_mmutcl2_mmvmsharedhvdec
//MMMC_VM_FB_SIZE_OFFSET_VF0
#define MMMC_VM_FB_SIZE_OFFSET_VF0__VF_FB_SIZE__SHIFT 0x0
#define MMMC_VM_FB_SIZE_OFFSET_VF0__VF_FB_OFFSET__SHIFT 0x10
#define MMMC_VM_FB_SIZE_OFFSET_VF0__VF_FB_SIZE_MASK 0x0000FFFFL
#define MMMC_VM_FB_SIZE_OFFSET_VF0__VF_FB_OFFSET_MASK 0xFFFF0000L
//MMMC_VM_FB_SIZE_OFFSET_VF1
#define MMMC_VM_FB_SIZE_OFFSET_VF1__VF_FB_SIZE__SHIFT 0x0
#define MMMC_VM_FB_SIZE_OFFSET_VF1__VF_FB_OFFSET__SHIFT 0x10
#define MMMC_VM_FB_SIZE_OFFSET_VF1__VF_FB_SIZE_MASK 0x0000FFFFL
#define MMMC_VM_FB_SIZE_OFFSET_VF1__VF_FB_OFFSET_MASK 0xFFFF0000L
//MMMC_VM_FB_SIZE_OFFSET_VF2
#define MMMC_VM_FB_SIZE_OFFSET_VF2__VF_FB_SIZE__SHIFT 0x0
#define MMMC_VM_FB_SIZE_OFFSET_VF2__VF_FB_OFFSET__SHIFT 0x10
#define MMMC_VM_FB_SIZE_OFFSET_VF2__VF_FB_SIZE_MASK 0x0000FFFFL
#define MMMC_VM_FB_SIZE_OFFSET_VF2__VF_FB_OFFSET_MASK 0xFFFF0000L
//MMMC_VM_FB_SIZE_OFFSET_VF3
#define MMMC_VM_FB_SIZE_OFFSET_VF3__VF_FB_SIZE__SHIFT 0x0
#define MMMC_VM_FB_SIZE_OFFSET_VF3__VF_FB_OFFSET__SHIFT 0x10
#define MMMC_VM_FB_SIZE_OFFSET_VF3__VF_FB_SIZE_MASK 0x0000FFFFL
#define MMMC_VM_FB_SIZE_OFFSET_VF3__VF_FB_OFFSET_MASK 0xFFFF0000L
//MMMC_VM_FB_SIZE_OFFSET_VF4
#define MMMC_VM_FB_SIZE_OFFSET_VF4__VF_FB_SIZE__SHIFT 0x0
#define MMMC_VM_FB_SIZE_OFFSET_VF4__VF_FB_OFFSET__SHIFT 0x10
#define MMMC_VM_FB_SIZE_OFFSET_VF4__VF_FB_SIZE_MASK 0x0000FFFFL
#define MMMC_VM_FB_SIZE_OFFSET_VF4__VF_FB_OFFSET_MASK 0xFFFF0000L
//MMMC_VM_FB_SIZE_OFFSET_VF5
#define MMMC_VM_FB_SIZE_OFFSET_VF5__VF_FB_SIZE__SHIFT 0x0
#define MMMC_VM_FB_SIZE_OFFSET_VF5__VF_FB_OFFSET__SHIFT 0x10
#define MMMC_VM_FB_SIZE_OFFSET_VF5__VF_FB_SIZE_MASK 0x0000FFFFL
#define MMMC_VM_FB_SIZE_OFFSET_VF5__VF_FB_OFFSET_MASK 0xFFFF0000L
//MMMC_VM_FB_SIZE_OFFSET_VF6
#define MMMC_VM_FB_SIZE_OFFSET_VF6__VF_FB_SIZE__SHIFT 0x0
#define MMMC_VM_FB_SIZE_OFFSET_VF6__VF_FB_OFFSET__SHIFT 0x10
#define MMMC_VM_FB_SIZE_OFFSET_VF6__VF_FB_SIZE_MASK 0x0000FFFFL
#define MMMC_VM_FB_SIZE_OFFSET_VF6__VF_FB_OFFSET_MASK 0xFFFF0000L
//MMMC_VM_FB_SIZE_OFFSET_VF7
#define MMMC_VM_FB_SIZE_OFFSET_VF7__VF_FB_SIZE__SHIFT 0x0
#define MMMC_VM_FB_SIZE_OFFSET_VF7__VF_FB_OFFSET__SHIFT 0x10
#define MMMC_VM_FB_SIZE_OFFSET_VF7__VF_FB_SIZE_MASK 0x0000FFFFL
#define MMMC_VM_FB_SIZE_OFFSET_VF7__VF_FB_OFFSET_MASK 0xFFFF0000L
//MMMC_VM_FB_SIZE_OFFSET_VF8
#define MMMC_VM_FB_SIZE_OFFSET_VF8__VF_FB_SIZE__SHIFT 0x0
#define MMMC_VM_FB_SIZE_OFFSET_VF8__VF_FB_OFFSET__SHIFT 0x10
#define MMMC_VM_FB_SIZE_OFFSET_VF8__VF_FB_SIZE_MASK 0x0000FFFFL
#define MMMC_VM_FB_SIZE_OFFSET_VF8__VF_FB_OFFSET_MASK 0xFFFF0000L
//MMMC_VM_FB_SIZE_OFFSET_VF9
#define MMMC_VM_FB_SIZE_OFFSET_VF9__VF_FB_SIZE__SHIFT 0x0
#define MMMC_VM_FB_SIZE_OFFSET_VF9__VF_FB_OFFSET__SHIFT 0x10
#define MMMC_VM_FB_SIZE_OFFSET_VF9__VF_FB_SIZE_MASK 0x0000FFFFL
#define MMMC_VM_FB_SIZE_OFFSET_VF9__VF_FB_OFFSET_MASK 0xFFFF0000L
//MMMC_VM_FB_SIZE_OFFSET_VF10
#define MMMC_VM_FB_SIZE_OFFSET_VF10__VF_FB_SIZE__SHIFT 0x0
#define MMMC_VM_FB_SIZE_OFFSET_VF10__VF_FB_OFFSET__SHIFT 0x10
#define MMMC_VM_FB_SIZE_OFFSET_VF10__VF_FB_SIZE_MASK 0x0000FFFFL
#define MMMC_VM_FB_SIZE_OFFSET_VF10__VF_FB_OFFSET_MASK 0xFFFF0000L
//MMMC_VM_FB_SIZE_OFFSET_VF11
#define MMMC_VM_FB_SIZE_OFFSET_VF11__VF_FB_SIZE__SHIFT 0x0
#define MMMC_VM_FB_SIZE_OFFSET_VF11__VF_FB_OFFSET__SHIFT 0x10
#define MMMC_VM_FB_SIZE_OFFSET_VF11__VF_FB_SIZE_MASK 0x0000FFFFL
#define MMMC_VM_FB_SIZE_OFFSET_VF11__VF_FB_OFFSET_MASK 0xFFFF0000L
//MMMC_VM_FB_SIZE_OFFSET_VF12
#define MMMC_VM_FB_SIZE_OFFSET_VF12__VF_FB_SIZE__SHIFT 0x0
#define MMMC_VM_FB_SIZE_OFFSET_VF12__VF_FB_OFFSET__SHIFT 0x10
#define MMMC_VM_FB_SIZE_OFFSET_VF12__VF_FB_SIZE_MASK 0x0000FFFFL
#define MMMC_VM_FB_SIZE_OFFSET_VF12__VF_FB_OFFSET_MASK 0xFFFF0000L
//MMMC_VM_FB_SIZE_OFFSET_VF13
#define MMMC_VM_FB_SIZE_OFFSET_VF13__VF_FB_SIZE__SHIFT 0x0
#define MMMC_VM_FB_SIZE_OFFSET_VF13__VF_FB_OFFSET__SHIFT 0x10
#define MMMC_VM_FB_SIZE_OFFSET_VF13__VF_FB_SIZE_MASK 0x0000FFFFL
#define MMMC_VM_FB_SIZE_OFFSET_VF13__VF_FB_OFFSET_MASK 0xFFFF0000L
//MMMC_VM_FB_SIZE_OFFSET_VF14
#define MMMC_VM_FB_SIZE_OFFSET_VF14__VF_FB_SIZE__SHIFT 0x0
#define MMMC_VM_FB_SIZE_OFFSET_VF14__VF_FB_OFFSET__SHIFT 0x10
#define MMMC_VM_FB_SIZE_OFFSET_VF14__VF_FB_SIZE_MASK 0x0000FFFFL
#define MMMC_VM_FB_SIZE_OFFSET_VF14__VF_FB_OFFSET_MASK 0xFFFF0000L
//MMMC_VM_FB_SIZE_OFFSET_VF15
#define MMMC_VM_FB_SIZE_OFFSET_VF15__VF_FB_SIZE__SHIFT 0x0
#define MMMC_VM_FB_SIZE_OFFSET_VF15__VF_FB_OFFSET__SHIFT 0x10
#define MMMC_VM_FB_SIZE_OFFSET_VF15__VF_FB_SIZE_MASK 0x0000FFFFL
#define MMMC_VM_FB_SIZE_OFFSET_VF15__VF_FB_OFFSET_MASK 0xFFFF0000L
//MMMC_VM_FB_SIZE_OFFSET_VF16
#define MMMC_VM_FB_SIZE_OFFSET_VF16__VF_FB_SIZE__SHIFT 0x0
#define MMMC_VM_FB_SIZE_OFFSET_VF16__VF_FB_OFFSET__SHIFT 0x10
#define MMMC_VM_FB_SIZE_OFFSET_VF16__VF_FB_SIZE_MASK 0x0000FFFFL
#define MMMC_VM_FB_SIZE_OFFSET_VF16__VF_FB_OFFSET_MASK 0xFFFF0000L
//MMMC_VM_FB_SIZE_OFFSET_VF17
#define MMMC_VM_FB_SIZE_OFFSET_VF17__VF_FB_SIZE__SHIFT 0x0
#define MMMC_VM_FB_SIZE_OFFSET_VF17__VF_FB_OFFSET__SHIFT 0x10
#define MMMC_VM_FB_SIZE_OFFSET_VF17__VF_FB_SIZE_MASK 0x0000FFFFL
#define MMMC_VM_FB_SIZE_OFFSET_VF17__VF_FB_OFFSET_MASK 0xFFFF0000L
//MMMC_VM_FB_SIZE_OFFSET_VF18
#define MMMC_VM_FB_SIZE_OFFSET_VF18__VF_FB_SIZE__SHIFT 0x0
#define MMMC_VM_FB_SIZE_OFFSET_VF18__VF_FB_OFFSET__SHIFT 0x10
#define MMMC_VM_FB_SIZE_OFFSET_VF18__VF_FB_SIZE_MASK 0x0000FFFFL
#define MMMC_VM_FB_SIZE_OFFSET_VF18__VF_FB_OFFSET_MASK 0xFFFF0000L
//MMMC_VM_FB_SIZE_OFFSET_VF19
#define MMMC_VM_FB_SIZE_OFFSET_VF19__VF_FB_SIZE__SHIFT 0x0
#define MMMC_VM_FB_SIZE_OFFSET_VF19__VF_FB_OFFSET__SHIFT 0x10
#define MMMC_VM_FB_SIZE_OFFSET_VF19__VF_FB_SIZE_MASK 0x0000FFFFL
#define MMMC_VM_FB_SIZE_OFFSET_VF19__VF_FB_OFFSET_MASK 0xFFFF0000L
//MMMC_VM_FB_SIZE_OFFSET_VF20
#define MMMC_VM_FB_SIZE_OFFSET_VF20__VF_FB_SIZE__SHIFT 0x0
#define MMMC_VM_FB_SIZE_OFFSET_VF20__VF_FB_OFFSET__SHIFT 0x10
#define MMMC_VM_FB_SIZE_OFFSET_VF20__VF_FB_SIZE_MASK 0x0000FFFFL
#define MMMC_VM_FB_SIZE_OFFSET_VF20__VF_FB_OFFSET_MASK 0xFFFF0000L
//MMMC_VM_FB_SIZE_OFFSET_VF21
#define MMMC_VM_FB_SIZE_OFFSET_VF21__VF_FB_SIZE__SHIFT 0x0
#define MMMC_VM_FB_SIZE_OFFSET_VF21__VF_FB_OFFSET__SHIFT 0x10
#define MMMC_VM_FB_SIZE_OFFSET_VF21__VF_FB_SIZE_MASK 0x0000FFFFL
#define MMMC_VM_FB_SIZE_OFFSET_VF21__VF_FB_OFFSET_MASK 0xFFFF0000L
//MMMC_VM_FB_SIZE_OFFSET_VF22
#define MMMC_VM_FB_SIZE_OFFSET_VF22__VF_FB_SIZE__SHIFT 0x0
#define MMMC_VM_FB_SIZE_OFFSET_VF22__VF_FB_OFFSET__SHIFT 0x10
#define MMMC_VM_FB_SIZE_OFFSET_VF22__VF_FB_SIZE_MASK 0x0000FFFFL
#define MMMC_VM_FB_SIZE_OFFSET_VF22__VF_FB_OFFSET_MASK 0xFFFF0000L
//MMMC_VM_FB_SIZE_OFFSET_VF23
#define MMMC_VM_FB_SIZE_OFFSET_VF23__VF_FB_SIZE__SHIFT 0x0
#define MMMC_VM_FB_SIZE_OFFSET_VF23__VF_FB_OFFSET__SHIFT 0x10
#define MMMC_VM_FB_SIZE_OFFSET_VF23__VF_FB_SIZE_MASK 0x0000FFFFL
#define MMMC_VM_FB_SIZE_OFFSET_VF23__VF_FB_OFFSET_MASK 0xFFFF0000L
//MMMC_VM_FB_SIZE_OFFSET_VF24
#define MMMC_VM_FB_SIZE_OFFSET_VF24__VF_FB_SIZE__SHIFT 0x0
#define MMMC_VM_FB_SIZE_OFFSET_VF24__VF_FB_OFFSET__SHIFT 0x10
#define MMMC_VM_FB_SIZE_OFFSET_VF24__VF_FB_SIZE_MASK 0x0000FFFFL
#define MMMC_VM_FB_SIZE_OFFSET_VF24__VF_FB_OFFSET_MASK 0xFFFF0000L
//MMMC_VM_FB_SIZE_OFFSET_VF25
#define MMMC_VM_FB_SIZE_OFFSET_VF25__VF_FB_SIZE__SHIFT 0x0
#define MMMC_VM_FB_SIZE_OFFSET_VF25__VF_FB_OFFSET__SHIFT 0x10
#define MMMC_VM_FB_SIZE_OFFSET_VF25__VF_FB_SIZE_MASK 0x0000FFFFL
#define MMMC_VM_FB_SIZE_OFFSET_VF25__VF_FB_OFFSET_MASK 0xFFFF0000L
//MMMC_VM_FB_SIZE_OFFSET_VF26
#define MMMC_VM_FB_SIZE_OFFSET_VF26__VF_FB_SIZE__SHIFT 0x0
#define MMMC_VM_FB_SIZE_OFFSET_VF26__VF_FB_OFFSET__SHIFT 0x10
#define MMMC_VM_FB_SIZE_OFFSET_VF26__VF_FB_SIZE_MASK 0x0000FFFFL
#define MMMC_VM_FB_SIZE_OFFSET_VF26__VF_FB_OFFSET_MASK 0xFFFF0000L
//MMMC_VM_FB_SIZE_OFFSET_VF27
#define MMMC_VM_FB_SIZE_OFFSET_VF27__VF_FB_SIZE__SHIFT 0x0
#define MMMC_VM_FB_SIZE_OFFSET_VF27__VF_FB_OFFSET__SHIFT 0x10
#define MMMC_VM_FB_SIZE_OFFSET_VF27__VF_FB_SIZE_MASK 0x0000FFFFL
#define MMMC_VM_FB_SIZE_OFFSET_VF27__VF_FB_OFFSET_MASK 0xFFFF0000L
//MMMC_VM_FB_SIZE_OFFSET_VF28
#define MMMC_VM_FB_SIZE_OFFSET_VF28__VF_FB_SIZE__SHIFT 0x0
#define MMMC_VM_FB_SIZE_OFFSET_VF28__VF_FB_OFFSET__SHIFT 0x10
#define MMMC_VM_FB_SIZE_OFFSET_VF28__VF_FB_SIZE_MASK 0x0000FFFFL
#define MMMC_VM_FB_SIZE_OFFSET_VF28__VF_FB_OFFSET_MASK 0xFFFF0000L
//MMMC_VM_FB_SIZE_OFFSET_VF29
#define MMMC_VM_FB_SIZE_OFFSET_VF29__VF_FB_SIZE__SHIFT 0x0
#define MMMC_VM_FB_SIZE_OFFSET_VF29__VF_FB_OFFSET__SHIFT 0x10
#define MMMC_VM_FB_SIZE_OFFSET_VF29__VF_FB_SIZE_MASK 0x0000FFFFL
#define MMMC_VM_FB_SIZE_OFFSET_VF29__VF_FB_OFFSET_MASK 0xFFFF0000L
//MMMC_VM_FB_SIZE_OFFSET_VF30
#define MMMC_VM_FB_SIZE_OFFSET_VF30__VF_FB_SIZE__SHIFT 0x0
#define MMMC_VM_FB_SIZE_OFFSET_VF30__VF_FB_OFFSET__SHIFT 0x10
#define MMMC_VM_FB_SIZE_OFFSET_VF30__VF_FB_SIZE_MASK 0x0000FFFFL
#define MMMC_VM_FB_SIZE_OFFSET_VF30__VF_FB_OFFSET_MASK 0xFFFF0000L
//MMMC_VM_FB_SIZE_OFFSET_VF31
#define MMMC_VM_FB_SIZE_OFFSET_VF31__VF_FB_SIZE__SHIFT 0x0
#define MMMC_VM_FB_SIZE_OFFSET_VF31__VF_FB_OFFSET__SHIFT 0x10
#define MMMC_VM_FB_SIZE_OFFSET_VF31__VF_FB_SIZE_MASK 0x0000FFFFL
#define MMMC_VM_FB_SIZE_OFFSET_VF31__VF_FB_OFFSET_MASK 0xFFFF0000L
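/*
 * Illustrative sketch (not part of the generated register list): each
 * MC_VM_FB_SIZE_OFFSET_VFn register packs a 16-bit framebuffer size and
 * a 16-bit offset, decoded as below. The helper names are hypothetical.
 */
static inline unsigned int mmmc_vm_vf0_fb_size_example(unsigned int reg)
{
	return (reg & MMMC_VM_FB_SIZE_OFFSET_VF0__VF_FB_SIZE_MASK) >>
	       MMMC_VM_FB_SIZE_OFFSET_VF0__VF_FB_SIZE__SHIFT;
}

static inline unsigned int mmmc_vm_vf0_fb_offset_example(unsigned int reg)
{
	return (reg & MMMC_VM_FB_SIZE_OFFSET_VF0__VF_FB_OFFSET_MASK) >>
	       MMMC_VM_FB_SIZE_OFFSET_VF0__VF_FB_OFFSET__SHIFT;
}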
//MMVM_IOMMU_MMIO_CNTRL_1
#define MMVM_IOMMU_MMIO_CNTRL_1__MARC_EN__SHIFT 0x8
#define MMVM_IOMMU_MMIO_CNTRL_1__MARC_EN_MASK 0x00000100L
//MMMC_VM_MARC_BASE_LO_0
#define MMMC_VM_MARC_BASE_LO_0__MARC_BASE_LO_0__SHIFT 0xc
#define MMMC_VM_MARC_BASE_LO_0__MARC_BASE_LO_0_MASK 0xFFFFF000L
//MMMC_VM_MARC_BASE_LO_1
#define MMMC_VM_MARC_BASE_LO_1__MARC_BASE_LO_1__SHIFT 0xc
#define MMMC_VM_MARC_BASE_LO_1__MARC_BASE_LO_1_MASK 0xFFFFF000L
//MMMC_VM_MARC_BASE_LO_2
#define MMMC_VM_MARC_BASE_LO_2__MARC_BASE_LO_2__SHIFT 0xc
#define MMMC_VM_MARC_BASE_LO_2__MARC_BASE_LO_2_MASK 0xFFFFF000L
//MMMC_VM_MARC_BASE_LO_3
#define MMMC_VM_MARC_BASE_LO_3__MARC_BASE_LO_3__SHIFT 0xc
#define MMMC_VM_MARC_BASE_LO_3__MARC_BASE_LO_3_MASK 0xFFFFF000L
//MMMC_VM_MARC_BASE_HI_0
#define MMMC_VM_MARC_BASE_HI_0__MARC_BASE_HI_0__SHIFT 0x0
#define MMMC_VM_MARC_BASE_HI_0__MARC_BASE_HI_0_MASK 0x000FFFFFL
//MMMC_VM_MARC_BASE_HI_1
#define MMMC_VM_MARC_BASE_HI_1__MARC_BASE_HI_1__SHIFT 0x0
#define MMMC_VM_MARC_BASE_HI_1__MARC_BASE_HI_1_MASK 0x000FFFFFL
//MMMC_VM_MARC_BASE_HI_2
#define MMMC_VM_MARC_BASE_HI_2__MARC_BASE_HI_2__SHIFT 0x0
#define MMMC_VM_MARC_BASE_HI_2__MARC_BASE_HI_2_MASK 0x000FFFFFL
//MMMC_VM_MARC_BASE_HI_3
#define MMMC_VM_MARC_BASE_HI_3__MARC_BASE_HI_3__SHIFT 0x0
#define MMMC_VM_MARC_BASE_HI_3__MARC_BASE_HI_3_MASK 0x000FFFFFL
//MMMC_VM_MARC_RELOC_LO_0
#define MMMC_VM_MARC_RELOC_LO_0__MARC_ENABLE_0__SHIFT 0x0
#define MMMC_VM_MARC_RELOC_LO_0__MARC_READONLY_0__SHIFT 0x1
#define MMMC_VM_MARC_RELOC_LO_0__MARC_RELOC_LO_0__SHIFT 0xc
#define MMMC_VM_MARC_RELOC_LO_0__MARC_ENABLE_0_MASK 0x00000001L
#define MMMC_VM_MARC_RELOC_LO_0__MARC_READONLY_0_MASK 0x00000002L
#define MMMC_VM_MARC_RELOC_LO_0__MARC_RELOC_LO_0_MASK 0xFFFFF000L
//MMMC_VM_MARC_RELOC_LO_1
#define MMMC_VM_MARC_RELOC_LO_1__MARC_ENABLE_1__SHIFT 0x0
#define MMMC_VM_MARC_RELOC_LO_1__MARC_READONLY_1__SHIFT 0x1
#define MMMC_VM_MARC_RELOC_LO_1__MARC_RELOC_LO_1__SHIFT 0xc
#define MMMC_VM_MARC_RELOC_LO_1__MARC_ENABLE_1_MASK 0x00000001L
#define MMMC_VM_MARC_RELOC_LO_1__MARC_READONLY_1_MASK 0x00000002L
#define MMMC_VM_MARC_RELOC_LO_1__MARC_RELOC_LO_1_MASK 0xFFFFF000L
//MMMC_VM_MARC_RELOC_LO_2
#define MMMC_VM_MARC_RELOC_LO_2__MARC_ENABLE_2__SHIFT 0x0
#define MMMC_VM_MARC_RELOC_LO_2__MARC_READONLY_2__SHIFT 0x1
#define MMMC_VM_MARC_RELOC_LO_2__MARC_RELOC_LO_2__SHIFT 0xc
#define MMMC_VM_MARC_RELOC_LO_2__MARC_ENABLE_2_MASK 0x00000001L
#define MMMC_VM_MARC_RELOC_LO_2__MARC_READONLY_2_MASK 0x00000002L
#define MMMC_VM_MARC_RELOC_LO_2__MARC_RELOC_LO_2_MASK 0xFFFFF000L
//MMMC_VM_MARC_RELOC_LO_3
#define MMMC_VM_MARC_RELOC_LO_3__MARC_ENABLE_3__SHIFT 0x0
#define MMMC_VM_MARC_RELOC_LO_3__MARC_READONLY_3__SHIFT 0x1
#define MMMC_VM_MARC_RELOC_LO_3__MARC_RELOC_LO_3__SHIFT 0xc
#define MMMC_VM_MARC_RELOC_LO_3__MARC_ENABLE_3_MASK 0x00000001L
#define MMMC_VM_MARC_RELOC_LO_3__MARC_READONLY_3_MASK 0x00000002L
#define MMMC_VM_MARC_RELOC_LO_3__MARC_RELOC_LO_3_MASK 0xFFFFF000L
//MMMC_VM_MARC_RELOC_HI_0
#define MMMC_VM_MARC_RELOC_HI_0__MARC_RELOC_HI_0__SHIFT 0x0
#define MMMC_VM_MARC_RELOC_HI_0__MARC_RELOC_HI_0_MASK 0x000FFFFFL
//MMMC_VM_MARC_RELOC_HI_1
#define MMMC_VM_MARC_RELOC_HI_1__MARC_RELOC_HI_1__SHIFT 0x0
#define MMMC_VM_MARC_RELOC_HI_1__MARC_RELOC_HI_1_MASK 0x000FFFFFL
//MMMC_VM_MARC_RELOC_HI_2
#define MMMC_VM_MARC_RELOC_HI_2__MARC_RELOC_HI_2__SHIFT 0x0
#define MMMC_VM_MARC_RELOC_HI_2__MARC_RELOC_HI_2_MASK 0x000FFFFFL
//MMMC_VM_MARC_RELOC_HI_3
#define MMMC_VM_MARC_RELOC_HI_3__MARC_RELOC_HI_3__SHIFT 0x0
#define MMMC_VM_MARC_RELOC_HI_3__MARC_RELOC_HI_3_MASK 0x000FFFFFL
//MMMC_VM_MARC_LEN_LO_0
#define MMMC_VM_MARC_LEN_LO_0__MARC_LEN_LO_0__SHIFT 0xc
#define MMMC_VM_MARC_LEN_LO_0__MARC_LEN_LO_0_MASK 0xFFFFF000L
//MMMC_VM_MARC_LEN_LO_1
#define MMMC_VM_MARC_LEN_LO_1__MARC_LEN_LO_1__SHIFT 0xc
#define MMMC_VM_MARC_LEN_LO_1__MARC_LEN_LO_1_MASK 0xFFFFF000L
//MMMC_VM_MARC_LEN_LO_2
#define MMMC_VM_MARC_LEN_LO_2__MARC_LEN_LO_2__SHIFT 0xc
#define MMMC_VM_MARC_LEN_LO_2__MARC_LEN_LO_2_MASK 0xFFFFF000L
//MMMC_VM_MARC_LEN_LO_3
#define MMMC_VM_MARC_LEN_LO_3__MARC_LEN_LO_3__SHIFT 0xc
#define MMMC_VM_MARC_LEN_LO_3__MARC_LEN_LO_3_MASK 0xFFFFF000L
//MMMC_VM_MARC_LEN_HI_0
#define MMMC_VM_MARC_LEN_HI_0__MARC_LEN_HI_0__SHIFT 0x0
#define MMMC_VM_MARC_LEN_HI_0__MARC_LEN_HI_0_MASK 0x000FFFFFL
//MMMC_VM_MARC_LEN_HI_1
#define MMMC_VM_MARC_LEN_HI_1__MARC_LEN_HI_1__SHIFT 0x0
#define MMMC_VM_MARC_LEN_HI_1__MARC_LEN_HI_1_MASK 0x000FFFFFL
//MMMC_VM_MARC_LEN_HI_2
#define MMMC_VM_MARC_LEN_HI_2__MARC_LEN_HI_2__SHIFT 0x0
#define MMMC_VM_MARC_LEN_HI_2__MARC_LEN_HI_2_MASK 0x000FFFFFL
//MMMC_VM_MARC_LEN_HI_3
#define MMMC_VM_MARC_LEN_HI_3__MARC_LEN_HI_3__SHIFT 0x0
#define MMMC_VM_MARC_LEN_HI_3__MARC_LEN_HI_3_MASK 0x000FFFFFL
//MMVM_IOMMU_CONTROL_REGISTER
#define MMVM_IOMMU_CONTROL_REGISTER__IOMMUEN__SHIFT 0x0
#define MMVM_IOMMU_CONTROL_REGISTER__IOMMUEN_MASK 0x00000001L
//MMVM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER
#define MMVM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER__PERFOPTEN__SHIFT 0xd
#define MMVM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER__PERFOPTEN_MASK 0x00002000L
//MMVM_PCIE_ATS_CNTL
#define MMVM_PCIE_ATS_CNTL__STU__SHIFT 0x10
#define MMVM_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0x1f
#define MMVM_PCIE_ATS_CNTL__STU_MASK 0x001F0000L
#define MMVM_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x80000000L
//MMVM_PCIE_ATS_CNTL_VF_0
#define MMVM_PCIE_ATS_CNTL_VF_0__ATC_ENABLE__SHIFT 0x1f
#define MMVM_PCIE_ATS_CNTL_VF_0__ATC_ENABLE_MASK 0x80000000L
//MMVM_PCIE_ATS_CNTL_VF_1
#define MMVM_PCIE_ATS_CNTL_VF_1__ATC_ENABLE__SHIFT 0x1f
#define MMVM_PCIE_ATS_CNTL_VF_1__ATC_ENABLE_MASK 0x80000000L
//MMVM_PCIE_ATS_CNTL_VF_2
#define MMVM_PCIE_ATS_CNTL_VF_2__ATC_ENABLE__SHIFT 0x1f
#define MMVM_PCIE_ATS_CNTL_VF_2__ATC_ENABLE_MASK 0x80000000L
//MMVM_PCIE_ATS_CNTL_VF_3
#define MMVM_PCIE_ATS_CNTL_VF_3__ATC_ENABLE__SHIFT 0x1f
#define MMVM_PCIE_ATS_CNTL_VF_3__ATC_ENABLE_MASK 0x80000000L
//MMVM_PCIE_ATS_CNTL_VF_4
#define MMVM_PCIE_ATS_CNTL_VF_4__ATC_ENABLE__SHIFT 0x1f
#define MMVM_PCIE_ATS_CNTL_VF_4__ATC_ENABLE_MASK 0x80000000L
//MMVM_PCIE_ATS_CNTL_VF_5
#define MMVM_PCIE_ATS_CNTL_VF_5__ATC_ENABLE__SHIFT 0x1f
#define MMVM_PCIE_ATS_CNTL_VF_5__ATC_ENABLE_MASK 0x80000000L
//MMVM_PCIE_ATS_CNTL_VF_6
#define MMVM_PCIE_ATS_CNTL_VF_6__ATC_ENABLE__SHIFT 0x1f
#define MMVM_PCIE_ATS_CNTL_VF_6__ATC_ENABLE_MASK 0x80000000L
//MMVM_PCIE_ATS_CNTL_VF_7
#define MMVM_PCIE_ATS_CNTL_VF_7__ATC_ENABLE__SHIFT 0x1f
#define MMVM_PCIE_ATS_CNTL_VF_7__ATC_ENABLE_MASK 0x80000000L
//MMVM_PCIE_ATS_CNTL_VF_8
#define MMVM_PCIE_ATS_CNTL_VF_8__ATC_ENABLE__SHIFT 0x1f
#define MMVM_PCIE_ATS_CNTL_VF_8__ATC_ENABLE_MASK 0x80000000L
//MMVM_PCIE_ATS_CNTL_VF_9
#define MMVM_PCIE_ATS_CNTL_VF_9__ATC_ENABLE__SHIFT 0x1f
#define MMVM_PCIE_ATS_CNTL_VF_9__ATC_ENABLE_MASK 0x80000000L
//MMVM_PCIE_ATS_CNTL_VF_10
#define MMVM_PCIE_ATS_CNTL_VF_10__ATC_ENABLE__SHIFT 0x1f
#define MMVM_PCIE_ATS_CNTL_VF_10__ATC_ENABLE_MASK 0x80000000L
//MMVM_PCIE_ATS_CNTL_VF_11
#define MMVM_PCIE_ATS_CNTL_VF_11__ATC_ENABLE__SHIFT 0x1f
#define MMVM_PCIE_ATS_CNTL_VF_11__ATC_ENABLE_MASK 0x80000000L
//MMVM_PCIE_ATS_CNTL_VF_12
#define MMVM_PCIE_ATS_CNTL_VF_12__ATC_ENABLE__SHIFT 0x1f
#define MMVM_PCIE_ATS_CNTL_VF_12__ATC_ENABLE_MASK 0x80000000L
//MMVM_PCIE_ATS_CNTL_VF_13
#define MMVM_PCIE_ATS_CNTL_VF_13__ATC_ENABLE__SHIFT 0x1f
#define MMVM_PCIE_ATS_CNTL_VF_13__ATC_ENABLE_MASK 0x80000000L
//MMVM_PCIE_ATS_CNTL_VF_14
#define MMVM_PCIE_ATS_CNTL_VF_14__ATC_ENABLE__SHIFT 0x1f
#define MMVM_PCIE_ATS_CNTL_VF_14__ATC_ENABLE_MASK 0x80000000L
//MMVM_PCIE_ATS_CNTL_VF_15
#define MMVM_PCIE_ATS_CNTL_VF_15__ATC_ENABLE__SHIFT 0x1f
#define MMVM_PCIE_ATS_CNTL_VF_15__ATC_ENABLE_MASK 0x80000000L
//MMVM_PCIE_ATS_CNTL_VF_16
#define MMVM_PCIE_ATS_CNTL_VF_16__ATC_ENABLE__SHIFT 0x1f
#define MMVM_PCIE_ATS_CNTL_VF_16__ATC_ENABLE_MASK 0x80000000L
//MMVM_PCIE_ATS_CNTL_VF_17
#define MMVM_PCIE_ATS_CNTL_VF_17__ATC_ENABLE__SHIFT 0x1f
#define MMVM_PCIE_ATS_CNTL_VF_17__ATC_ENABLE_MASK 0x80000000L
//MMVM_PCIE_ATS_CNTL_VF_18
#define MMVM_PCIE_ATS_CNTL_VF_18__ATC_ENABLE__SHIFT 0x1f
#define MMVM_PCIE_ATS_CNTL_VF_18__ATC_ENABLE_MASK 0x80000000L
//MMVM_PCIE_ATS_CNTL_VF_19
#define MMVM_PCIE_ATS_CNTL_VF_19__ATC_ENABLE__SHIFT 0x1f
#define MMVM_PCIE_ATS_CNTL_VF_19__ATC_ENABLE_MASK 0x80000000L
//MMVM_PCIE_ATS_CNTL_VF_20
#define MMVM_PCIE_ATS_CNTL_VF_20__ATC_ENABLE__SHIFT 0x1f
#define MMVM_PCIE_ATS_CNTL_VF_20__ATC_ENABLE_MASK 0x80000000L
//MMVM_PCIE_ATS_CNTL_VF_21
#define MMVM_PCIE_ATS_CNTL_VF_21__ATC_ENABLE__SHIFT 0x1f
#define MMVM_PCIE_ATS_CNTL_VF_21__ATC_ENABLE_MASK 0x80000000L
//MMVM_PCIE_ATS_CNTL_VF_22
#define MMVM_PCIE_ATS_CNTL_VF_22__ATC_ENABLE__SHIFT 0x1f
#define MMVM_PCIE_ATS_CNTL_VF_22__ATC_ENABLE_MASK 0x80000000L
//MMVM_PCIE_ATS_CNTL_VF_23
#define MMVM_PCIE_ATS_CNTL_VF_23__ATC_ENABLE__SHIFT 0x1f
#define MMVM_PCIE_ATS_CNTL_VF_23__ATC_ENABLE_MASK 0x80000000L
//MMVM_PCIE_ATS_CNTL_VF_24
#define MMVM_PCIE_ATS_CNTL_VF_24__ATC_ENABLE__SHIFT 0x1f
#define MMVM_PCIE_ATS_CNTL_VF_24__ATC_ENABLE_MASK 0x80000000L
//MMVM_PCIE_ATS_CNTL_VF_25
#define MMVM_PCIE_ATS_CNTL_VF_25__ATC_ENABLE__SHIFT 0x1f
#define MMVM_PCIE_ATS_CNTL_VF_25__ATC_ENABLE_MASK 0x80000000L
//MMVM_PCIE_ATS_CNTL_VF_26
#define MMVM_PCIE_ATS_CNTL_VF_26__ATC_ENABLE__SHIFT 0x1f
#define MMVM_PCIE_ATS_CNTL_VF_26__ATC_ENABLE_MASK 0x80000000L
//MMVM_PCIE_ATS_CNTL_VF_27
#define MMVM_PCIE_ATS_CNTL_VF_27__ATC_ENABLE__SHIFT 0x1f
#define MMVM_PCIE_ATS_CNTL_VF_27__ATC_ENABLE_MASK 0x80000000L
//MMVM_PCIE_ATS_CNTL_VF_28
#define MMVM_PCIE_ATS_CNTL_VF_28__ATC_ENABLE__SHIFT 0x1f
#define MMVM_PCIE_ATS_CNTL_VF_28__ATC_ENABLE_MASK 0x80000000L
//MMVM_PCIE_ATS_CNTL_VF_29
#define MMVM_PCIE_ATS_CNTL_VF_29__ATC_ENABLE__SHIFT 0x1f
#define MMVM_PCIE_ATS_CNTL_VF_29__ATC_ENABLE_MASK 0x80000000L
//MMVM_PCIE_ATS_CNTL_VF_30
#define MMVM_PCIE_ATS_CNTL_VF_30__ATC_ENABLE__SHIFT 0x1f
#define MMVM_PCIE_ATS_CNTL_VF_30__ATC_ENABLE_MASK 0x80000000L
//MMVM_PCIE_ATS_CNTL_VF_31
#define MMVM_PCIE_ATS_CNTL_VF_31__ATC_ENABLE__SHIFT 0x1f
#define MMVM_PCIE_ATS_CNTL_VF_31__ATC_ENABLE_MASK 0x80000000L
//MMUTCL2_CGTT_CLK_CTRL
#define MMUTCL2_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
#define MMUTCL2_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
#define MMUTCL2_CGTT_CLK_CTRL__SOFT_OVERRIDE_EXTRA__SHIFT 0xc
#define MMUTCL2_CGTT_CLK_CTRL__MGLS_OVERRIDE__SHIFT 0xf
#define MMUTCL2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x10
#define MMUTCL2_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x18
#define MMUTCL2_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
#define MMUTCL2_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
#define MMUTCL2_CGTT_CLK_CTRL__SOFT_OVERRIDE_EXTRA_MASK 0x00007000L
#define MMUTCL2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L
#define MMUTCL2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00FF0000L
#define MMUTCL2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0xFF000000L
//MMMC_SHARED_ACTIVE_FCN_ID
#define MMMC_SHARED_ACTIVE_FCN_ID__VFID__SHIFT 0x0
#define MMMC_SHARED_ACTIVE_FCN_ID__VF__SHIFT 0x1f
#define MMMC_SHARED_ACTIVE_FCN_ID__VFID_MASK 0x0000001FL
#define MMMC_SHARED_ACTIVE_FCN_ID__VF_MASK 0x80000000L
// addressBlock: mmhub_mmutcl2_mmvmsharedpfdec
//MMMC_VM_NB_MMIOBASE
#define MMMC_VM_NB_MMIOBASE__MMIOBASE__SHIFT 0x0
#define MMMC_VM_NB_MMIOBASE__MMIOBASE_MASK 0xFFFFFFFFL
//MMMC_VM_NB_MMIOLIMIT
#define MMMC_VM_NB_MMIOLIMIT__MMIOLIMIT__SHIFT 0x0
#define MMMC_VM_NB_MMIOLIMIT__MMIOLIMIT_MASK 0xFFFFFFFFL
//MMMC_VM_NB_PCI_CTRL
#define MMMC_VM_NB_PCI_CTRL__MMIOENABLE__SHIFT 0x17
#define MMMC_VM_NB_PCI_CTRL__MMIOENABLE_MASK 0x00800000L
//MMMC_VM_NB_PCI_ARB
#define MMMC_VM_NB_PCI_ARB__VGA_HOLE__SHIFT 0x3
#define MMMC_VM_NB_PCI_ARB__VGA_HOLE_MASK 0x00000008L
//MMMC_VM_NB_TOP_OF_DRAM_SLOT1
#define MMMC_VM_NB_TOP_OF_DRAM_SLOT1__TOP_OF_DRAM__SHIFT 0x17
#define MMMC_VM_NB_TOP_OF_DRAM_SLOT1__TOP_OF_DRAM_MASK 0xFF800000L
//MMMC_VM_NB_LOWER_TOP_OF_DRAM2
#define MMMC_VM_NB_LOWER_TOP_OF_DRAM2__ENABLE__SHIFT 0x0
#define MMMC_VM_NB_LOWER_TOP_OF_DRAM2__LOWER_TOM2__SHIFT 0x17
#define MMMC_VM_NB_LOWER_TOP_OF_DRAM2__ENABLE_MASK 0x00000001L
#define MMMC_VM_NB_LOWER_TOP_OF_DRAM2__LOWER_TOM2_MASK 0xFF800000L
//MMMC_VM_NB_UPPER_TOP_OF_DRAM2
#define MMMC_VM_NB_UPPER_TOP_OF_DRAM2__UPPER_TOM2__SHIFT 0x0
#define MMMC_VM_NB_UPPER_TOP_OF_DRAM2__UPPER_TOM2_MASK 0x00000FFFL
//MMMC_VM_FB_OFFSET
#define MMMC_VM_FB_OFFSET__FB_OFFSET__SHIFT 0x0
#define MMMC_VM_FB_OFFSET__FB_OFFSET_MASK 0x00FFFFFFL
//MMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB
#define MMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB__PHYSICAL_PAGE_NUMBER_LSB__SHIFT 0x0
#define MMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB__PHYSICAL_PAGE_NUMBER_LSB_MASK 0xFFFFFFFFL
//MMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB
#define MMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB__PHYSICAL_PAGE_NUMBER_MSB__SHIFT 0x0
#define MMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB__PHYSICAL_PAGE_NUMBER_MSB_MASK 0x0000000FL
//MMMC_VM_STEERING
#define MMMC_VM_STEERING__DEFAULT_STEERING__SHIFT 0x0
#define MMMC_VM_STEERING__DEFAULT_STEERING_MASK 0x00000003L
//MMMC_SHARED_VIRT_RESET_REQ
#define MMMC_SHARED_VIRT_RESET_REQ__VF__SHIFT 0x0
#define MMMC_SHARED_VIRT_RESET_REQ__PF__SHIFT 0x1f
#define MMMC_SHARED_VIRT_RESET_REQ__VF_MASK 0x7FFFFFFFL
#define MMMC_SHARED_VIRT_RESET_REQ__PF_MASK 0x80000000L
//MMMC_MEM_POWER_LS
#define MMMC_MEM_POWER_LS__LS_SETUP__SHIFT 0x0
#define MMMC_MEM_POWER_LS__LS_HOLD__SHIFT 0x6
#define MMMC_MEM_POWER_LS__LS_SETUP_MASK 0x0000003FL
#define MMMC_MEM_POWER_LS__LS_HOLD_MASK 0x00000FC0L
//MMMC_VM_CACHEABLE_DRAM_ADDRESS_START
#define MMMC_VM_CACHEABLE_DRAM_ADDRESS_START__ADDRESS__SHIFT 0x0
#define MMMC_VM_CACHEABLE_DRAM_ADDRESS_START__ADDRESS_MASK 0x000FFFFFL
//MMMC_VM_CACHEABLE_DRAM_ADDRESS_END
#define MMMC_VM_CACHEABLE_DRAM_ADDRESS_END__ADDRESS__SHIFT 0x0
#define MMMC_VM_CACHEABLE_DRAM_ADDRESS_END__ADDRESS_MASK 0x000FFFFFL
//MMMC_VM_APT_CNTL
#define MMMC_VM_APT_CNTL__FORCE_MTYPE_UC__SHIFT 0x0
#define MMMC_VM_APT_CNTL__DIRECT_SYSTEM_EN__SHIFT 0x1
#define MMMC_VM_APT_CNTL__FORCE_MTYPE_UC_MASK 0x00000001L
#define MMMC_VM_APT_CNTL__DIRECT_SYSTEM_EN_MASK 0x00000002L
//MMMC_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL
#define MMMC_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL__LOCK__SHIFT 0x0
#define MMMC_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL__LOCK_MASK 0x00000001L
//MMMC_VM_LOCAL_HBM_ADDRESS_START
#define MMMC_VM_LOCAL_HBM_ADDRESS_START__ADDRESS__SHIFT 0x0
#define MMMC_VM_LOCAL_HBM_ADDRESS_START__ADDRESS_MASK 0x000FFFFFL
//MMMC_VM_LOCAL_HBM_ADDRESS_END
#define MMMC_VM_LOCAL_HBM_ADDRESS_END__ADDRESS__SHIFT 0x0
#define MMMC_VM_LOCAL_HBM_ADDRESS_END__ADDRESS_MASK 0x000FFFFFL
//MMMC_SHARED_VIRT_RESET_REQ2
#define MMMC_SHARED_VIRT_RESET_REQ2__VF__SHIFT 0x0
#define MMMC_SHARED_VIRT_RESET_REQ2__VF_MASK 0x00000001L
// addressBlock: mmhub_mmutcl2_mmvmsharedvcdec
//MMMC_VM_FB_LOCATION_BASE
#define MMMC_VM_FB_LOCATION_BASE__FB_BASE__SHIFT 0x0
#define MMMC_VM_FB_LOCATION_BASE__FB_BASE_MASK 0x00FFFFFFL
//MMMC_VM_FB_LOCATION_TOP
#define MMMC_VM_FB_LOCATION_TOP__FB_TOP__SHIFT 0x0
#define MMMC_VM_FB_LOCATION_TOP__FB_TOP_MASK 0x00FFFFFFL
//MMMC_VM_AGP_TOP
#define MMMC_VM_AGP_TOP__AGP_TOP__SHIFT 0x0
#define MMMC_VM_AGP_TOP__AGP_TOP_MASK 0x00FFFFFFL
//MMMC_VM_AGP_BOT
#define MMMC_VM_AGP_BOT__AGP_BOT__SHIFT 0x0
#define MMMC_VM_AGP_BOT__AGP_BOT_MASK 0x00FFFFFFL
//MMMC_VM_AGP_BASE
#define MMMC_VM_AGP_BASE__AGP_BASE__SHIFT 0x0
#define MMMC_VM_AGP_BASE__AGP_BASE_MASK 0x00FFFFFFL
//MMMC_VM_SYSTEM_APERTURE_LOW_ADDR
#define MMMC_VM_SYSTEM_APERTURE_LOW_ADDR__LOGICAL_ADDR__SHIFT 0x0
#define MMMC_VM_SYSTEM_APERTURE_LOW_ADDR__LOGICAL_ADDR_MASK 0x3FFFFFFFL
//MMMC_VM_SYSTEM_APERTURE_HIGH_ADDR
#define MMMC_VM_SYSTEM_APERTURE_HIGH_ADDR__LOGICAL_ADDR__SHIFT 0x0
#define MMMC_VM_SYSTEM_APERTURE_HIGH_ADDR__LOGICAL_ADDR_MASK 0x3FFFFFFFL
//MMMC_VM_MX_L1_TLB_CNTL
#define MMMC_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB__SHIFT 0x0
#define MMMC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE__SHIFT 0x3
#define MMMC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT 0x5
#define MMMC_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL__SHIFT 0x6
#define MMMC_VM_MX_L1_TLB_CNTL__ECO_BITS__SHIFT 0x7
#define MMMC_VM_MX_L1_TLB_CNTL__MTYPE__SHIFT 0xb
#define MMMC_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB_MASK 0x00000001L
#define MMMC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK 0x00000018L
#define MMMC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS_MASK 0x00000020L
#define MMMC_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL_MASK 0x00000040L
#define MMMC_VM_MX_L1_TLB_CNTL__ECO_BITS_MASK 0x00000780L
#define MMMC_VM_MX_L1_TLB_CNTL__MTYPE_MASK 0x00003800L
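/*
 * Illustrative sketch (not part of the generated register list): the
 * L1 TLB control word combines several small fields; an "enable with a
 * given system access mode" value can be composed like this. The helper
 * name is hypothetical.
 */
static inline unsigned int
mmmc_vm_mx_l1_tlb_cntl_example(unsigned int access_mode)
{
	return MMMC_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB_MASK |
	       ((access_mode << MMMC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE__SHIFT) &
		MMMC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK);
}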
// addressBlock: mmhub_mmutcl2_mmatcl2pfcntrdec
//MM_ATC_L2_PERFCOUNTER_LO
#define MM_ATC_L2_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
#define MM_ATC_L2_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
//MM_ATC_L2_PERFCOUNTER_HI
#define MM_ATC_L2_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
#define MM_ATC_L2_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
#define MM_ATC_L2_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
#define MM_ATC_L2_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
// addressBlock: mmhub_mmutcl2_mmatcl2pfcntldec
//MM_ATC_L2_PERFCOUNTER0_CFG
#define MM_ATC_L2_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
#define MM_ATC_L2_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
#define MM_ATC_L2_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
#define MM_ATC_L2_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
#define MM_ATC_L2_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
#define MM_ATC_L2_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
#define MM_ATC_L2_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
#define MM_ATC_L2_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
#define MM_ATC_L2_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
#define MM_ATC_L2_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
//MM_ATC_L2_PERFCOUNTER1_CFG
#define MM_ATC_L2_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
#define MM_ATC_L2_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
#define MM_ATC_L2_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
#define MM_ATC_L2_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
#define MM_ATC_L2_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
#define MM_ATC_L2_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
#define MM_ATC_L2_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
#define MM_ATC_L2_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
#define MM_ATC_L2_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
#define MM_ATC_L2_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
//MM_ATC_L2_PERFCOUNTER_RSLT_CNTL
#define MM_ATC_L2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
#define MM_ATC_L2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
#define MM_ATC_L2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
#define MM_ATC_L2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
#define MM_ATC_L2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
#define MM_ATC_L2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
#define MM_ATC_L2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
#define MM_ATC_L2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
#define MM_ATC_L2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
#define MM_ATC_L2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
#define MM_ATC_L2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
#define MM_ATC_L2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
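/*
 * Illustrative helpers only (not part of the generated register header):
 * the SHIFT/MASK pairs above are typically combined like this to read or
 * update a register field. The macro names are hypothetical.
 */
#define MMMC_GET_FIELD(val, field) \
(((val) & field##_MASK) >> field##__SHIFT)
#define MMMC_SET_FIELD(val, field, x) \
(((val) & ~field##_MASK) | (((x) << field##__SHIFT) & field##_MASK))
/* e.g. MMMC_GET_FIELD(reg, MMMC_VM_FB_LOCATION_BASE__FB_BASE) extracts FB_BASE. */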
#endif
|
// SPDX-License-Identifier: GPL-2.0+
/*
* Device Tree file for QNAP TS41X with 6282 SoC
*
* Copyright (C) 2013, Andrew Lunn <[email protected]>
*/
/dts-v1/;
#include "kirkwood.dtsi"
#include "kirkwood-6282.dtsi"
#include "kirkwood-ts219.dtsi"
#include "kirkwood-ts419.dtsi"
&ethphy0 { reg = <0>; };
&ethphy1 { reg = <1>; };
&pciec { status = "okay"; };
&pcie1 { status = "okay"; };
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Guillemot Maxi Radio FM 2000 PCI radio card driver for Linux
* (C) 2001 Dimitromanolakis Apostolos <[email protected]>
*
* Based on the radio Maestro PCI driver. It actually uses the same radio
* chip but a different PCI controller.
*
* I didn't have any specs, so I reverse-engineered the protocol from
* the Windows driver (radio.dll).
*
* The card uses the TEA5757 chip, which includes a search function, but it
* is useless as I haven't found any way to read back the frequency. If
* anybody does, please mail me.
*
* For the pdf file see:
* http://www.nxp.com/acrobat_download2/expired_datasheets/TEA5757_5759_3.pdf
*
*
* CHANGES:
* 0.75b
* - better pci interface thanks to Francois Romieu <[email protected]>
*
* 0.75 Sun Feb 4 22:51:27 EET 2001
* - tidying up
* - removed support for multiple devices as it didn't work anyway
*
* BUGS:
* - card unmutes if you change frequency
*
* (c) 2006, 2007 by Mauro Carvalho Chehab <[email protected]>:
* - Conversion to V4L2 API
* - Uses video_ioctl2 for parsing and to add debug support
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/videodev2.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <media/drv-intf/tea575x.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-event.h>
MODULE_AUTHOR("Dimitromanolakis Apostolos, [email protected]");
MODULE_DESCRIPTION("Radio driver for the Guillemot Maxi Radio FM2000.");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
static int radio_nr = -1;
module_param(radio_nr, int, 0644);
MODULE_PARM_DESC(radio_nr, "Radio device number");
/* TEA5757 pin mappings */
static const int clk = 1, data = 2, wren = 4, mo_st = 8, power = 16;
static atomic_t maxiradio_instance = ATOMIC_INIT(0);
#define PCI_VENDOR_ID_GUILLEMOT 0x5046
#define PCI_DEVICE_ID_GUILLEMOT_MAXIRADIO 0x1001
struct maxiradio
{
struct snd_tea575x tea;
struct v4l2_device v4l2_dev;
struct pci_dev *pdev;
u16 io; /* base of radio io */
};
static inline struct maxiradio *to_maxiradio(struct v4l2_device *v4l2_dev)
{
return container_of(v4l2_dev, struct maxiradio, v4l2_dev);
}
static void maxiradio_tea575x_set_pins(struct snd_tea575x *tea, u8 pins)
{
struct maxiradio *dev = tea->private_data;
u8 bits = 0;
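/* Translate the generic TEA575x pin flags into this card's register bits. */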
bits |= (pins & TEA575X_DATA) ? data : 0;
bits |= (pins & TEA575X_CLK) ? clk : 0;
bits |= (pins & TEA575X_WREN) ? wren : 0;
bits |= power;
outb(bits, dev->io);
}
/* Note: this card cannot read out the data of the shift registers,
only the mono/stereo pin works. */
static u8 maxiradio_tea575x_get_pins(struct snd_tea575x *tea)
{
struct maxiradio *dev = tea->private_data;
u8 bits = inb(dev->io);
return ((bits & data) ? TEA575X_DATA : 0) |
((bits & mo_st) ? TEA575X_MOST : 0);
}
static void maxiradio_tea575x_set_direction(struct snd_tea575x *tea, bool output)
{
}
static const struct snd_tea575x_ops maxiradio_tea_ops = {
.set_pins = maxiradio_tea575x_set_pins,
.get_pins = maxiradio_tea575x_get_pins,
.set_direction = maxiradio_tea575x_set_direction,
};
static int maxiradio_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct maxiradio *dev;
struct v4l2_device *v4l2_dev;
int retval = -ENOMEM;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (dev == NULL) {
dev_err(&pdev->dev, "not enough memory\n");
return -ENOMEM;
}
v4l2_dev = &dev->v4l2_dev;
v4l2_device_set_name(v4l2_dev, "maxiradio", &maxiradio_instance);
retval = v4l2_device_register(&pdev->dev, v4l2_dev);
if (retval < 0) {
v4l2_err(v4l2_dev, "Could not register v4l2_device\n");
goto errfr;
}
dev->tea.private_data = dev;
dev->tea.ops = &maxiradio_tea_ops;
/* The data pin cannot be read. This may be a hardware limitation, or
we just don't know how to read it. */
dev->tea.cannot_read_data = true;
dev->tea.v4l2_dev = v4l2_dev;
dev->tea.radio_nr = radio_nr;
strscpy(dev->tea.card, "Maxi Radio FM2000", sizeof(dev->tea.card));
retval = -ENODEV;
if (!request_region(pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0), v4l2_dev->name)) {
dev_err(&pdev->dev, "can't reserve I/O ports\n");
goto err_hdl;
}
if (pci_enable_device(pdev))
goto err_out_free_region;
dev->io = pci_resource_start(pdev, 0);
if (snd_tea575x_init(&dev->tea, THIS_MODULE)) {
printk(KERN_ERR "radio-maxiradio: Unable to detect TEA575x tuner\n");
goto err_out_free_region;
}
return 0;
err_out_free_region:
release_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
err_hdl:
v4l2_device_unregister(v4l2_dev);
errfr:
kfree(dev);
return retval;
}
static void maxiradio_remove(struct pci_dev *pdev)
{
struct v4l2_device *v4l2_dev = pci_get_drvdata(pdev);
struct maxiradio *dev = to_maxiradio(v4l2_dev);
snd_tea575x_exit(&dev->tea);
/* Turn off power */
outb(0, dev->io);
v4l2_device_unregister(v4l2_dev);
release_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
kfree(dev);
}
static const struct pci_device_id maxiradio_pci_tbl[] = {
{ PCI_VENDOR_ID_GUILLEMOT, PCI_DEVICE_ID_GUILLEMOT_MAXIRADIO,
PCI_ANY_ID, PCI_ANY_ID, },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, maxiradio_pci_tbl);
static struct pci_driver maxiradio_driver = {
.name = "radio-maxiradio",
.id_table = maxiradio_pci_tbl,
.probe = maxiradio_probe,
.remove = maxiradio_remove,
};
module_pci_driver(maxiradio_driver);
|
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ST_PINCFG_H_
#define _ST_PINCFG_H_
/* Alternate functions */
#define ALT1 1
#define ALT2 2
#define ALT3 3
#define ALT4 4
#define ALT5 5
#define ALT6 6
#define ALT7 7
/* Output enable */
#define OE (1 << 27)
/* Pull Up */
#define PU (1 << 26)
/* Open Drain */
#define OD (1 << 25)
#define RT (1 << 23)
#define INVERTCLK (1 << 22)
#define CLKNOTDATA (1 << 21)
#define DOUBLE_EDGE (1 << 20)
#define CLK_A (0 << 18)
#define CLK_B (1 << 18)
#define CLK_C (2 << 18)
#define CLK_D (3 << 18)
/* User-friendly defines for Pin Direction */
/* oe = 0, pu = 0, od = 0 */
#define IN (0)
/* oe = 0, pu = 1, od = 0 */
#define IN_PU (PU)
/* oe = 1, pu = 0, od = 0 */
#define OUT (OE)
/* oe = 1, pu = 0, od = 1 */
#define BIDIR (OE | OD)
/* oe = 1, pu = 1, od = 1 */
#define BIDIR_PU (OE | PU | OD)
/* RETIME_TYPE */
/*
* B Mode
* Bypass retime with optional delay parameter
*/
#define BYPASS (0)
/*
* R0, R1, R0D, R1D modes
* single-edge data non inverted clock, retime data with clk
*/
#define SE_NICLK_IO (RT)
/*
* RIV0, RIV1, RIV0D, RIV1D modes
* single-edge data inverted clock, retime data with clk
*/
#define SE_ICLK_IO (RT | INVERTCLK)
/*
* R0E, R1E, R0ED, R1ED modes
* double-edge data, retime data with clk
*/
#define DE_IO (RT | DOUBLE_EDGE)
/*
* CIV0, CIV1 modes with inverted clock
* Retiming the clk pins will park clock & reduce the noise within the core.
*/
#define ICLK (RT | CLKNOTDATA | INVERTCLK)
/*
* CLK0, CLK1 modes with non-inverted clock
* Retiming the clk pins will park clock & reduce the noise within the core.
*/
#define NICLK (RT | CLKNOTDATA)
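/*
 * Illustrative example only (not from the original header): a complete pad
 * configuration ORs a direction define with a retime mode and a retime
 * clock selection. The macro name is hypothetical.
 */
#define EXAMPLE_OUT_NICLK_CLK_A (OUT | SE_NICLK_IO | CLK_A)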
#endif /* _ST_PINCFG_H_ */
|
// SPDX-License-Identifier: GPL-2.0+
/*
* RZ/G2L Display Unit Mode Setting
*
* Copyright (C) 2023 Renesas Electronics Corporation
*
* Based on rcar_du_kms.c
*/
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include "rzg2l_du_crtc.h"
#include "rzg2l_du_drv.h"
#include "rzg2l_du_encoder.h"
#include "rzg2l_du_kms.h"
#include "rzg2l_du_vsp.h"
/* -----------------------------------------------------------------------------
* Format helpers
*/
static const struct rzg2l_du_format_info rzg2l_du_format_infos[] = {
{
.fourcc = DRM_FORMAT_XRGB8888,
.v4l2 = V4L2_PIX_FMT_XBGR32,
.bpp = 32,
.planes = 1,
.hsub = 1,
}, {
.fourcc = DRM_FORMAT_ARGB8888,
.v4l2 = V4L2_PIX_FMT_ABGR32,
.bpp = 32,
.planes = 1,
.hsub = 1,
}, {
.fourcc = DRM_FORMAT_RGB888,
.v4l2 = V4L2_PIX_FMT_BGR24,
.bpp = 24,
.planes = 1,
.hsub = 1,
}
};
const struct rzg2l_du_format_info *rzg2l_du_format_info(u32 fourcc)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(rzg2l_du_format_infos); ++i) {
if (rzg2l_du_format_infos[i].fourcc == fourcc)
return &rzg2l_du_format_infos[i];
}
return NULL;
}
/* -----------------------------------------------------------------------------
* Frame buffer
*/
int rzg2l_du_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
unsigned int align = 16 * args->bpp / 8;
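/*
 * Worked example (illustrative): for a 1920-pixel-wide XRGB8888 buffer,
 * min_pitch = 1920 * 32 / 8 = 7680 bytes and align = 16 * 32 / 8 = 64
 * bytes, so the rounded-up pitch stays at 7680.
 */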
args->pitch = roundup(min_pitch, align);
return drm_gem_dma_dumb_create_internal(file, dev, args);
}
static struct drm_framebuffer *
rzg2l_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
const struct rzg2l_du_format_info *format;
unsigned int max_pitch;
format = rzg2l_du_format_info(mode_cmd->pixel_format);
if (!format) {
dev_dbg(dev->dev, "unsupported pixel format %p4cc\n",
&mode_cmd->pixel_format);
return ERR_PTR(-EINVAL);
}
/*
* On RZ/G2L the memory interface is handled by the VSP that limits the
* pitch to 65535 bytes.
*/
max_pitch = 65535;
if (mode_cmd->pitches[0] > max_pitch) {
dev_dbg(dev->dev, "invalid pitch value %u\n",
mode_cmd->pitches[0]);
return ERR_PTR(-EINVAL);
}
return drm_gem_fb_create(dev, file_priv, mode_cmd);
}
/* -----------------------------------------------------------------------------
* Initialization
*/
static const struct drm_mode_config_helper_funcs rzg2l_du_mode_config_helper = {
.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
};
static const struct drm_mode_config_funcs rzg2l_du_mode_config_funcs = {
.fb_create = rzg2l_du_fb_create,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
static int rzg2l_du_encoders_init_one(struct rzg2l_du_device *rcdu,
enum rzg2l_du_output output,
struct of_endpoint *ep)
{
struct device_node *entity;
int ret;
/* Locate the connected entity and initialize the encoder. */
entity = of_graph_get_remote_port_parent(ep->local_node);
if (!entity) {
dev_dbg(rcdu->dev, "unconnected endpoint %pOF, skipping\n",
ep->local_node);
return -ENODEV;
}
if (!of_device_is_available(entity)) {
dev_dbg(rcdu->dev,
"connected entity %pOF is disabled, skipping\n",
entity);
of_node_put(entity);
return -ENODEV;
}
ret = rzg2l_du_encoder_init(rcdu, output, entity);
if (ret && ret != -EPROBE_DEFER && ret != -ENOLINK)
dev_warn(rcdu->dev,
"failed to initialize encoder %pOF on output %s (%d), skipping\n",
entity, rzg2l_du_output_name(output), ret);
of_node_put(entity);
return ret;
}
static int rzg2l_du_encoders_init(struct rzg2l_du_device *rcdu)
{
struct device_node *np = rcdu->dev->of_node;
struct device_node *ep_node;
unsigned int num_encoders = 0;
/*
* Iterate over the endpoints and create one encoder for each output
* pipeline.
*/
for_each_endpoint_of_node(np, ep_node) {
enum rzg2l_du_output output;
struct of_endpoint ep;
unsigned int i;
int ret;
ret = of_graph_parse_endpoint(ep_node, &ep);
if (ret < 0) {
of_node_put(ep_node);
return ret;
}
/* Find the output route corresponding to the port number. */
for (i = 0; i < RZG2L_DU_OUTPUT_MAX; ++i) {
if (rcdu->info->routes[i].possible_outputs &&
rcdu->info->routes[i].port == ep.port) {
output = i;
break;
}
}
if (i == RZG2L_DU_OUTPUT_MAX) {
dev_warn(rcdu->dev,
"port %u references unexisting output, skipping\n",
ep.port);
continue;
}
/* Process the output pipeline. */
ret = rzg2l_du_encoders_init_one(rcdu, output, &ep);
if (ret < 0) {
if (ret == -EPROBE_DEFER) {
of_node_put(ep_node);
return ret;
}
continue;
}
num_encoders++;
}
return num_encoders;
}
static int rzg2l_du_vsps_init(struct rzg2l_du_device *rcdu)
{
const struct device_node *np = rcdu->dev->of_node;
const char *vsps_prop_name = "renesas,vsps";
struct of_phandle_args args;
struct {
struct device_node *np;
unsigned int crtcs_mask;
} vsps[RZG2L_DU_MAX_VSPS] = { { NULL, }, };
unsigned int vsps_count = 0;
unsigned int cells;
unsigned int i;
int ret;
/*
* First parse the DT vsps property to populate the list of VSPs. Each
* entry contains a pointer to the VSP DT node and a bitmask of the
* connected DU CRTCs.
*/
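/*
 * Illustrative 'renesas,vsps' layout (hypothetical VSP labels), one
 * phandle plus one pipe-index cell per CRTC:
 *
 * renesas,vsps = <&vspd0 0>, <&vspd1 0>;
 */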
ret = of_property_count_u32_elems(np, vsps_prop_name);
if (ret < 0)
return ret;
cells = ret / rcdu->num_crtcs - 1;
if (cells != 1)
return -EINVAL;
for (i = 0; i < rcdu->num_crtcs; ++i) {
unsigned int j;
ret = of_parse_phandle_with_fixed_args(np, vsps_prop_name,
cells, i, &args);
if (ret < 0)
goto done;
/*
* Add the VSP to the list or update the corresponding existing
* entry if the VSP has already been added.
*/
for (j = 0; j < vsps_count; ++j) {
if (vsps[j].np == args.np)
break;
}
if (j < vsps_count)
of_node_put(args.np);
else
vsps[vsps_count++].np = args.np;
vsps[j].crtcs_mask |= BIT(i);
/*
* Store the VSP pointer and pipe index in the CRTC. If the
* second cell of the 'renesas,vsps' specifier isn't present,
* default to 0.
*/
rcdu->crtcs[i].vsp = &rcdu->vsps[j];
rcdu->crtcs[i].vsp_pipe = cells >= 1 ? args.args[0] : 0;
}
/*
* Then initialize all the VSPs from the node pointers and CRTCs bitmask
* computed previously.
*/
for (i = 0; i < vsps_count; ++i) {
struct rzg2l_du_vsp *vsp = &rcdu->vsps[i];
vsp->index = i;
vsp->dev = rcdu;
ret = rzg2l_du_vsp_init(vsp, vsps[i].np, vsps[i].crtcs_mask);
if (ret)
goto done;
}
done:
for (i = 0; i < ARRAY_SIZE(vsps); ++i)
of_node_put(vsps[i].np);
return ret;
}
int rzg2l_du_modeset_init(struct rzg2l_du_device *rcdu)
{
struct drm_device *dev = &rcdu->ddev;
struct drm_encoder *encoder;
unsigned int num_encoders;
int ret;
ret = drmm_mode_config_init(dev);
if (ret)
return ret;
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
dev->mode_config.normalize_zpos = true;
dev->mode_config.funcs = &rzg2l_du_mode_config_funcs;
dev->mode_config.helper_private = &rzg2l_du_mode_config_helper;
/*
* The RZ DU uses the VSP1 for memory access, and is limited
* to frame sizes of 1920x1080.
*/
dev->mode_config.max_width = 1920;
dev->mode_config.max_height = 1080;
rcdu->num_crtcs = hweight8(rcdu->info->channels_mask);
/*
* Initialize vertical blanking interrupts handling. Start with vblank
* disabled for all CRTCs.
*/
ret = drm_vblank_init(dev, rcdu->num_crtcs);
if (ret < 0)
return ret;
/* Initialize the compositors. */
ret = rzg2l_du_vsps_init(rcdu);
if (ret < 0)
return ret;
/* Create the CRTCs. */
ret = rzg2l_du_crtc_create(rcdu);
if (ret < 0)
return ret;
/* Initialize the encoders. */
ret = rzg2l_du_encoders_init(rcdu);
if (ret < 0)
return dev_err_probe(rcdu->dev, ret,
"failed to initialize encoders\n");
if (ret == 0) {
dev_err(rcdu->dev, "error: no encoder could be initialized\n");
return -EINVAL;
}
num_encoders = ret;
/*
* Set the possible CRTCs and possible clones. There's always at least
* one way for all encoders to clone each other, so set all bits in the
* possible clones field.
*/
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
struct rzg2l_du_encoder *renc = to_rzg2l_encoder(encoder);
const struct rzg2l_du_output_routing *route =
&rcdu->info->routes[renc->output];
encoder->possible_crtcs = route->possible_outputs;
encoder->possible_clones = (1 << num_encoders) - 1;
}
drm_mode_config_reset(dev);
drm_kms_helper_poll_init(dev);
return 0;
}
|
// SPDX-License-Identifier: GPL-2.0
/*
* Xilinx Video IP Core
*
* Copyright (C) 2013-2015 Ideas on Board
* Copyright (C) 2013-2015 Xilinx, Inc.
*
* Contacts: Hyun Kwon <[email protected]>
* Laurent Pinchart <[email protected]>
*/
#include <linux/clk.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <dt-bindings/media/xilinx-vip.h>
#include "xilinx-vip.h"
/* -----------------------------------------------------------------------------
* Helper functions
*/
static const struct xvip_video_format xvip_video_formats[] = {
{ XVIP_VF_YUV_422, 8, NULL, MEDIA_BUS_FMT_UYVY8_1X16,
2, V4L2_PIX_FMT_YUYV },
{ XVIP_VF_YUV_444, 8, NULL, MEDIA_BUS_FMT_VUY8_1X24,
3, V4L2_PIX_FMT_YUV444 },
{ XVIP_VF_RBG, 8, NULL, MEDIA_BUS_FMT_RBG888_1X24,
3, 0 },
{ XVIP_VF_MONO_SENSOR, 8, "mono", MEDIA_BUS_FMT_Y8_1X8,
1, V4L2_PIX_FMT_GREY },
{ XVIP_VF_MONO_SENSOR, 8, "rggb", MEDIA_BUS_FMT_SRGGB8_1X8,
1, V4L2_PIX_FMT_SRGGB8 },
{ XVIP_VF_MONO_SENSOR, 8, "grbg", MEDIA_BUS_FMT_SGRBG8_1X8,
1, V4L2_PIX_FMT_SGRBG8 },
{ XVIP_VF_MONO_SENSOR, 8, "gbrg", MEDIA_BUS_FMT_SGBRG8_1X8,
1, V4L2_PIX_FMT_SGBRG8 },
{ XVIP_VF_MONO_SENSOR, 8, "bggr", MEDIA_BUS_FMT_SBGGR8_1X8,
1, V4L2_PIX_FMT_SBGGR8 },
{ XVIP_VF_MONO_SENSOR, 12, "mono", MEDIA_BUS_FMT_Y12_1X12,
2, V4L2_PIX_FMT_Y12 },
};
/**
* xvip_get_format_by_code - Retrieve format information for a media bus code
* @code: the format media bus code
*
* Return: a pointer to the format information structure corresponding to the
* given V4L2 media bus format @code, or ERR_PTR if no corresponding format can
* be found.
*/
const struct xvip_video_format *xvip_get_format_by_code(unsigned int code)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(xvip_video_formats); ++i) {
const struct xvip_video_format *format = &xvip_video_formats[i];
if (format->code == code)
return format;
}
return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(xvip_get_format_by_code);
/**
* xvip_get_format_by_fourcc - Retrieve format information for a 4CC
* @fourcc: the format 4CC
*
* Return: a pointer to the format information structure corresponding to the
* given V4L2 format @fourcc. If not found, return a pointer to the first
* available format (V4L2_PIX_FMT_YUYV).
*/
const struct xvip_video_format *xvip_get_format_by_fourcc(u32 fourcc)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(xvip_video_formats); ++i) {
const struct xvip_video_format *format = &xvip_video_formats[i];
if (format->fourcc == fourcc)
return format;
}
return &xvip_video_formats[0];
}
EXPORT_SYMBOL_GPL(xvip_get_format_by_fourcc);
/**
* xvip_of_get_format - Parse a device tree node and return format information
* @node: the device tree node
*
* Read the xlnx,video-format, xlnx,video-width and xlnx,cfa-pattern properties
* from the device tree @node passed as an argument and return the corresponding
* format information.
*
* Return: a pointer to the format information structure corresponding to the
* format name and width, or ERR_PTR if no corresponding format can be found.
*/
const struct xvip_video_format *xvip_of_get_format(struct device_node *node)
{
const char *pattern = "mono";
unsigned int vf_code;
unsigned int i;
u32 width;
int ret;
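/*
 * Example node properties (illustrative values only):
 *
 * xlnx,video-format = <XVIP_VF_MONO_SENSOR>;
 * xlnx,video-width = <8>;
 * xlnx,cfa-pattern = "rggb";
 */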
ret = of_property_read_u32(node, "xlnx,video-format", &vf_code);
if (ret < 0)
return ERR_PTR(ret);
ret = of_property_read_u32(node, "xlnx,video-width", &width);
if (ret < 0)
return ERR_PTR(ret);
if (vf_code == XVIP_VF_MONO_SENSOR)
of_property_read_string(node, "xlnx,cfa-pattern", &pattern);
for (i = 0; i < ARRAY_SIZE(xvip_video_formats); ++i) {
const struct xvip_video_format *format = &xvip_video_formats[i];
if (format->vf_code != vf_code || format->width != width)
continue;
if (vf_code == XVIP_VF_MONO_SENSOR &&
strcmp(pattern, format->pattern))
continue;
return format;
}
return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(xvip_of_get_format);
/**
* xvip_set_format_size - Set the media bus frame format size
* @format: V4L2 frame format on media bus
* @fmt: media bus format
*
* Set the media bus frame format size. The width / height from the subdevice
* format are set to the given media bus format. The new format size is stored
* in @format. The width and height are clamped using default min / max values.
*/
void xvip_set_format_size(struct v4l2_mbus_framefmt *format,
const struct v4l2_subdev_format *fmt)
{
format->width = clamp_t(unsigned int, fmt->format.width,
XVIP_MIN_WIDTH, XVIP_MAX_WIDTH);
format->height = clamp_t(unsigned int, fmt->format.height,
XVIP_MIN_HEIGHT, XVIP_MAX_HEIGHT);
}
EXPORT_SYMBOL_GPL(xvip_set_format_size);
/**
* xvip_clr_or_set - Clear or set the register with a bitmask
* @xvip: Xilinx Video IP device
* @addr: address of register
* @mask: bitmask to be set or cleared
* @set: boolean flag indicating whether to set or clear
*
* Clear or set the bitmask @mask in the register at address @addr depending on
* the boolean flag @set. When @set is true, the bitmask is set in the
* register; otherwise it is cleared from the register.
*
* For example, this function can be used to set a control with a boolean value
* requested by users. If the caller knows whether to set or clear in the first
* place, the caller should call xvip_clr() or xvip_set() directly instead of
* using this function.
*/
void xvip_clr_or_set(struct xvip_device *xvip, u32 addr, u32 mask, bool set)
{
u32 reg;
reg = xvip_read(xvip, addr);
reg = set ? reg | mask : reg & ~mask;
xvip_write(xvip, addr, reg);
}
EXPORT_SYMBOL_GPL(xvip_clr_or_set);
/**
* xvip_clr_and_set - Clear and set the register with a bitmask
* @xvip: Xilinx Video IP device
* @addr: address of register
* @clr: bitmask to be cleared
* @set: bitmask to be set
*
* Clear a bit(s) of mask @clr in the register at address @addr, then set
* a bit(s) of mask @set in the register after.
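*
* For example, xvip_clr_and_set(xvip, addr, 0xF0, 0x30) replaces the high
* nibble of the register's low byte with 0x3.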
*/
void xvip_clr_and_set(struct xvip_device *xvip, u32 addr, u32 clr, u32 set)
{
u32 reg;
reg = xvip_read(xvip, addr);
reg &= ~clr;
reg |= set;
xvip_write(xvip, addr, reg);
}
EXPORT_SYMBOL_GPL(xvip_clr_and_set);
int xvip_init_resources(struct xvip_device *xvip)
{
struct platform_device *pdev = to_platform_device(xvip->dev);
xvip->iomem = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(xvip->iomem))
return PTR_ERR(xvip->iomem);
xvip->clk = devm_clk_get(xvip->dev, NULL);
if (IS_ERR(xvip->clk))
return PTR_ERR(xvip->clk);
clk_prepare_enable(xvip->clk);
return 0;
}
EXPORT_SYMBOL_GPL(xvip_init_resources);
void xvip_cleanup_resources(struct xvip_device *xvip)
{
clk_disable_unprepare(xvip->clk);
}
EXPORT_SYMBOL_GPL(xvip_cleanup_resources);
/* -----------------------------------------------------------------------------
* Subdev operations handlers
*/
/**
* xvip_enum_mbus_code - Enumerate the media format code
* @subdev: V4L2 subdevice
* @sd_state: V4L2 subdev state
* @code: returning media bus code
*
* Enumerate the media bus code of the subdevice. Return the corresponding
* pad format code. This function only works for subdevices with fixed format
* on all pads. Subdevices with multiple formats should have their own
* function to enumerate mbus codes.
*
* Return: 0 if the media bus code is found, or -EINVAL if the format index
* is not valid.
*/
int xvip_enum_mbus_code(struct v4l2_subdev *subdev,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct v4l2_mbus_framefmt *format;
/* Enumerating frame sizes based on the active configuration isn't
* supported yet.
*/
if (code->which == V4L2_SUBDEV_FORMAT_ACTIVE)
return -EINVAL;
if (code->index)
return -EINVAL;
format = v4l2_subdev_state_get_format(sd_state, code->pad);
code->code = format->code;
return 0;
}
EXPORT_SYMBOL_GPL(xvip_enum_mbus_code);
/**
* xvip_enum_frame_size - Enumerate the media bus frame size
* @subdev: V4L2 subdevice
* @sd_state: V4L2 subdev state
* @fse: returning media bus frame size
*
* This function is a drop-in implementation of the subdev enum_frame_size pad
* operation. It assumes that the subdevice has one sink pad and one source
* pad, and that the format on the source pad is always identical to the
* format on the sink pad. Entities with different requirements need to
* implement their own enum_frame_size handlers.
*
* Return: 0 if the media bus frame size is found, or -EINVAL
* if the index or the code is not valid.
*/
int xvip_enum_frame_size(struct v4l2_subdev *subdev,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct v4l2_mbus_framefmt *format;
/* Enumerating frame sizes based on the active configuration isn't
* supported yet.
*/
if (fse->which == V4L2_SUBDEV_FORMAT_ACTIVE)
return -EINVAL;
format = v4l2_subdev_state_get_format(sd_state, fse->pad);
if (fse->index || fse->code != format->code)
return -EINVAL;
if (fse->pad == XVIP_PAD_SINK) {
fse->min_width = XVIP_MIN_WIDTH;
fse->max_width = XVIP_MAX_WIDTH;
fse->min_height = XVIP_MIN_HEIGHT;
fse->max_height = XVIP_MAX_HEIGHT;
} else {
/* The size on the source pad is fixed and always identical to
* the size on the sink pad.
*/
fse->min_width = format->width;
fse->max_width = format->width;
fse->min_height = format->height;
fse->max_height = format->height;
}
return 0;
}
EXPORT_SYMBOL_GPL(xvip_enum_frame_size);
|
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_TTY_FLIP_H
#define _LINUX_TTY_FLIP_H
#include <linux/tty_buffer.h>
#include <linux/tty_port.h>
struct tty_ldisc;
int tty_buffer_set_limit(struct tty_port *port, int limit);
unsigned int tty_buffer_space_avail(struct tty_port *port);
int tty_buffer_request_room(struct tty_port *port, size_t size);
size_t __tty_insert_flip_string_flags(struct tty_port *port, const u8 *chars,
const u8 *flags, bool mutable_flags,
size_t size);
size_t tty_prepare_flip_string(struct tty_port *port, u8 **chars, size_t size);
void tty_flip_buffer_push(struct tty_port *port);
/**
* tty_insert_flip_string_fixed_flag - add characters to the tty buffer
* @port: tty port
* @chars: characters
* @flag: flag value for each character
* @size: size
*
* Queue a series of bytes to the tty buffering. All the characters passed are
* marked with the supplied flag.
*
* Returns: the number added.
*/
static inline size_t tty_insert_flip_string_fixed_flag(struct tty_port *port,
const u8 *chars, u8 flag,
size_t size)
{
return __tty_insert_flip_string_flags(port, chars, &flag, false, size);
}
/**
* tty_insert_flip_string_flags - add characters to the tty buffer
* @port: tty port
* @chars: characters
* @flags: flag bytes
* @size: size
*
* Queue a series of bytes to the tty buffering. For each character the flags
* array indicates the status of the character.
*
* Returns: the number added.
*/
static inline size_t tty_insert_flip_string_flags(struct tty_port *port,
const u8 *chars,
const u8 *flags, size_t size)
{
return __tty_insert_flip_string_flags(port, chars, flags, true, size);
}
/**
* tty_insert_flip_char - add one character to the tty buffer
* @port: tty port
* @ch: character
* @flag: flag byte
*
* Queue a single byte @ch to the tty buffering, with an optional flag.
*/
static inline size_t tty_insert_flip_char(struct tty_port *port, u8 ch, u8 flag)
{
struct tty_buffer *tb = port->buf.tail;
int change;
change = !tb->flags && (flag != TTY_NORMAL);
if (!change && tb->used < tb->size) {
if (tb->flags)
*flag_buf_ptr(tb, tb->used) = flag;
*char_buf_ptr(tb, tb->used++) = ch;
return 1;
}
return __tty_insert_flip_string_flags(port, &ch, &flag, false, 1);
}
static inline size_t tty_insert_flip_string(struct tty_port *port,
const u8 *chars, size_t size)
{
return tty_insert_flip_string_fixed_flag(port, chars, TTY_NORMAL, size);
}
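/*
 * Usage sketch (illustrative only, not part of the original header): a
 * serial driver's receive path typically queues incoming bytes and then
 * pushes them to the line discipline. The helper name is hypothetical.
 */
static inline void tty_flip_example_rx(struct tty_port *port,
const u8 *buf, size_t len)
{
/* Queue the received bytes with TTY_NORMAL flags. */
tty_insert_flip_string(port, buf, len);
/* Hand the queued data to the line discipline. */
tty_flip_buffer_push(port);
}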
size_t tty_ldisc_receive_buf(struct tty_ldisc *ld, const u8 *p, const u8 *f,
size_t count);
void tty_buffer_lock_exclusive(struct tty_port *port);
void tty_buffer_unlock_exclusive(struct tty_port *port);
#endif /* _LINUX_TTY_FLIP_H */
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Driver for Lineage Compact Power Line series of power entry modules.
*
* Copyright (C) 2010, 2011 Ericsson AB.
*
* Documentation:
* http://www.lineagepower.com/oem/pdf/CPLI2C.pdf
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/jiffies.h>
/*
* This driver supports various Lineage Compact Power Line DC/DC and AC/DC
* converters such as CP1800, CP2000AC, CP2000DC, CP2100DC, and others.
*
* The devices are nominally PMBus compliant. However, most standard PMBus
* commands are not supported. Specifically, all hardware monitoring and
* status reporting commands are non-standard. For this reason, a standard
* PMBus driver cannot be used.
*
* All Lineage CPL devices have a built-in I2C bus master selector (PCA9541).
* To ensure device access, this driver should only be used as client driver
* to the pca9541 I2C master selector driver.
*/
/* Command codes */
#define PEM_OPERATION 0x01
#define PEM_CLEAR_INFO_FLAGS 0x03
#define PEM_VOUT_COMMAND 0x21
#define PEM_VOUT_OV_FAULT_LIMIT 0x40
#define PEM_READ_DATA_STRING 0xd0
#define PEM_READ_INPUT_STRING 0xdc
#define PEM_READ_FIRMWARE_REV 0xdd
#define PEM_READ_RUN_TIMER 0xde
#define PEM_FAN_HI_SPEED 0xdf
#define PEM_FAN_NORMAL_SPEED 0xe0
#define PEM_READ_FAN_SPEED 0xe1
/* offsets in data string */
#define PEM_DATA_STATUS_2 0
#define PEM_DATA_STATUS_1 1
#define PEM_DATA_ALARM_2 2
#define PEM_DATA_ALARM_1 3
#define PEM_DATA_VOUT_LSB 4
#define PEM_DATA_VOUT_MSB 5
#define PEM_DATA_CURRENT 6
#define PEM_DATA_TEMP 7
/* Virtual entries, to report constants */
#define PEM_DATA_TEMP_MAX 10
#define PEM_DATA_TEMP_CRIT 11
/* offsets in input string */
#define PEM_INPUT_VOLTAGE 0
#define PEM_INPUT_POWER_LSB 1
#define PEM_INPUT_POWER_MSB 2
/* offsets in fan data */
#define PEM_FAN_ADJUSTMENT 0
#define PEM_FAN_FAN1 1
#define PEM_FAN_FAN2 2
#define PEM_FAN_FAN3 3
/* Status register bits */
#define STS1_OUTPUT_ON (1 << 0)
#define STS1_LEDS_FLASHING (1 << 1)
#define STS1_EXT_FAULT (1 << 2)
#define STS1_SERVICE_LED_ON (1 << 3)
#define STS1_SHUTDOWN_OCCURRED (1 << 4)
#define STS1_INT_FAULT (1 << 5)
#define STS1_ISOLATION_TEST_OK (1 << 6)
#define STS2_ENABLE_PIN_HI (1 << 0)
#define STS2_DATA_OUT_RANGE (1 << 1)
#define STS2_RESTARTED_OK (1 << 1)
#define STS2_ISOLATION_TEST_FAIL (1 << 3)
#define STS2_HIGH_POWER_CAP (1 << 4)
#define STS2_INVALID_INSTR (1 << 5)
#define STS2_WILL_RESTART (1 << 6)
#define STS2_PEC_ERR (1 << 7)
/* Alarm register bits */
#define ALRM1_VIN_OUT_LIMIT (1 << 0)
#define ALRM1_VOUT_OUT_LIMIT (1 << 1)
#define ALRM1_OV_VOLT_SHUTDOWN (1 << 2)
#define ALRM1_VIN_OVERCURRENT (1 << 3)
#define ALRM1_TEMP_WARNING (1 << 4)
#define ALRM1_TEMP_SHUTDOWN (1 << 5)
#define ALRM1_PRIMARY_FAULT (1 << 6)
#define ALRM1_POWER_LIMIT (1 << 7)
#define ALRM2_5V_OUT_LIMIT (1 << 1)
#define ALRM2_TEMP_FAULT (1 << 2)
#define ALRM2_OV_LOW (1 << 3)
#define ALRM2_DCDC_TEMP_HIGH (1 << 4)
#define ALRM2_PRI_TEMP_HIGH (1 << 5)
#define ALRM2_NO_PRIMARY (1 << 6)
#define ALRM2_FAN_FAULT (1 << 7)
#define FIRMWARE_REV_LEN 4
#define DATA_STRING_LEN 9
#define INPUT_STRING_LEN 5 /* 4 for most devices */
#define FAN_SPEED_LEN 5
struct pem_data {
struct i2c_client *client;
const struct attribute_group *groups[4];
struct mutex update_lock;
bool valid;
bool fans_supported;
int input_length;
unsigned long last_updated; /* in jiffies */
u8 firmware_rev[FIRMWARE_REV_LEN];
u8 data_string[DATA_STRING_LEN];
u8 input_string[INPUT_STRING_LEN];
u8 fan_speed[FAN_SPEED_LEN];
};
static int pem_read_block(struct i2c_client *client, u8 command, u8 *data,
int data_len)
{
u8 block_buffer[I2C_SMBUS_BLOCK_MAX];
int result;
result = i2c_smbus_read_block_data(client, command, block_buffer);
if (unlikely(result < 0))
goto abort;
if (unlikely(result == 0xff || result != data_len)) {
result = -EIO;
goto abort;
}
memcpy(data, block_buffer, data_len);
result = 0;
abort:
return result;
}
static struct pem_data *pem_update_device(struct device *dev)
{
struct pem_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
struct pem_data *ret = data;
mutex_lock(&data->update_lock);
if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
int result;
/* Read data string */
result = pem_read_block(client, PEM_READ_DATA_STRING,
data->data_string,
sizeof(data->data_string));
if (unlikely(result < 0)) {
ret = ERR_PTR(result);
goto abort;
}
/* Read input string */
if (data->input_length) {
result = pem_read_block(client, PEM_READ_INPUT_STRING,
data->input_string,
data->input_length);
if (unlikely(result < 0)) {
ret = ERR_PTR(result);
goto abort;
}
}
/* Read fan speeds */
if (data->fans_supported) {
result = pem_read_block(client, PEM_READ_FAN_SPEED,
data->fan_speed,
sizeof(data->fan_speed));
if (unlikely(result < 0)) {
ret = ERR_PTR(result);
goto abort;
}
}
i2c_smbus_write_byte(client, PEM_CLEAR_INFO_FLAGS);
data->last_updated = jiffies;
data->valid = true;
}
abort:
mutex_unlock(&data->update_lock);
return ret;
}
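/*
 * Scaling notes (inferred from the conversions below, not from a datasheet):
 * VOUT is a 16-bit little-endian value in 2.5 mV units, the current reading
 * is in 200 mA units, and temperatures are whole degrees C, scaled to the
 * millidegree units expected by hwmon.
 */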
static long pem_get_data(u8 *data, int len, int index)
{
long val;
switch (index) {
case PEM_DATA_VOUT_LSB:
val = (data[index] + (data[index+1] << 8)) * 5 / 2;
break;
case PEM_DATA_CURRENT:
val = data[index] * 200;
break;
case PEM_DATA_TEMP:
val = data[index] * 1000;
break;
case PEM_DATA_TEMP_MAX:
val = 97 * 1000; /* 97 degrees C per datasheet */
break;
case PEM_DATA_TEMP_CRIT:
val = 107 * 1000; /* 107 degrees C per datasheet */
break;
default:
WARN_ON_ONCE(1);
val = 0;
}
return val;
}
static long pem_get_input(u8 *data, int len, int index)
{
long val;
switch (index) {
case PEM_INPUT_VOLTAGE:
if (len == INPUT_STRING_LEN)
val = (data[index] + (data[index+1] << 8) - 75) * 1000;
else
val = (data[index] - 75) * 1000;
break;
case PEM_INPUT_POWER_LSB:
if (len == INPUT_STRING_LEN)
index++;
val = (data[index] + (data[index+1] << 8)) * 1000000L;
break;
default:
WARN_ON_ONCE(1);
val = 0;
}
return val;
}
static long pem_get_fan(u8 *data, int len, int index)
{
long val;
switch (index) {
case PEM_FAN_FAN1:
case PEM_FAN_FAN2:
case PEM_FAN_FAN3:
val = data[index] * 100;
break;
default:
WARN_ON_ONCE(1);
val = 0;
}
return val;
}
/*
* Show boolean, either a fault or an alarm.
* .nr points to the register, .index is the bit mask to check
*/
static ssize_t pem_bool_show(struct device *dev, struct device_attribute *da,
char *buf)
{
struct sensor_device_attribute_2 *attr = to_sensor_dev_attr_2(da);
struct pem_data *data = pem_update_device(dev);
u8 status;
if (IS_ERR(data))
return PTR_ERR(data);
status = data->data_string[attr->nr] & attr->index;
return sysfs_emit(buf, "%d\n", !!status);
}
static ssize_t pem_data_show(struct device *dev, struct device_attribute *da,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
struct pem_data *data = pem_update_device(dev);
long value;
if (IS_ERR(data))
return PTR_ERR(data);
value = pem_get_data(data->data_string, sizeof(data->data_string),
attr->index);
return sysfs_emit(buf, "%ld\n", value);
}
static ssize_t pem_input_show(struct device *dev, struct device_attribute *da,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
struct pem_data *data = pem_update_device(dev);
long value;
if (IS_ERR(data))
return PTR_ERR(data);
value = pem_get_input(data->input_string, sizeof(data->input_string),
attr->index);
return sysfs_emit(buf, "%ld\n", value);
}
static ssize_t pem_fan_show(struct device *dev, struct device_attribute *da,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
struct pem_data *data = pem_update_device(dev);
long value;
if (IS_ERR(data))
return PTR_ERR(data);
value = pem_get_fan(data->fan_speed, sizeof(data->fan_speed),
attr->index);
return sysfs_emit(buf, "%ld\n", value);
}
/* Voltages */
static SENSOR_DEVICE_ATTR_RO(in1_input, pem_data, PEM_DATA_VOUT_LSB);
static SENSOR_DEVICE_ATTR_2_RO(in1_alarm, pem_bool, PEM_DATA_ALARM_1,
ALRM1_VOUT_OUT_LIMIT);
static SENSOR_DEVICE_ATTR_2_RO(in1_crit_alarm, pem_bool, PEM_DATA_ALARM_1,
ALRM1_OV_VOLT_SHUTDOWN);
static SENSOR_DEVICE_ATTR_RO(in2_input, pem_input, PEM_INPUT_VOLTAGE);
static SENSOR_DEVICE_ATTR_2_RO(in2_alarm, pem_bool, PEM_DATA_ALARM_1,
ALRM1_VIN_OUT_LIMIT | ALRM1_PRIMARY_FAULT);
/* Currents */
static SENSOR_DEVICE_ATTR_RO(curr1_input, pem_data, PEM_DATA_CURRENT);
static SENSOR_DEVICE_ATTR_2_RO(curr1_alarm, pem_bool, PEM_DATA_ALARM_1,
ALRM1_VIN_OVERCURRENT);
/* Power */
static SENSOR_DEVICE_ATTR_RO(power1_input, pem_input, PEM_INPUT_POWER_LSB);
static SENSOR_DEVICE_ATTR_2_RO(power1_alarm, pem_bool, PEM_DATA_ALARM_1,
ALRM1_POWER_LIMIT);
/* Fans */
static SENSOR_DEVICE_ATTR_RO(fan1_input, pem_fan, PEM_FAN_FAN1);
static SENSOR_DEVICE_ATTR_RO(fan2_input, pem_fan, PEM_FAN_FAN2);
static SENSOR_DEVICE_ATTR_RO(fan3_input, pem_fan, PEM_FAN_FAN3);
static SENSOR_DEVICE_ATTR_2_RO(fan1_alarm, pem_bool, PEM_DATA_ALARM_2,
ALRM2_FAN_FAULT);
/* Temperatures */
static SENSOR_DEVICE_ATTR_RO(temp1_input, pem_data, PEM_DATA_TEMP);
static SENSOR_DEVICE_ATTR_RO(temp1_max, pem_data, PEM_DATA_TEMP_MAX);
static SENSOR_DEVICE_ATTR_RO(temp1_crit, pem_data, PEM_DATA_TEMP_CRIT);
static SENSOR_DEVICE_ATTR_2_RO(temp1_alarm, pem_bool, PEM_DATA_ALARM_1,
ALRM1_TEMP_WARNING);
static SENSOR_DEVICE_ATTR_2_RO(temp1_crit_alarm, pem_bool, PEM_DATA_ALARM_1,
ALRM1_TEMP_SHUTDOWN);
static SENSOR_DEVICE_ATTR_2_RO(temp1_fault, pem_bool, PEM_DATA_ALARM_2,
ALRM2_TEMP_FAULT);
static struct attribute *pem_attributes[] = {
&sensor_dev_attr_in1_input.dev_attr.attr,
&sensor_dev_attr_in1_alarm.dev_attr.attr,
&sensor_dev_attr_in1_crit_alarm.dev_attr.attr,
&sensor_dev_attr_in2_alarm.dev_attr.attr,
&sensor_dev_attr_curr1_alarm.dev_attr.attr,
&sensor_dev_attr_power1_alarm.dev_attr.attr,
&sensor_dev_attr_fan1_alarm.dev_attr.attr,
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp1_max.dev_attr.attr,
&sensor_dev_attr_temp1_crit.dev_attr.attr,
&sensor_dev_attr_temp1_alarm.dev_attr.attr,
&sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
&sensor_dev_attr_temp1_fault.dev_attr.attr,
NULL,
};
static const struct attribute_group pem_group = {
.attrs = pem_attributes,
};
static struct attribute *pem_input_attributes[] = {
&sensor_dev_attr_in2_input.dev_attr.attr,
&sensor_dev_attr_curr1_input.dev_attr.attr,
&sensor_dev_attr_power1_input.dev_attr.attr,
NULL
};
static const struct attribute_group pem_input_group = {
.attrs = pem_input_attributes,
};
static struct attribute *pem_fan_attributes[] = {
&sensor_dev_attr_fan1_input.dev_attr.attr,
&sensor_dev_attr_fan2_input.dev_attr.attr,
&sensor_dev_attr_fan3_input.dev_attr.attr,
NULL
};
static const struct attribute_group pem_fan_group = {
.attrs = pem_fan_attributes,
};
static int pem_probe(struct i2c_client *client)
{
struct i2c_adapter *adapter = client->adapter;
struct device *dev = &client->dev;
struct device *hwmon_dev;
struct pem_data *data;
int ret, idx = 0;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BLOCK_DATA
| I2C_FUNC_SMBUS_WRITE_BYTE))
return -ENODEV;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->client = client;
mutex_init(&data->update_lock);
/*
* We use the next two commands to determine if the device is really
* there.
*/
ret = pem_read_block(client, PEM_READ_FIRMWARE_REV,
data->firmware_rev, sizeof(data->firmware_rev));
if (ret < 0)
return ret;
ret = i2c_smbus_write_byte(client, PEM_CLEAR_INFO_FLAGS);
if (ret < 0)
return ret;
dev_info(dev, "Firmware revision %d.%d.%d\n",
data->firmware_rev[0], data->firmware_rev[1],
data->firmware_rev[2]);
/* sysfs hooks */
data->groups[idx++] = &pem_group;
/*
* Check if input readings are supported.
* This is the case if we can read input data,
* and if the returned data is not all zeros.
* Note that input alarms are always supported.
*/
ret = pem_read_block(client, PEM_READ_INPUT_STRING,
data->input_string,
sizeof(data->input_string) - 1);
if (!ret && (data->input_string[0] || data->input_string[1] ||
data->input_string[2]))
data->input_length = sizeof(data->input_string) - 1;
else if (ret < 0) {
/* Input string is one byte longer for some devices */
ret = pem_read_block(client, PEM_READ_INPUT_STRING,
data->input_string,
sizeof(data->input_string));
if (!ret && (data->input_string[0] || data->input_string[1] ||
data->input_string[2] || data->input_string[3]))
data->input_length = sizeof(data->input_string);
}
if (data->input_length)
data->groups[idx++] = &pem_input_group;
/*
* Check if fan speed readings are supported.
* This is the case if we can read fan speed data,
* and if the returned data is not all zeros.
* Note that the fan alarm is always supported.
*/
ret = pem_read_block(client, PEM_READ_FAN_SPEED,
data->fan_speed,
sizeof(data->fan_speed));
if (!ret && (data->fan_speed[0] || data->fan_speed[1] ||
data->fan_speed[2] || data->fan_speed[3])) {
data->fans_supported = true;
data->groups[idx++] = &pem_fan_group;
}
hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
data, data->groups);
return PTR_ERR_OR_ZERO(hwmon_dev);
}
static const struct i2c_device_id pem_id[] = {
{"lineage_pem"},
{}
};
MODULE_DEVICE_TABLE(i2c, pem_id);
static struct i2c_driver pem_driver = {
.driver = {
.name = "lineage_pem",
},
.probe = pem_probe,
.id_table = pem_id,
};
module_i2c_driver(pem_driver);
MODULE_AUTHOR("Guenter Roeck <[email protected]>");
MODULE_DESCRIPTION("Lineage CPL PEM hardware monitoring driver");
MODULE_LICENSE("GPL");
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* cx18 buffer queues
*
* Derived from ivtv-queue.h
*
* Copyright (C) 2007 Hans Verkuil <[email protected]>
* Copyright (C) 2008 Andy Walls <[email protected]>
*/
#define CX18_DMA_UNMAPPED ((u32) -1)
/* cx18_buffer utility functions */
static inline void cx18_buf_sync_for_cpu(struct cx18_stream *s,
struct cx18_buffer *buf)
{
dma_sync_single_for_cpu(&s->cx->pci_dev->dev, buf->dma_handle,
s->buf_size, s->dma);
}
static inline void cx18_buf_sync_for_device(struct cx18_stream *s,
struct cx18_buffer *buf)
{
dma_sync_single_for_device(&s->cx->pci_dev->dev, buf->dma_handle,
s->buf_size, s->dma);
}
void _cx18_mdl_sync_for_device(struct cx18_stream *s, struct cx18_mdl *mdl);
static inline void cx18_mdl_sync_for_device(struct cx18_stream *s,
struct cx18_mdl *mdl)
{
if (list_is_singular(&mdl->buf_list))
cx18_buf_sync_for_device(s, list_first_entry(&mdl->buf_list,
struct cx18_buffer,
list));
else
_cx18_mdl_sync_for_device(s, mdl);
}
void cx18_buf_swap(struct cx18_buffer *buf);
void _cx18_mdl_swap(struct cx18_mdl *mdl);
static inline void cx18_mdl_swap(struct cx18_mdl *mdl)
{
if (list_is_singular(&mdl->buf_list))
cx18_buf_swap(list_first_entry(&mdl->buf_list,
struct cx18_buffer, list));
else
_cx18_mdl_swap(mdl);
}
/* cx18_queue utility functions */
struct cx18_queue *_cx18_enqueue(struct cx18_stream *s, struct cx18_mdl *mdl,
struct cx18_queue *q, int to_front);
static inline
struct cx18_queue *cx18_enqueue(struct cx18_stream *s, struct cx18_mdl *mdl,
struct cx18_queue *q)
{
return _cx18_enqueue(s, mdl, q, 0); /* FIFO */
}
static inline
struct cx18_queue *cx18_push(struct cx18_stream *s, struct cx18_mdl *mdl,
struct cx18_queue *q)
{
return _cx18_enqueue(s, mdl, q, 1); /* LIFO */
}
void cx18_queue_init(struct cx18_queue *q);
struct cx18_mdl *cx18_dequeue(struct cx18_stream *s, struct cx18_queue *q);
struct cx18_mdl *cx18_queue_get_mdl(struct cx18_stream *s, u32 id,
u32 bytesused);
void cx18_flush_queues(struct cx18_stream *s);
/* queue MDL reconfiguration helpers */
void cx18_unload_queues(struct cx18_stream *s);
void cx18_load_queues(struct cx18_stream *s);
/* cx18_stream utility functions */
int cx18_stream_alloc(struct cx18_stream *s);
void cx18_stream_free(struct cx18_stream *s);
|
// SPDX-License-Identifier: GPL-2.0
/*
* SH7264 Pinmux
*
* Copyright (C) 2012 Renesas Electronics Europe Ltd
*/
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <cpu/pfc.h>
static struct resource sh7264_pfc_resources[] = {
[0] = {
.start = 0xfffe3800,
.end = 0xfffe393f,
.flags = IORESOURCE_MEM,
},
};
static int __init plat_pinmux_setup(void)
{
return sh_pfc_register("pfc-sh7264", sh7264_pfc_resources,
ARRAY_SIZE(sh7264_pfc_resources));
}
arch_initcall(plat_pinmux_setup);
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Conexant CX24120/CX24118 - DVB-S/S2 demod/tuner driver
*
* Copyright (C) 2008 Patrick Boettcher <[email protected]>
* Copyright (C) 2009 Sergey Tyurin <forum.free-x.de>
* Updated 2012 by Jannis Achstetter <[email protected]>
* Copyright (C) 2015 Jemma Denson <[email protected]>
*/
#ifndef CX24120_H
#define CX24120_H
#include <linux/dvb/frontend.h>
#include <linux/firmware.h>
struct cx24120_initial_mpeg_config {
u8 x1;
u8 x2;
u8 x3;
};
struct cx24120_config {
u8 i2c_addr;
u32 xtal_khz;
struct cx24120_initial_mpeg_config initial_mpeg_config;
int (*request_firmware)(struct dvb_frontend *fe,
const struct firmware **fw, char *name);
/* max bytes I2C provider can write at once */
u16 i2c_wr_max;
};
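/*
 * Illustrative attach sketch (all values hypothetical, taken from no
 * particular board):
 *
 *	static struct cx24120_config demod_config = {
 *		.i2c_addr = 0x55,
 *		.xtal_khz = 10111,
 *		.i2c_wr_max = 4,
 *	};
 *	fe = cx24120_attach(&demod_config, i2c_adapter);
 */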
#if IS_REACHABLE(CONFIG_DVB_CX24120)
struct dvb_frontend *cx24120_attach(const struct cx24120_config *config,
struct i2c_adapter *i2c);
#else
static inline
struct dvb_frontend *cx24120_attach(const struct cx24120_config *config,
struct i2c_adapter *i2c)
{
pr_warn("%s: driver disabled by Kconfig\n", __func__);
return NULL;
}
#endif
#endif /* CX24120_H */
|
// SPDX-License-Identifier: GPL-2.0-or-later OR BSD-2-Clause
/dts-v1/;
#include "rtl83xx.dtsi"
#include "rtl838x.dtsi"
/ {
model = "Cisco SG220-26";
compatible = "cisco,sg220-26", "realtek,rtl8382-soc";
chosen {
stdout-path = "serial0:9600n8";
bootargs = "earlycon console=ttyS0,9600";
};
memory@0 {
device_type = "memory";
reg = <0x0 0x8000000>;
};
};
&uart0 {
status = "okay";
};
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) International Business Machines Corp., 2006
*
* Author: Artem Bityutskiy (Битюцкий Артём)
*/
/*
* UBI attaching sub-system.
*
* This sub-system is responsible for attaching MTD devices and it also
* implements flash media scanning.
*
* The attaching information is represented by a &struct ubi_attach_info
* object. Information about volumes is represented by &struct ubi_ainf_volume
* objects which are kept in volume RB-tree with root at the @volumes field.
* The RB-tree is indexed by the volume ID.
*
* Logical eraseblocks are represented by &struct ubi_ainf_peb objects. These
* objects are kept in per-volume RB-trees with the root at the corresponding
* &struct ubi_ainf_volume object. To put it differently, we keep an RB-tree of
* per-volume objects and each of these objects is the root of RB-tree of
* per-LEB objects.
*
* Corrupted physical eraseblocks are put to the @corr list, free physical
* eraseblocks are put to the @free list, and the physical eraseblocks to be
* erased are put to the @erase list.
*
* About corruptions
* ~~~~~~~~~~~~~~~~~
*
* UBI protects EC and VID headers with CRC-32 checksums, so it can detect
* whether the headers are corrupted or not. Sometimes UBI also protects the
* data with CRC-32, e.g., when it executes the atomic LEB change operation, or
* when it moves the contents of a PEB for wear-leveling purposes.
*
* UBI tries to distinguish between 2 types of corruptions.
*
* 1. Corruptions caused by power cuts. These are expected corruptions and UBI
* tries to handle them gracefully, without printing too many warnings and
* error messages. The idea is that we do not lose important data in these
* cases - we may lose only the data which were being written to the media just
* before the power cut happened, and the upper layers (e.g., UBIFS) are
* supposed to handle such data losses (e.g., by using the FS journal).
*
* When UBI detects a corruption (CRC-32 mismatch) in a PEB, and it looks like
* the reason is a power cut, UBI puts this PEB to the @erase list, and all
* PEBs in the @erase list are scheduled for erasure later.
*
* 2. Unexpected corruptions which are not caused by power cuts. During
* attaching, such PEBs are put to the @corr list and UBI preserves them.
* Obviously, this lessens the amount of available PEBs, and if at some point
* UBI runs out of free PEBs, it switches to R/O mode. UBI also loudly informs
* about such PEBs every time the MTD device is attached.
*
* However, it is difficult to reliably distinguish between these types of
* corruptions and UBI's strategy is as follows (in case of attaching by
* scanning). UBI assumes corruption type 2 if the VID header is corrupted and
* the data area does not contain all 0xFFs, and there were no bit-flips or
* integrity errors (e.g., ECC errors in case of NAND) while reading the data
* area. Otherwise UBI assumes corruption type 1. So the decision criteria
* are as follows.
* o If the data area contains only 0xFFs, there are no data, and it is safe
* to just erase this PEB - this is corruption type 1.
* o If the data area has bit-flips or data integrity errors (ECC errors on
* NAND), it is probably a PEB which was being erased when power cut
* happened, so this is corruption type 1. However, this is just a guess,
* which might be wrong.
* o Otherwise this is corruption type 2.
*/
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/math64.h>
#include <linux/random.h>
#include "ubi.h"
static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai);
#define AV_FIND BIT(0)
#define AV_ADD BIT(1)
#define AV_FIND_OR_ADD (AV_FIND | AV_ADD)
/**
* find_or_add_av - internal function to find a volume, add a volume or do
* both (find and add if missing).
* @ai: attaching information
* @vol_id: the requested volume ID
* @flags: a combination of the %AV_FIND and %AV_ADD flags describing the
* expected operation. If only %AV_ADD is set, -EEXIST is returned
* if the volume already exists. If only %AV_FIND is set, NULL is
* returned if the volume does not exist. And if both flags are
* set, the helper first tries to find an existing volume, and if
* it does not exist it creates a new one.
* @created: out parameter used to inform the caller whether it's a newly created
* volume or not.
*
* This function returns a pointer to a volume description or an ERR_PTR if
* the operation failed. It can also return NULL if only %AV_FIND is set and
* the volume does not exist.
*/
static struct ubi_ainf_volume *find_or_add_av(struct ubi_attach_info *ai,
int vol_id, unsigned int flags,
bool *created)
{
struct ubi_ainf_volume *av;
struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
/* Walk the volume RB-tree to look if this volume is already present */
while (*p) {
parent = *p;
av = rb_entry(parent, struct ubi_ainf_volume, rb);
if (vol_id == av->vol_id) {
*created = false;
if (!(flags & AV_FIND))
return ERR_PTR(-EEXIST);
return av;
}
if (vol_id > av->vol_id)
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
}
if (!(flags & AV_ADD))
return NULL;
/* The volume is absent - add it */
av = kzalloc(sizeof(*av), GFP_KERNEL);
if (!av)
return ERR_PTR(-ENOMEM);
av->vol_id = vol_id;
if (vol_id > ai->highest_vol_id)
ai->highest_vol_id = vol_id;
rb_link_node(&av->rb, parent, p);
rb_insert_color(&av->rb, &ai->volumes);
ai->vols_found += 1;
*created = true;
dbg_bld("added volume %d", vol_id);
return av;
}
/**
* ubi_find_or_add_av - search for a volume in the attaching information and
* add one if it does not exist.
* @ai: attaching information
* @vol_id: the requested volume ID
* @created: whether the volume has been created or not
*
* This function returns a pointer to the new volume description or an
* ERR_PTR if the operation failed.
*/
static struct ubi_ainf_volume *ubi_find_or_add_av(struct ubi_attach_info *ai,
int vol_id, bool *created)
{
return find_or_add_av(ai, vol_id, AV_FIND_OR_ADD, created);
}
/**
* ubi_alloc_aeb - allocate an aeb element
* @ai: attaching information
* @pnum: physical eraseblock number
* @ec: erase counter of the physical eraseblock
*
* Allocate an aeb object and initialize the pnum and ec information.
* vol_id and lnum are set to UBI_UNKNOWN, and the other fields are
* initialized to zero.
* Note that the element is not added in any list or RB tree.
*/
struct ubi_ainf_peb *ubi_alloc_aeb(struct ubi_attach_info *ai, int pnum,
int ec)
{
struct ubi_ainf_peb *aeb;
aeb = kmem_cache_zalloc(ai->aeb_slab_cache, GFP_KERNEL);
if (!aeb)
return NULL;
aeb->pnum = pnum;
aeb->ec = ec;
aeb->vol_id = UBI_UNKNOWN;
aeb->lnum = UBI_UNKNOWN;
return aeb;
}
/**
* ubi_free_aeb - free an aeb element
* @ai: attaching information
* @aeb: the element to free
*
* Free an aeb object. The caller must have removed the element from any list
* or RB tree.
*/
void ubi_free_aeb(struct ubi_attach_info *ai, struct ubi_ainf_peb *aeb)
{
kmem_cache_free(ai->aeb_slab_cache, aeb);
}
/**
* add_to_list - add physical eraseblock to a list.
* @ai: attaching information
* @pnum: physical eraseblock number to add
* @vol_id: the last used volume id for the PEB
* @lnum: the last used LEB number for the PEB
* @ec: erase counter of the physical eraseblock
* @to_head: if not zero, add to the head of the list
* @list: the list to add to
*
* This function allocates a 'struct ubi_ainf_peb' object for physical
* eraseblock @pnum and adds it to the "free", "erase", or "alien" lists.
* It stores the @lnum and @vol_id alongside, which can both be
* %UBI_UNKNOWN if they are not available, not readable, or not assigned.
* If @to_head is not zero, the PEB will be added to the head of the list, which
* basically means it will be processed earlier. E.g., we add corrupted
* PEBs (corrupted due to power cuts) to the head of the erase list to make
* sure we erase them first and get rid of corruptions ASAP. This function
* returns zero in case of success and a negative error code in case of
* failure.
*/
static int add_to_list(struct ubi_attach_info *ai, int pnum, int vol_id,
int lnum, int ec, int to_head, struct list_head *list)
{
struct ubi_ainf_peb *aeb;
if (list == &ai->free) {
dbg_bld("add to free: PEB %d, EC %d", pnum, ec);
} else if (list == &ai->erase) {
dbg_bld("add to erase: PEB %d, EC %d", pnum, ec);
} else if (list == &ai->alien) {
dbg_bld("add to alien: PEB %d, EC %d", pnum, ec);
ai->alien_peb_count += 1;
} else
BUG();
aeb = ubi_alloc_aeb(ai, pnum, ec);
if (!aeb)
return -ENOMEM;
aeb->vol_id = vol_id;
aeb->lnum = lnum;
if (to_head)
list_add(&aeb->u.list, list);
else
list_add_tail(&aeb->u.list, list);
return 0;
}
/**
* add_corrupted - add a corrupted physical eraseblock.
* @ai: attaching information
* @pnum: physical eraseblock number to add
* @ec: erase counter of the physical eraseblock
*
* This function allocates a 'struct ubi_ainf_peb' object for a corrupted
* physical eraseblock @pnum and adds it to the 'corr' list. The corruption
* was presumably not caused by a power cut. Returns zero in case of success
* and a negative error code in case of failure.
*/
static int add_corrupted(struct ubi_attach_info *ai, int pnum, int ec)
{
struct ubi_ainf_peb *aeb;
dbg_bld("add to corrupted: PEB %d, EC %d", pnum, ec);
aeb = ubi_alloc_aeb(ai, pnum, ec);
if (!aeb)
return -ENOMEM;
ai->corr_peb_count += 1;
list_add(&aeb->u.list, &ai->corr);
return 0;
}
/**
* add_fastmap - add a Fastmap related physical eraseblock.
* @ai: attaching information
* @pnum: physical eraseblock number the VID header came from
* @vid_hdr: the volume identifier header
* @ec: erase counter of the physical eraseblock
*
* This function allocates a 'struct ubi_ainf_peb' object for a Fastmap
* physical eraseblock @pnum and adds it to the 'fastmap' list.
* Such blocks can be Fastmap super and data blocks, either from the most
* recent Fastmap we're attaching from or from old Fastmaps which will
* be erased.
*/
static int add_fastmap(struct ubi_attach_info *ai, int pnum,
struct ubi_vid_hdr *vid_hdr, int ec)
{
struct ubi_ainf_peb *aeb;
aeb = ubi_alloc_aeb(ai, pnum, ec);
if (!aeb)
return -ENOMEM;
aeb->vol_id = be32_to_cpu(vid_hdr->vol_id);
aeb->sqnum = be64_to_cpu(vid_hdr->sqnum);
list_add(&aeb->u.list, &ai->fastmap);
dbg_bld("add to fastmap list: PEB %d, vol_id %d, sqnum: %llu", pnum,
aeb->vol_id, aeb->sqnum);
return 0;
}
/**
* validate_vid_hdr - check volume identifier header.
* @ubi: UBI device description object
* @vid_hdr: the volume identifier header to check
* @av: information about the volume this logical eraseblock belongs to
* @pnum: physical eraseblock number the VID header came from
*
* This function checks that data stored in @vid_hdr is consistent. Returns
* non-zero if an inconsistency was found and zero if not.
*
* Note, UBI sanity-checks everything it reads from the flash media.
* Most of the checks are done in the I/O sub-system. Here we check that the
* information in the VID header is consistent with the information in other
* VID headers of the same volume.
*/
static int validate_vid_hdr(const struct ubi_device *ubi,
const struct ubi_vid_hdr *vid_hdr,
const struct ubi_ainf_volume *av, int pnum)
{
int vol_type = vid_hdr->vol_type;
int vol_id = be32_to_cpu(vid_hdr->vol_id);
int used_ebs = be32_to_cpu(vid_hdr->used_ebs);
int data_pad = be32_to_cpu(vid_hdr->data_pad);
if (av->leb_count != 0) {
int av_vol_type;
/*
* This is not the first logical eraseblock belonging to this
* volume. Ensure that the data in its VID header is consistent
* with the data in previous logical eraseblock headers.
*/
if (vol_id != av->vol_id) {
ubi_err(ubi, "inconsistent vol_id");
goto bad;
}
if (av->vol_type == UBI_STATIC_VOLUME)
av_vol_type = UBI_VID_STATIC;
else
av_vol_type = UBI_VID_DYNAMIC;
if (vol_type != av_vol_type) {
ubi_err(ubi, "inconsistent vol_type");
goto bad;
}
if (used_ebs != av->used_ebs) {
ubi_err(ubi, "inconsistent used_ebs");
goto bad;
}
if (data_pad != av->data_pad) {
ubi_err(ubi, "inconsistent data_pad");
goto bad;
}
}
return 0;
bad:
ubi_err(ubi, "inconsistent VID header at PEB %d", pnum);
ubi_dump_vid_hdr(vid_hdr);
ubi_dump_av(av);
return -EINVAL;
}
/**
* add_volume - add volume to the attaching information.
* @ai: attaching information
* @vol_id: ID of the volume to add
* @pnum: physical eraseblock number
* @vid_hdr: volume identifier header
*
* If the volume corresponding to the @vid_hdr logical eraseblock is already
* present in the attaching information, this function does nothing. Otherwise
* it adds the corresponding volume to the attaching information. Returns a
* pointer to the allocated "av" object in case of success and an ERR_PTR in
* case of failure.
*/
static struct ubi_ainf_volume *add_volume(struct ubi_attach_info *ai,
int vol_id, int pnum,
const struct ubi_vid_hdr *vid_hdr)
{
struct ubi_ainf_volume *av;
bool created;
ubi_assert(vol_id == be32_to_cpu(vid_hdr->vol_id));
av = ubi_find_or_add_av(ai, vol_id, &created);
if (IS_ERR(av) || !created)
return av;
av->used_ebs = be32_to_cpu(vid_hdr->used_ebs);
av->data_pad = be32_to_cpu(vid_hdr->data_pad);
av->compat = vid_hdr->compat;
av->vol_type = vid_hdr->vol_type == UBI_VID_DYNAMIC ? UBI_DYNAMIC_VOLUME
: UBI_STATIC_VOLUME;
return av;
}
/**
* ubi_compare_lebs - find out which logical eraseblock is newer.
* @ubi: UBI device description object
* @aeb: first logical eraseblock to compare
* @pnum: physical eraseblock number of the second logical eraseblock to
* compare
* @vid_hdr: volume identifier header of the second logical eraseblock
*
* This function compares two copies of a LEB and reports which one is newer. In
* case of success this function returns a positive value, in case of failure, a
* negative error code is returned. The success return codes use the following
* bits:
* o bit 0 is cleared: the first PEB (described by @aeb) is newer than the
* second PEB (described by @pnum and @vid_hdr);
* o bit 0 is set: the second PEB is newer;
* o bit 1 is cleared: no bit-flips were detected in the newer LEB;
* o bit 1 is set: bit-flips were detected in the newer LEB;
* o bit 2 is cleared: the older LEB is not corrupted;
* o bit 2 is set: the older LEB is corrupted.
*/
int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
int pnum, const struct ubi_vid_hdr *vid_hdr)
{
int len, err, second_is_newer, bitflips = 0, corrupted = 0;
uint32_t data_crc, crc;
struct ubi_vid_io_buf *vidb = NULL;
unsigned long long sqnum2 = be64_to_cpu(vid_hdr->sqnum);
if (sqnum2 == aeb->sqnum) {
/*
* This must be a really ancient UBI image which has been
* created before sequence number support was added. At
* that time we used 32-bit LEB versions stored in logical
* eraseblocks. That was before UBI got into mainline. We do not
* support these images anymore. Well, those images still work,
* but only if no unclean reboots happened.
*/
ubi_err(ubi, "unsupported on-flash UBI format");
return -EINVAL;
}
/* Obviously the LEB with the lower sequence number is older */
second_is_newer = (sqnum2 > aeb->sqnum);
/*
* Now we know which copy is newer. If the copy flag of the PEB with
* newer version is not set, then we just return, otherwise we have to
* check data CRC. For the second PEB we already have the VID header,
* for the first one - we'll need to re-read it from flash.
*
* Note: this may be optimized so that we wouldn't read twice.
*/
if (second_is_newer) {
if (!vid_hdr->copy_flag) {
/* It is not a copy, so it is newer */
dbg_bld("second PEB %d is newer, copy_flag is unset",
pnum);
return 1;
}
} else {
if (!aeb->copy_flag) {
/* It is not a copy, so it is newer */
dbg_bld("first PEB %d is newer, copy_flag is unset",
pnum);
return bitflips << 1;
}
vidb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
if (!vidb)
return -ENOMEM;
pnum = aeb->pnum;
err = ubi_io_read_vid_hdr(ubi, pnum, vidb, 0);
if (err) {
if (err == UBI_IO_BITFLIPS)
bitflips = 1;
else {
ubi_err(ubi, "VID of PEB %d header is bad, but it was OK earlier, err %d",
pnum, err);
if (err > 0)
err = -EIO;
goto out_free_vidh;
}
}
vid_hdr = ubi_get_vid_hdr(vidb);
}
/* Read the data of the copy and check the CRC */
len = be32_to_cpu(vid_hdr->data_size);
mutex_lock(&ubi->buf_mutex);
err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, len);
if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
goto out_unlock;
data_crc = be32_to_cpu(vid_hdr->data_crc);
crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, len);
if (crc != data_crc) {
dbg_bld("PEB %d CRC error: calculated %#08x, must be %#08x",
pnum, crc, data_crc);
corrupted = 1;
bitflips = 0;
second_is_newer = !second_is_newer;
} else {
dbg_bld("PEB %d CRC is OK", pnum);
bitflips |= !!err;
}
mutex_unlock(&ubi->buf_mutex);
ubi_free_vid_buf(vidb);
if (second_is_newer)
dbg_bld("second PEB %d is newer, copy_flag is set", pnum);
else
dbg_bld("first PEB %d is newer, copy_flag is set", pnum);
return second_is_newer | (bitflips << 1) | (corrupted << 2);
out_unlock:
mutex_unlock(&ubi->buf_mutex);
out_free_vidh:
ubi_free_vid_buf(vidb);
return err;
}
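/*
* Illustrative sketch, not part of the driver: a hypothetical helper showing
* how the bit-encoded return value of ubi_compare_lebs() is decoded by a
* caller (see ubi_add_to_av() below for the real usage).
*/
static inline void example_decode_cmp_res(int cmp_res, int *second_is_newer,
int *bitflips, int *corrupted)
{
*second_is_newer = cmp_res & 1; /* bit 0: the second PEB is newer */
*bitflips = !!(cmp_res & 2); /* bit 1: bit-flips in the newer LEB */
*corrupted = !!(cmp_res & 4); /* bit 2: the older LEB is corrupted */
}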
/**
* ubi_add_to_av - add used physical eraseblock to the attaching information.
* @ubi: UBI device description object
* @ai: attaching information
* @pnum: the physical eraseblock number
* @ec: erase counter
* @vid_hdr: the volume identifier header
* @bitflips: if bit-flips were detected when this physical eraseblock was read
*
* This function adds information about a used physical eraseblock to the
* 'used' tree of the corresponding volume. The function is rather complex
* because it has to handle cases when this is not the first physical
* eraseblock belonging to the same logical eraseblock, and the newer one has
* to be picked, while the older one has to be dropped. This function returns
* zero in case of success and a negative error code in case of failure.
*/
int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
int ec, const struct ubi_vid_hdr *vid_hdr, int bitflips)
{
int err, vol_id, lnum;
unsigned long long sqnum;
struct ubi_ainf_volume *av;
struct ubi_ainf_peb *aeb;
struct rb_node **p, *parent = NULL;
vol_id = be32_to_cpu(vid_hdr->vol_id);
lnum = be32_to_cpu(vid_hdr->lnum);
sqnum = be64_to_cpu(vid_hdr->sqnum);
dbg_bld("PEB %d, LEB %d:%d, EC %d, sqnum %llu, bitflips %d",
pnum, vol_id, lnum, ec, sqnum, bitflips);
av = add_volume(ai, vol_id, pnum, vid_hdr);
if (IS_ERR(av))
return PTR_ERR(av);
if (ai->max_sqnum < sqnum)
ai->max_sqnum = sqnum;
/*
* Walk the RB-tree of logical eraseblocks of volume @vol_id to look
* if this is the first instance of this logical eraseblock or not.
*/
p = &av->root.rb_node;
while (*p) {
int cmp_res;
parent = *p;
aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
if (lnum != aeb->lnum) {
if (lnum < aeb->lnum)
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
continue;
}
/*
* A physical eraseblock describing the same logical
* eraseblock is already present.
*/
dbg_bld("this LEB already exists: PEB %d, sqnum %llu, EC %d",
aeb->pnum, aeb->sqnum, aeb->ec);
/*
* Make sure that the logical eraseblocks have different
* sequence numbers. Otherwise the image is bad.
*
* However, if the sequence number is zero, we assume it must
* be an ancient UBI image from the era when UBI did not have
* sequence numbers. We still can attach these images, unless
* there is a need to distinguish between old and new
* eraseblocks, in which case we'll refuse the image in
* 'ubi_compare_lebs()'. In other words, we attach old clean
* images, but refuse attaching old images with duplicated
* logical eraseblocks because there was an unclean reboot.
*/
if (aeb->sqnum == sqnum && sqnum != 0) {
ubi_err(ubi, "two LEBs with same sequence number %llu",
sqnum);
ubi_dump_aeb(aeb, 0);
ubi_dump_vid_hdr(vid_hdr);
return -EINVAL;
}
/*
* Now we have to drop the older one and preserve the newer
* one.
*/
cmp_res = ubi_compare_lebs(ubi, aeb, pnum, vid_hdr);
if (cmp_res < 0)
return cmp_res;
if (cmp_res & 1) {
/*
* This logical eraseblock is newer than the one
* found earlier.
*/
err = validate_vid_hdr(ubi, vid_hdr, av, pnum);
if (err)
return err;
err = add_to_list(ai, aeb->pnum, aeb->vol_id,
aeb->lnum, aeb->ec, cmp_res & 4,
&ai->erase);
if (err)
return err;
aeb->ec = ec;
aeb->pnum = pnum;
aeb->vol_id = vol_id;
aeb->lnum = lnum;
aeb->scrub = ((cmp_res & 2) || bitflips);
aeb->copy_flag = vid_hdr->copy_flag;
aeb->sqnum = sqnum;
if (av->highest_lnum == lnum)
av->last_data_size =
be32_to_cpu(vid_hdr->data_size);
return 0;
} else {
/*
* This logical eraseblock is older than the one found
* previously.
*/
return add_to_list(ai, pnum, vol_id, lnum, ec,
cmp_res & 4, &ai->erase);
}
}
/*
* We've met this logical eraseblock for the first time, add it to the
* attaching information.
*/
err = validate_vid_hdr(ubi, vid_hdr, av, pnum);
if (err)
return err;
aeb = ubi_alloc_aeb(ai, pnum, ec);
if (!aeb)
return -ENOMEM;
aeb->vol_id = vol_id;
aeb->lnum = lnum;
aeb->scrub = bitflips;
aeb->copy_flag = vid_hdr->copy_flag;
aeb->sqnum = sqnum;
if (av->highest_lnum <= lnum) {
av->highest_lnum = lnum;
av->last_data_size = be32_to_cpu(vid_hdr->data_size);
}
av->leb_count += 1;
rb_link_node(&aeb->u.rb, parent, p);
rb_insert_color(&aeb->u.rb, &av->root);
return 0;
}
/**
* ubi_add_av - add volume to the attaching information.
* @ai: attaching information
* @vol_id: the requested volume ID
*
* This function returns a pointer to the new volume description or an
* ERR_PTR if the operation failed.
*/
struct ubi_ainf_volume *ubi_add_av(struct ubi_attach_info *ai, int vol_id)
{
bool created;
return find_or_add_av(ai, vol_id, AV_ADD, &created);
}
/**
* ubi_find_av - find volume in the attaching information.
* @ai: attaching information
* @vol_id: the requested volume ID
*
* This function returns a pointer to the volume description or %NULL if there
* are no data about this volume in the attaching information.
*/
struct ubi_ainf_volume *ubi_find_av(const struct ubi_attach_info *ai,
int vol_id)
{
bool created;
return find_or_add_av((struct ubi_attach_info *)ai, vol_id, AV_FIND,
&created);
}
static void destroy_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av,
struct list_head *list);
/**
* ubi_remove_av - delete attaching information about a volume.
* @ai: attaching information
* @av: the volume attaching information to delete
*/
void ubi_remove_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av)
{
dbg_bld("remove attaching information about volume %d", av->vol_id);
rb_erase(&av->rb, &ai->volumes);
destroy_av(ai, av, &ai->erase);
ai->vols_found -= 1;
}
/**
* early_erase_peb - erase a physical eraseblock.
* @ubi: UBI device description object
* @ai: attaching information
* @pnum: physical eraseblock number to erase
* @ec: erase counter value to write (%UBI_UNKNOWN if it is unknown)
*
* This function erases physical eraseblock 'pnum', and writes the erase
* counter header to it. This function should only be used on the UBI device
* initialization stages, when the EBA sub-system has not been initialized yet.
* This function returns zero in case of success and a negative error code in
* case of failure.
*/
static int early_erase_peb(struct ubi_device *ubi,
const struct ubi_attach_info *ai, int pnum, int ec)
{
int err;
struct ubi_ec_hdr *ec_hdr;
if ((long long)ec >= UBI_MAX_ERASECOUNTER) {
/*
* Erase counter overflow. Upgrade UBI and use 64-bit
* erase counters internally.
*/
ubi_err(ubi, "erase counter overflow at PEB %d, EC %d",
pnum, ec);
return -EINVAL;
}
ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
if (!ec_hdr)
return -ENOMEM;
ec_hdr->ec = cpu_to_be64(ec);
err = ubi_io_sync_erase(ubi, pnum, 0);
if (err < 0)
goto out_free;
err = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
out_free:
kfree(ec_hdr);
return err;
}
/**
* ubi_early_get_peb - get a free physical eraseblock.
* @ubi: UBI device description object
* @ai: attaching information
*
* This function returns a free physical eraseblock. It is supposed to be
* called on the UBI initialization stages when the wear-leveling sub-system is
* not initialized yet. This function picks a physical eraseblock from one of
* the lists, writes the EC header if needed, and removes it from the
* list.
*
* This function returns a pointer to the "aeb" of the found free PEB in case
* of success and an error code in case of failure.
*/
struct ubi_ainf_peb *ubi_early_get_peb(struct ubi_device *ubi,
struct ubi_attach_info *ai)
{
int err = 0;
struct ubi_ainf_peb *aeb, *tmp_aeb;
if (!list_empty(&ai->free)) {
aeb = list_entry(ai->free.next, struct ubi_ainf_peb, u.list);
list_del(&aeb->u.list);
dbg_bld("return free PEB %d, EC %d", aeb->pnum, aeb->ec);
return aeb;
}
/*
* We try to erase the first physical eraseblock from the erase list
* and pick it if we succeed, or try to erase the next one if not. And
* so forth. We don't want to deal with bad eraseblocks here -
* they'll be handled later.
*/
list_for_each_entry_safe(aeb, tmp_aeb, &ai->erase, u.list) {
if (aeb->ec == UBI_UNKNOWN)
aeb->ec = ai->mean_ec;
err = early_erase_peb(ubi, ai, aeb->pnum, aeb->ec+1);
if (err)
continue;
aeb->ec += 1;
list_del(&aeb->u.list);
dbg_bld("return PEB %d, EC %d", aeb->pnum, aeb->ec);
return aeb;
}
ubi_err(ubi, "no free eraseblocks");
return ERR_PTR(-ENOSPC);
}
/**
* check_corruption - check the data area of PEB.
* @ubi: UBI device description object
* @vid_hdr: the (corrupted) VID header of this PEB
* @pnum: the physical eraseblock number to check
*
* This is a helper function which is used to distinguish between VID header
* corruptions caused by power cuts and other reasons. If the PEB contains only
* 0xFF bytes in the data area, the VID header is most probably corrupted
* because of a power cut (%0 is returned in this case). Otherwise, it was
* probably corrupted for some other reasons (%1 is returned in this case). A
* negative error code is returned if a read error occurred.
*
* If the corruption reason was a power cut, UBI can safely erase this PEB.
* Otherwise, it should preserve it to avoid possibly destroying important
* information.
*/
static int check_corruption(struct ubi_device *ubi, struct ubi_vid_hdr *vid_hdr,
int pnum)
{
int err;
mutex_lock(&ubi->buf_mutex);
memset(ubi->peb_buf, 0x00, ubi->leb_size);
err = ubi_io_read(ubi, ubi->peb_buf, pnum, ubi->leb_start,
ubi->leb_size);
if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err)) {
/*
* Bit-flips or integrity errors while reading the data area.
* It is difficult to say for sure what type of corruption
* this is, but presumably a power cut happened while this PEB was
* erased, so it became unstable and corrupted, and should be
* erased.
*/
err = 0;
goto out_unlock;
}
if (err)
goto out_unlock;
if (ubi_check_pattern(ubi->peb_buf, 0xFF, ubi->leb_size))
goto out_unlock;
ubi_err(ubi, "PEB %d contains corrupted VID header, and the data does not contain all 0xFF",
pnum);
ubi_err(ubi, "this may be a non-UBI PEB or a severe VID header corruption which requires manual inspection");
ubi_dump_vid_hdr(vid_hdr);
pr_err("hexdump of PEB %d offset %d, length %d",
pnum, ubi->leb_start, ubi->leb_size);
ubi_dbg_print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
ubi->peb_buf, ubi->leb_size, 1);
err = 1;
out_unlock:
mutex_unlock(&ubi->buf_mutex);
return err;
}
static bool vol_ignored(int vol_id)
{
switch (vol_id) {
case UBI_LAYOUT_VOLUME_ID:
return true;
}
#ifdef CONFIG_MTD_UBI_FASTMAP
return ubi_is_fm_vol(vol_id);
#else
return false;
#endif
}
/**
* scan_peb - scan and process UBI headers of a PEB.
* @ubi: UBI device description object
* @ai: attaching information
* @pnum: the physical eraseblock number
* @fast: true if we're scanning for a Fastmap
*
* This function reads UBI headers of PEB @pnum, checks them, and adds
* information about this PEB to the corresponding list or RB-tree in the
* "attaching info" structure. Returns zero if the physical eraseblock was
* successfully handled and a negative error code in case of failure.
*/
static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
int pnum, bool fast)
{
struct ubi_ec_hdr *ech = ai->ech;
struct ubi_vid_io_buf *vidb = ai->vidb;
struct ubi_vid_hdr *vidh = ubi_get_vid_hdr(vidb);
long long ec;
int err, bitflips = 0, vol_id = -1, ec_err = 0;
dbg_bld("scan PEB %d", pnum);
/* Skip bad physical eraseblocks */
err = ubi_io_is_bad(ubi, pnum);
if (err < 0)
return err;
else if (err) {
ai->bad_peb_count += 1;
return 0;
}
err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
if (err < 0)
return err;
switch (err) {
case 0:
break;
case UBI_IO_BITFLIPS:
bitflips = 1;
break;
case UBI_IO_FF:
ai->empty_peb_count += 1;
return add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN,
UBI_UNKNOWN, 0, &ai->erase);
case UBI_IO_FF_BITFLIPS:
ai->empty_peb_count += 1;
return add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN,
UBI_UNKNOWN, 1, &ai->erase);
case UBI_IO_BAD_HDR_EBADMSG:
case UBI_IO_BAD_HDR:
/*
* We also have to look at the VID header, as it may not be
* corrupted. Set the %bitflips flag in order to make this PEB be
* moved and its EC header be re-created.
*/
ec_err = err;
ec = UBI_UNKNOWN;
bitflips = 1;
break;
default:
ubi_err(ubi, "'ubi_io_read_ec_hdr()' returned unknown code %d",
err);
return -EINVAL;
}
if (!ec_err) {
int image_seq;
/* Make sure UBI version is OK */
if (ech->version != UBI_VERSION) {
ubi_err(ubi, "this UBI version is %d, image version is %d",
UBI_VERSION, (int)ech->version);
return -EINVAL;
}
ec = be64_to_cpu(ech->ec);
if (ec > UBI_MAX_ERASECOUNTER) {
/*
* Erase counter overflow. The EC headers have 64 bits
* reserved, but we only make use of 31-bit values
* anyway, as this seems to be enough for any existing
* flash. Upgrade UBI and use 64-bit erase counters
* internally.
*/
ubi_err(ubi, "erase counter overflow, max is %d",
UBI_MAX_ERASECOUNTER);
ubi_dump_ec_hdr(ech);
return -EINVAL;
}
/*
* Make sure that all PEBs have the same image sequence number.
* This allows us to detect situations when users flash UBI
* images incorrectly, so that the flash has the new UBI image
* and leftovers from the old one. This feature was added
* relatively recently, and the sequence number was always
* zero, because old UBI implementations always set it to zero.
* For this reason, we do not panic if some PEBs have zero
* sequence number, while other PEBs have non-zero sequence
* number.
*/
image_seq = be32_to_cpu(ech->image_seq);
if (!ubi->image_seq)
ubi->image_seq = image_seq;
if (image_seq && ubi->image_seq != image_seq) {
ubi_err(ubi, "bad image sequence number %d in PEB %d, expected %d",
image_seq, pnum, ubi->image_seq);
ubi_dump_ec_hdr(ech);
return -EINVAL;
}
}
/* OK, we're done with the EC header, let's look at the VID header */
err = ubi_io_read_vid_hdr(ubi, pnum, vidb, 0);
if (err < 0)
return err;
switch (err) {
case 0:
break;
case UBI_IO_BITFLIPS:
bitflips = 1;
break;
case UBI_IO_BAD_HDR_EBADMSG:
if (ec_err == UBI_IO_BAD_HDR_EBADMSG)
/*
* Both EC and VID headers are corrupted and were read
* with a data integrity error; probably this is a bad
* PEB, but it is not marked as bad yet. This may also
* be the result of a power cut during erasure.
*/
ai->maybe_bad_peb_count += 1;
fallthrough;
case UBI_IO_BAD_HDR:
/*
* If we're facing a bad VID header we have to drop *all*
* Fastmap data structures we find. The most recent Fastmap
* could be bad and therefore there is a chance that we attach
* from an old one. On a sane MTD stack a PEB should not go
* bad all of a sudden, but the reality is different.
* So, let's be paranoid and help find the root cause by
* falling back to scanning mode instead of attaching with a
* bad EBA table and causing data corruption which is hard to
* analyze.
*/
if (fast)
ai->force_full_scan = 1;
if (ec_err)
/*
* Both headers are corrupted. There is a possibility
* that this is a valid UBI PEB which has a corresponding
* LEB, but the headers are corrupted. However, it is
* impossible to distinguish it from a PEB which just
* contains garbage because of a power cut during an erase
* operation. So we just schedule this PEB for erasure.
*
* Besides, in case of NOR flash, we deliberately
* corrupt both headers because NOR flash erasure is
* slow and can start from the end.
*/
err = 0;
else
/*
* The EC was OK, but the VID header is corrupted. We
* have to check what is in the data area.
*/
err = check_corruption(ubi, vidh, pnum);
if (err < 0)
return err;
else if (!err)
/* This corruption is caused by a power cut */
err = add_to_list(ai, pnum, UBI_UNKNOWN,
UBI_UNKNOWN, ec, 1, &ai->erase);
else
/* This is an unexpected corruption */
err = add_corrupted(ai, pnum, ec);
if (err)
return err;
goto adjust_mean_ec;
case UBI_IO_FF_BITFLIPS:
err = add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN,
ec, 1, &ai->erase);
if (err)
return err;
goto adjust_mean_ec;
case UBI_IO_FF:
if (ec_err || bitflips)
err = add_to_list(ai, pnum, UBI_UNKNOWN,
UBI_UNKNOWN, ec, 1, &ai->erase);
else
err = add_to_list(ai, pnum, UBI_UNKNOWN,
UBI_UNKNOWN, ec, 0, &ai->free);
if (err)
return err;
goto adjust_mean_ec;
default:
ubi_err(ubi, "'ubi_io_read_vid_hdr()' returned unknown code %d",
err);
return -EINVAL;
}
vol_id = be32_to_cpu(vidh->vol_id);
if (vol_id > UBI_MAX_VOLUMES && !vol_ignored(vol_id)) {
int lnum = be32_to_cpu(vidh->lnum);
/* Unsupported internal volume */
switch (vidh->compat) {
case UBI_COMPAT_DELETE:
ubi_msg(ubi, "\"delete\" compatible internal volume %d:%d found, will remove it",
vol_id, lnum);
err = add_to_list(ai, pnum, vol_id, lnum,
ec, 1, &ai->erase);
if (err)
return err;
return 0;
case UBI_COMPAT_RO:
ubi_msg(ubi, "read-only compatible internal volume %d:%d found, switch to read-only mode",
vol_id, lnum);
ubi->ro_mode = 1;
break;
case UBI_COMPAT_PRESERVE:
ubi_msg(ubi, "\"preserve\" compatible internal volume %d:%d found",
vol_id, lnum);
err = add_to_list(ai, pnum, vol_id, lnum,
ec, 0, &ai->alien);
if (err)
return err;
return 0;
case UBI_COMPAT_REJECT:
ubi_err(ubi, "incompatible internal volume %d:%d found",
vol_id, lnum);
return -EINVAL;
}
}
if (ec_err)
ubi_warn(ubi, "valid VID header but corrupted EC header at PEB %d",
pnum);
if (ubi_is_fm_vol(vol_id))
err = add_fastmap(ai, pnum, vidh, ec);
else
err = ubi_add_to_av(ubi, ai, pnum, ec, vidh, bitflips);
if (err)
return err;
adjust_mean_ec:
if (!ec_err) {
ai->ec_sum += ec;
ai->ec_count += 1;
if (ec > ai->max_ec)
ai->max_ec = ec;
if (ec < ai->min_ec)
ai->min_ec = ec;
}
return 0;
}
/**
* late_analysis - analyze the overall situation with PEB.
* @ubi: UBI device description object
* @ai: attaching information
*
* This is a helper function which takes a look at what PEBs we have after we
* gather information about all of them ("ai" is complete). It decides whether
* the flash is empty and should be formatted, or whether there are too many
* corrupted PEBs and we should not attach this MTD device. Returns zero if we
* should proceed with attaching the MTD device, and %-EINVAL if we should not.
*/
static int late_analysis(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
struct ubi_ainf_peb *aeb;
int max_corr, peb_count;
peb_count = ubi->peb_count - ai->bad_peb_count - ai->alien_peb_count;
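/*
* Allow up to ~5% corrupted PEBs; the GNU "?:" operator falls back to 8
* when peb_count / 20 evaluates to zero (fewer than 20 usable PEBs).
*/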
max_corr = peb_count / 20 ?: 8;
/*
* A few corrupted PEBs are not a problem and may just be a result of
* unclean reboots. However, many of them may indicate problems
* with the flash HW or the driver.
*/
if (ai->corr_peb_count) {
ubi_err(ubi, "%d PEBs are corrupted and preserved",
ai->corr_peb_count);
pr_err("Corrupted PEBs are:");
list_for_each_entry(aeb, &ai->corr, u.list)
pr_cont(" %d", aeb->pnum);
pr_cont("\n");
/*
* If too many PEBs are corrupted, we refuse attaching,
* otherwise, only print a warning.
*/
if (ai->corr_peb_count >= max_corr) {
ubi_err(ubi, "too many corrupted PEBs, refusing");
return -EINVAL;
}
}
if (ai->empty_peb_count + ai->maybe_bad_peb_count == peb_count) {
/*
* All PEBs are empty, or almost all - a couple of PEBs look like
* they may be bad PEBs which were not marked as bad yet.
*
* This piece of code basically tries to distinguish between
* the following situations:
*
* 1. Flash is empty, but there are a few bad PEBs, which are not
* marked as bad so far, and which were read with error. We
* want to go ahead and format this flash. While formatting,
* the faulty PEBs will probably be marked as bad.
*
* 2. Flash contains non-UBI data and we do not want to format
* it and destroy possibly important information.
*/
if (ai->maybe_bad_peb_count <= 2) {
ai->is_empty = 1;
ubi_msg(ubi, "empty MTD device detected");
get_random_bytes(&ubi->image_seq,
sizeof(ubi->image_seq));
} else {
ubi_err(ubi, "MTD device is not UBI-formatted and possibly contains non-UBI data - refusing it");
return -EINVAL;
}
}
return 0;
}
/**
* destroy_av - free volume attaching information.
* @ai: attaching information
* @av: volume attaching information
* @list: put the aeb elements in there if !NULL, otherwise free them
*
* This function destroys the volume attaching information.
*/
static void destroy_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av,
struct list_head *list)
{
struct ubi_ainf_peb *aeb;
struct rb_node *this = av->root.rb_node;
while (this) {
if (this->rb_left)
this = this->rb_left;
else if (this->rb_right)
this = this->rb_right;
else {
aeb = rb_entry(this, struct ubi_ainf_peb, u.rb);
this = rb_parent(this);
if (this) {
if (this->rb_left == &aeb->u.rb)
this->rb_left = NULL;
else
this->rb_right = NULL;
}
if (list)
list_add_tail(&aeb->u.list, list);
else
ubi_free_aeb(ai, aeb);
}
}
kfree(av);
}
/**
* destroy_ai - destroy attaching information.
* @ai: attaching information
*/
static void destroy_ai(struct ubi_attach_info *ai)
{
struct ubi_ainf_peb *aeb, *aeb_tmp;
struct ubi_ainf_volume *av;
struct rb_node *rb;
list_for_each_entry_safe(aeb, aeb_tmp, &ai->alien, u.list) {
list_del(&aeb->u.list);
ubi_free_aeb(ai, aeb);
}
list_for_each_entry_safe(aeb, aeb_tmp, &ai->erase, u.list) {
list_del(&aeb->u.list);
ubi_free_aeb(ai, aeb);
}
list_for_each_entry_safe(aeb, aeb_tmp, &ai->corr, u.list) {
list_del(&aeb->u.list);
ubi_free_aeb(ai, aeb);
}
list_for_each_entry_safe(aeb, aeb_tmp, &ai->free, u.list) {
list_del(&aeb->u.list);
ubi_free_aeb(ai, aeb);
}
list_for_each_entry_safe(aeb, aeb_tmp, &ai->fastmap, u.list) {
list_del(&aeb->u.list);
ubi_free_aeb(ai, aeb);
}
/* Destroy the volume RB-tree */
rb = ai->volumes.rb_node;
while (rb) {
if (rb->rb_left)
rb = rb->rb_left;
else if (rb->rb_right)
rb = rb->rb_right;
else {
av = rb_entry(rb, struct ubi_ainf_volume, rb);
rb = rb_parent(rb);
if (rb) {
if (rb->rb_left == &av->rb)
rb->rb_left = NULL;
else
rb->rb_right = NULL;
}
destroy_av(ai, av, NULL);
}
}
kmem_cache_destroy(ai->aeb_slab_cache);
kfree(ai);
}
/**
* scan_all - scan entire MTD device.
* @ubi: UBI device description object
* @ai: attach info object
* @start: start scanning at this PEB
*
* This function does full scanning of an MTD device and returns complete
* information about it in the form of a "struct ubi_attach_info" object. In case
* of failure, an error code is returned.
*/
static int scan_all(struct ubi_device *ubi, struct ubi_attach_info *ai,
int start)
{
int err, pnum;
struct rb_node *rb1, *rb2;
struct ubi_ainf_volume *av;
struct ubi_ainf_peb *aeb;
err = -ENOMEM;
ai->ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
if (!ai->ech)
return err;
ai->vidb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
if (!ai->vidb)
goto out_ech;
for (pnum = start; pnum < ubi->peb_count; pnum++) {
cond_resched();
dbg_gen("process PEB %d", pnum);
err = scan_peb(ubi, ai, pnum, false);
if (err < 0)
goto out_vidh;
}
ubi_msg(ubi, "scanning is finished");
/* Calculate mean erase counter */
if (ai->ec_count)
ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
err = late_analysis(ubi, ai);
if (err)
goto out_vidh;
/*
* In case of unknown erase counter we use the mean erase counter
* value.
*/
ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
if (aeb->ec == UBI_UNKNOWN)
aeb->ec = ai->mean_ec;
}
list_for_each_entry(aeb, &ai->free, u.list) {
if (aeb->ec == UBI_UNKNOWN)
aeb->ec = ai->mean_ec;
}
list_for_each_entry(aeb, &ai->corr, u.list)
if (aeb->ec == UBI_UNKNOWN)
aeb->ec = ai->mean_ec;
list_for_each_entry(aeb, &ai->erase, u.list)
if (aeb->ec == UBI_UNKNOWN)
aeb->ec = ai->mean_ec;
err = self_check_ai(ubi, ai);
if (err)
goto out_vidh;
ubi_free_vid_buf(ai->vidb);
kfree(ai->ech);
return 0;
out_vidh:
ubi_free_vid_buf(ai->vidb);
out_ech:
kfree(ai->ech);
return err;
}
static struct ubi_attach_info *alloc_ai(const char *slab_name)
{
struct ubi_attach_info *ai;
ai = kzalloc(sizeof(struct ubi_attach_info), GFP_KERNEL);
if (!ai)
return ai;
INIT_LIST_HEAD(&ai->corr);
INIT_LIST_HEAD(&ai->free);
INIT_LIST_HEAD(&ai->erase);
INIT_LIST_HEAD(&ai->alien);
INIT_LIST_HEAD(&ai->fastmap);
ai->volumes = RB_ROOT;
ai->aeb_slab_cache = kmem_cache_create(slab_name,
sizeof(struct ubi_ainf_peb),
0, 0, NULL);
if (!ai->aeb_slab_cache) {
kfree(ai);
ai = NULL;
}
return ai;
}
#ifdef CONFIG_MTD_UBI_FASTMAP
/**
* scan_fast - try to find a fastmap and attach from it.
* @ubi: UBI device description object
* @ai: attach info object
*
* Returns 0 on success, negative return values indicate an internal
* error.
* UBI_NO_FASTMAP denotes that no fastmap was found.
* UBI_BAD_FASTMAP denotes that the found fastmap was invalid.
*/
static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info **ai)
{
int err, pnum;
struct ubi_attach_info *scan_ai;
err = -ENOMEM;
scan_ai = alloc_ai("ubi_aeb_slab_cache_fastmap");
if (!scan_ai)
goto out;
scan_ai->ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
if (!scan_ai->ech)
goto out_ai;
scan_ai->vidb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
if (!scan_ai->vidb)
goto out_ech;
for (pnum = 0; pnum < UBI_FM_MAX_START; pnum++) {
cond_resched();
dbg_gen("process PEB %d", pnum);
err = scan_peb(ubi, scan_ai, pnum, true);
if (err < 0)
goto out_vidh;
}
ubi_free_vid_buf(scan_ai->vidb);
kfree(scan_ai->ech);
if (scan_ai->force_full_scan)
err = UBI_NO_FASTMAP;
else
err = ubi_scan_fastmap(ubi, *ai, scan_ai);
if (err) {
/*
* Didn't attach via fastmap, do a full scan but reuse what
* we've already scanned.
*/
destroy_ai(*ai);
*ai = scan_ai;
} else
destroy_ai(scan_ai);
return err;
out_vidh:
ubi_free_vid_buf(scan_ai->vidb);
out_ech:
kfree(scan_ai->ech);
out_ai:
destroy_ai(scan_ai);
out:
return err;
}
#endif
/**
* ubi_attach - attach an MTD device.
* @ubi: UBI device descriptor
* @force_scan: if set to non-zero attach by scanning
*
* This function returns zero in case of success and a negative error code in
* case of failure.
*/
int ubi_attach(struct ubi_device *ubi, int force_scan)
{
int err;
struct ubi_attach_info *ai;
ai = alloc_ai("ubi_aeb_slab_cache");
if (!ai)
return -ENOMEM;
#ifdef CONFIG_MTD_UBI_FASTMAP
/* On small flash devices we disable fastmap in any case. */
if ((int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd) <= UBI_FM_MAX_START) {
ubi->fm_disabled = 1;
force_scan = 1;
}
if (force_scan)
err = scan_all(ubi, ai, 0);
else {
err = scan_fast(ubi, &ai);
if (err > 0 || mtd_is_eccerr(err)) {
if (err != UBI_NO_FASTMAP) {
destroy_ai(ai);
ai = alloc_ai("ubi_aeb_slab_cache");
if (!ai)
return -ENOMEM;
err = scan_all(ubi, ai, 0);
} else {
err = scan_all(ubi, ai, UBI_FM_MAX_START);
}
}
}
#else
err = scan_all(ubi, ai, 0);
#endif
if (err)
goto out_ai;
ubi->bad_peb_count = ai->bad_peb_count;
ubi->good_peb_count = ubi->peb_count - ubi->bad_peb_count;
ubi->corr_peb_count = ai->corr_peb_count;
ubi->max_ec = ai->max_ec;
ubi->mean_ec = ai->mean_ec;
dbg_gen("max. sequence number: %llu", ai->max_sqnum);
err = ubi_read_volume_table(ubi, ai);
if (err)
goto out_ai;
err = ubi_wl_init(ubi, ai);
if (err)
goto out_vtbl;
err = ubi_eba_init(ubi, ai);
if (err)
goto out_wl;
#ifdef CONFIG_MTD_UBI_FASTMAP
if (ubi->fm && ubi_dbg_chk_fastmap(ubi)) {
struct ubi_attach_info *scan_ai;
scan_ai = alloc_ai("ubi_aeb_slab_cache_dbg_chk_fastmap");
if (!scan_ai) {
err = -ENOMEM;
goto out_wl;
}
err = scan_all(ubi, scan_ai, 0);
if (err) {
destroy_ai(scan_ai);
goto out_wl;
}
err = self_check_eba(ubi, ai, scan_ai);
destroy_ai(scan_ai);
if (err)
goto out_wl;
}
#endif
destroy_ai(ai);
return 0;
out_wl:
ubi_wl_close(ubi);
out_vtbl:
ubi_free_all_volumes(ubi);
vfree(ubi->vtbl);
out_ai:
destroy_ai(ai);
return err;
}
/**
* self_check_ai - check the attaching information.
* @ubi: UBI device description object
* @ai: attaching information
*
* This function returns zero if the attaching information is all right, and a
* negative error code if not or if an error occurred.
*/
static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
struct ubi_vid_io_buf *vidb = ai->vidb;
struct ubi_vid_hdr *vidh = ubi_get_vid_hdr(vidb);
int pnum, err, vols_found = 0;
struct rb_node *rb1, *rb2;
struct ubi_ainf_volume *av;
struct ubi_ainf_peb *aeb, *last_aeb;
uint8_t *buf;
if (!ubi_dbg_chk_gen(ubi))
return 0;
/*
* At first, check that attaching information is OK.
*/
ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
int leb_count = 0;
cond_resched();
vols_found += 1;
if (ai->is_empty) {
ubi_err(ubi, "bad is_empty flag");
goto bad_av;
}
if (av->vol_id < 0 || av->highest_lnum < 0 ||
av->leb_count < 0 || av->vol_type < 0 || av->used_ebs < 0 ||
av->data_pad < 0 || av->last_data_size < 0) {
ubi_err(ubi, "negative values");
goto bad_av;
}
if (av->vol_id >= UBI_MAX_VOLUMES &&
av->vol_id < UBI_INTERNAL_VOL_START) {
ubi_err(ubi, "bad vol_id");
goto bad_av;
}
if (av->vol_id > ai->highest_vol_id) {
ubi_err(ubi, "highest_vol_id is %d, but vol_id %d is there",
ai->highest_vol_id, av->vol_id);
goto out;
}
if (av->vol_type != UBI_DYNAMIC_VOLUME &&
av->vol_type != UBI_STATIC_VOLUME) {
ubi_err(ubi, "bad vol_type");
goto bad_av;
}
if (av->data_pad > ubi->leb_size / 2) {
ubi_err(ubi, "bad data_pad");
goto bad_av;
}
last_aeb = NULL;
ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
cond_resched();
last_aeb = aeb;
leb_count += 1;
if (aeb->pnum < 0 || aeb->ec < 0) {
ubi_err(ubi, "negative values");
goto bad_aeb;
}
if (aeb->ec < ai->min_ec) {
ubi_err(ubi, "bad ai->min_ec (%d), %d found",
ai->min_ec, aeb->ec);
goto bad_aeb;
}
if (aeb->ec > ai->max_ec) {
ubi_err(ubi, "bad ai->max_ec (%d), %d found",
ai->max_ec, aeb->ec);
goto bad_aeb;
}
if (aeb->pnum >= ubi->peb_count) {
ubi_err(ubi, "too high PEB number %d, total PEBs %d",
aeb->pnum, ubi->peb_count);
goto bad_aeb;
}
if (av->vol_type == UBI_STATIC_VOLUME) {
if (aeb->lnum >= av->used_ebs) {
ubi_err(ubi, "bad lnum or used_ebs");
goto bad_aeb;
}
} else {
if (av->used_ebs != 0) {
ubi_err(ubi, "non-zero used_ebs");
goto bad_aeb;
}
}
if (aeb->lnum > av->highest_lnum) {
ubi_err(ubi, "incorrect highest_lnum or lnum");
goto bad_aeb;
}
}
if (av->leb_count != leb_count) {
ubi_err(ubi, "bad leb_count, %d objects in the tree",
leb_count);
goto bad_av;
}
if (!last_aeb)
continue;
aeb = last_aeb;
if (aeb->lnum != av->highest_lnum) {
ubi_err(ubi, "bad highest_lnum");
goto bad_aeb;
}
}
if (vols_found != ai->vols_found) {
ubi_err(ubi, "bad ai->vols_found %d, should be %d",
ai->vols_found, vols_found);
goto out;
}
/* Check that attaching information is correct */
ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
last_aeb = NULL;
ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
int vol_type;
cond_resched();
last_aeb = aeb;
err = ubi_io_read_vid_hdr(ubi, aeb->pnum, vidb, 1);
if (err && err != UBI_IO_BITFLIPS) {
ubi_err(ubi, "VID header is not OK (%d)",
err);
if (err > 0)
err = -EIO;
return err;
}
vol_type = vidh->vol_type == UBI_VID_DYNAMIC ?
UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME;
if (av->vol_type != vol_type) {
ubi_err(ubi, "bad vol_type");
goto bad_vid_hdr;
}
if (aeb->sqnum != be64_to_cpu(vidh->sqnum)) {
ubi_err(ubi, "bad sqnum %llu", aeb->sqnum);
goto bad_vid_hdr;
}
if (av->vol_id != be32_to_cpu(vidh->vol_id)) {
ubi_err(ubi, "bad vol_id %d", av->vol_id);
goto bad_vid_hdr;
}
if (av->compat != vidh->compat) {
ubi_err(ubi, "bad compat %d", vidh->compat);
goto bad_vid_hdr;
}
if (aeb->lnum != be32_to_cpu(vidh->lnum)) {
ubi_err(ubi, "bad lnum %d", aeb->lnum);
goto bad_vid_hdr;
}
if (av->used_ebs != be32_to_cpu(vidh->used_ebs)) {
ubi_err(ubi, "bad used_ebs %d", av->used_ebs);
goto bad_vid_hdr;
}
if (av->data_pad != be32_to_cpu(vidh->data_pad)) {
ubi_err(ubi, "bad data_pad %d", av->data_pad);
goto bad_vid_hdr;
}
}
if (!last_aeb)
continue;
if (av->highest_lnum != be32_to_cpu(vidh->lnum)) {
ubi_err(ubi, "bad highest_lnum %d", av->highest_lnum);
goto bad_vid_hdr;
}
if (av->last_data_size != be32_to_cpu(vidh->data_size)) {
ubi_err(ubi, "bad last_data_size %d",
av->last_data_size);
goto bad_vid_hdr;
}
}
/*
* Make sure that all the physical eraseblocks are in one of the lists
* or trees.
*/
buf = kzalloc(ubi->peb_count, GFP_KERNEL);
if (!buf)
return -ENOMEM;
for (pnum = 0; pnum < ubi->peb_count; pnum++) {
err = ubi_io_is_bad(ubi, pnum);
if (err < 0) {
kfree(buf);
return err;
} else if (err)
buf[pnum] = 1;
}
ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
buf[aeb->pnum] = 1;
list_for_each_entry(aeb, &ai->free, u.list)
buf[aeb->pnum] = 1;
list_for_each_entry(aeb, &ai->corr, u.list)
buf[aeb->pnum] = 1;
list_for_each_entry(aeb, &ai->erase, u.list)
buf[aeb->pnum] = 1;
list_for_each_entry(aeb, &ai->alien, u.list)
buf[aeb->pnum] = 1;
err = 0;
for (pnum = 0; pnum < ubi->peb_count; pnum++)
if (!buf[pnum]) {
ubi_err(ubi, "PEB %d is not referred", pnum);
err = 1;
}
kfree(buf);
if (err)
goto out;
return 0;
bad_aeb:
ubi_err(ubi, "bad attaching information about LEB %d", aeb->lnum);
ubi_dump_aeb(aeb, 0);
ubi_dump_av(av);
goto out;
bad_av:
ubi_err(ubi, "bad attaching information about volume %d", av->vol_id);
ubi_dump_av(av);
goto out;
bad_vid_hdr:
ubi_err(ubi, "bad attaching information about volume %d", av->vol_id);
ubi_dump_av(av);
ubi_dump_vid_hdr(vidh);
out:
dump_stack();
return -EINVAL;
}
|
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_GENERIC_BITOPS_FFZ_H_
#define _ASM_GENERIC_BITOPS_FFZ_H_
/*
* ffz - find first zero in word.
* @word: The word to search
*
* Undefined if no zero exists, so code should check against ~0UL first.
*/
#define ffz(x) __ffs(~(x))
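/*
* Illustrative sketch, not part of this header: a hypothetical helper
* demonstrating the ~0UL guard described above.
*/
static inline long example_first_zero(unsigned long word)
{
if (word == ~0UL)
return -1; /* all bits set, no zero bit to find */
return ffz(word); /* e.g. ffz(0x7UL) == 3 */
}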
#endif /* _ASM_GENERIC_BITOPS_FFZ_H_ */
|
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/*
* Copyright 2020-2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
/*--------------------MES_MAP_PROCESS (PER DEBUG VMID)--------------------*/
#ifndef PM4_MES_MAP_PROCESS_PER_DEBUG_VMID_DEFINED
#define PM4_MES_MAP_PROCESS_PER_DEBUG_VMID_DEFINED
struct pm4_mes_map_process_aldebaran {
union {
union PM4_MES_TYPE_3_HEADER header; /* header */
uint32_t ordinal1;
};
union {
struct {
uint32_t pasid:16; /* 0 - 15 */
uint32_t single_memops:1; /* 16 */
uint32_t exec_cleaner_shader:1; /* 17 */
uint32_t debug_vmid:4; /* 18 - 21 */
uint32_t new_debug:1; /* 22 */
uint32_t tmz:1; /* 23 */
uint32_t diq_enable:1; /* 24 */
uint32_t process_quantum:7; /* 25 - 31 */
} bitfields2;
uint32_t ordinal2;
};
uint32_t vm_context_page_table_base_addr_lo32;
uint32_t vm_context_page_table_base_addr_hi32;
uint32_t sh_mem_bases;
uint32_t sh_mem_config;
uint32_t sq_shader_tba_lo;
uint32_t sq_shader_tba_hi;
uint32_t sq_shader_tma_lo;
uint32_t sq_shader_tma_hi;
uint32_t reserved6;
uint32_t gds_addr_lo;
uint32_t gds_addr_hi;
union {
struct {
uint32_t num_gws:7;
uint32_t sdma_enable:1;
uint32_t num_oac:4;
uint32_t gds_size_hi:4;
uint32_t gds_size:6;
uint32_t num_queues:10;
} bitfields14;
uint32_t ordinal14;
};
uint32_t spi_gdbg_per_vmid_cntl;
uint32_t tcp_watch_cntl[4];
uint32_t completion_signal_lo;
uint32_t completion_signal_hi;
};
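/*
* Illustrative note, not part of this header: the packet body is fixed-size,
* so a hypothetical packet builder can derive the PM4 dword count directly:
*
*	size_t ndw = sizeof(struct pm4_mes_map_process_aldebaran) /
*		     sizeof(uint32_t);
*/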
#endif
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2015-2016 Freescale Semiconductor, Inc.
* Copyright 2016-2018 NXP
*/
#include <dt-bindings/interrupt-controller/arm-gic.h>
/memreserve/ 0x80000000 0x00010000;
/ {
compatible = "fsl,s32v234";
interrupt-parent = <&gic>;
#address-cells = <2>;
#size-cells = <2>;
aliases {
serial0 = &uart0;
serial1 = &uart1;
};
cpus {
#address-cells = <2>;
#size-cells = <0>;
cpu0: cpu@0 {
device_type = "cpu";
compatible = "arm,cortex-a53";
reg = <0x0 0x0>;
enable-method = "spin-table";
cpu-release-addr = <0x0 0x80000000>;
next-level-cache = <&cluster0_l2_cache>;
};
cpu1: cpu@1 {
device_type = "cpu";
compatible = "arm,cortex-a53";
reg = <0x0 0x1>;
enable-method = "spin-table";
cpu-release-addr = <0x0 0x80000000>;
next-level-cache = <&cluster0_l2_cache>;
};
cpu2: cpu@100 {
device_type = "cpu";
compatible = "arm,cortex-a53";
reg = <0x0 0x100>;
enable-method = "spin-table";
cpu-release-addr = <0x0 0x80000000>;
next-level-cache = <&cluster1_l2_cache>;
};
cpu3: cpu@101 {
device_type = "cpu";
compatible = "arm,cortex-a53";
reg = <0x0 0x101>;
enable-method = "spin-table";
cpu-release-addr = <0x0 0x80000000>;
next-level-cache = <&cluster1_l2_cache>;
};
cluster0_l2_cache: l2-cache0 {
compatible = "cache";
cache-level = <2>;
cache-unified;
};
cluster1_l2_cache: l2-cache1 {
compatible = "cache";
cache-level = <2>;
cache-unified;
};
};
timer {
compatible = "arm,armv8-timer";
interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) |
IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) |
IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) |
IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) |
IRQ_TYPE_LEVEL_LOW)>;
/* clock-frequency might be modified by u-boot, depending on the
* chip version.
*/
clock-frequency = <10000000>;
};
gic: interrupt-controller@7d001000 {
compatible = "arm,cortex-a15-gic";
#interrupt-cells = <3>;
#address-cells = <0>;
interrupt-controller;
reg = <0 0x7d001000 0 0x1000>,
<0 0x7d002000 0 0x2000>,
<0 0x7d004000 0 0x2000>,
<0 0x7d006000 0 0x2000>;
interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(4) |
IRQ_TYPE_LEVEL_HIGH)>;
};
soc {
#address-cells = <2>;
#size-cells = <2>;
compatible = "simple-bus";
interrupt-parent = <&gic>;
ranges;
aips0: bus@40000000 {
compatible = "simple-bus";
#address-cells = <2>;
#size-cells = <2>;
interrupt-parent = <&gic>;
reg = <0x0 0x40000000 0x0 0x7d000>;
ranges;
uart0: serial@40053000 {
compatible = "fsl,s32v234-linflexuart";
reg = <0x0 0x40053000 0x0 0x1000>;
interrupts = <GIC_SPI 59 IRQ_TYPE_EDGE_RISING>;
status = "disabled";
};
};
aips1: bus@40080000 {
compatible = "simple-bus";
#address-cells = <2>;
#size-cells = <2>;
interrupt-parent = <&gic>;
reg = <0x0 0x40080000 0x0 0x70000>;
ranges;
uart1: serial@400bc000 {
compatible = "fsl,s32v234-linflexuart";
reg = <0x0 0x400bc000 0x0 0x1000>;
interrupts = <GIC_SPI 60 IRQ_TYPE_EDGE_RISING>;
status = "disabled";
};
};
};
};
|
/***********************license start***************
* Author: Cavium Networks
*
* Contact: [email protected]
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2008 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
/*
*
* Helper functions for common, but complicated tasks.
*
*/
#ifndef __CVMX_HELPER_H__
#define __CVMX_HELPER_H__
#include <asm/octeon/cvmx-config.h>
#include <asm/octeon/cvmx-fpa.h>
#include <asm/octeon/cvmx-wqe.h>
typedef enum {
CVMX_HELPER_INTERFACE_MODE_DISABLED,
CVMX_HELPER_INTERFACE_MODE_RGMII,
CVMX_HELPER_INTERFACE_MODE_GMII,
CVMX_HELPER_INTERFACE_MODE_SPI,
CVMX_HELPER_INTERFACE_MODE_PCIE,
CVMX_HELPER_INTERFACE_MODE_XAUI,
CVMX_HELPER_INTERFACE_MODE_SGMII,
CVMX_HELPER_INTERFACE_MODE_PICMG,
CVMX_HELPER_INTERFACE_MODE_NPI,
CVMX_HELPER_INTERFACE_MODE_LOOP,
} cvmx_helper_interface_mode_t;
union cvmx_helper_link_info {
uint64_t u64;
struct {
uint64_t reserved_20_63:44;
uint64_t link_up:1; /**< Is the physical link up? */
uint64_t full_duplex:1; /**< 1 if the link is full duplex */
uint64_t speed:18; /**< Speed of the link in Mbps */
} s;
};
#include <asm/octeon/cvmx-helper-errata.h>
#include <asm/octeon/cvmx-helper-loop.h>
#include <asm/octeon/cvmx-helper-npi.h>
#include <asm/octeon/cvmx-helper-rgmii.h>
#include <asm/octeon/cvmx-helper-sgmii.h>
#include <asm/octeon/cvmx-helper-spi.h>
#include <asm/octeon/cvmx-helper-util.h>
#include <asm/octeon/cvmx-helper-xaui.h>
/**
* This function enables the IPD and also enables the packet interfaces.
* The packet interfaces (RGMII and SPI) must be enabled after the
* IPD. This should be called by the user program after any additional
* IPD configuration changes are made if CVMX_HELPER_ENABLE_IPD
* is not set in the executive-config.h file.
*
* Returns 0 on success
* -1 on failure
*/
extern int cvmx_helper_ipd_and_packet_input_enable(void);
/**
* Initialize the PIP, IPD, and PKO hardware to support
* simple priority based queues for the ethernet ports. Each
* port is configured with a number of priority queues based
* on CVMX_PKO_QUEUES_PER_PORT_* where each queue is lower
* priority than the previous.
*
* Returns Zero on success, non-zero on failure
*/
extern int cvmx_helper_initialize_packet_io_global(void);
/**
* Returns the number of ports on the given interface.
* The interface must be initialized before the port count
* can be returned.
*
* @interface: Which interface to return port count for.
*
* Returns Port count for interface
* -1 for uninitialized interface
*/
extern int cvmx_helper_ports_on_interface(int interface);
/**
* Return the number of interfaces the chip has. Each interface
* may have multiple ports. Most chips support two interfaces,
* but the CNX0XX and CNX1XX are exceptions. These only support
* one interface.
*
* Returns Number of interfaces on chip
*/
extern int cvmx_helper_get_number_of_interfaces(void);
/**
* Get the operating mode of an interface. Depending on the Octeon
* chip and configuration, this function returns an enumeration
* of the type of packet I/O supported by an interface.
*
* @interface: Interface to probe
*
* Returns Mode of the interface. Unknown or unsupported interfaces return
* DISABLED.
*/
extern cvmx_helper_interface_mode_t cvmx_helper_interface_get_mode(int
interface);
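/*
* Illustrative sketch, not part of this header: enumerating interfaces and
* their operating modes once packet I/O has been initialized. The my_log()
* helper is hypothetical.
*
*	int i;
*
*	for (i = 0; i < cvmx_helper_get_number_of_interfaces(); i++) {
*		cvmx_helper_interface_mode_t mode =
*			cvmx_helper_interface_get_mode(i);
*
*		if (mode == CVMX_HELPER_INTERFACE_MODE_DISABLED)
*			continue;
*		my_log("interface %d: %d ports\n", i,
*		       cvmx_helper_ports_on_interface(i));
*	}
*/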
/**
* Return the link state of an IPD/PKO port as returned by
* auto negotiation. The result of this function may not match
* Octeon's link config if auto negotiation has changed since
* the last call to cvmx_helper_link_set().
*
* @ipd_port: IPD/PKO port to query
*
* Returns Link state
*/
extern union cvmx_helper_link_info cvmx_helper_link_get(int ipd_port);
/**
* Configure an IPD/PKO port for the specified link state. This
* function does not influence auto negotiation at the PHY level.
* The passed link state must always match the link state returned
* by cvmx_helper_link_get().
*
* @ipd_port: IPD/PKO port to configure
* @link_info: The new link state
*
* Returns Zero on success, negative on failure
*/
extern int cvmx_helper_link_set(int ipd_port,
union cvmx_helper_link_info link_info);
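/*
* Illustrative sketch, not part of this header: since the state passed to
* cvmx_helper_link_set() must come from cvmx_helper_link_get(), a periodic
* link poller may look like this:
*
*	union cvmx_helper_link_info link = cvmx_helper_link_get(ipd_port);
*
*	if (link.s.link_up)
*		cvmx_helper_link_set(ipd_port, link);
*/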
/**
* This function probes an interface to determine the actual
* number of hardware ports connected to it. It doesn't set up the
* ports or enable them. The main goal here is to set the global
* interface_port_count[interface] correctly. Hardware setup of the
* ports will be performed later.
*
* @interface: Interface to probe
*
* Returns Zero on success, negative on failure
*/
extern int cvmx_helper_interface_probe(int interface);
extern int cvmx_helper_interface_enumerate(int interface);
#endif /* __CVMX_HELPER_H__ */
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2020 Renesas Electronics Corp.
*/
#ifndef __DT_BINDINGS_POWER_R8A779A0_SYSC_H__
#define __DT_BINDINGS_POWER_R8A779A0_SYSC_H__
/*
* These power domain indices match the Power Domain Register Numbers (PDR)
*/
#define R8A779A0_PD_A1E0D0C0 0
#define R8A779A0_PD_A1E0D0C1 1
#define R8A779A0_PD_A1E0D1C0 2
#define R8A779A0_PD_A1E0D1C1 3
#define R8A779A0_PD_A1E1D0C0 4
#define R8A779A0_PD_A1E1D0C1 5
#define R8A779A0_PD_A1E1D1C0 6
#define R8A779A0_PD_A1E1D1C1 7
#define R8A779A0_PD_A2E0D0 16
#define R8A779A0_PD_A2E0D1 17
#define R8A779A0_PD_A2E1D0 18
#define R8A779A0_PD_A2E1D1 19
#define R8A779A0_PD_A3E0 20
#define R8A779A0_PD_A3E1 21
#define R8A779A0_PD_3DG_A 24
#define R8A779A0_PD_3DG_B 25
#define R8A779A0_PD_A1CNN2 32
#define R8A779A0_PD_A1DSP0 33
#define R8A779A0_PD_A2IMP01 34
#define R8A779A0_PD_A2DP0 35
#define R8A779A0_PD_A2CV0 36
#define R8A779A0_PD_A2CV1 37
#define R8A779A0_PD_A2CV4 38
#define R8A779A0_PD_A2CV6 39
#define R8A779A0_PD_A2CN2 40
#define R8A779A0_PD_A1CNN0 41
#define R8A779A0_PD_A2CN0 42
#define R8A779A0_PD_A3IR 43
#define R8A779A0_PD_A1CNN1 44
#define R8A779A0_PD_A1DSP1 45
#define R8A779A0_PD_A2IMP23 46
#define R8A779A0_PD_A2DP1 47
#define R8A779A0_PD_A2CV2 48
#define R8A779A0_PD_A2CV3 49
#define R8A779A0_PD_A2CV5 50
#define R8A779A0_PD_A2CV7 51
#define R8A779A0_PD_A2CN1 52
#define R8A779A0_PD_A3VIP0 56
#define R8A779A0_PD_A3VIP1 57
#define R8A779A0_PD_A3VIP2 58
#define R8A779A0_PD_A3VIP3 59
#define R8A779A0_PD_A3ISP01 60
#define R8A779A0_PD_A3ISP23 61
/* Always-on power area */
#define R8A779A0_PD_ALWAYS_ON 64
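/*
* Illustrative sketch, not part of this header: a consumer device node
* references one of these indices through the "power-domains" property,
* e.g. (hypothetical node):
*
*	power-domains = <&sysc R8A779A0_PD_A3VIP0>;
*/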
#endif /* __DT_BINDINGS_POWER_R8A779A0_SYSC_H__ */
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2005 - 2016 Broadcom
* All rights reserved.
*
* Contact Information:
* [email protected]
*
* Emulex
* 3333 Susan Street
* Costa Mesa, CA 92626
*/
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
static struct ocrdma_driver *ocrdma_drv;
static LIST_HEAD(be_adapter_list);
static DEFINE_MUTEX(be_adapter_list_lock);
static void _be_roce_dev_add(struct be_adapter *adapter)
{
struct be_dev_info dev_info;
int i, num_vec;
struct pci_dev *pdev = adapter->pdev;
if (!ocrdma_drv)
return;
if (ocrdma_drv->be_abi_version != BE_ROCE_ABI_VERSION) {
dev_warn(&pdev->dev, "Cannot initialize RoCE due to ocrdma ABI mismatch\n");
return;
}
if (pdev->device == OC_DEVICE_ID5) {
/* only msix is supported on these devices */
if (!msix_enabled(adapter))
return;
/* DPP region address and length */
dev_info.dpp_unmapped_addr = pci_resource_start(pdev, 2);
dev_info.dpp_unmapped_len = pci_resource_len(pdev, 2);
} else {
dev_info.dpp_unmapped_addr = 0;
dev_info.dpp_unmapped_len = 0;
}
dev_info.pdev = adapter->pdev;
dev_info.db = adapter->db;
dev_info.unmapped_db = adapter->roce_db.io_addr;
dev_info.db_page_size = adapter->roce_db.size;
dev_info.db_total_size = adapter->roce_db.total_size;
dev_info.netdev = adapter->netdev;
memcpy(dev_info.mac_addr, adapter->netdev->dev_addr, ETH_ALEN);
dev_info.dev_family = adapter->sli_family;
if (msix_enabled(adapter)) {
/* provide all the vectors, so that EQ creation response
* can decide which one to use.
*/
num_vec = adapter->num_msix_vec + adapter->num_msix_roce_vec;
dev_info.intr_mode = BE_INTERRUPT_MODE_MSIX;
dev_info.msix.num_vectors = min(num_vec, MAX_MSIX_VECTORS);
/* provide the start index of the vector,
* so in case of linear usage,
* it can use the base as the starting point.
*/
dev_info.msix.start_vector = adapter->num_evt_qs;
for (i = 0; i < dev_info.msix.num_vectors; i++) {
dev_info.msix.vector_list[i] =
adapter->msix_entries[i].vector;
}
} else {
dev_info.msix.num_vectors = 0;
dev_info.intr_mode = BE_INTERRUPT_MODE_INTX;
}
adapter->ocrdma_dev = ocrdma_drv->add(&dev_info);
}
void be_roce_dev_add(struct be_adapter *adapter)
{
if (be_roce_supported(adapter)) {
INIT_LIST_HEAD(&adapter->entry);
mutex_lock(&be_adapter_list_lock);
list_add_tail(&adapter->entry, &be_adapter_list);
/* invoke the add() routine of the RoCE driver only if a valid
* driver is registered with an add method and add() has not
* yet been invoked on the given adapter.
*/
_be_roce_dev_add(adapter);
mutex_unlock(&be_adapter_list_lock);
}
}
static void _be_roce_dev_remove(struct be_adapter *adapter)
{
if (ocrdma_drv && ocrdma_drv->remove && adapter->ocrdma_dev)
ocrdma_drv->remove(adapter->ocrdma_dev);
adapter->ocrdma_dev = NULL;
}
void be_roce_dev_remove(struct be_adapter *adapter)
{
if (be_roce_supported(adapter)) {
mutex_lock(&be_adapter_list_lock);
_be_roce_dev_remove(adapter);
list_del(&adapter->entry);
mutex_unlock(&be_adapter_list_lock);
}
}
void be_roce_dev_shutdown(struct be_adapter *adapter)
{
if (be_roce_supported(adapter)) {
mutex_lock(&be_adapter_list_lock);
if (ocrdma_drv && adapter->ocrdma_dev &&
ocrdma_drv->state_change_handler)
ocrdma_drv->state_change_handler(adapter->ocrdma_dev,
BE_DEV_SHUTDOWN);
mutex_unlock(&be_adapter_list_lock);
}
}
int be_roce_register_driver(struct ocrdma_driver *drv)
{
struct be_adapter *dev;
mutex_lock(&be_adapter_list_lock);
if (ocrdma_drv) {
mutex_unlock(&be_adapter_list_lock);
return -EINVAL;
}
ocrdma_drv = drv;
list_for_each_entry(dev, &be_adapter_list, entry) {
_be_roce_dev_add(dev);
}
mutex_unlock(&be_adapter_list_lock);
return 0;
}
EXPORT_SYMBOL(be_roce_register_driver);
void be_roce_unregister_driver(struct ocrdma_driver *drv)
{
struct be_adapter *dev;
mutex_lock(&be_adapter_list_lock);
list_for_each_entry(dev, &be_adapter_list, entry) {
if (dev->ocrdma_dev)
_be_roce_dev_remove(dev);
}
ocrdma_drv = NULL;
mutex_unlock(&be_adapter_list_lock);
}
EXPORT_SYMBOL(be_roce_unregister_driver);
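/*
* Illustrative sketch, not part of this file: a hypothetical RoCE driver
* fills in struct ocrdma_driver and registers it; its add() callback is then
* invoked for every adapter already on be_adapter_list. The callback
* signatures are inferred from the calls above.
*
*	static struct ocrdma_driver my_roce_drv = {
*		.be_abi_version = BE_ROCE_ABI_VERSION,
*		.add = my_add,		// takes a be_dev_info, returns ocrdma_dev
*		.remove = my_remove,
*	};
*
*	err = be_roce_register_driver(&my_roce_drv);
*/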
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* timb_dma.c timberdale FPGA DMA driver
* Copyright (c) 2010 Intel Corporation
*/
/* Supports:
* Timberdale FPGA DMA engine
*/
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/timb_dma.h>
#include "dmaengine.h"
#define DRIVER_NAME "timb-dma"
/* Global DMA registers */
#define TIMBDMA_ACR 0x34
#define TIMBDMA_32BIT_ADDR 0x01
#define TIMBDMA_ISR 0x080000
#define TIMBDMA_IPR 0x080004
#define TIMBDMA_IER 0x080008
/* Channel specific registers */
/* RX instances base addresses are 0x00, 0x40, 0x80 ...
* TX instances base addresses are 0x18, 0x58, 0x98 ...
*/
#define TIMBDMA_INSTANCE_OFFSET 0x40
#define TIMBDMA_INSTANCE_TX_OFFSET 0x18
/* RX registers, relative the instance base */
#define TIMBDMA_OFFS_RX_DHAR 0x00
#define TIMBDMA_OFFS_RX_DLAR 0x04
#define TIMBDMA_OFFS_RX_LR 0x0C
#define TIMBDMA_OFFS_RX_BLR 0x10
#define TIMBDMA_OFFS_RX_ER 0x14
#define TIMBDMA_RX_EN 0x01
/* Bytes per row; a video-specific register
* which is placed after the TX registers.
*/
#define TIMBDMA_OFFS_RX_BPRR 0x30
/* TX registers, relative the instance base */
#define TIMBDMA_OFFS_TX_DHAR 0x00
#define TIMBDMA_OFFS_TX_DLAR 0x04
#define TIMBDMA_OFFS_TX_BLR 0x0C
#define TIMBDMA_OFFS_TX_LR 0x14
#define TIMB_DMA_DESC_SIZE 8
struct timb_dma_desc {
struct list_head desc_node;
struct dma_async_tx_descriptor txd;
u8 *desc_list;
unsigned int desc_list_len;
bool interrupt;
};
struct timb_dma_chan {
struct dma_chan chan;
void __iomem *membase;
spinlock_t lock; /* Used to protect data structures,
especially the lists and descriptors,
from races between the tasklet and calls
from above */
bool ongoing;
struct list_head active_list;
struct list_head queue;
struct list_head free_list;
unsigned int bytes_per_line;
enum dma_transfer_direction direction;
unsigned int descs; /* Descriptors to allocate */
unsigned int desc_elems; /* number of elems per descriptor */
};
struct timb_dma {
struct dma_device dma;
void __iomem *membase;
struct tasklet_struct tasklet;
struct timb_dma_chan channels[];
};
static struct device *chan2dev(struct dma_chan *chan)
{
return &chan->dev->device;
}
static struct device *chan2dmadev(struct dma_chan *chan)
{
return chan2dev(chan)->parent->parent;
}
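/*
 * The channels[] flexible array is the last member of struct timb_dma,
 * so the containing device structure can be recovered from a channel
 * pointer by stepping back over the preceding channels and the struct
 * header, as done below.
 */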
static struct timb_dma *tdchantotd(struct timb_dma_chan *td_chan)
{
int id = td_chan->chan.chan_id;
return (struct timb_dma *)((u8 *)td_chan -
id * sizeof(struct timb_dma_chan) - sizeof(struct timb_dma));
}
/* Must be called with the spinlock held */
static void __td_enable_chan_irq(struct timb_dma_chan *td_chan)
{
int id = td_chan->chan.chan_id;
struct timb_dma *td = tdchantotd(td_chan);
u32 ier;
/* enable interrupt for this channel */
ier = ioread32(td->membase + TIMBDMA_IER);
ier |= 1 << id;
dev_dbg(chan2dev(&td_chan->chan), "Enabling irq: %d, IER: 0x%x\n", id,
ier);
iowrite32(ier, td->membase + TIMBDMA_IER);
}
/* Must be called with the spinlock held */
static bool __td_dma_done_ack(struct timb_dma_chan *td_chan)
{
int id = td_chan->chan.chan_id;
	struct timb_dma *td = tdchantotd(td_chan);
u32 isr;
bool done = false;
dev_dbg(chan2dev(&td_chan->chan), "Checking irq: %d, td: %p\n", id, td);
isr = ioread32(td->membase + TIMBDMA_ISR) & (1 << id);
if (isr) {
iowrite32(isr, td->membase + TIMBDMA_ISR);
done = true;
}
return done;
}
static int td_fill_desc(struct timb_dma_chan *td_chan, u8 *dma_desc,
struct scatterlist *sg, bool last)
{
if (sg_dma_len(sg) > USHRT_MAX) {
dev_err(chan2dev(&td_chan->chan), "Too big sg element\n");
return -EINVAL;
}
/* length must be word aligned */
if (sg_dma_len(sg) % sizeof(u32)) {
dev_err(chan2dev(&td_chan->chan), "Incorrect length: %d\n",
sg_dma_len(sg));
return -EINVAL;
}
dev_dbg(chan2dev(&td_chan->chan), "desc: %p, addr: 0x%llx\n",
dma_desc, (unsigned long long)sg_dma_address(sg));
dma_desc[7] = (sg_dma_address(sg) >> 24) & 0xff;
dma_desc[6] = (sg_dma_address(sg) >> 16) & 0xff;
dma_desc[5] = (sg_dma_address(sg) >> 8) & 0xff;
dma_desc[4] = (sg_dma_address(sg) >> 0) & 0xff;
dma_desc[3] = (sg_dma_len(sg) >> 8) & 0xff;
dma_desc[2] = (sg_dma_len(sg) >> 0) & 0xff;
dma_desc[1] = 0x00;
dma_desc[0] = 0x21 | (last ? 0x02 : 0); /* tran, valid */
return 0;
}
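/*
 * Layout of the 8-byte hardware descriptor built by td_fill_desc()
 * above (little-endian fields, as inferred from the byte stores):
 *   byte 0:    control; 0x21 = transfer + valid, 0x02 added on the last
 *              element
 *   byte 1:    0x00
 *   bytes 2-3: transfer length (16 bit, hence the USHRT_MAX check)
 *   bytes 4-7: 32-bit DMA bus address of the buffer
 * E.g. a 0x100-byte last element at bus address 0x12345678 yields
 * 23 00 00 01 78 56 34 12.
 */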
/* Must be called with the spinlock held */
static void __td_start_dma(struct timb_dma_chan *td_chan)
{
struct timb_dma_desc *td_desc;
if (td_chan->ongoing) {
dev_err(chan2dev(&td_chan->chan),
"Transfer already ongoing\n");
return;
}
td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc,
desc_node);
dev_dbg(chan2dev(&td_chan->chan),
"td_chan: %p, chan: %d, membase: %p\n",
td_chan, td_chan->chan.chan_id, td_chan->membase);
if (td_chan->direction == DMA_DEV_TO_MEM) {
/* descriptor address */
iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_DHAR);
iowrite32(td_desc->txd.phys, td_chan->membase +
TIMBDMA_OFFS_RX_DLAR);
/* Bytes per line */
iowrite32(td_chan->bytes_per_line, td_chan->membase +
TIMBDMA_OFFS_RX_BPRR);
/* enable RX */
iowrite32(TIMBDMA_RX_EN, td_chan->membase + TIMBDMA_OFFS_RX_ER);
} else {
/* address high */
iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DHAR);
iowrite32(td_desc->txd.phys, td_chan->membase +
TIMBDMA_OFFS_TX_DLAR);
}
td_chan->ongoing = true;
if (td_desc->interrupt)
__td_enable_chan_irq(td_chan);
}
static void __td_finish(struct timb_dma_chan *td_chan)
{
struct dmaengine_desc_callback cb;
struct dma_async_tx_descriptor *txd;
struct timb_dma_desc *td_desc;
/* can happen if the descriptor is canceled */
if (list_empty(&td_chan->active_list))
return;
td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc,
desc_node);
txd = &td_desc->txd;
dev_dbg(chan2dev(&td_chan->chan), "descriptor %u complete\n",
txd->cookie);
/* make sure to stop the transfer */
if (td_chan->direction == DMA_DEV_TO_MEM)
iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_ER);
/* Currently no support for stopping DMA transfers
else
iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DLAR);
*/
dma_cookie_complete(txd);
td_chan->ongoing = false;
dmaengine_desc_get_callback(txd, &cb);
list_move(&td_desc->desc_node, &td_chan->free_list);
dma_descriptor_unmap(txd);
/*
* The API requires that no submissions are done from a
* callback, so we don't need to drop the lock here
*/
dmaengine_desc_callback_invoke(&cb, NULL);
}
static u32 __td_ier_mask(struct timb_dma *td)
{
int i;
u32 ret = 0;
for (i = 0; i < td->dma.chancnt; i++) {
struct timb_dma_chan *td_chan = td->channels + i;
if (td_chan->ongoing) {
struct timb_dma_desc *td_desc =
list_entry(td_chan->active_list.next,
struct timb_dma_desc, desc_node);
if (td_desc->interrupt)
ret |= 1 << i;
}
}
return ret;
}
static void __td_start_next(struct timb_dma_chan *td_chan)
{
struct timb_dma_desc *td_desc;
BUG_ON(list_empty(&td_chan->queue));
BUG_ON(td_chan->ongoing);
td_desc = list_entry(td_chan->queue.next, struct timb_dma_desc,
desc_node);
dev_dbg(chan2dev(&td_chan->chan), "%s: started %u\n",
__func__, td_desc->txd.cookie);
list_move(&td_desc->desc_node, &td_chan->active_list);
__td_start_dma(td_chan);
}
static dma_cookie_t td_tx_submit(struct dma_async_tx_descriptor *txd)
{
struct timb_dma_desc *td_desc = container_of(txd, struct timb_dma_desc,
txd);
struct timb_dma_chan *td_chan = container_of(txd->chan,
struct timb_dma_chan, chan);
dma_cookie_t cookie;
spin_lock_bh(&td_chan->lock);
cookie = dma_cookie_assign(txd);
if (list_empty(&td_chan->active_list)) {
dev_dbg(chan2dev(txd->chan), "%s: started %u\n", __func__,
txd->cookie);
list_add_tail(&td_desc->desc_node, &td_chan->active_list);
__td_start_dma(td_chan);
} else {
dev_dbg(chan2dev(txd->chan), "tx_submit: queued %u\n",
txd->cookie);
list_add_tail(&td_desc->desc_node, &td_chan->queue);
}
spin_unlock_bh(&td_chan->lock);
return cookie;
}
static struct timb_dma_desc *td_alloc_init_desc(struct timb_dma_chan *td_chan)
{
struct dma_chan *chan = &td_chan->chan;
struct timb_dma_desc *td_desc;
int err;
td_desc = kzalloc(sizeof(struct timb_dma_desc), GFP_KERNEL);
if (!td_desc)
goto out;
td_desc->desc_list_len = td_chan->desc_elems * TIMB_DMA_DESC_SIZE;
td_desc->desc_list = kzalloc(td_desc->desc_list_len, GFP_KERNEL);
if (!td_desc->desc_list)
goto err;
dma_async_tx_descriptor_init(&td_desc->txd, chan);
td_desc->txd.tx_submit = td_tx_submit;
td_desc->txd.flags = DMA_CTRL_ACK;
td_desc->txd.phys = dma_map_single(chan2dmadev(chan),
td_desc->desc_list, td_desc->desc_list_len, DMA_TO_DEVICE);
err = dma_mapping_error(chan2dmadev(chan), td_desc->txd.phys);
if (err) {
dev_err(chan2dev(chan), "DMA mapping error: %d\n", err);
goto err;
}
return td_desc;
err:
kfree(td_desc->desc_list);
kfree(td_desc);
out:
return NULL;
}
static void td_free_desc(struct timb_dma_desc *td_desc)
{
dev_dbg(chan2dev(td_desc->txd.chan), "Freeing desc: %p\n", td_desc);
dma_unmap_single(chan2dmadev(td_desc->txd.chan), td_desc->txd.phys,
td_desc->desc_list_len, DMA_TO_DEVICE);
kfree(td_desc->desc_list);
kfree(td_desc);
}
static void td_desc_put(struct timb_dma_chan *td_chan,
struct timb_dma_desc *td_desc)
{
dev_dbg(chan2dev(&td_chan->chan), "Putting desc: %p\n", td_desc);
spin_lock_bh(&td_chan->lock);
list_add(&td_desc->desc_node, &td_chan->free_list);
spin_unlock_bh(&td_chan->lock);
}
static struct timb_dma_desc *td_desc_get(struct timb_dma_chan *td_chan)
{
struct timb_dma_desc *td_desc, *_td_desc;
struct timb_dma_desc *ret = NULL;
spin_lock_bh(&td_chan->lock);
list_for_each_entry_safe(td_desc, _td_desc, &td_chan->free_list,
desc_node) {
if (async_tx_test_ack(&td_desc->txd)) {
list_del(&td_desc->desc_node);
ret = td_desc;
break;
}
dev_dbg(chan2dev(&td_chan->chan), "desc %p not ACKed\n",
td_desc);
}
spin_unlock_bh(&td_chan->lock);
return ret;
}
static int td_alloc_chan_resources(struct dma_chan *chan)
{
struct timb_dma_chan *td_chan =
container_of(chan, struct timb_dma_chan, chan);
int i;
dev_dbg(chan2dev(chan), "%s: entry\n", __func__);
BUG_ON(!list_empty(&td_chan->free_list));
for (i = 0; i < td_chan->descs; i++) {
struct timb_dma_desc *td_desc = td_alloc_init_desc(td_chan);
if (!td_desc) {
if (i)
break;
else {
dev_err(chan2dev(chan),
"Couldn't allocate any descriptors\n");
return -ENOMEM;
}
}
td_desc_put(td_chan, td_desc);
}
spin_lock_bh(&td_chan->lock);
dma_cookie_init(chan);
spin_unlock_bh(&td_chan->lock);
return 0;
}
static void td_free_chan_resources(struct dma_chan *chan)
{
struct timb_dma_chan *td_chan =
container_of(chan, struct timb_dma_chan, chan);
struct timb_dma_desc *td_desc, *_td_desc;
LIST_HEAD(list);
dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
/* check that all descriptors are free */
BUG_ON(!list_empty(&td_chan->active_list));
BUG_ON(!list_empty(&td_chan->queue));
spin_lock_bh(&td_chan->lock);
list_splice_init(&td_chan->free_list, &list);
spin_unlock_bh(&td_chan->lock);
list_for_each_entry_safe(td_desc, _td_desc, &list, desc_node) {
dev_dbg(chan2dev(chan), "%s: Freeing desc: %p\n", __func__,
td_desc);
td_free_desc(td_desc);
}
}
static enum dma_status td_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
struct dma_tx_state *txstate)
{
enum dma_status ret;
dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
ret = dma_cookie_status(chan, cookie, txstate);
dev_dbg(chan2dev(chan), "%s: exit, ret: %d\n", __func__, ret);
return ret;
}
static void td_issue_pending(struct dma_chan *chan)
{
struct timb_dma_chan *td_chan =
container_of(chan, struct timb_dma_chan, chan);
dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
spin_lock_bh(&td_chan->lock);
if (!list_empty(&td_chan->active_list))
/* transfer ongoing */
if (__td_dma_done_ack(td_chan))
__td_finish(td_chan);
if (list_empty(&td_chan->active_list) && !list_empty(&td_chan->queue))
__td_start_next(td_chan);
spin_unlock_bh(&td_chan->lock);
}
static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
struct scatterlist *sgl, unsigned int sg_len,
enum dma_transfer_direction direction, unsigned long flags,
void *context)
{
struct timb_dma_chan *td_chan =
container_of(chan, struct timb_dma_chan, chan);
struct timb_dma_desc *td_desc;
struct scatterlist *sg;
unsigned int i;
unsigned int desc_usage = 0;
if (!sgl || !sg_len) {
dev_err(chan2dev(chan), "%s: No SG list\n", __func__);
return NULL;
}
/* even channels are for RX, odd for TX */
if (td_chan->direction != direction) {
dev_err(chan2dev(chan),
"Requesting channel in wrong direction\n");
return NULL;
}
td_desc = td_desc_get(td_chan);
if (!td_desc) {
dev_err(chan2dev(chan), "Not enough descriptors available\n");
return NULL;
}
td_desc->interrupt = (flags & DMA_PREP_INTERRUPT) != 0;
for_each_sg(sgl, sg, sg_len, i) {
int err;
		if (desc_usage + TIMB_DMA_DESC_SIZE > td_desc->desc_list_len) {
			dev_err(chan2dev(chan), "No descriptor space\n");
			td_desc_put(td_chan, td_desc);
			return NULL;
		}
err = td_fill_desc(td_chan, td_desc->desc_list + desc_usage, sg,
i == (sg_len - 1));
if (err) {
dev_err(chan2dev(chan), "Failed to update desc: %d\n",
err);
td_desc_put(td_chan, td_desc);
return NULL;
}
desc_usage += TIMB_DMA_DESC_SIZE;
}
dma_sync_single_for_device(chan2dmadev(chan), td_desc->txd.phys,
td_desc->desc_list_len, DMA_TO_DEVICE);
return &td_desc->txd;
}
static int td_terminate_all(struct dma_chan *chan)
{
struct timb_dma_chan *td_chan =
container_of(chan, struct timb_dma_chan, chan);
struct timb_dma_desc *td_desc, *_td_desc;
dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
/* first the easy part, put the queue into the free list */
spin_lock_bh(&td_chan->lock);
list_for_each_entry_safe(td_desc, _td_desc, &td_chan->queue,
desc_node)
list_move(&td_desc->desc_node, &td_chan->free_list);
/* now tear down the running */
__td_finish(td_chan);
spin_unlock_bh(&td_chan->lock);
return 0;
}
static void td_tasklet(struct tasklet_struct *t)
{
struct timb_dma *td = from_tasklet(td, t, tasklet);
u32 isr;
u32 ipr;
u32 ier;
int i;
isr = ioread32(td->membase + TIMBDMA_ISR);
ipr = isr & __td_ier_mask(td);
/* ack the interrupts */
iowrite32(ipr, td->membase + TIMBDMA_ISR);
for (i = 0; i < td->dma.chancnt; i++)
if (ipr & (1 << i)) {
struct timb_dma_chan *td_chan = td->channels + i;
spin_lock(&td_chan->lock);
__td_finish(td_chan);
if (!list_empty(&td_chan->queue))
__td_start_next(td_chan);
spin_unlock(&td_chan->lock);
}
ier = __td_ier_mask(td);
iowrite32(ier, td->membase + TIMBDMA_IER);
}
static irqreturn_t td_irq(int irq, void *devid)
{
struct timb_dma *td = devid;
u32 ipr = ioread32(td->membase + TIMBDMA_IPR);
if (ipr) {
/* disable interrupts, will be re-enabled in tasklet */
iowrite32(0, td->membase + TIMBDMA_IER);
tasklet_schedule(&td->tasklet);
return IRQ_HANDLED;
} else
return IRQ_NONE;
}
static int td_probe(struct platform_device *pdev)
{
struct timb_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct timb_dma *td;
struct resource *iomem;
int irq;
int err;
int i;
if (!pdata) {
dev_err(&pdev->dev, "No platform data\n");
return -EINVAL;
}
iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!iomem)
return -EINVAL;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
if (!request_mem_region(iomem->start, resource_size(iomem),
DRIVER_NAME))
return -EBUSY;
td = kzalloc(struct_size(td, channels, pdata->nr_channels),
GFP_KERNEL);
if (!td) {
err = -ENOMEM;
goto err_release_region;
}
dev_dbg(&pdev->dev, "Allocated TD: %p\n", td);
td->membase = ioremap(iomem->start, resource_size(iomem));
if (!td->membase) {
dev_err(&pdev->dev, "Failed to remap I/O memory\n");
err = -ENOMEM;
goto err_free_mem;
}
/* 32bit addressing */
iowrite32(TIMBDMA_32BIT_ADDR, td->membase + TIMBDMA_ACR);
/* disable and clear any interrupts */
iowrite32(0x0, td->membase + TIMBDMA_IER);
iowrite32(0xFFFFFFFF, td->membase + TIMBDMA_ISR);
tasklet_setup(&td->tasklet, td_tasklet);
err = request_irq(irq, td_irq, IRQF_SHARED, DRIVER_NAME, td);
if (err) {
dev_err(&pdev->dev, "Failed to request IRQ\n");
goto err_tasklet_kill;
}
td->dma.device_alloc_chan_resources = td_alloc_chan_resources;
td->dma.device_free_chan_resources = td_free_chan_resources;
td->dma.device_tx_status = td_tx_status;
td->dma.device_issue_pending = td_issue_pending;
dma_cap_set(DMA_SLAVE, td->dma.cap_mask);
dma_cap_set(DMA_PRIVATE, td->dma.cap_mask);
td->dma.device_prep_slave_sg = td_prep_slave_sg;
td->dma.device_terminate_all = td_terminate_all;
td->dma.dev = &pdev->dev;
INIT_LIST_HEAD(&td->dma.channels);
for (i = 0; i < pdata->nr_channels; i++) {
struct timb_dma_chan *td_chan = &td->channels[i];
struct timb_dma_platform_data_channel *pchan =
pdata->channels + i;
/* even channels are RX, odd are TX */
if ((i % 2) == pchan->rx) {
dev_err(&pdev->dev, "Wrong channel configuration\n");
err = -EINVAL;
goto err_free_irq;
}
td_chan->chan.device = &td->dma;
dma_cookie_init(&td_chan->chan);
spin_lock_init(&td_chan->lock);
INIT_LIST_HEAD(&td_chan->active_list);
INIT_LIST_HEAD(&td_chan->queue);
INIT_LIST_HEAD(&td_chan->free_list);
td_chan->descs = pchan->descriptors;
td_chan->desc_elems = pchan->descriptor_elements;
td_chan->bytes_per_line = pchan->bytes_per_line;
td_chan->direction = pchan->rx ? DMA_DEV_TO_MEM :
DMA_MEM_TO_DEV;
td_chan->membase = td->membase +
(i / 2) * TIMBDMA_INSTANCE_OFFSET +
(pchan->rx ? 0 : TIMBDMA_INSTANCE_TX_OFFSET);
dev_dbg(&pdev->dev, "Chan: %d, membase: %p\n",
i, td_chan->membase);
list_add_tail(&td_chan->chan.device_node, &td->dma.channels);
}
err = dma_async_device_register(&td->dma);
if (err) {
dev_err(&pdev->dev, "Failed to register async device\n");
goto err_free_irq;
}
platform_set_drvdata(pdev, td);
dev_dbg(&pdev->dev, "Probe result: %d\n", err);
return err;
err_free_irq:
free_irq(irq, td);
err_tasklet_kill:
tasklet_kill(&td->tasklet);
iounmap(td->membase);
err_free_mem:
kfree(td);
err_release_region:
release_mem_region(iomem->start, resource_size(iomem));
return err;
}
static void td_remove(struct platform_device *pdev)
{
struct timb_dma *td = platform_get_drvdata(pdev);
struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
int irq = platform_get_irq(pdev, 0);
dma_async_device_unregister(&td->dma);
free_irq(irq, td);
tasklet_kill(&td->tasklet);
iounmap(td->membase);
kfree(td);
release_mem_region(iomem->start, resource_size(iomem));
dev_dbg(&pdev->dev, "Removed...\n");
}
static struct platform_driver td_driver = {
.driver = {
.name = DRIVER_NAME,
},
.probe = td_probe,
.remove = td_remove,
};
module_platform_driver(td_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Timberdale DMA controller driver");
MODULE_AUTHOR("Pelagicore AB <[email protected]>");
MODULE_ALIAS("platform:"DRIVER_NAME);
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* tifm_7xx1.c - TI FlashMedia driver
*
* Copyright (C) 2006 Alex Dubov <[email protected]>
*/
#include <linux/tifm.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#define DRIVER_NAME "tifm_7xx1"
#define DRIVER_VERSION "0.8"
#define TIFM_IRQ_ENABLE 0x80000000
#define TIFM_IRQ_SOCKMASK(x) (x)
#define TIFM_IRQ_CARDMASK(x) ((x) << 8)
#define TIFM_IRQ_FIFOMASK(x) ((x) << 16)
#define TIFM_IRQ_SETALL 0xffffffff
static void tifm_7xx1_dummy_eject(struct tifm_adapter *fm,
struct tifm_dev *sock)
{
}
static void tifm_7xx1_eject(struct tifm_adapter *fm, struct tifm_dev *sock)
{
unsigned long flags;
spin_lock_irqsave(&fm->lock, flags);
fm->socket_change_set |= 1 << sock->socket_id;
tifm_queue_work(&fm->media_switcher);
spin_unlock_irqrestore(&fm->lock, flags);
}
static irqreturn_t tifm_7xx1_isr(int irq, void *dev_id)
{
struct tifm_adapter *fm = dev_id;
struct tifm_dev *sock;
unsigned int irq_status, cnt;
spin_lock(&fm->lock);
irq_status = readl(fm->addr + FM_INTERRUPT_STATUS);
if (irq_status == 0 || irq_status == (~0)) {
spin_unlock(&fm->lock);
return IRQ_NONE;
}
if (irq_status & TIFM_IRQ_ENABLE) {
writel(TIFM_IRQ_ENABLE, fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
for (cnt = 0; cnt < fm->num_sockets; cnt++) {
sock = fm->sockets[cnt];
if (sock) {
if ((irq_status >> cnt) & TIFM_IRQ_FIFOMASK(1))
sock->data_event(sock);
if ((irq_status >> cnt) & TIFM_IRQ_CARDMASK(1))
sock->card_event(sock);
}
}
fm->socket_change_set |= irq_status
& ((1 << fm->num_sockets) - 1);
}
writel(irq_status, fm->addr + FM_INTERRUPT_STATUS);
if (fm->finish_me)
complete_all(fm->finish_me);
else if (!fm->socket_change_set)
writel(TIFM_IRQ_ENABLE, fm->addr + FM_SET_INTERRUPT_ENABLE);
else
tifm_queue_work(&fm->media_switcher);
spin_unlock(&fm->lock);
return IRQ_HANDLED;
}
static unsigned char tifm_7xx1_toggle_sock_power(char __iomem *sock_addr)
{
unsigned int s_state;
int cnt;
writel(0x0e00, sock_addr + SOCK_CONTROL);
for (cnt = 16; cnt <= 256; cnt <<= 1) {
if (!(TIFM_SOCK_STATE_POWERED
& readl(sock_addr + SOCK_PRESENT_STATE)))
break;
msleep(cnt);
}
s_state = readl(sock_addr + SOCK_PRESENT_STATE);
if (!(TIFM_SOCK_STATE_OCCUPIED & s_state))
return 0;
writel(readl(sock_addr + SOCK_CONTROL) | TIFM_CTRL_LED,
sock_addr + SOCK_CONTROL);
	/* XD cards need some extra time before power on */
if (((readl(sock_addr + SOCK_PRESENT_STATE) >> 4) & 7)
== TIFM_TYPE_XD)
msleep(40);
writel((s_state & TIFM_CTRL_POWER_MASK) | 0x0c00,
sock_addr + SOCK_CONTROL);
/* wait for power to stabilize */
msleep(20);
for (cnt = 16; cnt <= 256; cnt <<= 1) {
if ((TIFM_SOCK_STATE_POWERED
& readl(sock_addr + SOCK_PRESENT_STATE)))
break;
msleep(cnt);
}
writel(readl(sock_addr + SOCK_CONTROL) & (~TIFM_CTRL_LED),
sock_addr + SOCK_CONTROL);
return (readl(sock_addr + SOCK_PRESENT_STATE) >> 4) & 7;
}
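/*
 * Each polling loop above backs off exponentially (16, 32, 64, 128,
 * 256 ms), so a socket that never reaches the expected power state
 * costs at most 16 + 32 + 64 + 128 + 256 = 496 ms per loop.
 */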
static inline void tifm_7xx1_sock_power_off(char __iomem *sock_addr)
{
writel((~TIFM_CTRL_POWER_MASK) & readl(sock_addr + SOCK_CONTROL),
sock_addr + SOCK_CONTROL);
}
static inline char __iomem *
tifm_7xx1_sock_addr(char __iomem *base_addr, unsigned int sock_num)
{
return base_addr + ((sock_num + 1) << 10);
}
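/*
 * Illustration: socket register windows are 1 KiB apart and start one
 * window above the host register block, so socket 0 maps to
 * base + 0x400 and socket 2 to base + 0xc00.
 */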
static void tifm_7xx1_switch_media(struct work_struct *work)
{
struct tifm_adapter *fm = container_of(work, struct tifm_adapter,
media_switcher);
struct tifm_dev *sock;
char __iomem *sock_addr;
unsigned long flags;
unsigned char media_id;
unsigned int socket_change_set, cnt;
spin_lock_irqsave(&fm->lock, flags);
socket_change_set = fm->socket_change_set;
fm->socket_change_set = 0;
dev_dbg(fm->dev.parent, "checking media set %x\n",
socket_change_set);
if (!socket_change_set) {
spin_unlock_irqrestore(&fm->lock, flags);
return;
}
for (cnt = 0; cnt < fm->num_sockets; cnt++) {
if (!(socket_change_set & (1 << cnt)))
continue;
sock = fm->sockets[cnt];
if (sock) {
printk(KERN_INFO
"%s : demand removing card from socket %u:%u\n",
dev_name(&fm->dev), fm->id, cnt);
fm->sockets[cnt] = NULL;
sock_addr = sock->addr;
spin_unlock_irqrestore(&fm->lock, flags);
device_unregister(&sock->dev);
spin_lock_irqsave(&fm->lock, flags);
tifm_7xx1_sock_power_off(sock_addr);
writel(0x0e00, sock_addr + SOCK_CONTROL);
}
spin_unlock_irqrestore(&fm->lock, flags);
media_id = tifm_7xx1_toggle_sock_power(
tifm_7xx1_sock_addr(fm->addr, cnt));
		/* tifm_alloc_device will check if media_id is valid */
sock = tifm_alloc_device(fm, cnt, media_id);
if (sock) {
sock->addr = tifm_7xx1_sock_addr(fm->addr, cnt);
if (!device_register(&sock->dev)) {
spin_lock_irqsave(&fm->lock, flags);
if (!fm->sockets[cnt]) {
fm->sockets[cnt] = sock;
sock = NULL;
}
spin_unlock_irqrestore(&fm->lock, flags);
}
if (sock)
put_device(&sock->dev);
}
spin_lock_irqsave(&fm->lock, flags);
}
writel(TIFM_IRQ_FIFOMASK(socket_change_set)
| TIFM_IRQ_CARDMASK(socket_change_set),
fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
writel(TIFM_IRQ_FIFOMASK(socket_change_set)
| TIFM_IRQ_CARDMASK(socket_change_set),
fm->addr + FM_SET_INTERRUPT_ENABLE);
writel(TIFM_IRQ_ENABLE, fm->addr + FM_SET_INTERRUPT_ENABLE);
spin_unlock_irqrestore(&fm->lock, flags);
}
static int __maybe_unused tifm_7xx1_suspend(struct device *dev_d)
{
struct pci_dev *dev = to_pci_dev(dev_d);
struct tifm_adapter *fm = pci_get_drvdata(dev);
int cnt;
dev_dbg(&dev->dev, "suspending host\n");
for (cnt = 0; cnt < fm->num_sockets; cnt++) {
if (fm->sockets[cnt])
tifm_7xx1_sock_power_off(fm->sockets[cnt]->addr);
}
device_wakeup_disable(dev_d);
return 0;
}
static int __maybe_unused tifm_7xx1_resume(struct device *dev_d)
{
struct pci_dev *dev = to_pci_dev(dev_d);
struct tifm_adapter *fm = pci_get_drvdata(dev);
int rc;
unsigned long time_left;
unsigned int good_sockets = 0, bad_sockets = 0;
unsigned long flags;
/* Maximum number of entries is 4 */
unsigned char new_ids[4];
DECLARE_COMPLETION_ONSTACK(finish_resume);
if (WARN_ON(fm->num_sockets > ARRAY_SIZE(new_ids)))
return -ENXIO;
pci_set_master(dev);
dev_dbg(&dev->dev, "resuming host\n");
for (rc = 0; rc < fm->num_sockets; rc++)
new_ids[rc] = tifm_7xx1_toggle_sock_power(
tifm_7xx1_sock_addr(fm->addr, rc));
spin_lock_irqsave(&fm->lock, flags);
for (rc = 0; rc < fm->num_sockets; rc++) {
if (fm->sockets[rc]) {
if (fm->sockets[rc]->type == new_ids[rc])
good_sockets |= 1 << rc;
else
bad_sockets |= 1 << rc;
}
}
writel(TIFM_IRQ_ENABLE | TIFM_IRQ_SOCKMASK((1 << fm->num_sockets) - 1),
fm->addr + FM_SET_INTERRUPT_ENABLE);
dev_dbg(&dev->dev, "change sets on resume: good %x, bad %x\n",
good_sockets, bad_sockets);
fm->socket_change_set = 0;
if (good_sockets) {
fm->finish_me = &finish_resume;
spin_unlock_irqrestore(&fm->lock, flags);
time_left = wait_for_completion_timeout(&finish_resume, HZ);
dev_dbg(&dev->dev, "wait returned %lu\n", time_left);
writel(TIFM_IRQ_FIFOMASK(good_sockets)
| TIFM_IRQ_CARDMASK(good_sockets),
fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
writel(TIFM_IRQ_FIFOMASK(good_sockets)
| TIFM_IRQ_CARDMASK(good_sockets),
fm->addr + FM_SET_INTERRUPT_ENABLE);
spin_lock_irqsave(&fm->lock, flags);
fm->finish_me = NULL;
fm->socket_change_set ^= good_sockets & fm->socket_change_set;
}
fm->socket_change_set |= bad_sockets;
if (fm->socket_change_set)
tifm_queue_work(&fm->media_switcher);
spin_unlock_irqrestore(&fm->lock, flags);
writel(TIFM_IRQ_ENABLE,
fm->addr + FM_SET_INTERRUPT_ENABLE);
return 0;
}
static int tifm_7xx1_dummy_has_ms_pif(struct tifm_adapter *fm,
struct tifm_dev *sock)
{
return 0;
}
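/*
 * Judging by the callback name, the function below reports whether a
 * socket has the MemoryStick parallel interface: socket 2 on 4-socket
 * controllers, socket 0 on 2-socket ones (an assumption drawn from the
 * checks in the code, not from a datasheet).
 */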
static int tifm_7xx1_has_ms_pif(struct tifm_adapter *fm, struct tifm_dev *sock)
{
if (((fm->num_sockets == 4) && (sock->socket_id == 2))
|| ((fm->num_sockets == 2) && (sock->socket_id == 0)))
return 1;
return 0;
}
static int tifm_7xx1_probe(struct pci_dev *dev,
const struct pci_device_id *dev_id)
{
struct tifm_adapter *fm;
int pci_dev_busy = 0;
int rc;
rc = dma_set_mask(&dev->dev, DMA_BIT_MASK(32));
if (rc)
return rc;
rc = pci_enable_device(dev);
if (rc)
return rc;
pci_set_master(dev);
rc = pci_request_regions(dev, DRIVER_NAME);
if (rc) {
pci_dev_busy = 1;
goto err_out;
}
pci_intx(dev, 1);
fm = tifm_alloc_adapter(dev->device == PCI_DEVICE_ID_TI_XX21_XX11_FM
? 4 : 2, &dev->dev);
if (!fm) {
rc = -ENOMEM;
goto err_out_int;
}
INIT_WORK(&fm->media_switcher, tifm_7xx1_switch_media);
fm->eject = tifm_7xx1_eject;
fm->has_ms_pif = tifm_7xx1_has_ms_pif;
pci_set_drvdata(dev, fm);
fm->addr = pci_ioremap_bar(dev, 0);
if (!fm->addr) {
rc = -ENODEV;
goto err_out_free;
}
rc = request_irq(dev->irq, tifm_7xx1_isr, IRQF_SHARED, DRIVER_NAME, fm);
if (rc)
goto err_out_unmap;
rc = tifm_add_adapter(fm);
if (rc)
goto err_out_irq;
writel(TIFM_IRQ_ENABLE | TIFM_IRQ_SOCKMASK((1 << fm->num_sockets) - 1),
fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
writel(TIFM_IRQ_ENABLE | TIFM_IRQ_SOCKMASK((1 << fm->num_sockets) - 1),
fm->addr + FM_SET_INTERRUPT_ENABLE);
return 0;
err_out_irq:
free_irq(dev->irq, fm);
err_out_unmap:
iounmap(fm->addr);
err_out_free:
tifm_free_adapter(fm);
err_out_int:
pci_intx(dev, 0);
pci_release_regions(dev);
err_out:
if (!pci_dev_busy)
pci_disable_device(dev);
return rc;
}
static void tifm_7xx1_remove(struct pci_dev *dev)
{
struct tifm_adapter *fm = pci_get_drvdata(dev);
int cnt;
fm->eject = tifm_7xx1_dummy_eject;
fm->has_ms_pif = tifm_7xx1_dummy_has_ms_pif;
writel(TIFM_IRQ_SETALL, fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
free_irq(dev->irq, fm);
tifm_remove_adapter(fm);
for (cnt = 0; cnt < fm->num_sockets; cnt++)
tifm_7xx1_sock_power_off(tifm_7xx1_sock_addr(fm->addr, cnt));
iounmap(fm->addr);
pci_intx(dev, 0);
pci_release_regions(dev);
pci_disable_device(dev);
tifm_free_adapter(fm);
}
static const struct pci_device_id tifm_7xx1_pci_tbl[] = {
{ PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XX21_XX11_FM, PCI_ANY_ID,
PCI_ANY_ID, 0, 0, 0 }, /* xx21 - the one I have */
{ PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XX12_FM, PCI_ANY_ID,
PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XX20_FM, PCI_ANY_ID,
PCI_ANY_ID, 0, 0, 0 },
{ }
};
static SIMPLE_DEV_PM_OPS(tifm_7xx1_pm_ops, tifm_7xx1_suspend, tifm_7xx1_resume);
static struct pci_driver tifm_7xx1_driver = {
.name = DRIVER_NAME,
.id_table = tifm_7xx1_pci_tbl,
.probe = tifm_7xx1_probe,
.remove = tifm_7xx1_remove,
.driver.pm = &tifm_7xx1_pm_ops,
};
module_pci_driver(tifm_7xx1_driver);
MODULE_AUTHOR("Alex Dubov");
MODULE_DESCRIPTION("TI FlashMedia host driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, tifm_7xx1_pci_tbl);
MODULE_VERSION(DRIVER_VERSION);
|
/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
/*
* Copyright 2017 IBM Corp.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#ifndef _UAPI_LINUX_ASPEED_LPC_CTRL_H
#define _UAPI_LINUX_ASPEED_LPC_CTRL_H
#include <linux/ioctl.h>
#include <linux/types.h>
/* Window types */
#define ASPEED_LPC_CTRL_WINDOW_FLASH 1
#define ASPEED_LPC_CTRL_WINDOW_MEMORY 2
/*
* This driver provides a window for the host to access a BMC resource
* across the BMC <-> Host LPC bus.
*
 * window_type: The BMC resource that the host will access through the
 * window: either BMC flash or BMC RAM.
*
* window_id: For each window type there may be multiple windows,
* these are referenced by ID.
*
* flags: Reserved for future use, this field is expected to be
* zeroed.
*
 * addr: Address on the host LPC bus at which the specified window
 * should be mapped. This address must be power-of-two aligned.
*
* offset: Offset into the BMC window that should be mapped to the
* host (at addr). This must be a multiple of size.
*
* size: The size of the mapping. The smallest possible size is 64K.
* This must be power of two aligned.
*
*/
struct aspeed_lpc_ctrl_mapping {
__u8 window_type;
__u8 window_id;
__u16 flags;
__u32 addr;
__u32 offset;
__u32 size;
};
#define __ASPEED_LPC_CTRL_IOCTL_MAGIC 0xb2
#define ASPEED_LPC_CTRL_IOCTL_GET_SIZE _IOWR(__ASPEED_LPC_CTRL_IOCTL_MAGIC, \
0x00, struct aspeed_lpc_ctrl_mapping)
#define ASPEED_LPC_CTRL_IOCTL_MAP _IOW(__ASPEED_LPC_CTRL_IOCTL_MAGIC, \
0x01, struct aspeed_lpc_ctrl_mapping)
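/*
 * Usage sketch (illustrative only; the numbers are assumptions, not
 * mandated by this header): map the smallest allowed window, 64 KiB of
 * flash window 0, at a 64 KiB-aligned host LPC address.
 *
 *	struct aspeed_lpc_ctrl_mapping map = {
 *		.window_type = ASPEED_LPC_CTRL_WINDOW_FLASH,
 *		.window_id = 0,
 *		.flags = 0,
 *		.addr = 0x0e000000,
 *		.offset = 0,
 *		.size = 0x10000,
 *	};
 *
 *	ioctl(fd, ASPEED_LPC_CTRL_IOCTL_MAP, &map);
 */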
#endif /* _UAPI_LINUX_ASPEED_LPC_CTRL_H */
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* POWERNV cpufreq driver for the IBM POWER processors
*
* (C) Copyright IBM 2014
*
* Author: Vaidyanathan Srinivasan <svaidy at linux.vnet.ibm.com>
*/
#define pr_fmt(fmt) "powernv-cpufreq: " fmt
#include <linux/kernel.h>
#include <linux/sysfs.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/cpufreq.h>
#include <linux/smp.h>
#include <linux/of.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/hashtable.h>
#include <trace/events/power.h>
#include <asm/cputhreads.h>
#include <asm/firmware.h>
#include <asm/reg.h>
#include <asm/smp.h> /* Required for cpu_sibling_mask() in UP configs */
#include <asm/opal.h>
#include <linux/timer.h>
#define POWERNV_MAX_PSTATES_ORDER 8
#define POWERNV_MAX_PSTATES (1UL << (POWERNV_MAX_PSTATES_ORDER))
#define PMSR_PSAFE_ENABLE (1UL << 30)
#define PMSR_SPR_EM_DISABLE (1UL << 31)
#define MAX_PSTATE_SHIFT 32
#define LPSTATE_SHIFT 48
#define GPSTATE_SHIFT 56
#define MAX_NR_CHIPS 32
#define MAX_RAMP_DOWN_TIME 5120
/*
* On an idle system we want the global pstate to ramp-down from max value to
* min over a span of ~5 secs. Also we want it to initially ramp-down slowly and
* then ramp-down rapidly later on.
*
* This gives a percentage rampdown for time elapsed in milliseconds.
* ramp_down_percentage = ((ms * ms) >> 18)
* ~= 3.8 * (sec * sec)
*
* At 0 ms ramp_down_percent = 0
* At 5120 ms ramp_down_percent = 100
*/
#define ramp_down_percent(time) ((time * time) >> 18)
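/*
 * Illustrative check of the curve: at the halfway point,
 * ramp_down_percent(2560) = (2560 * 2560) >> 18 = 25, i.e. only a
 * quarter of the ramp-down has happened after half the time, giving
 * the slow initial ramp-down described above.
 */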
/* Interval after which the timer is queued to bring down global pstate */
#define GPSTATE_TIMER_INTERVAL 2000
/**
* struct global_pstate_info - Per policy data structure to maintain history of
* global pstates
* @highest_lpstate_idx: The local pstate index from which we are
* ramping down
* @elapsed_time: Time in ms spent in ramping down from
* highest_lpstate_idx
* @last_sampled_time: Time from boot in ms when global pstates were
* last set
 * @last_lpstate_idx: Last set value of the local pstate, as a
 * cpufreq table index
 * @last_gpstate_idx: Last set value of the global pstate, as a
 * cpufreq table index
* @timer: Is used for ramping down if cpu goes idle for
* a long time with global pstate held high
* @gpstate_lock: A spinlock to maintain synchronization between
* routines called by the timer handler and
 * governor's target_index calls
* @policy: Associated CPUFreq policy
*/
struct global_pstate_info {
int highest_lpstate_idx;
unsigned int elapsed_time;
unsigned int last_sampled_time;
int last_lpstate_idx;
int last_gpstate_idx;
spinlock_t gpstate_lock;
struct timer_list timer;
struct cpufreq_policy *policy;
};
static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1];
static DEFINE_HASHTABLE(pstate_revmap, POWERNV_MAX_PSTATES_ORDER);
/**
* struct pstate_idx_revmap_data: Entry in the hashmap pstate_revmap
* indexed by a function of pstate id.
*
* @pstate_id: pstate id for this entry.
*
* @cpufreq_table_idx: Index into the powernv_freqs
* cpufreq_frequency_table for frequency
* corresponding to pstate_id.
*
* @hentry: hlist_node that hooks this entry into the pstate_revmap
* hashtable
*/
struct pstate_idx_revmap_data {
u8 pstate_id;
unsigned int cpufreq_table_idx;
struct hlist_node hentry;
};
static bool rebooting, throttled, occ_reset;
static const char * const throttle_reason[] = {
"No throttling",
"Power Cap",
"Processor Over Temperature",
"Power Supply Failure",
"Over Current",
"OCC Reset"
};
enum throttle_reason_type {
NO_THROTTLE = 0,
POWERCAP,
CPU_OVERTEMP,
POWER_SUPPLY_FAILURE,
OVERCURRENT,
OCC_RESET_THROTTLE,
OCC_MAX_REASON
};
static struct chip {
unsigned int id;
bool throttled;
bool restore;
u8 throttle_reason;
cpumask_t mask;
struct work_struct throttle;
int throttle_turbo;
int throttle_sub_turbo;
int reason[OCC_MAX_REASON];
} *chips;
static int nr_chips;
static DEFINE_PER_CPU(struct chip *, chip_info);
/*
* Note:
* The set of pstates consists of contiguous integers.
* powernv_pstate_info stores the index of the frequency table for
* max, min and nominal frequencies. It also stores number of
* available frequencies.
*
* powernv_pstate_info.nominal indicates the index to the highest
* non-turbo frequency.
*/
static struct powernv_pstate_info {
unsigned int min;
unsigned int max;
unsigned int nominal;
unsigned int nr_pstates;
bool wof_enabled;
} powernv_pstate_info;
static inline u8 extract_pstate(u64 pmsr_val, unsigned int shift)
{
return ((pmsr_val >> shift) & 0xFF);
}
#define extract_local_pstate(x) extract_pstate(x, LPSTATE_SHIFT)
#define extract_global_pstate(x) extract_pstate(x, GPSTATE_SHIFT)
#define extract_max_pstate(x) extract_pstate(x, MAX_PSTATE_SHIFT)
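/*
 * PMSR/PMCR layout implied by the shifts above (each field 8 bits wide,
 * counting from bit 0 = LSB):
 *   bits 32..39  max pstate
 *   bits 48..55  local pstate
 *   bits 56..63  global pstate
 */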
/* Use following functions for conversions between pstate_id and index */
/*
* idx_to_pstate : Returns the pstate id corresponding to the
* frequency in the cpufreq frequency table
* powernv_freqs indexed by @i.
*
* If @i is out of bound, this will return the pstate
* corresponding to the nominal frequency.
*/
static inline u8 idx_to_pstate(unsigned int i)
{
if (unlikely(i >= powernv_pstate_info.nr_pstates)) {
pr_warn_once("idx_to_pstate: index %u is out of bound\n", i);
return powernv_freqs[powernv_pstate_info.nominal].driver_data;
}
return powernv_freqs[i].driver_data;
}
/*
 * pstate_to_idx : Returns the index in the cpufreq frequency table
* powernv_freqs for the frequency whose corresponding
* pstate id is @pstate.
*
* If no frequency corresponding to @pstate is found,
* this will return the index of the nominal
* frequency.
*/
static unsigned int pstate_to_idx(u8 pstate)
{
unsigned int key = pstate % POWERNV_MAX_PSTATES;
struct pstate_idx_revmap_data *revmap_data;
hash_for_each_possible(pstate_revmap, revmap_data, hentry, key) {
if (revmap_data->pstate_id == pstate)
return revmap_data->cpufreq_table_idx;
}
pr_warn_once("pstate_to_idx: pstate 0x%x not found\n", pstate);
return powernv_pstate_info.nominal;
}
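/*
 * Note on the reverse map above: pstate ids are u8 and
 * POWERNV_MAX_PSTATES is 256, so the modulo when computing the key is
 * effectively a no-op and each pstate id gets a distinct key;
 * hash_add()/hash_for_each_possible() then fold that key into the
 * POWERNV_MAX_PSTATES_ORDER-bit bucket index.
 */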
static inline void reset_gpstates(struct cpufreq_policy *policy)
{
struct global_pstate_info *gpstates = policy->driver_data;
gpstates->highest_lpstate_idx = 0;
gpstates->elapsed_time = 0;
gpstates->last_sampled_time = 0;
gpstates->last_lpstate_idx = 0;
gpstates->last_gpstate_idx = 0;
}
/*
* Initialize the freq table based on data obtained
* from the firmware passed via device-tree
*/
static int init_powernv_pstates(void)
{
struct device_node *power_mgt;
int i, nr_pstates = 0;
const __be32 *pstate_ids, *pstate_freqs;
u32 len_ids, len_freqs;
u32 pstate_min, pstate_max, pstate_nominal;
u32 pstate_turbo, pstate_ultra_turbo;
int rc = -ENODEV;
power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
if (!power_mgt) {
pr_warn("power-mgt node not found\n");
return -ENODEV;
}
if (of_property_read_u32(power_mgt, "ibm,pstate-min", &pstate_min)) {
pr_warn("ibm,pstate-min node not found\n");
goto out;
}
if (of_property_read_u32(power_mgt, "ibm,pstate-max", &pstate_max)) {
pr_warn("ibm,pstate-max node not found\n");
goto out;
}
if (of_property_read_u32(power_mgt, "ibm,pstate-nominal",
&pstate_nominal)) {
pr_warn("ibm,pstate-nominal not found\n");
goto out;
}
if (of_property_read_u32(power_mgt, "ibm,pstate-ultra-turbo",
&pstate_ultra_turbo)) {
powernv_pstate_info.wof_enabled = false;
goto next;
}
if (of_property_read_u32(power_mgt, "ibm,pstate-turbo",
&pstate_turbo)) {
powernv_pstate_info.wof_enabled = false;
goto next;
}
if (pstate_turbo == pstate_ultra_turbo)
powernv_pstate_info.wof_enabled = false;
else
powernv_pstate_info.wof_enabled = true;
next:
pr_info("cpufreq pstate min 0x%x nominal 0x%x max 0x%x\n", pstate_min,
pstate_nominal, pstate_max);
pr_info("Workload Optimized Frequency is %s in the platform\n",
(powernv_pstate_info.wof_enabled) ? "enabled" : "disabled");
pstate_ids = of_get_property(power_mgt, "ibm,pstate-ids", &len_ids);
if (!pstate_ids) {
pr_warn("ibm,pstate-ids not found\n");
goto out;
}
pstate_freqs = of_get_property(power_mgt, "ibm,pstate-frequencies-mhz",
&len_freqs);
if (!pstate_freqs) {
pr_warn("ibm,pstate-frequencies-mhz not found\n");
goto out;
}
if (len_ids != len_freqs) {
pr_warn("Entries in ibm,pstate-ids and "
"ibm,pstate-frequencies-mhz does not match\n");
}
nr_pstates = min(len_ids, len_freqs) / sizeof(u32);
if (!nr_pstates) {
pr_warn("No PStates found\n");
goto out;
}
powernv_pstate_info.nr_pstates = nr_pstates;
pr_debug("NR PStates %d\n", nr_pstates);
for (i = 0; i < nr_pstates; i++) {
u32 id = be32_to_cpu(pstate_ids[i]);
u32 freq = be32_to_cpu(pstate_freqs[i]);
struct pstate_idx_revmap_data *revmap_data;
unsigned int key;
pr_debug("PState id %d freq %d MHz\n", id, freq);
powernv_freqs[i].frequency = freq * 1000; /* kHz */
powernv_freqs[i].driver_data = id & 0xFF;
revmap_data = kmalloc(sizeof(*revmap_data), GFP_KERNEL);
if (!revmap_data) {
rc = -ENOMEM;
goto out;
}
revmap_data->pstate_id = id & 0xFF;
revmap_data->cpufreq_table_idx = i;
key = (revmap_data->pstate_id) % POWERNV_MAX_PSTATES;
hash_add(pstate_revmap, &revmap_data->hentry, key);
if (id == pstate_max)
powernv_pstate_info.max = i;
if (id == pstate_nominal)
powernv_pstate_info.nominal = i;
if (id == pstate_min)
powernv_pstate_info.min = i;
if (powernv_pstate_info.wof_enabled && id == pstate_turbo) {
int j;
for (j = i - 1; j >= (int)powernv_pstate_info.max; j--)
powernv_freqs[j].flags = CPUFREQ_BOOST_FREQ;
}
}
/* End of list marker entry */
powernv_freqs[i].frequency = CPUFREQ_TABLE_END;
of_node_put(power_mgt);
return 0;
out:
of_node_put(power_mgt);
return rc;
}
/* Returns the CPU frequency corresponding to the pstate_id. */
static unsigned int pstate_id_to_freq(u8 pstate_id)
{
int i;
i = pstate_to_idx(pstate_id);
if (i >= powernv_pstate_info.nr_pstates || i < 0) {
pr_warn("PState id 0x%x outside of PState table, reporting nominal id 0x%x instead\n",
pstate_id, idx_to_pstate(powernv_pstate_info.nominal));
i = powernv_pstate_info.nominal;
}
return powernv_freqs[i].frequency;
}
/*
* cpuinfo_nominal_freq_show - Show the nominal CPU frequency as indicated by
* the firmware
*/
static ssize_t cpuinfo_nominal_freq_show(struct cpufreq_policy *policy,
char *buf)
{
return sprintf(buf, "%u\n",
powernv_freqs[powernv_pstate_info.nominal].frequency);
}
static struct freq_attr cpufreq_freq_attr_cpuinfo_nominal_freq =
__ATTR_RO(cpuinfo_nominal_freq);
#define SCALING_BOOST_FREQS_ATTR_INDEX 2
static struct freq_attr *powernv_cpu_freq_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
&cpufreq_freq_attr_cpuinfo_nominal_freq,
&cpufreq_freq_attr_scaling_boost_freqs,
NULL,
};
#define throttle_attr(name, member) \
static ssize_t name##_show(struct cpufreq_policy *policy, char *buf) \
{ \
struct chip *chip = per_cpu(chip_info, policy->cpu); \
\
return sprintf(buf, "%u\n", chip->member); \
} \
\
static struct freq_attr throttle_attr_##name = __ATTR_RO(name)

throttle_attr(unthrottle, reason[NO_THROTTLE]);
throttle_attr(powercap, reason[POWERCAP]);
throttle_attr(overtemp, reason[CPU_OVERTEMP]);
throttle_attr(supply_fault, reason[POWER_SUPPLY_FAILURE]);
throttle_attr(overcurrent, reason[OVERCURRENT]);
throttle_attr(occ_reset, reason[OCC_RESET_THROTTLE]);
throttle_attr(turbo_stat, throttle_turbo);
throttle_attr(sub_turbo_stat, throttle_sub_turbo);
static struct attribute *throttle_attrs[] = {
&throttle_attr_unthrottle.attr,
&throttle_attr_powercap.attr,
&throttle_attr_overtemp.attr,
&throttle_attr_supply_fault.attr,
&throttle_attr_overcurrent.attr,
&throttle_attr_occ_reset.attr,
&throttle_attr_turbo_stat.attr,
&throttle_attr_sub_turbo_stat.attr,
NULL,
};
static const struct attribute_group throttle_attr_grp = {
.name = "throttle_stats",
.attrs = throttle_attrs,
};
/* Helper routines */
/* Access helpers to power mgt SPR */
static inline unsigned long get_pmspr(unsigned long sprn)
{
switch (sprn) {
case SPRN_PMCR:
return mfspr(SPRN_PMCR);
case SPRN_PMICR:
return mfspr(SPRN_PMICR);
case SPRN_PMSR:
return mfspr(SPRN_PMSR);
}
BUG();
}
static inline void set_pmspr(unsigned long sprn, unsigned long val)
{
switch (sprn) {
case SPRN_PMCR:
mtspr(SPRN_PMCR, val);
return;
case SPRN_PMICR:
mtspr(SPRN_PMICR, val);
return;
}
BUG();
}
/*
* Use objects of this type to query/update
* pstates on a remote CPU via smp_call_function.
*/
struct powernv_smp_call_data {
unsigned int freq;
u8 pstate_id;
u8 gpstate_id;
};
/*
* powernv_read_cpu_freq: Reads the current frequency on this CPU.
*
* Called via smp_call_function.
*
* Note: The caller of the smp_call_function should pass an argument of
* the type 'struct powernv_smp_call_data *' along with this function.
*
* The current frequency on this CPU will be returned via
* ((struct powernv_smp_call_data *)arg)->freq;
*/
static void powernv_read_cpu_freq(void *arg)
{
unsigned long pmspr_val;
struct powernv_smp_call_data *freq_data = arg;
pmspr_val = get_pmspr(SPRN_PMSR);
freq_data->pstate_id = extract_local_pstate(pmspr_val);
freq_data->freq = pstate_id_to_freq(freq_data->pstate_id);
pr_debug("cpu %d pmsr %016lX pstate_id 0x%x frequency %d kHz\n",
raw_smp_processor_id(), pmspr_val, freq_data->pstate_id,
freq_data->freq);
}
/*
* powernv_cpufreq_get: Returns the CPU frequency as reported by the
* firmware for CPU 'cpu'. This value is reported through the sysfs
* file cpuinfo_cur_freq.
*/
static unsigned int powernv_cpufreq_get(unsigned int cpu)
{
struct powernv_smp_call_data freq_data;
smp_call_function_any(cpu_sibling_mask(cpu), powernv_read_cpu_freq,
&freq_data, 1);
return freq_data.freq;
}
/*
* set_pstate: Sets the pstate on this CPU.
*
* This is called via an smp_call_function.
*
* The caller must ensure that freq_data is of the type
* (struct powernv_smp_call_data *) and the pstate_id which needs to be set
* on this CPU should be present in freq_data->pstate_id.
*/
static void set_pstate(void *data)
{
unsigned long val;
struct powernv_smp_call_data *freq_data = data;
unsigned long pstate_ul = freq_data->pstate_id;
unsigned long gpstate_ul = freq_data->gpstate_id;
val = get_pmspr(SPRN_PMCR);
val = val & 0x0000FFFFFFFFFFFFULL;
pstate_ul = pstate_ul & 0xFF;
gpstate_ul = gpstate_ul & 0xFF;
/* Set both global(bits 56..63) and local(bits 48..55) PStates */
val = val | (gpstate_ul << 56) | (pstate_ul << 48);
pr_debug("Setting cpu %d pmcr to %016lX\n",
raw_smp_processor_id(), val);
set_pmspr(SPRN_PMCR, val);
}
/*
* get_nominal_index: Returns the index corresponding to the nominal
* pstate in the cpufreq table
*/
static inline unsigned int get_nominal_index(void)
{
return powernv_pstate_info.nominal;
}
static void powernv_cpufreq_throttle_check(void *data)
{
struct chip *chip;
unsigned int cpu = smp_processor_id();
unsigned long pmsr;
u8 pmsr_pmax;
unsigned int pmsr_pmax_idx;
pmsr = get_pmspr(SPRN_PMSR);
chip = this_cpu_read(chip_info);
/* Check for Pmax Capping */
pmsr_pmax = extract_max_pstate(pmsr);
pmsr_pmax_idx = pstate_to_idx(pmsr_pmax);
if (pmsr_pmax_idx != powernv_pstate_info.max) {
if (chip->throttled)
goto next;
chip->throttled = true;
if (pmsr_pmax_idx > powernv_pstate_info.nominal) {
pr_warn_once("CPU %d on Chip %u has Pmax(0x%x) reduced below that of nominal frequency(0x%x)\n",
cpu, chip->id, pmsr_pmax,
idx_to_pstate(powernv_pstate_info.nominal));
chip->throttle_sub_turbo++;
} else {
chip->throttle_turbo++;
}
trace_powernv_throttle(chip->id,
throttle_reason[chip->throttle_reason],
pmsr_pmax);
} else if (chip->throttled) {
chip->throttled = false;
trace_powernv_throttle(chip->id,
throttle_reason[chip->throttle_reason],
pmsr_pmax);
}
/* Check if Psafe_mode_active is set in PMSR. */
next:
if (pmsr & PMSR_PSAFE_ENABLE) {
throttled = true;
pr_info("Pstate set to safe frequency\n");
}
/* Check if SPR_EM_DISABLE is set in PMSR */
if (pmsr & PMSR_SPR_EM_DISABLE) {
throttled = true;
pr_info("Frequency Control disabled from OS\n");
}
if (throttled) {
pr_info("PMSR = %16lx\n", pmsr);
pr_warn("CPU Frequency could be throttled\n");
}
}
/**
* calc_global_pstate - Calculate global pstate
* @elapsed_time: Elapsed time in milliseconds
* @local_pstate_idx: New local pstate
 * @highest_lpstate_idx: pstate index from which we are ramping down
 *
 * Finds the appropriate global pstate based on the pstate from which we are
 * ramping down and the time elapsed while ramping down. It follows a
 * quadratic equation which ensures that the ramp-down reaches pmin within
 * 5 seconds.
 */
static inline int calc_global_pstate(unsigned int elapsed_time,
int highest_lpstate_idx,
int local_pstate_idx)
{
int index_diff;
	/*
	 * ramp_down_percent gives the percentage of the ramp-down that
	 * should have happened by now. The difference between
	 * highest_lpstate_idx and powernv_pstate_info.min is the absolute
	 * number of pstates we will eventually drop by the end of 5 seconds;
	 * scaling it by that percentage gives the number of pstates to drop
	 * so far.
	 */
index_diff = ((int)ramp_down_percent(elapsed_time) *
(powernv_pstate_info.min - highest_lpstate_idx)) / 100;
/* Ensure that global pstate is >= to local pstate */
if (highest_lpstate_idx + index_diff >= local_pstate_idx)
return local_pstate_idx;
else
return highest_lpstate_idx + index_diff;
}
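/*
 * Worked example (illustrative): with highest_lpstate_idx = 0 (the
 * fastest pstate), powernv_pstate_info.min = 20 and elapsed_time =
 * 2560 ms, ramp_down_percent() is 25, so
 * index_diff = 25 * (20 - 0) / 100 = 5: the global pstate has ramped a
 * quarter of the way down, unless the requested local pstate is already
 * at index 5 or below, in which case the two are kept equal.
 */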
static inline void queue_gpstate_timer(struct global_pstate_info *gpstates)
{
unsigned int timer_interval;
/*
	 * Set the timer to fire after GPSTATE_TIMER_INTERVAL ms, but if
	 * that would push the total ramp-down time past MAX_RAMP_DOWN_TIME
	 * ms, set it so that it fires exactly when MAX_RAMP_DOWN_TIME ms
	 * of ramp-down time have elapsed.
*/
if ((gpstates->elapsed_time + GPSTATE_TIMER_INTERVAL)
> MAX_RAMP_DOWN_TIME)
timer_interval = MAX_RAMP_DOWN_TIME - gpstates->elapsed_time;
else
timer_interval = GPSTATE_TIMER_INTERVAL;
mod_timer(&gpstates->timer, jiffies + msecs_to_jiffies(timer_interval));
}
/**
* gpstate_timer_handler
*
* @t: Timer context used to fetch global pstate info struct
*
 * This handler brings the global pstate down closer to the local pstate
 * according to the quadratic equation, and queues a new timer if the two
 * are still not equal.
*/
static void gpstate_timer_handler(struct timer_list *t)
{
struct global_pstate_info *gpstates = from_timer(gpstates, t, timer);
struct cpufreq_policy *policy = gpstates->policy;
int gpstate_idx, lpstate_idx;
unsigned long val;
unsigned int time_diff = jiffies_to_msecs(jiffies)
- gpstates->last_sampled_time;
struct powernv_smp_call_data freq_data;
if (!spin_trylock(&gpstates->gpstate_lock))
return;
/*
	 * If the timer has migrated to a different cpu then bring
	 * it back to one of the policy->cpus
*/
if (!cpumask_test_cpu(raw_smp_processor_id(), policy->cpus)) {
gpstates->timer.expires = jiffies + msecs_to_jiffies(1);
add_timer_on(&gpstates->timer, cpumask_first(policy->cpus));
spin_unlock(&gpstates->gpstate_lock);
return;
}
/*
	 * If PMCR was last updated using fast_switch then the value in
	 * gpstates->last_lpstate_idx may be stale. Hence, read from PMCR
	 * to get correct data.
*/
val = get_pmspr(SPRN_PMCR);
freq_data.gpstate_id = extract_global_pstate(val);
freq_data.pstate_id = extract_local_pstate(val);
if (freq_data.gpstate_id == freq_data.pstate_id) {
reset_gpstates(policy);
spin_unlock(&gpstates->gpstate_lock);
return;
}
gpstates->last_sampled_time += time_diff;
gpstates->elapsed_time += time_diff;
if (gpstates->elapsed_time > MAX_RAMP_DOWN_TIME) {
gpstate_idx = pstate_to_idx(freq_data.pstate_id);
lpstate_idx = gpstate_idx;
reset_gpstates(policy);
gpstates->highest_lpstate_idx = gpstate_idx;
} else {
lpstate_idx = pstate_to_idx(freq_data.pstate_id);
gpstate_idx = calc_global_pstate(gpstates->elapsed_time,
gpstates->highest_lpstate_idx,
lpstate_idx);
}
freq_data.gpstate_id = idx_to_pstate(gpstate_idx);
gpstates->last_gpstate_idx = gpstate_idx;
gpstates->last_lpstate_idx = lpstate_idx;
/*
	 * If the local pstate is equal to the global pstate, the ramp-down
	 * is over, so the timer need not be queued.
*/
if (gpstate_idx != gpstates->last_lpstate_idx)
queue_gpstate_timer(gpstates);
set_pstate(&freq_data);
spin_unlock(&gpstates->gpstate_lock);
}
/*
* powernv_cpufreq_target_index: Sets the frequency corresponding to
* the cpufreq table entry indexed by new_index on the cpus in the
* mask policy->cpus
*/
static int powernv_cpufreq_target_index(struct cpufreq_policy *policy,
unsigned int new_index)
{
struct powernv_smp_call_data freq_data;
unsigned int cur_msec, gpstate_idx;
struct global_pstate_info *gpstates = policy->driver_data;
if (unlikely(rebooting) && new_index != get_nominal_index())
return 0;
if (!throttled) {
/* we don't want to be preempted while
* checking if the CPU frequency has been throttled
*/
preempt_disable();
powernv_cpufreq_throttle_check(NULL);
preempt_enable();
}
cur_msec = jiffies_to_msecs(get_jiffies_64());
freq_data.pstate_id = idx_to_pstate(new_index);
if (!gpstates) {
freq_data.gpstate_id = freq_data.pstate_id;
goto no_gpstate;
}
spin_lock(&gpstates->gpstate_lock);
if (!gpstates->last_sampled_time) {
gpstate_idx = new_index;
gpstates->highest_lpstate_idx = new_index;
goto gpstates_done;
}
if (gpstates->last_gpstate_idx < new_index) {
gpstates->elapsed_time += cur_msec -
gpstates->last_sampled_time;
/*
		 * If it has been ramping down for more than MAX_RAMP_DOWN_TIME,
		 * reset all global pstate related data and set it equal to the
		 * local pstate to start fresh.
*/
if (gpstates->elapsed_time > MAX_RAMP_DOWN_TIME) {
reset_gpstates(policy);
gpstates->highest_lpstate_idx = new_index;
gpstate_idx = new_index;
} else {
			/* Elapsed time is less than 5 seconds, continue the ramp-down */
gpstate_idx = calc_global_pstate(gpstates->elapsed_time,
gpstates->highest_lpstate_idx,
new_index);
}
} else {
reset_gpstates(policy);
gpstates->highest_lpstate_idx = new_index;
gpstate_idx = new_index;
}
/*
	 * If the local pstate is equal to the global pstate, the ramp-down
	 * is over, so the timer need not be queued.
*/
if (gpstate_idx != new_index)
queue_gpstate_timer(gpstates);
else
del_timer_sync(&gpstates->timer);
gpstates_done:
freq_data.gpstate_id = idx_to_pstate(gpstate_idx);
gpstates->last_sampled_time = cur_msec;
gpstates->last_gpstate_idx = gpstate_idx;
gpstates->last_lpstate_idx = new_index;
spin_unlock(&gpstates->gpstate_lock);
no_gpstate:
/*
* Use smp_call_function to send IPI and execute the
* mtspr on target CPU. We could do that without IPI
* if current CPU is within policy->cpus (core)
*/
smp_call_function_any(policy->cpus, set_pstate, &freq_data, 1);
return 0;
}
static int powernv_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
int base, i;
struct kernfs_node *kn;
struct global_pstate_info *gpstates;
base = cpu_first_thread_sibling(policy->cpu);
for (i = 0; i < threads_per_core; i++)
cpumask_set_cpu(base + i, policy->cpus);
kn = kernfs_find_and_get(policy->kobj.sd, throttle_attr_grp.name);
if (!kn) {
int ret;
ret = sysfs_create_group(&policy->kobj, &throttle_attr_grp);
if (ret) {
pr_info("Failed to create throttle stats directory for cpu %d\n",
policy->cpu);
return ret;
}
} else {
kernfs_put(kn);
}
policy->freq_table = powernv_freqs;
policy->fast_switch_possible = true;
if (pvr_version_is(PVR_POWER9))
return 0;
/* Initialise Gpstate ramp-down timer only on POWER8 */
gpstates = kzalloc(sizeof(*gpstates), GFP_KERNEL);
if (!gpstates)
return -ENOMEM;
policy->driver_data = gpstates;
/* initialize timer */
gpstates->policy = policy;
timer_setup(&gpstates->timer, gpstate_timer_handler,
TIMER_PINNED | TIMER_DEFERRABLE);
gpstates->timer.expires = jiffies +
msecs_to_jiffies(GPSTATE_TIMER_INTERVAL);
spin_lock_init(&gpstates->gpstate_lock);
return 0;
}
static void powernv_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
struct powernv_smp_call_data freq_data;
struct global_pstate_info *gpstates = policy->driver_data;
freq_data.pstate_id = idx_to_pstate(powernv_pstate_info.min);
freq_data.gpstate_id = idx_to_pstate(powernv_pstate_info.min);
smp_call_function_single(policy->cpu, set_pstate, &freq_data, 1);
if (gpstates)
del_timer_sync(&gpstates->timer);
kfree(policy->driver_data);
}
static int powernv_cpufreq_reboot_notifier(struct notifier_block *nb,
unsigned long action, void *unused)
{
int cpu;
struct cpufreq_policy *cpu_policy;
rebooting = true;
for_each_online_cpu(cpu) {
cpu_policy = cpufreq_cpu_get(cpu);
if (!cpu_policy)
continue;
powernv_cpufreq_target_index(cpu_policy, get_nominal_index());
cpufreq_cpu_put(cpu_policy);
}
return NOTIFY_DONE;
}
static struct notifier_block powernv_cpufreq_reboot_nb = {
.notifier_call = powernv_cpufreq_reboot_notifier,
};
static void powernv_cpufreq_work_fn(struct work_struct *work)
{
struct chip *chip = container_of(work, struct chip, throttle);
struct cpufreq_policy *policy;
unsigned int cpu;
cpumask_t mask;
cpus_read_lock();
cpumask_and(&mask, &chip->mask, cpu_online_mask);
smp_call_function_any(&mask,
powernv_cpufreq_throttle_check, NULL, 0);
if (!chip->restore)
goto out;
chip->restore = false;
for_each_cpu(cpu, &mask) {
int index;
policy = cpufreq_cpu_get(cpu);
if (!policy)
continue;
index = cpufreq_table_find_index_c(policy, policy->cur, false);
powernv_cpufreq_target_index(policy, index);
cpumask_andnot(&mask, &mask, policy->cpus);
cpufreq_cpu_put(policy);
}
out:
cpus_read_unlock();
}
static int powernv_cpufreq_occ_msg(struct notifier_block *nb,
unsigned long msg_type, void *_msg)
{
struct opal_msg *msg = _msg;
struct opal_occ_msg omsg;
int i;
if (msg_type != OPAL_MSG_OCC)
return 0;
omsg.type = be64_to_cpu(msg->params[0]);
switch (omsg.type) {
case OCC_RESET:
occ_reset = true;
pr_info("OCC (On Chip Controller - enforces hard thermal/power limits) Resetting\n");
/*
* powernv_cpufreq_throttle_check() is called in
* target() callback which can detect the throttle state
* for governors like ondemand.
* But static governors will not call target() often thus
* report throttling here.
*/
if (!throttled) {
throttled = true;
pr_warn("CPU frequency is throttled for duration\n");
}
break;
case OCC_LOAD:
pr_info("OCC Loading, CPU frequency is throttled until OCC is started\n");
break;
case OCC_THROTTLE:
omsg.chip = be64_to_cpu(msg->params[1]);
omsg.throttle_status = be64_to_cpu(msg->params[2]);
if (occ_reset) {
occ_reset = false;
throttled = false;
pr_info("OCC Active, CPU frequency is no longer throttled\n");
for (i = 0; i < nr_chips; i++) {
chips[i].restore = true;
schedule_work(&chips[i].throttle);
}
return 0;
}
for (i = 0; i < nr_chips; i++)
if (chips[i].id == omsg.chip)
break;
if (omsg.throttle_status >= 0 &&
omsg.throttle_status <= OCC_MAX_THROTTLE_STATUS) {
chips[i].throttle_reason = omsg.throttle_status;
chips[i].reason[omsg.throttle_status]++;
}
if (!omsg.throttle_status)
chips[i].restore = true;
schedule_work(&chips[i].throttle);
}
return 0;
}
static struct notifier_block powernv_cpufreq_opal_nb = {
.notifier_call = powernv_cpufreq_occ_msg,
.next = NULL,
.priority = 0,
};
static unsigned int powernv_fast_switch(struct cpufreq_policy *policy,
unsigned int target_freq)
{
int index;
struct powernv_smp_call_data freq_data;
index = cpufreq_table_find_index_dl(policy, target_freq, false);
freq_data.pstate_id = powernv_freqs[index].driver_data;
freq_data.gpstate_id = powernv_freqs[index].driver_data;
set_pstate(&freq_data);
return powernv_freqs[index].frequency;
}
static struct cpufreq_driver powernv_cpufreq_driver = {
.name = "powernv-cpufreq",
.flags = CPUFREQ_CONST_LOOPS,
.init = powernv_cpufreq_cpu_init,
.exit = powernv_cpufreq_cpu_exit,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = powernv_cpufreq_target_index,
.fast_switch = powernv_fast_switch,
.get = powernv_cpufreq_get,
.attr = powernv_cpu_freq_attr,
};
static int init_chip_info(void)
{
unsigned int *chip;
unsigned int cpu, i;
unsigned int prev_chip_id = UINT_MAX;
cpumask_t *chip_cpu_mask;
int ret = 0;
chip = kcalloc(num_possible_cpus(), sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
/* Allocate a chip cpu mask large enough to fit mask for all chips */
chip_cpu_mask = kcalloc(MAX_NR_CHIPS, sizeof(cpumask_t), GFP_KERNEL);
if (!chip_cpu_mask) {
ret = -ENOMEM;
goto free_and_return;
}
for_each_possible_cpu(cpu) {
unsigned int id = cpu_to_chip_id(cpu);
if (prev_chip_id != id) {
prev_chip_id = id;
chip[nr_chips++] = id;
}
cpumask_set_cpu(cpu, &chip_cpu_mask[nr_chips-1]);
}
chips = kcalloc(nr_chips, sizeof(struct chip), GFP_KERNEL);
if (!chips) {
ret = -ENOMEM;
goto out_free_chip_cpu_mask;
}
for (i = 0; i < nr_chips; i++) {
chips[i].id = chip[i];
cpumask_copy(&chips[i].mask, &chip_cpu_mask[i]);
INIT_WORK(&chips[i].throttle, powernv_cpufreq_work_fn);
for_each_cpu(cpu, &chips[i].mask)
per_cpu(chip_info, cpu) = &chips[i];
}
out_free_chip_cpu_mask:
kfree(chip_cpu_mask);
free_and_return:
kfree(chip);
return ret;
}
static inline void clean_chip_info(void)
{
int i;
/* flush any pending work items */
if (chips)
for (i = 0; i < nr_chips; i++)
cancel_work_sync(&chips[i].throttle);
kfree(chips);
}
static inline void unregister_all_notifiers(void)
{
opal_message_notifier_unregister(OPAL_MSG_OCC,
&powernv_cpufreq_opal_nb);
unregister_reboot_notifier(&powernv_cpufreq_reboot_nb);
}
static int __init powernv_cpufreq_init(void)
{
int rc = 0;
/* Don't probe on pseries (guest) platforms */
if (!firmware_has_feature(FW_FEATURE_OPAL))
return -ENODEV;
/* Discover pstates from device tree and init */
rc = init_powernv_pstates();
if (rc)
goto out;
/* Populate chip info */
rc = init_chip_info();
if (rc)
goto out;
if (powernv_pstate_info.wof_enabled)
powernv_cpufreq_driver.boost_enabled = true;
else
powernv_cpu_freq_attr[SCALING_BOOST_FREQS_ATTR_INDEX] = NULL;
rc = cpufreq_register_driver(&powernv_cpufreq_driver);
if (rc) {
pr_info("Failed to register the cpufreq driver (%d)\n", rc);
goto cleanup;
}
if (powernv_pstate_info.wof_enabled)
cpufreq_enable_boost_support();
register_reboot_notifier(&powernv_cpufreq_reboot_nb);
opal_message_notifier_register(OPAL_MSG_OCC, &powernv_cpufreq_opal_nb);
return 0;
cleanup:
clean_chip_info();
out:
pr_info("Platform driver disabled. System does not support PState control\n");
return rc;
}
module_init(powernv_cpufreq_init);
static void __exit powernv_cpufreq_exit(void)
{
cpufreq_unregister_driver(&powernv_cpufreq_driver);
unregister_all_notifiers();
clean_chip_info();
}
module_exit(powernv_cpufreq_exit);
MODULE_DESCRIPTION("cpufreq driver for IBM/OpenPOWER powernv systems");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vaidyanathan Srinivasan <svaidy at linux.vnet.ibm.com>");
|
// SPDX-License-Identifier: GPL-2.0
#include <linux/slab.h>
#include "slab.h"	/* mm-internal header that defines enum slab_state */
enum slab_state slab_state;
bool slab_is_available(void)
{
return slab_state >= UP;
}
|
// SPDX-License-Identifier: GPL-2.0+ OR MIT
/*
* Apple iPhone 7 Plus (Qualcomm), D11, iPhone9,2 (A1661/A1784/A1785/A1786)
* Copyright (c) 2022, Konrad Dybcio <[email protected]>
*/
/dts-v1/;
#include "t8010-7.dtsi"
/ {
compatible = "apple,d11", "apple,t8010", "apple,arm-platform";
model = "Apple iPhone 7 Plus (Qualcomm)";
};
|
// SPDX-License-Identifier: GPL-2.0-only
/* Atlantic Network Driver
*
* Copyright (C) 2014-2019 aQuantia Corporation
* Copyright (C) 2019-2020 Marvell International Ltd.
*/
/* File aq_ring.c: Definition of functions for Rx/Tx rings. */
#include "aq_nic.h"
#include "aq_hw.h"
#include "aq_hw_utils.h"
#include "aq_ptp.h"
#include "aq_vec.h"
#include "aq_main.h"
#include <net/xdp.h>
#include <linux/filter.h>
#include <linux/bpf_trace.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
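/* Take an extra reference on every page backing the XDP buffer (the head
 * page and, for multi-buffer packets, each fragment page) so the pages
 * survive while the stack still owns the resulting frame.
 */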
static void aq_get_rxpages_xdp(struct aq_ring_buff_s *buff,
struct xdp_buff *xdp)
{
struct skb_shared_info *sinfo;
int i;
if (xdp_buff_has_frags(xdp)) {
sinfo = xdp_get_shared_info_from_buff(xdp);
for (i = 0; i < sinfo->nr_frags; i++) {
skb_frag_t *frag = &sinfo->frags[i];
page_ref_inc(skb_frag_page(frag));
}
}
page_ref_inc(buff->rxdata.page);
}
static inline void aq_free_rxpage(struct aq_rxpage *rxpage, struct device *dev)
{
unsigned int len = PAGE_SIZE << rxpage->order;
dma_unmap_page(dev, rxpage->daddr, len, DMA_FROM_DEVICE);
/* Drop the ref for being in the ring. */
__free_pages(rxpage->page, rxpage->order);
rxpage->page = NULL;
}
static int aq_alloc_rxpages(struct aq_rxpage *rxpage, struct aq_ring_s *rx_ring)
{
struct device *dev = aq_nic_get_dev(rx_ring->aq_nic);
unsigned int order = rx_ring->page_order;
struct page *page;
int ret = -ENOMEM;
dma_addr_t daddr;
page = dev_alloc_pages(order);
if (unlikely(!page))
goto err_exit;
daddr = dma_map_page(dev, page, 0, PAGE_SIZE << order,
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(dev, daddr)))
goto free_page;
rxpage->page = page;
rxpage->daddr = daddr;
rxpage->order = order;
rxpage->pg_off = rx_ring->page_offset;
return 0;
free_page:
__free_pages(page, order);
err_exit:
return ret;
}
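/* Prepare the RX buffer's backing page. A page owned solely by the ring is
 * reused in place; a shared page is either "flipped" to the next
 * frame-sized slice or, once exhausted, released and reallocated. The
 * pg_flips/pg_reuses/pg_losts counters track which path was taken.
 */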
static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf)
{
unsigned int order = self->page_order;
u16 page_offset = self->page_offset;
u16 frame_max = self->frame_max;
u16 tail_size = self->tail_size;
int ret;
if (rxbuf->rxdata.page) {
		/* A refcount of one means the ring is the only
		 * user and the page can be reused in place.
		 */
if (page_ref_count(rxbuf->rxdata.page) > 1) {
/* Try reuse buffer */
rxbuf->rxdata.pg_off += frame_max + page_offset +
tail_size;
if (rxbuf->rxdata.pg_off + frame_max + tail_size <=
(PAGE_SIZE << order)) {
u64_stats_update_begin(&self->stats.rx.syncp);
self->stats.rx.pg_flips++;
u64_stats_update_end(&self->stats.rx.syncp);
} else {
				/* Page exhausted while still shared with
				 * other users: release it and reallocate.
				 */
aq_free_rxpage(&rxbuf->rxdata,
aq_nic_get_dev(self->aq_nic));
u64_stats_update_begin(&self->stats.rx.syncp);
self->stats.rx.pg_losts++;
u64_stats_update_end(&self->stats.rx.syncp);
}
} else {
rxbuf->rxdata.pg_off = page_offset;
u64_stats_update_begin(&self->stats.rx.syncp);
self->stats.rx.pg_reuses++;
u64_stats_update_end(&self->stats.rx.syncp);
}
}
if (!rxbuf->rxdata.page) {
ret = aq_alloc_rxpages(&rxbuf->rxdata, self);
if (ret) {
u64_stats_update_begin(&self->stats.rx.syncp);
self->stats.rx.alloc_fails++;
u64_stats_update_end(&self->stats.rx.syncp);
}
return ret;
}
return 0;
}
static int aq_ring_alloc(struct aq_ring_s *self,
struct aq_nic_s *aq_nic)
{
int err = 0;
self->buff_ring =
kcalloc(self->size, sizeof(struct aq_ring_buff_s), GFP_KERNEL);
if (!self->buff_ring) {
err = -ENOMEM;
goto err_exit;
}
self->dx_ring = dma_alloc_coherent(aq_nic_get_dev(aq_nic),
self->size * self->dx_size,
&self->dx_ring_pa, GFP_KERNEL);
if (!self->dx_ring) {
err = -ENOMEM;
goto err_exit;
}
err_exit:
if (err < 0) {
aq_ring_free(self);
}
return err;
}
int aq_ring_tx_alloc(struct aq_ring_s *self,
struct aq_nic_s *aq_nic,
unsigned int idx,
struct aq_nic_cfg_s *aq_nic_cfg)
{
self->aq_nic = aq_nic;
self->idx = idx;
self->size = aq_nic_cfg->txds;
self->dx_size = aq_nic_cfg->aq_hw_caps->txd_size;
return aq_ring_alloc(self, aq_nic);
}
int aq_ring_rx_alloc(struct aq_ring_s *self,
struct aq_nic_s *aq_nic,
unsigned int idx,
struct aq_nic_cfg_s *aq_nic_cfg)
{
self->aq_nic = aq_nic;
self->idx = idx;
self->size = aq_nic_cfg->rxds;
self->dx_size = aq_nic_cfg->aq_hw_caps->rxd_size;
self->xdp_prog = aq_nic->xdp_prog;
self->frame_max = AQ_CFG_RX_FRAME_MAX;
/* Only order-2 is allowed if XDP is enabled */
if (READ_ONCE(self->xdp_prog)) {
self->page_offset = AQ_XDP_HEADROOM;
self->page_order = AQ_CFG_XDP_PAGEORDER;
self->tail_size = AQ_XDP_TAILROOM;
} else {
self->page_offset = 0;
self->page_order = fls(self->frame_max / PAGE_SIZE +
(self->frame_max % PAGE_SIZE ? 1 : 0)) - 1;
if (aq_nic_cfg->rxpageorder > self->page_order)
self->page_order = aq_nic_cfg->rxpageorder;
self->tail_size = 0;
}
return aq_ring_alloc(self, aq_nic);
}
int
aq_ring_hwts_rx_alloc(struct aq_ring_s *self, struct aq_nic_s *aq_nic,
unsigned int idx, unsigned int size, unsigned int dx_size)
{
struct device *dev = aq_nic_get_dev(aq_nic);
size_t sz = size * dx_size + AQ_CFG_RXDS_DEF;
memset(self, 0, sizeof(*self));
self->aq_nic = aq_nic;
self->idx = idx;
self->size = size;
self->dx_size = dx_size;
self->dx_ring = dma_alloc_coherent(dev, sz, &self->dx_ring_pa,
GFP_KERNEL);
if (!self->dx_ring) {
aq_ring_free(self);
return -ENOMEM;
}
return 0;
}
int aq_ring_init(struct aq_ring_s *self, const enum atl_ring_type ring_type)
{
self->hw_head = 0;
self->sw_head = 0;
self->sw_tail = 0;
self->ring_type = ring_type;
if (self->ring_type == ATL_RING_RX)
u64_stats_init(&self->stats.rx.syncp);
else
u64_stats_init(&self->stats.tx.syncp);
return 0;
}
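/* Return true if descriptor index i lies strictly between head h and
 * tail t, taking ring wraparound into account.
 */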
static inline bool aq_ring_dx_in_range(unsigned int h, unsigned int i,
unsigned int t)
{
return (h < t) ? ((h < i) && (i < t)) : ((h < i) || (i < t));
}
void aq_ring_update_queue_state(struct aq_ring_s *ring)
{
if (aq_ring_avail_dx(ring) <= AQ_CFG_SKB_FRAGS_MAX)
aq_ring_queue_stop(ring);
else if (aq_ring_avail_dx(ring) > AQ_CFG_RESTART_DESC_THRES)
aq_ring_queue_wake(ring);
}
void aq_ring_queue_wake(struct aq_ring_s *ring)
{
struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);
if (__netif_subqueue_stopped(ndev,
AQ_NIC_RING2QMAP(ring->aq_nic,
ring->idx))) {
netif_wake_subqueue(ndev,
AQ_NIC_RING2QMAP(ring->aq_nic, ring->idx));
u64_stats_update_begin(&ring->stats.tx.syncp);
ring->stats.tx.queue_restarts++;
u64_stats_update_end(&ring->stats.tx.syncp);
}
}
void aq_ring_queue_stop(struct aq_ring_s *ring)
{
struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);
if (!__netif_subqueue_stopped(ndev,
AQ_NIC_RING2QMAP(ring->aq_nic,
ring->idx)))
netif_stop_subqueue(ndev,
AQ_NIC_RING2QMAP(ring->aq_nic, ring->idx));
}
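/* Reclaim completed TX descriptors up to hw_head: unmap DMA buffers,
 * account packet/byte stats on end-of-packet descriptors, and free the
 * skb or XDP frame. Returns true if the budget was not exhausted.
 */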
bool aq_ring_tx_clean(struct aq_ring_s *self)
{
struct device *dev = aq_nic_get_dev(self->aq_nic);
unsigned int budget;
for (budget = AQ_CFG_TX_CLEAN_BUDGET;
budget && self->sw_head != self->hw_head; budget--) {
struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
if (likely(buff->is_mapped)) {
if (unlikely(buff->is_sop)) {
if (!buff->is_eop &&
buff->eop_index != 0xffffU &&
(!aq_ring_dx_in_range(self->sw_head,
buff->eop_index,
self->hw_head)))
break;
dma_unmap_single(dev, buff->pa, buff->len,
DMA_TO_DEVICE);
} else {
dma_unmap_page(dev, buff->pa, buff->len,
DMA_TO_DEVICE);
}
}
if (likely(!buff->is_eop))
goto out;
if (buff->skb) {
u64_stats_update_begin(&self->stats.tx.syncp);
++self->stats.tx.packets;
self->stats.tx.bytes += buff->skb->len;
u64_stats_update_end(&self->stats.tx.syncp);
dev_kfree_skb_any(buff->skb);
} else if (buff->xdpf) {
u64_stats_update_begin(&self->stats.tx.syncp);
++self->stats.tx.packets;
self->stats.tx.bytes += xdp_get_frame_len(buff->xdpf);
u64_stats_update_end(&self->stats.tx.syncp);
xdp_return_frame_rx_napi(buff->xdpf);
}
out:
buff->skb = NULL;
buff->xdpf = NULL;
buff->pa = 0U;
buff->eop_index = 0xffffU;
self->sw_head = aq_ring_next_dx(self, self->sw_head);
}
return !!budget;
}
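/* Translate the hardware checksum-offload bits on the RX descriptor into
 * skb->ip_summed, counting descriptors that carry checksum errors.
 */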
static void aq_rx_checksum(struct aq_ring_s *self,
struct aq_ring_buff_s *buff,
struct sk_buff *skb)
{
if (!(self->aq_nic->ndev->features & NETIF_F_RXCSUM))
return;
if (unlikely(buff->is_cso_err)) {
u64_stats_update_begin(&self->stats.rx.syncp);
++self->stats.rx.errors;
u64_stats_update_end(&self->stats.rx.syncp);
skb->ip_summed = CHECKSUM_NONE;
return;
}
if (buff->is_ip_cso) {
__skb_incr_checksum_unnecessary(skb);
} else {
skb->ip_summed = CHECKSUM_NONE;
}
if (buff->is_udp_cso || buff->is_tcp_cso)
__skb_incr_checksum_unnecessary(skb);
}
int aq_xdp_xmit(struct net_device *dev, int num_frames,
struct xdp_frame **frames, u32 flags)
{
struct aq_nic_s *aq_nic = netdev_priv(dev);
unsigned int vec, i, drop = 0;
int cpu = smp_processor_id();
struct aq_nic_cfg_s *aq_cfg;
struct aq_ring_s *ring;
aq_cfg = aq_nic_get_cfg(aq_nic);
vec = cpu % aq_cfg->vecs;
ring = aq_nic->aq_ring_tx[AQ_NIC_CFG_TCVEC2RING(aq_cfg, 0, vec)];
for (i = 0; i < num_frames; i++) {
struct xdp_frame *xdpf = frames[i];
if (aq_nic_xmit_xdpf(aq_nic, ring, xdpf) == NETDEV_TX_BUSY)
drop++;
}
return num_frames - drop;
}
static struct sk_buff *aq_xdp_build_skb(struct xdp_buff *xdp,
struct net_device *dev,
struct aq_ring_buff_s *buff)
{
struct xdp_frame *xdpf;
struct sk_buff *skb;
xdpf = xdp_convert_buff_to_frame(xdp);
if (unlikely(!xdpf))
return NULL;
skb = xdp_build_skb_from_frame(xdpf, dev);
if (!skb)
return NULL;
aq_get_rxpages_xdp(buff, xdp);
return skb;
}
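/* Run the attached XDP program (if any) on the received buffer and act on
 * its verdict: build an skb on XDP_PASS, transmit on XDP_TX, redirect on
 * XDP_REDIRECT, and count drops/aborts otherwise. Returns the skb for the
 * stack, or an error/NULL pointer when the buffer was consumed or dropped.
 */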
static struct sk_buff *aq_xdp_run_prog(struct aq_nic_s *aq_nic,
struct xdp_buff *xdp,
struct aq_ring_s *rx_ring,
struct aq_ring_buff_s *buff)
{
int result = NETDEV_TX_BUSY;
struct aq_ring_s *tx_ring;
struct xdp_frame *xdpf;
struct bpf_prog *prog;
u32 act = XDP_ABORTED;
struct sk_buff *skb;
u64_stats_update_begin(&rx_ring->stats.rx.syncp);
++rx_ring->stats.rx.packets;
rx_ring->stats.rx.bytes += xdp_get_buff_len(xdp);
u64_stats_update_end(&rx_ring->stats.rx.syncp);
prog = READ_ONCE(rx_ring->xdp_prog);
if (!prog)
return aq_xdp_build_skb(xdp, aq_nic->ndev, buff);
prefetchw(xdp->data_hard_start); /* xdp_frame write */
	/* Single-buffer XDP program but a multi-buffer packet: abort */
if (xdp_buff_has_frags(xdp) && !prog->aux->xdp_has_frags)
goto out_aborted;
act = bpf_prog_run_xdp(prog, xdp);
switch (act) {
case XDP_PASS:
skb = aq_xdp_build_skb(xdp, aq_nic->ndev, buff);
if (!skb)
goto out_aborted;
u64_stats_update_begin(&rx_ring->stats.rx.syncp);
++rx_ring->stats.rx.xdp_pass;
u64_stats_update_end(&rx_ring->stats.rx.syncp);
return skb;
case XDP_TX:
xdpf = xdp_convert_buff_to_frame(xdp);
if (unlikely(!xdpf))
goto out_aborted;
tx_ring = aq_nic->aq_ring_tx[rx_ring->idx];
result = aq_nic_xmit_xdpf(aq_nic, tx_ring, xdpf);
if (result == NETDEV_TX_BUSY)
goto out_aborted;
u64_stats_update_begin(&rx_ring->stats.rx.syncp);
++rx_ring->stats.rx.xdp_tx;
u64_stats_update_end(&rx_ring->stats.rx.syncp);
aq_get_rxpages_xdp(buff, xdp);
break;
case XDP_REDIRECT:
if (xdp_do_redirect(aq_nic->ndev, xdp, prog) < 0)
goto out_aborted;
xdp_do_flush();
u64_stats_update_begin(&rx_ring->stats.rx.syncp);
++rx_ring->stats.rx.xdp_redirect;
u64_stats_update_end(&rx_ring->stats.rx.syncp);
aq_get_rxpages_xdp(buff, xdp);
break;
default:
fallthrough;
case XDP_ABORTED:
out_aborted:
u64_stats_update_begin(&rx_ring->stats.rx.syncp);
++rx_ring->stats.rx.xdp_aborted;
u64_stats_update_end(&rx_ring->stats.rx.syncp);
trace_xdp_exception(aq_nic->ndev, prog, act);
bpf_warn_invalid_xdp_action(aq_nic->ndev, prog, act);
break;
case XDP_DROP:
u64_stats_update_begin(&rx_ring->stats.rx.syncp);
++rx_ring->stats.rx.xdp_drop;
u64_stats_update_end(&rx_ring->stats.rx.syncp);
break;
}
return ERR_PTR(-result);
}
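/* Collect the remaining descriptors of a multi-buffer packet into the
 * xdp_buff's shared info as page fragments. Returns true if the packet
 * needs more fragments than MAX_SKB_FRAGS and must be dropped.
 */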
static bool aq_add_rx_fragment(struct device *dev,
struct aq_ring_s *ring,
struct aq_ring_buff_s *buff,
struct xdp_buff *xdp)
{
struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
struct aq_ring_buff_s *buff_ = buff;
memset(sinfo, 0, sizeof(*sinfo));
do {
skb_frag_t *frag;
if (unlikely(sinfo->nr_frags >= MAX_SKB_FRAGS))
return true;
frag = &sinfo->frags[sinfo->nr_frags++];
buff_ = &ring->buff_ring[buff_->next];
dma_sync_single_range_for_cpu(dev,
buff_->rxdata.daddr,
buff_->rxdata.pg_off,
buff_->len,
DMA_FROM_DEVICE);
sinfo->xdp_frags_size += buff_->len;
skb_frag_fill_page_desc(frag, buff_->rxdata.page,
buff_->rxdata.pg_off,
buff_->len);
buff_->is_cleaned = 1;
buff->is_ip_cso &= buff_->is_ip_cso;
buff->is_udp_cso &= buff_->is_udp_cso;
buff->is_tcp_cso &= buff_->is_tcp_cso;
buff->is_cso_err |= buff_->is_cso_err;
if (page_is_pfmemalloc(buff_->rxdata.page))
xdp_buff_set_frag_pfmemalloc(xdp);
} while (!buff_->is_eop);
xdp_buff_set_frags_flag(xdp);
return false;
}
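/* Non-XDP RX completion path: walk descriptors from sw_head to hw_head,
 * stitch multi-descriptor (RSC) packets together, copy the header into a
 * freshly allocated skb, attach the payload pages as frags, and hand the
 * result to GRO.
 */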
static int __aq_ring_rx_clean(struct aq_ring_s *self, struct napi_struct *napi,
int *work_done, int budget)
{
struct net_device *ndev = aq_nic_get_ndev(self->aq_nic);
int err = 0;
for (; (self->sw_head != self->hw_head) && budget;
self->sw_head = aq_ring_next_dx(self, self->sw_head),
--budget, ++(*work_done)) {
struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
bool is_ptp_ring = aq_ptp_ring(self->aq_nic, self);
struct aq_ring_buff_s *buff_ = NULL;
struct sk_buff *skb = NULL;
unsigned int next_ = 0U;
unsigned int i = 0U;
u16 hdr_len;
if (buff->is_cleaned)
continue;
if (!buff->is_eop) {
unsigned int frag_cnt = 0U;
buff_ = buff;
do {
bool is_rsc_completed = true;
if (buff_->next >= self->size) {
err = -EIO;
goto err_exit;
}
frag_cnt++;
next_ = buff_->next;
buff_ = &self->buff_ring[next_];
is_rsc_completed =
aq_ring_dx_in_range(self->sw_head,
next_,
self->hw_head);
if (unlikely(!is_rsc_completed) ||
frag_cnt > MAX_SKB_FRAGS) {
err = 0;
goto err_exit;
}
buff->is_error |= buff_->is_error;
buff->is_cso_err |= buff_->is_cso_err;
} while (!buff_->is_eop);
if (buff->is_error ||
(buff->is_lro && buff->is_cso_err)) {
buff_ = buff;
do {
if (buff_->next >= self->size) {
err = -EIO;
goto err_exit;
}
next_ = buff_->next;
buff_ = &self->buff_ring[next_];
buff_->is_cleaned = true;
} while (!buff_->is_eop);
u64_stats_update_begin(&self->stats.rx.syncp);
++self->stats.rx.errors;
u64_stats_update_end(&self->stats.rx.syncp);
continue;
}
}
if (buff->is_error) {
u64_stats_update_begin(&self->stats.rx.syncp);
++self->stats.rx.errors;
u64_stats_update_end(&self->stats.rx.syncp);
continue;
}
dma_sync_single_range_for_cpu(aq_nic_get_dev(self->aq_nic),
buff->rxdata.daddr,
buff->rxdata.pg_off,
buff->len, DMA_FROM_DEVICE);
skb = napi_alloc_skb(napi, AQ_CFG_RX_HDR_SIZE);
if (unlikely(!skb)) {
u64_stats_update_begin(&self->stats.rx.syncp);
self->stats.rx.skb_alloc_fails++;
u64_stats_update_end(&self->stats.rx.syncp);
err = -ENOMEM;
goto err_exit;
}
if (is_ptp_ring)
buff->len -=
aq_ptp_extract_ts(self->aq_nic, skb_hwtstamps(skb),
aq_buf_vaddr(&buff->rxdata),
buff->len);
hdr_len = buff->len;
if (hdr_len > AQ_CFG_RX_HDR_SIZE)
hdr_len = eth_get_headlen(skb->dev,
aq_buf_vaddr(&buff->rxdata),
AQ_CFG_RX_HDR_SIZE);
memcpy(__skb_put(skb, hdr_len), aq_buf_vaddr(&buff->rxdata),
ALIGN(hdr_len, sizeof(long)));
if (buff->len - hdr_len > 0) {
skb_add_rx_frag(skb, i++, buff->rxdata.page,
buff->rxdata.pg_off + hdr_len,
buff->len - hdr_len,
self->frame_max);
page_ref_inc(buff->rxdata.page);
}
if (!buff->is_eop) {
buff_ = buff;
do {
next_ = buff_->next;
buff_ = &self->buff_ring[next_];
dma_sync_single_range_for_cpu(aq_nic_get_dev(self->aq_nic),
buff_->rxdata.daddr,
buff_->rxdata.pg_off,
buff_->len,
DMA_FROM_DEVICE);
skb_add_rx_frag(skb, i++,
buff_->rxdata.page,
buff_->rxdata.pg_off,
buff_->len,
self->frame_max);
page_ref_inc(buff_->rxdata.page);
buff_->is_cleaned = 1;
buff->is_ip_cso &= buff_->is_ip_cso;
buff->is_udp_cso &= buff_->is_udp_cso;
buff->is_tcp_cso &= buff_->is_tcp_cso;
buff->is_cso_err |= buff_->is_cso_err;
} while (!buff_->is_eop);
}
if (buff->is_vlan)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
buff->vlan_rx_tag);
skb->protocol = eth_type_trans(skb, ndev);
aq_rx_checksum(self, buff, skb);
skb_set_hash(skb, buff->rss_hash,
buff->is_hash_l4 ? PKT_HASH_TYPE_L4 :
PKT_HASH_TYPE_NONE);
		/* Send all PTP traffic to queue 0 */
skb_record_rx_queue(skb,
is_ptp_ring ? 0
: AQ_NIC_RING2QMAP(self->aq_nic,
self->idx));
u64_stats_update_begin(&self->stats.rx.syncp);
++self->stats.rx.packets;
self->stats.rx.bytes += skb->len;
u64_stats_update_end(&self->stats.rx.syncp);
napi_gro_receive(napi, skb);
}
err_exit:
return err;
}
static int __aq_ring_xdp_clean(struct aq_ring_s *rx_ring,
struct napi_struct *napi, int *work_done,
int budget)
{
int frame_sz = rx_ring->page_offset + rx_ring->frame_max +
rx_ring->tail_size;
struct aq_nic_s *aq_nic = rx_ring->aq_nic;
bool is_rsc_completed = true;
struct device *dev;
int err = 0;
dev = aq_nic_get_dev(aq_nic);
for (; (rx_ring->sw_head != rx_ring->hw_head) && budget;
rx_ring->sw_head = aq_ring_next_dx(rx_ring, rx_ring->sw_head),
--budget, ++(*work_done)) {
struct aq_ring_buff_s *buff = &rx_ring->buff_ring[rx_ring->sw_head];
bool is_ptp_ring = aq_ptp_ring(rx_ring->aq_nic, rx_ring);
struct aq_ring_buff_s *buff_ = NULL;
u16 ptp_hwtstamp_len = 0;
struct skb_shared_hwtstamps shhwtstamps;
struct sk_buff *skb = NULL;
unsigned int next_ = 0U;
struct xdp_buff xdp;
void *hard_start;
if (buff->is_cleaned)
continue;
if (!buff->is_eop) {
buff_ = buff;
do {
if (buff_->next >= rx_ring->size) {
err = -EIO;
goto err_exit;
}
next_ = buff_->next;
buff_ = &rx_ring->buff_ring[next_];
is_rsc_completed =
aq_ring_dx_in_range(rx_ring->sw_head,
next_,
rx_ring->hw_head);
if (unlikely(!is_rsc_completed))
break;
buff->is_error |= buff_->is_error;
buff->is_cso_err |= buff_->is_cso_err;
} while (!buff_->is_eop);
if (!is_rsc_completed) {
err = 0;
goto err_exit;
}
if (buff->is_error ||
(buff->is_lro && buff->is_cso_err)) {
buff_ = buff;
do {
if (buff_->next >= rx_ring->size) {
err = -EIO;
goto err_exit;
}
next_ = buff_->next;
buff_ = &rx_ring->buff_ring[next_];
buff_->is_cleaned = true;
} while (!buff_->is_eop);
u64_stats_update_begin(&rx_ring->stats.rx.syncp);
++rx_ring->stats.rx.errors;
u64_stats_update_end(&rx_ring->stats.rx.syncp);
continue;
}
}
if (buff->is_error) {
u64_stats_update_begin(&rx_ring->stats.rx.syncp);
++rx_ring->stats.rx.errors;
u64_stats_update_end(&rx_ring->stats.rx.syncp);
continue;
}
dma_sync_single_range_for_cpu(dev,
buff->rxdata.daddr,
buff->rxdata.pg_off,
buff->len, DMA_FROM_DEVICE);
hard_start = page_address(buff->rxdata.page) +
buff->rxdata.pg_off - rx_ring->page_offset;
if (is_ptp_ring) {
ptp_hwtstamp_len = aq_ptp_extract_ts(rx_ring->aq_nic, &shhwtstamps,
aq_buf_vaddr(&buff->rxdata),
buff->len);
buff->len -= ptp_hwtstamp_len;
}
xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
xdp_prepare_buff(&xdp, hard_start, rx_ring->page_offset,
buff->len, false);
if (!buff->is_eop) {
if (aq_add_rx_fragment(dev, rx_ring, buff, &xdp)) {
u64_stats_update_begin(&rx_ring->stats.rx.syncp);
++rx_ring->stats.rx.packets;
rx_ring->stats.rx.bytes += xdp_get_buff_len(&xdp);
++rx_ring->stats.rx.xdp_aborted;
u64_stats_update_end(&rx_ring->stats.rx.syncp);
continue;
}
}
skb = aq_xdp_run_prog(aq_nic, &xdp, rx_ring, buff);
if (IS_ERR(skb) || !skb)
continue;
if (ptp_hwtstamp_len > 0)
*skb_hwtstamps(skb) = shhwtstamps;
if (buff->is_vlan)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
buff->vlan_rx_tag);
aq_rx_checksum(rx_ring, buff, skb);
skb_set_hash(skb, buff->rss_hash,
buff->is_hash_l4 ? PKT_HASH_TYPE_L4 :
PKT_HASH_TYPE_NONE);
		/* Send all PTP traffic to queue 0 */
skb_record_rx_queue(skb,
is_ptp_ring ? 0
: AQ_NIC_RING2QMAP(rx_ring->aq_nic,
rx_ring->idx));
napi_gro_receive(napi, skb);
}
err_exit:
return err;
}
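/* Dispatch RX cleaning to the XDP-aware path while an XDP program is
 * attached (tracked by the aq_xdp_locking_key static branch), otherwise
 * use the plain skb path.
 */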
int aq_ring_rx_clean(struct aq_ring_s *self,
struct napi_struct *napi,
int *work_done,
int budget)
{
if (static_branch_unlikely(&aq_xdp_locking_key))
return __aq_ring_xdp_clean(self, napi, work_done, budget);
else
return __aq_ring_rx_clean(self, napi, work_done, budget);
}
void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic)
{
#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
while (self->sw_head != self->hw_head) {
u64 ns;
aq_nic->aq_hw_ops->extract_hwts(aq_nic->aq_hw,
self->dx_ring +
(self->sw_head * self->dx_size),
self->dx_size, &ns);
aq_ptp_tx_hwtstamp(aq_nic, ns);
self->sw_head = aq_ring_next_dx(self, self->sw_head);
}
#endif
}
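/* Refill RX descriptors with fresh (or reused) pages. Refilling is
 * skipped while the number of empty descriptors is below the refill
 * threshold, so the work is done in batches.
 */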
int aq_ring_rx_fill(struct aq_ring_s *self)
{
struct aq_ring_buff_s *buff = NULL;
int err = 0;
int i = 0;
if (aq_ring_avail_dx(self) < min_t(unsigned int, AQ_CFG_RX_REFILL_THRES,
self->size / 2))
return err;
for (i = aq_ring_avail_dx(self); i--;
self->sw_tail = aq_ring_next_dx(self, self->sw_tail)) {
buff = &self->buff_ring[self->sw_tail];
buff->flags = 0U;
buff->len = self->frame_max;
err = aq_get_rxpages(self, buff);
if (err)
goto err_exit;
buff->pa = aq_buf_daddr(&buff->rxdata);
buff = NULL;
}
err_exit:
return err;
}
void aq_ring_rx_deinit(struct aq_ring_s *self)
{
if (!self)
return;
for (; self->sw_head != self->sw_tail;
self->sw_head = aq_ring_next_dx(self, self->sw_head)) {
struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
aq_free_rxpage(&buff->rxdata, aq_nic_get_dev(self->aq_nic));
}
}
void aq_ring_free(struct aq_ring_s *self)
{
if (!self)
return;
kfree(self->buff_ring);
self->buff_ring = NULL;
if (self->dx_ring) {
dma_free_coherent(aq_nic_get_dev(self->aq_nic),
self->size * self->dx_size, self->dx_ring,
self->dx_ring_pa);
self->dx_ring = NULL;
}
}
void aq_ring_hwts_rx_free(struct aq_ring_s *self)
{
if (!self)
return;
if (self->dx_ring) {
dma_free_coherent(aq_nic_get_dev(self->aq_nic),
self->size * self->dx_size + AQ_CFG_RXDS_DEF,
self->dx_ring, self->dx_ring_pa);
self->dx_ring = NULL;
}
}
unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data)
{
unsigned int count;
unsigned int start;
if (self->ring_type == ATL_RING_RX) {
/* This data should mimic aq_ethtool_queue_rx_stat_names structure */
do {
count = 0;
start = u64_stats_fetch_begin(&self->stats.rx.syncp);
data[count] = self->stats.rx.packets;
data[++count] = self->stats.rx.jumbo_packets;
data[++count] = self->stats.rx.lro_packets;
data[++count] = self->stats.rx.errors;
data[++count] = self->stats.rx.alloc_fails;
data[++count] = self->stats.rx.skb_alloc_fails;
data[++count] = self->stats.rx.polls;
data[++count] = self->stats.rx.pg_flips;
data[++count] = self->stats.rx.pg_reuses;
data[++count] = self->stats.rx.pg_losts;
data[++count] = self->stats.rx.xdp_aborted;
data[++count] = self->stats.rx.xdp_drop;
data[++count] = self->stats.rx.xdp_pass;
data[++count] = self->stats.rx.xdp_tx;
data[++count] = self->stats.rx.xdp_invalid;
data[++count] = self->stats.rx.xdp_redirect;
} while (u64_stats_fetch_retry(&self->stats.rx.syncp, start));
} else {
/* This data should mimic aq_ethtool_queue_tx_stat_names structure */
do {
count = 0;
start = u64_stats_fetch_begin(&self->stats.tx.syncp);
data[count] = self->stats.tx.packets;
data[++count] = self->stats.tx.queue_restarts;
} while (u64_stats_fetch_retry(&self->stats.tx.syncp, start));
}
return ++count;
}
|
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* RCU-based infrastructure for lightweight reader-writer locking
*
* Copyright (c) 2015, Red Hat, Inc.
*
* Author: Oleg Nesterov <[email protected]>
*/
#ifndef _LINUX_RCU_SYNC_H_
#define _LINUX_RCU_SYNC_H_
#include <linux/wait.h>
#include <linux/rcupdate.h>
/* Structure to mediate between updaters and fastpath-using readers. */
struct rcu_sync {
int gp_state;
int gp_count;
wait_queue_head_t gp_wait;
struct rcu_head cb_head;
};
/**
* rcu_sync_is_idle() - Are readers permitted to use their fastpaths?
* @rsp: Pointer to rcu_sync structure to use for synchronization
*
* Returns true if readers are permitted to use their fastpaths. Must be
* invoked within some flavor of RCU read-side critical section.
*/
static inline bool rcu_sync_is_idle(struct rcu_sync *rsp)
{
RCU_LOCKDEP_WARN(!rcu_read_lock_any_held(),
"suspicious rcu_sync_is_idle() usage");
return !READ_ONCE(rsp->gp_state); /* GP_IDLE */
}
extern void rcu_sync_init(struct rcu_sync *);
extern void rcu_sync_enter(struct rcu_sync *);
extern void rcu_sync_exit(struct rcu_sync *);
extern void rcu_sync_dtor(struct rcu_sync *);
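/*
 * Illustrative usage (sketch, not part of this header): an updater
 * brackets its update with enter/exit to force readers off the fastpath,
 * while readers pick a path under RCU protection:
 *
 *	rcu_sync_enter(&rss);	// readers now take the slow path
 *	...exclusive update...
 *	rcu_sync_exit(&rss);	// fastpath re-enabled after a grace period
 *
 *	rcu_read_lock();
 *	if (rcu_sync_is_idle(&rss))
 *		...lock-free fastpath...
 *	else
 *		...slow path...
 *	rcu_read_unlock();
 */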
#define __RCU_SYNC_INITIALIZER(name) { \
.gp_state = 0, \
.gp_count = 0, \
.gp_wait = __WAIT_QUEUE_HEAD_INITIALIZER(name.gp_wait), \
}
#define DEFINE_RCU_SYNC(name) \
struct rcu_sync name = __RCU_SYNC_INITIALIZER(name)
#endif /* _LINUX_RCU_SYNC_H_ */
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Ethernet driver for the WIZnet W5100 chip.
*
* Copyright (C) 2006-2008 WIZnet Co.,Ltd.
* Copyright (C) 2012 Mike Sinkovsky <[email protected]>
*/
enum {
W5100,
W5200,
W5500,
};
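/* Bus-specific accessors supplied by the front end (e.g. SPI or
 * memory-mapped); the core driver performs all chip access through
 * this vtable.
 */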
struct w5100_ops {
bool may_sleep;
int chip_id;
int (*read)(struct net_device *ndev, u32 addr);
int (*write)(struct net_device *ndev, u32 addr, u8 data);
int (*read16)(struct net_device *ndev, u32 addr);
int (*write16)(struct net_device *ndev, u32 addr, u16 data);
int (*readbulk)(struct net_device *ndev, u32 addr, u8 *buf, int len);
int (*writebulk)(struct net_device *ndev, u32 addr, const u8 *buf,
int len);
int (*reset)(struct net_device *ndev);
int (*init)(struct net_device *ndev);
};
void *w5100_ops_priv(const struct net_device *ndev);
int w5100_probe(struct device *dev, const struct w5100_ops *ops,
int sizeof_ops_priv, const void *mac_addr, int irq,
int link_gpio);
void w5100_remove(struct device *dev);
extern const struct dev_pm_ops w5100_pm_ops;
|
/*
* Copyright (C) 2019 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
* AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef _mp_12_0_0_SH_MASK_HEADER
#define _mp_12_0_0_SH_MASK_HEADER
// addressBlock: mp_SmuMp0_SmnDec
//MP0_SMN_C2PMSG_32
#define MP0_SMN_C2PMSG_32__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_32__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_33
#define MP0_SMN_C2PMSG_33__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_33__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_34
#define MP0_SMN_C2PMSG_34__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_34__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_35
#define MP0_SMN_C2PMSG_35__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_35__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_36
#define MP0_SMN_C2PMSG_36__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_36__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_37
#define MP0_SMN_C2PMSG_37__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_37__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_38
#define MP0_SMN_C2PMSG_38__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_38__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_39
#define MP0_SMN_C2PMSG_39__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_39__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_40
#define MP0_SMN_C2PMSG_40__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_40__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_41
#define MP0_SMN_C2PMSG_41__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_41__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_42
#define MP0_SMN_C2PMSG_42__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_42__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_43
#define MP0_SMN_C2PMSG_43__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_43__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_44
#define MP0_SMN_C2PMSG_44__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_44__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_45
#define MP0_SMN_C2PMSG_45__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_45__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_46
#define MP0_SMN_C2PMSG_46__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_46__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_47
#define MP0_SMN_C2PMSG_47__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_47__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_48
#define MP0_SMN_C2PMSG_48__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_48__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_49
#define MP0_SMN_C2PMSG_49__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_49__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_50
#define MP0_SMN_C2PMSG_50__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_50__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_51
#define MP0_SMN_C2PMSG_51__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_51__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_52
#define MP0_SMN_C2PMSG_52__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_52__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_53
#define MP0_SMN_C2PMSG_53__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_53__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_54
#define MP0_SMN_C2PMSG_54__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_54__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_55
#define MP0_SMN_C2PMSG_55__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_55__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_56
#define MP0_SMN_C2PMSG_56__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_56__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_57
#define MP0_SMN_C2PMSG_57__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_57__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_58
#define MP0_SMN_C2PMSG_58__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_58__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_59
#define MP0_SMN_C2PMSG_59__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_59__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_60
#define MP0_SMN_C2PMSG_60__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_60__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_61
#define MP0_SMN_C2PMSG_61__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_61__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_62
#define MP0_SMN_C2PMSG_62__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_62__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_63
#define MP0_SMN_C2PMSG_63__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_63__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_64
#define MP0_SMN_C2PMSG_64__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_64__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_65
#define MP0_SMN_C2PMSG_65__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_65__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_66
#define MP0_SMN_C2PMSG_66__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_66__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_67
#define MP0_SMN_C2PMSG_67__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_67__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_68
#define MP0_SMN_C2PMSG_68__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_68__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_69
#define MP0_SMN_C2PMSG_69__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_69__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_70
#define MP0_SMN_C2PMSG_70__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_70__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_71
#define MP0_SMN_C2PMSG_71__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_71__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_72
#define MP0_SMN_C2PMSG_72__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_72__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_73
#define MP0_SMN_C2PMSG_73__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_73__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_74
#define MP0_SMN_C2PMSG_74__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_74__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_75
#define MP0_SMN_C2PMSG_75__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_75__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_76
#define MP0_SMN_C2PMSG_76__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_76__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_77
#define MP0_SMN_C2PMSG_77__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_77__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_78
#define MP0_SMN_C2PMSG_78__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_78__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_79
#define MP0_SMN_C2PMSG_79__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_79__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_80
#define MP0_SMN_C2PMSG_80__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_80__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_81
#define MP0_SMN_C2PMSG_81__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_81__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_82
#define MP0_SMN_C2PMSG_82__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_82__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_83
#define MP0_SMN_C2PMSG_83__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_83__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_84
#define MP0_SMN_C2PMSG_84__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_84__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_85
#define MP0_SMN_C2PMSG_85__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_85__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_86
#define MP0_SMN_C2PMSG_86__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_86__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_87
#define MP0_SMN_C2PMSG_87__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_87__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_88
#define MP0_SMN_C2PMSG_88__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_88__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_89
#define MP0_SMN_C2PMSG_89__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_89__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_90
#define MP0_SMN_C2PMSG_90__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_91
#define MP0_SMN_C2PMSG_91__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_91__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_92
#define MP0_SMN_C2PMSG_92__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_92__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_93
#define MP0_SMN_C2PMSG_93__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_93__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_94
#define MP0_SMN_C2PMSG_94__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_94__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_95
#define MP0_SMN_C2PMSG_95__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_95__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_96
#define MP0_SMN_C2PMSG_96__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_96__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_97
#define MP0_SMN_C2PMSG_97__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_97__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_98
#define MP0_SMN_C2PMSG_98__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_98__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_99
#define MP0_SMN_C2PMSG_99__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_99__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_100
#define MP0_SMN_C2PMSG_100__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_100__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_101
#define MP0_SMN_C2PMSG_101__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_101__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_102
#define MP0_SMN_C2PMSG_102__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_102__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_103
#define MP0_SMN_C2PMSG_103__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_103__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_IH_CREDIT
#define MP0_SMN_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
#define MP0_SMN_IH_CREDIT__CLIENT_ID__SHIFT 0x10
#define MP0_SMN_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L
#define MP0_SMN_IH_CREDIT__CLIENT_ID_MASK 0x00FF0000L
//MP0_SMN_IH_SW_INT
#define MP0_SMN_IH_SW_INT__ID__SHIFT 0x0
#define MP0_SMN_IH_SW_INT__VALID__SHIFT 0x8
#define MP0_SMN_IH_SW_INT__ID_MASK 0x000000FFL
#define MP0_SMN_IH_SW_INT__VALID_MASK 0x00000100L
// addressBlock: mp_SmuMp1_SmnDec
//MP1_SMN_C2PMSG_32
#define MP1_SMN_C2PMSG_32__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_32__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_33
#define MP1_SMN_C2PMSG_33__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_33__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_34
#define MP1_SMN_C2PMSG_34__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_34__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_35
#define MP1_SMN_C2PMSG_35__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_35__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_36
#define MP1_SMN_C2PMSG_36__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_36__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_37
#define MP1_SMN_C2PMSG_37__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_37__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_38
#define MP1_SMN_C2PMSG_38__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_38__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_39
#define MP1_SMN_C2PMSG_39__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_39__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_40
#define MP1_SMN_C2PMSG_40__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_40__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_41
#define MP1_SMN_C2PMSG_41__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_41__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_42
#define MP1_SMN_C2PMSG_42__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_42__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_43
#define MP1_SMN_C2PMSG_43__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_43__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_44
#define MP1_SMN_C2PMSG_44__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_44__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_45
#define MP1_SMN_C2PMSG_45__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_45__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_46
#define MP1_SMN_C2PMSG_46__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_46__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_47
#define MP1_SMN_C2PMSG_47__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_47__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_48
#define MP1_SMN_C2PMSG_48__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_48__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_49
#define MP1_SMN_C2PMSG_49__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_49__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_50
#define MP1_SMN_C2PMSG_50__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_50__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_51
#define MP1_SMN_C2PMSG_51__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_51__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_52
#define MP1_SMN_C2PMSG_52__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_52__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_53
#define MP1_SMN_C2PMSG_53__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_53__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_54
#define MP1_SMN_C2PMSG_54__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_54__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_55
#define MP1_SMN_C2PMSG_55__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_55__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_56
#define MP1_SMN_C2PMSG_56__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_56__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_57
#define MP1_SMN_C2PMSG_57__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_57__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_58
#define MP1_SMN_C2PMSG_58__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_58__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_59
#define MP1_SMN_C2PMSG_59__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_59__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_60
#define MP1_SMN_C2PMSG_60__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_60__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_61
#define MP1_SMN_C2PMSG_61__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_61__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_62
#define MP1_SMN_C2PMSG_62__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_62__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_63
#define MP1_SMN_C2PMSG_63__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_63__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_64
#define MP1_SMN_C2PMSG_64__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_64__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_65
#define MP1_SMN_C2PMSG_65__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_65__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_66
#define MP1_SMN_C2PMSG_66__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_66__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_67
#define MP1_SMN_C2PMSG_67__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_67__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_68
#define MP1_SMN_C2PMSG_68__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_68__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_69
#define MP1_SMN_C2PMSG_69__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_69__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_70
#define MP1_SMN_C2PMSG_70__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_70__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_71
#define MP1_SMN_C2PMSG_71__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_71__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_72
#define MP1_SMN_C2PMSG_72__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_72__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_73
#define MP1_SMN_C2PMSG_73__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_73__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_74
#define MP1_SMN_C2PMSG_74__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_74__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_75
#define MP1_SMN_C2PMSG_75__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_75__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_76
#define MP1_SMN_C2PMSG_76__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_76__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_77
#define MP1_SMN_C2PMSG_77__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_77__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_78
#define MP1_SMN_C2PMSG_78__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_78__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_79
#define MP1_SMN_C2PMSG_79__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_79__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_80
#define MP1_SMN_C2PMSG_80__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_80__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_81
#define MP1_SMN_C2PMSG_81__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_81__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_82
#define MP1_SMN_C2PMSG_82__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_82__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_83
#define MP1_SMN_C2PMSG_83__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_83__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_84
#define MP1_SMN_C2PMSG_84__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_84__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_85
#define MP1_SMN_C2PMSG_85__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_85__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_86
#define MP1_SMN_C2PMSG_86__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_86__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_87
#define MP1_SMN_C2PMSG_87__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_87__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_88
#define MP1_SMN_C2PMSG_88__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_88__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_89
#define MP1_SMN_C2PMSG_89__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_89__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_90
#define MP1_SMN_C2PMSG_90__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_91
#define MP1_SMN_C2PMSG_91__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_91__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_92
#define MP1_SMN_C2PMSG_92__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_92__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_93
#define MP1_SMN_C2PMSG_93__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_93__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_94
#define MP1_SMN_C2PMSG_94__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_94__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_95
#define MP1_SMN_C2PMSG_95__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_95__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_96
#define MP1_SMN_C2PMSG_96__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_96__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_97
#define MP1_SMN_C2PMSG_97__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_97__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_98
#define MP1_SMN_C2PMSG_98__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_98__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_99
#define MP1_SMN_C2PMSG_99__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_99__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_100
#define MP1_SMN_C2PMSG_100__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_100__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_101
#define MP1_SMN_C2PMSG_101__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_101__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_102
#define MP1_SMN_C2PMSG_102__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_102__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_103
#define MP1_SMN_C2PMSG_103__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_103__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_IH_CREDIT
#define MP1_SMN_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
#define MP1_SMN_IH_CREDIT__CLIENT_ID__SHIFT 0x10
#define MP1_SMN_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L
#define MP1_SMN_IH_CREDIT__CLIENT_ID_MASK 0x00FF0000L
//MP1_SMN_IH_SW_INT
#define MP1_SMN_IH_SW_INT__ID__SHIFT 0x0
#define MP1_SMN_IH_SW_INT__VALID__SHIFT 0x8
#define MP1_SMN_IH_SW_INT__ID_MASK 0x000000FFL
#define MP1_SMN_IH_SW_INT__VALID_MASK 0x00000100L
//MP1_SMN_FPS_CNT
#define MP1_SMN_FPS_CNT__COUNT__SHIFT 0x0
#define MP1_SMN_FPS_CNT__COUNT_MASK 0xFFFFFFFFL
// addressBlock: mp_SmuMp0Pub_CruDec
//MP0_IH_CREDIT
#define MP0_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
#define MP0_IH_CREDIT__CLIENT_ID__SHIFT 0x10
#define MP0_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L
#define MP0_IH_CREDIT__CLIENT_ID_MASK 0x00FF0000L
//MP0_IH_SW_INT
#define MP0_IH_SW_INT__ID__SHIFT 0x0
#define MP0_IH_SW_INT__VALID__SHIFT 0x8
#define MP0_IH_SW_INT__ID_MASK 0x000000FFL
#define MP0_IH_SW_INT__VALID_MASK 0x00000100L
//MP0_IH_SW_INT_CTRL
#define MP0_IH_SW_INT_CTRL__INT_MASK__SHIFT 0x0
#define MP0_IH_SW_INT_CTRL__INT_ACK__SHIFT 0x8
#define MP0_IH_SW_INT_CTRL__INT_MASK_MASK 0x00000001L
#define MP0_IH_SW_INT_CTRL__INT_ACK_MASK 0x00000100L
// addressBlock: mp_SmuMp1Pub_CruDec
//MP1_FIRMWARE_FLAGS
#define MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT 0x0
#define MP1_FIRMWARE_FLAGS__RESERVED__SHIFT 0x1
#define MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK 0x00000001L
#define MP1_FIRMWARE_FLAGS__RESERVED_MASK 0xFFFFFFFEL
//MP1_C2PMSG_0
#define MP1_C2PMSG_0__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_0__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_1
#define MP1_C2PMSG_1__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_1__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_2
#define MP1_C2PMSG_2__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_2__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_3
#define MP1_C2PMSG_3__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_3__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_4
#define MP1_C2PMSG_4__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_4__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_5
#define MP1_C2PMSG_5__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_5__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_6
#define MP1_C2PMSG_6__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_6__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_7
#define MP1_C2PMSG_7__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_7__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_8
#define MP1_C2PMSG_8__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_8__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_9
#define MP1_C2PMSG_9__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_9__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_10
#define MP1_C2PMSG_10__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_10__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_11
#define MP1_C2PMSG_11__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_11__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_12
#define MP1_C2PMSG_12__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_12__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_13
#define MP1_C2PMSG_13__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_13__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_14
#define MP1_C2PMSG_14__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_14__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_15
#define MP1_C2PMSG_15__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_15__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_16
#define MP1_C2PMSG_16__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_16__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_17
#define MP1_C2PMSG_17__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_17__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_18
#define MP1_C2PMSG_18__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_18__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_19
#define MP1_C2PMSG_19__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_19__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_20
#define MP1_C2PMSG_20__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_20__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_21
#define MP1_C2PMSG_21__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_21__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_22
#define MP1_C2PMSG_22__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_22__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_23
#define MP1_C2PMSG_23__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_23__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_24
#define MP1_C2PMSG_24__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_24__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_25
#define MP1_C2PMSG_25__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_25__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_26
#define MP1_C2PMSG_26__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_26__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_27
#define MP1_C2PMSG_27__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_27__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_28
#define MP1_C2PMSG_28__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_28__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_29
#define MP1_C2PMSG_29__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_29__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_30
#define MP1_C2PMSG_30__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_30__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_31
#define MP1_C2PMSG_31__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_31__CONTENT_MASK 0xFFFFFFFFL
//MP1_P2CMSG_0
#define MP1_P2CMSG_0__CONTENT__SHIFT 0x0
#define MP1_P2CMSG_0__CONTENT_MASK 0xFFFFFFFFL
//MP1_P2CMSG_1
#define MP1_P2CMSG_1__CONTENT__SHIFT 0x0
#define MP1_P2CMSG_1__CONTENT_MASK 0xFFFFFFFFL
//MP1_P2CMSG_2
#define MP1_P2CMSG_2__CONTENT__SHIFT 0x0
#define MP1_P2CMSG_2__CONTENT_MASK 0xFFFFFFFFL
//MP1_P2CMSG_3
#define MP1_P2CMSG_3__CONTENT__SHIFT 0x0
#define MP1_P2CMSG_3__CONTENT_MASK 0xFFFFFFFFL
//MP1_P2CMSG_INTEN
#define MP1_P2CMSG_INTEN__INTEN__SHIFT 0x0
#define MP1_P2CMSG_INTEN__INTEN_MASK 0x0000000FL
//MP1_P2CMSG_INTSTS
#define MP1_P2CMSG_INTSTS__INTSTS0__SHIFT 0x0
#define MP1_P2CMSG_INTSTS__INTSTS1__SHIFT 0x1
#define MP1_P2CMSG_INTSTS__INTSTS2__SHIFT 0x2
#define MP1_P2CMSG_INTSTS__INTSTS3__SHIFT 0x3
#define MP1_P2CMSG_INTSTS__INTSTS0_MASK 0x00000001L
#define MP1_P2CMSG_INTSTS__INTSTS1_MASK 0x00000002L
#define MP1_P2CMSG_INTSTS__INTSTS2_MASK 0x00000004L
#define MP1_P2CMSG_INTSTS__INTSTS3_MASK 0x00000008L
//MP1_C2PMSG_32
#define MP1_C2PMSG_32__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_32__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_33
#define MP1_C2PMSG_33__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_33__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_34
#define MP1_C2PMSG_34__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_34__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_35
#define MP1_C2PMSG_35__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_35__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_36
#define MP1_C2PMSG_36__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_36__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_37
#define MP1_C2PMSG_37__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_37__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_38
#define MP1_C2PMSG_38__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_38__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_39
#define MP1_C2PMSG_39__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_39__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_40
#define MP1_C2PMSG_40__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_40__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_41
#define MP1_C2PMSG_41__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_41__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_42
#define MP1_C2PMSG_42__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_42__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_43
#define MP1_C2PMSG_43__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_43__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_44
#define MP1_C2PMSG_44__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_44__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_45
#define MP1_C2PMSG_45__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_45__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_46
#define MP1_C2PMSG_46__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_46__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_47
#define MP1_C2PMSG_47__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_47__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_48
#define MP1_C2PMSG_48__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_48__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_49
#define MP1_C2PMSG_49__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_49__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_50
#define MP1_C2PMSG_50__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_50__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_51
#define MP1_C2PMSG_51__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_51__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_52
#define MP1_C2PMSG_52__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_52__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_53
#define MP1_C2PMSG_53__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_53__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_54
#define MP1_C2PMSG_54__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_54__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_55
#define MP1_C2PMSG_55__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_55__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_56
#define MP1_C2PMSG_56__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_56__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_57
#define MP1_C2PMSG_57__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_57__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_58
#define MP1_C2PMSG_58__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_58__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_59
#define MP1_C2PMSG_59__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_59__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_60
#define MP1_C2PMSG_60__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_60__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_61
#define MP1_C2PMSG_61__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_61__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_62
#define MP1_C2PMSG_62__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_62__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_63
#define MP1_C2PMSG_63__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_63__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_64
#define MP1_C2PMSG_64__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_64__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_65
#define MP1_C2PMSG_65__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_65__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_66
#define MP1_C2PMSG_66__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_66__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_67
#define MP1_C2PMSG_67__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_67__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_68
#define MP1_C2PMSG_68__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_68__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_69
#define MP1_C2PMSG_69__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_69__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_70
#define MP1_C2PMSG_70__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_70__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_71
#define MP1_C2PMSG_71__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_71__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_72
#define MP1_C2PMSG_72__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_72__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_73
#define MP1_C2PMSG_73__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_73__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_74
#define MP1_C2PMSG_74__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_74__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_75
#define MP1_C2PMSG_75__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_75__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_76
#define MP1_C2PMSG_76__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_76__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_77
#define MP1_C2PMSG_77__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_77__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_78
#define MP1_C2PMSG_78__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_78__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_79
#define MP1_C2PMSG_79__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_79__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_80
#define MP1_C2PMSG_80__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_80__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_81
#define MP1_C2PMSG_81__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_81__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_82
#define MP1_C2PMSG_82__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_82__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_83
#define MP1_C2PMSG_83__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_83__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_84
#define MP1_C2PMSG_84__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_84__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_85
#define MP1_C2PMSG_85__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_85__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_86
#define MP1_C2PMSG_86__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_86__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_87
#define MP1_C2PMSG_87__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_87__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_88
#define MP1_C2PMSG_88__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_88__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_89
#define MP1_C2PMSG_89__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_89__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_90
#define MP1_C2PMSG_90__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_91
#define MP1_C2PMSG_91__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_91__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_92
#define MP1_C2PMSG_92__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_92__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_93
#define MP1_C2PMSG_93__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_93__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_94
#define MP1_C2PMSG_94__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_94__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_95
#define MP1_C2PMSG_95__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_95__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_96
#define MP1_C2PMSG_96__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_96__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_97
#define MP1_C2PMSG_97__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_97__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_98
#define MP1_C2PMSG_98__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_98__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_99
#define MP1_C2PMSG_99__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_99__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_100
#define MP1_C2PMSG_100__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_100__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_101
#define MP1_C2PMSG_101__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_101__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_102
#define MP1_C2PMSG_102__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_102__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_103
#define MP1_C2PMSG_103__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_103__CONTENT_MASK 0xFFFFFFFFL
//MP1_IH_CREDIT
#define MP1_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
#define MP1_IH_CREDIT__CLIENT_ID__SHIFT 0x10
#define MP1_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L
#define MP1_IH_CREDIT__CLIENT_ID_MASK 0x00FF0000L
//MP1_IH_SW_INT
#define MP1_IH_SW_INT__ID__SHIFT 0x0
#define MP1_IH_SW_INT__VALID__SHIFT 0x8
#define MP1_IH_SW_INT__ID_MASK 0x000000FFL
#define MP1_IH_SW_INT__VALID_MASK 0x00000100L
//MP1_IH_SW_INT_CTRL
#define MP1_IH_SW_INT_CTRL__INT_MASK__SHIFT 0x0
#define MP1_IH_SW_INT_CTRL__INT_ACK__SHIFT 0x8
#define MP1_IH_SW_INT_CTRL__INT_MASK_MASK 0x00000001L
#define MP1_IH_SW_INT_CTRL__INT_ACK_MASK 0x00000100L
//MP1_FPS_CNT
#define MP1_FPS_CNT__COUNT__SHIFT 0x0
#define MP1_FPS_CNT__COUNT_MASK 0xFFFFFFFFL
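/*
 * Illustrative sketch (not part of this header): extracting and
 * setting a register field with the SHIFT/MASK pairs defined above.
 * 'reg' is a hypothetical register value read elsewhere.
 *
 *	u32 id = (reg & MP1_IH_SW_INT__ID_MASK) >> MP1_IH_SW_INT__ID__SHIFT;
 *
 *	reg &= ~MP1_IH_SW_INT__VALID_MASK;
 *	reg |= (1U << MP1_IH_SW_INT__VALID__SHIFT) & MP1_IH_SW_INT__VALID_MASK;
 */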
#endif
// SPDX-License-Identifier: GPL-2.0-only
/*
* IBM Accelerator Family 'GenWQE'
*
* (C) Copyright IBM Corp. 2013
*
* Author: Frank Haverkamp <[email protected]>
* Author: Joerg-Stephan Vogt <[email protected]>
* Author: Michael Jung <[email protected]>
* Author: Michael Ruettger <[email protected]>
*/
/*
* Device Driver Control Block (DDCB) queue support. Definition of
* interrupt handlers for queue support as well as triggering the
* health monitor code in case of problems. The current hardware uses
* an MSI interrupt which is shared between error handling and
* functional code.
*/
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/crc-itu-t.h>
#include "card_base.h"
#include "card_ddcb.h"
/*
* N: next DDCB, this is where the next DDCB will be put.
* A: active DDCB, this is where the code will look for the next completion.
* x: DDCB is enqueued, we are waiting for its completion.
* Situation (1): Empty queue
* +---+---+---+---+---+---+---+---+
* | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
* | | | | | | | | |
* +---+---+---+---+---+---+---+---+
* A/N
 * enqueued_ddcbs = N - A = 2 - 2 = 0
*
* Situation (2): Wrapped, N > A
* +---+---+---+---+---+---+---+---+
* | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
* | | | x | x | | | | |
* +---+---+---+---+---+---+---+---+
* A N
* enqueued_ddcbs = N - A = 4 - 2 = 2
*
* Situation (3): Queue wrapped, A > N
* +---+---+---+---+---+---+---+---+
* | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
* | x | x | | | x | x | x | x |
* +---+---+---+---+---+---+---+---+
* N A
* enqueued_ddcbs = queue_max - (A - N) = 8 - (4 - 2) = 6
*
* Situation (4a): Queue full N > A
* +---+---+---+---+---+---+---+---+
* | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
* | x | x | x | x | x | x | x | |
* +---+---+---+---+---+---+---+---+
* A N
*
* enqueued_ddcbs = N - A = 7 - 0 = 7
*
 * Situation (4b): Queue full A > N
* +---+---+---+---+---+---+---+---+
* | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
* | x | x | x | | x | x | x | x |
* +---+---+---+---+---+---+---+---+
* N A
* enqueued_ddcbs = queue_max - (A - N) = 8 - (4 - 3) = 7
*/
static int queue_empty(struct ddcb_queue *queue)
{
return queue->ddcb_next == queue->ddcb_act;
}
static int queue_enqueued_ddcbs(struct ddcb_queue *queue)
{
if (queue->ddcb_next >= queue->ddcb_act)
return queue->ddcb_next - queue->ddcb_act;
return queue->ddcb_max - (queue->ddcb_act - queue->ddcb_next);
}
static int queue_free_ddcbs(struct ddcb_queue *queue)
{
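	/*
	 * One slot is deliberately kept free (hence the '- 1'): with N
	 * directly behind A the queue counts as full, which keeps a full
	 * queue distinguishable from an empty one (N == A), cf. situations
	 * (1) and (4a/4b) above.
	 */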
int free_ddcbs = queue->ddcb_max - queue_enqueued_ddcbs(queue) - 1;
if (WARN_ON_ONCE(free_ddcbs < 0)) { /* must never ever happen! */
return 0;
}
return free_ddcbs;
}
/*
* Use of the PRIV field in the DDCB for queue debugging:
*
* (1) Trying to get rid of a DDCB which saw a timeout:
* pddcb->priv[6] = 0xcc; # cleared
*
* (2) Append a DDCB via NEXT bit:
* pddcb->priv[7] = 0xaa; # appended
*
* (3) DDCB needed tapping:
* pddcb->priv[7] = 0xbb; # tapped
*
* (4) DDCB marked as correctly finished:
* pddcb->priv[6] = 0xff; # finished
*/
static inline void ddcb_mark_tapped(struct ddcb *pddcb)
{
pddcb->priv[7] = 0xbb; /* tapped */
}
static inline void ddcb_mark_appended(struct ddcb *pddcb)
{
pddcb->priv[7] = 0xaa; /* appended */
}
static inline void ddcb_mark_cleared(struct ddcb *pddcb)
{
pddcb->priv[6] = 0xcc; /* cleared */
}
static inline void ddcb_mark_finished(struct ddcb *pddcb)
{
pddcb->priv[6] = 0xff; /* finished */
}
static inline void ddcb_mark_unused(struct ddcb *pddcb)
{
pddcb->priv_64 = cpu_to_be64(0); /* not tapped */
}
/**
* genwqe_crc16() - Generate 16-bit crc as required for DDCBs
* @buff: pointer to data buffer
* @len: length of data for calculation
* @init: initial crc (0xffff at start)
*
* Polynomial = x^16 + x^12 + x^5 + 1 (0x1021)
* Example: 4 bytes 0x01 0x02 0x03 0x04 with init = 0xffff
* should result in a crc16 of 0x89c3
*
 * Return: crc16 checksum in big endian format!
*/
static inline u16 genwqe_crc16(const u8 *buff, size_t len, u16 init)
{
return crc_itu_t(init, buff, len);
}
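/*
 * Minimal self-check sketch (illustrative, not part of the driver):
 * verifies the example given in the genwqe_crc16() kernel-doc above.
 * The GENWQE_CRC16_SELFTEST guard and the function name are
 * hypothetical.
 */
#ifdef GENWQE_CRC16_SELFTEST
static int genwqe_crc16_selftest(void)
{
	static const u8 data[] = { 0x01, 0x02, 0x03, 0x04 };

	/* expected per the kernel-doc: 0x89c3 with init = 0xffff */
	return (genwqe_crc16(data, sizeof(data), 0xffff) == 0x89c3) ?
		0 : -EINVAL;
}
#endif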
static void print_ddcb_info(struct genwqe_dev *cd, struct ddcb_queue *queue)
{
int i;
struct ddcb *pddcb;
unsigned long flags;
struct pci_dev *pci_dev = cd->pci_dev;
spin_lock_irqsave(&cd->print_lock, flags);
dev_info(&pci_dev->dev,
"DDCB list for card #%d (ddcb_act=%d / ddcb_next=%d):\n",
cd->card_idx, queue->ddcb_act, queue->ddcb_next);
pddcb = queue->ddcb_vaddr;
for (i = 0; i < queue->ddcb_max; i++) {
dev_err(&pci_dev->dev,
" %c %-3d: RETC=%03x SEQ=%04x HSI=%02X SHI=%02x PRIV=%06llx CMD=%03x\n",
i == queue->ddcb_act ? '>' : ' ',
i,
be16_to_cpu(pddcb->retc_16),
be16_to_cpu(pddcb->seqnum_16),
pddcb->hsi,
pddcb->shi,
be64_to_cpu(pddcb->priv_64),
pddcb->cmd);
pddcb++;
}
spin_unlock_irqrestore(&cd->print_lock, flags);
}
struct genwqe_ddcb_cmd *ddcb_requ_alloc(void)
{
struct ddcb_requ *req;
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return NULL;
return &req->cmd;
}
void ddcb_requ_free(struct genwqe_ddcb_cmd *cmd)
{
struct ddcb_requ *req = container_of(cmd, struct ddcb_requ, cmd);
kfree(req);
}
static inline enum genwqe_requ_state ddcb_requ_get_state(struct ddcb_requ *req)
{
return req->req_state;
}
static inline void ddcb_requ_set_state(struct ddcb_requ *req,
enum genwqe_requ_state new_state)
{
req->req_state = new_state;
}
static inline int ddcb_requ_collect_debug_data(struct ddcb_requ *req)
{
return req->cmd.ddata_addr != 0x0;
}
/**
* ddcb_requ_finished() - Returns the hardware state of the associated DDCB
* @cd: pointer to genwqe device descriptor
* @req: DDCB work request
*
 * The state of the ddcb_requ mirrors this hardware state, but is
 * copied into the ddcb_requ by the interrupt/polling function. The
 * low-level code should check the hardware state directly, the
 * higher-level code should check the copy.
*
* This function will also return true if the state of the queue is
* not GENWQE_CARD_USED. This enables us to purge all DDCBs in the
* shutdown case.
*/
static int ddcb_requ_finished(struct genwqe_dev *cd, struct ddcb_requ *req)
{
return (ddcb_requ_get_state(req) == GENWQE_REQU_FINISHED) ||
(cd->card_state != GENWQE_CARD_USED);
}
#define RET_DDCB_APPENDED 1
#define RET_DDCB_TAPPED 2
/**
* enqueue_ddcb() - Enqueue a DDCB
* @cd: pointer to genwqe device descriptor
* @queue: queue this operation should be done on
* @pddcb: pointer to ddcb structure
 * @ddcb_no: number of the DDCB to be tapped or appended
*
* Start execution of DDCB by tapping or append to queue via NEXT
* bit. This is done by an atomic 'compare and swap' instruction and
* checking SHI and HSI of the previous DDCB.
*
* This function must only be called with ddcb_lock held.
*
* Return: 1 if new DDCB is appended to previous
* 2 if DDCB queue is tapped via register/simulation
*/
static int enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_queue *queue,
struct ddcb *pddcb, int ddcb_no)
{
unsigned int try;
int prev_no;
struct ddcb *prev_ddcb;
__be32 old, new, icrc_hsi_shi;
u64 num;
	/*
	 * For performance checks a Dispatch Timestamp can be put into
	 * the DDCB. It is supposed to use the SLU's free-running
	 * counter, but this requires PCIe cycles.
	 */
ddcb_mark_unused(pddcb);
/* check previous DDCB if already fetched */
prev_no = (ddcb_no == 0) ? queue->ddcb_max - 1 : ddcb_no - 1;
prev_ddcb = &queue->ddcb_vaddr[prev_no];
	/*
	 * It might have happened that the HSI.FETCHED bit is
	 * set. Retry in this case; therefore at most two attempts
	 * are expected.
	 */
ddcb_mark_appended(pddcb);
for (try = 0; try < 2; try++) {
old = prev_ddcb->icrc_hsi_shi_32; /* read SHI/HSI in BE32 */
/* try to append via NEXT bit if prev DDCB is not completed */
if ((old & DDCB_COMPLETED_BE32) != 0x00000000)
break;
new = (old | DDCB_NEXT_BE32);
wmb(); /* need to ensure write ordering */
icrc_hsi_shi = cmpxchg(&prev_ddcb->icrc_hsi_shi_32, old, new);
if (icrc_hsi_shi == old)
return RET_DDCB_APPENDED; /* appended to queue */
}
/* Queue must be re-started by updating QUEUE_OFFSET */
ddcb_mark_tapped(pddcb);
num = (u64)ddcb_no << 8;
wmb(); /* need to ensure write ordering */
__genwqe_writeq(cd, queue->IO_QUEUE_OFFSET, num); /* start queue */
return RET_DDCB_TAPPED;
}
/**
* copy_ddcb_results() - Copy output state from real DDCB to request
* @req: pointer to requested DDCB parameters
 * @ddcb_no: number of the DDCB whose results are copied
*
 * Copy the DDCB ASV to the request struct. No endian conversion is
 * made, since the data structure in the ASV is still unknown here.
*
* This is needed by:
* - genwqe_purge_ddcb()
* - genwqe_check_ddcb_queue()
*/
static void copy_ddcb_results(struct ddcb_requ *req, int ddcb_no)
{
struct ddcb_queue *queue = req->queue;
struct ddcb *pddcb = &queue->ddcb_vaddr[req->num];
memcpy(&req->cmd.asv[0], &pddcb->asv[0], DDCB_ASV_LENGTH);
/* copy status flags of the variant part */
req->cmd.vcrc = be16_to_cpu(pddcb->vcrc_16);
req->cmd.deque_ts = be64_to_cpu(pddcb->deque_ts_64);
req->cmd.cmplt_ts = be64_to_cpu(pddcb->cmplt_ts_64);
req->cmd.attn = be16_to_cpu(pddcb->attn_16);
req->cmd.progress = be32_to_cpu(pddcb->progress_32);
req->cmd.retc = be16_to_cpu(pddcb->retc_16);
if (ddcb_requ_collect_debug_data(req)) {
int prev_no = (ddcb_no == 0) ?
queue->ddcb_max - 1 : ddcb_no - 1;
struct ddcb *prev_pddcb = &queue->ddcb_vaddr[prev_no];
memcpy(&req->debug_data.ddcb_finished, pddcb,
sizeof(req->debug_data.ddcb_finished));
memcpy(&req->debug_data.ddcb_prev, prev_pddcb,
sizeof(req->debug_data.ddcb_prev));
}
}
/**
* genwqe_check_ddcb_queue() - Checks DDCB queue for completed work requests.
* @cd: pointer to genwqe device descriptor
* @queue: queue to be checked
*
* Return: Number of DDCBs which were finished
*/
static int genwqe_check_ddcb_queue(struct genwqe_dev *cd,
struct ddcb_queue *queue)
{
unsigned long flags;
int ddcbs_finished = 0;
struct pci_dev *pci_dev = cd->pci_dev;
spin_lock_irqsave(&queue->ddcb_lock, flags);
/* FIXME avoid soft locking CPU */
while (!queue_empty(queue) && (ddcbs_finished < queue->ddcb_max)) {
struct ddcb *pddcb;
struct ddcb_requ *req;
u16 vcrc, vcrc_16, retc_16;
pddcb = &queue->ddcb_vaddr[queue->ddcb_act];
if ((pddcb->icrc_hsi_shi_32 & DDCB_COMPLETED_BE32) ==
0x00000000)
goto go_home; /* not completed, continue waiting */
wmb(); /* Add sync to decouple prev. read operations */
/* Note: DDCB could be purged */
req = queue->ddcb_req[queue->ddcb_act];
if (req == NULL) {
/* this occurs if DDCB is purged, not an error */
/* Move active DDCB further; Nothing to do anymore. */
goto pick_next_one;
}
/*
* HSI=0x44 (fetched and completed), but RETC is
* 0x101, or even worse 0x000.
*
* In case of seeing the queue in inconsistent state
* we read the errcnts and the queue status to provide
* a trigger for our PCIe analyzer stop capturing.
*/
retc_16 = be16_to_cpu(pddcb->retc_16);
if ((pddcb->hsi == 0x44) && (retc_16 <= 0x101)) {
u64 errcnts, status;
u64 ddcb_offs = (u64)pddcb - (u64)queue->ddcb_vaddr;
errcnts = __genwqe_readq(cd, queue->IO_QUEUE_ERRCNTS);
status = __genwqe_readq(cd, queue->IO_QUEUE_STATUS);
dev_err(&pci_dev->dev,
"[%s] SEQN=%04x HSI=%02x RETC=%03x Q_ERRCNTS=%016llx Q_STATUS=%016llx DDCB_DMA_ADDR=%016llx\n",
__func__, be16_to_cpu(pddcb->seqnum_16),
pddcb->hsi, retc_16, errcnts, status,
queue->ddcb_daddr + ddcb_offs);
}
copy_ddcb_results(req, queue->ddcb_act);
queue->ddcb_req[queue->ddcb_act] = NULL; /* take from queue */
dev_dbg(&pci_dev->dev, "FINISHED DDCB#%d\n", req->num);
genwqe_hexdump(pci_dev, pddcb, sizeof(*pddcb));
ddcb_mark_finished(pddcb);
/* calculate CRC_16 to see if VCRC is correct */
vcrc = genwqe_crc16(pddcb->asv,
VCRC_LENGTH(req->cmd.asv_length),
0xffff);
vcrc_16 = be16_to_cpu(pddcb->vcrc_16);
if (vcrc != vcrc_16) {
printk_ratelimited(KERN_ERR
"%s %s: err: wrong VCRC pre=%02x vcrc_len=%d bytes vcrc_data=%04x is not vcrc_card=%04x\n",
GENWQE_DEVNAME, dev_name(&pci_dev->dev),
pddcb->pre, VCRC_LENGTH(req->cmd.asv_length),
vcrc, vcrc_16);
}
ddcb_requ_set_state(req, GENWQE_REQU_FINISHED);
queue->ddcbs_completed++;
queue->ddcbs_in_flight--;
/* wake up process waiting for this DDCB, and
processes on the busy queue */
wake_up_interruptible(&queue->ddcb_waitqs[queue->ddcb_act]);
wake_up_interruptible(&queue->busy_waitq);
pick_next_one:
queue->ddcb_act = (queue->ddcb_act + 1) % queue->ddcb_max;
ddcbs_finished++;
}
go_home:
spin_unlock_irqrestore(&queue->ddcb_lock, flags);
return ddcbs_finished;
}
/**
* __genwqe_wait_ddcb(): Waits until DDCB is completed
* @cd: pointer to genwqe device descriptor
 * @req: pointer to requested DDCB parameters
*
* The Service Layer will update the RETC in DDCB when processing is
* pending or done.
*
* Return: > 0 remaining jiffies, DDCB completed
* -ETIMEDOUT when timeout
* -ERESTARTSYS when ^C
* -EINVAL when unknown error condition
*
 * When an error is returned the caller needs to ensure that
* purge_ddcb() is being called to get the &req removed from the
* queue.
*/
int __genwqe_wait_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req)
{
int rc;
unsigned int ddcb_no;
struct ddcb_queue *queue;
struct pci_dev *pci_dev = cd->pci_dev;
if (req == NULL)
return -EINVAL;
queue = req->queue;
if (queue == NULL)
return -EINVAL;
ddcb_no = req->num;
if (ddcb_no >= queue->ddcb_max)
return -EINVAL;
rc = wait_event_interruptible_timeout(queue->ddcb_waitqs[ddcb_no],
ddcb_requ_finished(cd, req),
GENWQE_DDCB_SOFTWARE_TIMEOUT * HZ);
/*
* We need to distinguish 3 cases here:
* 1. rc == 0 timeout occurred
* 2. rc == -ERESTARTSYS signal received
* 3. rc > 0 remaining jiffies condition is true
*/
if (rc == 0) {
struct ddcb_queue *queue = req->queue;
struct ddcb *pddcb;
/*
* Timeout may be caused by long task switching time.
* When timeout happens, check if the request has
* meanwhile completed.
*/
genwqe_check_ddcb_queue(cd, req->queue);
if (ddcb_requ_finished(cd, req))
return rc;
dev_err(&pci_dev->dev,
"[%s] err: DDCB#%d timeout rc=%d state=%d req @ %p\n",
__func__, req->num, rc, ddcb_requ_get_state(req),
req);
dev_err(&pci_dev->dev,
"[%s] IO_QUEUE_STATUS=0x%016llx\n", __func__,
__genwqe_readq(cd, queue->IO_QUEUE_STATUS));
pddcb = &queue->ddcb_vaddr[req->num];
genwqe_hexdump(pci_dev, pddcb, sizeof(*pddcb));
print_ddcb_info(cd, req->queue);
return -ETIMEDOUT;
} else if (rc == -ERESTARTSYS) {
return rc;
/*
* EINTR: Stops the application
* ERESTARTSYS: Restartable systemcall; called again
*/
} else if (rc < 0) {
dev_err(&pci_dev->dev,
"[%s] err: DDCB#%d unknown result (rc=%d) %d!\n",
__func__, req->num, rc, ddcb_requ_get_state(req));
return -EINVAL;
}
	/* Severe error occurred. Driver is forced to stop operation */
if (cd->card_state != GENWQE_CARD_USED) {
dev_err(&pci_dev->dev,
"[%s] err: DDCB#%d forced to stop (rc=%d)\n",
__func__, req->num, rc);
return -EIO;
}
return rc;
}
/**
* get_next_ddcb() - Get next available DDCB
* @cd: pointer to genwqe device descriptor
* @queue: DDCB queue
* @num: internal DDCB number
*
* DDCB's content is completely cleared but presets for PRE and
* SEQNUM. This function must only be called when ddcb_lock is held.
*
* Return: NULL if no empty DDCB available otherwise ptr to next DDCB.
*/
static struct ddcb *get_next_ddcb(struct genwqe_dev *cd,
struct ddcb_queue *queue,
int *num)
{
u64 *pu64;
struct ddcb *pddcb;
if (queue_free_ddcbs(queue) == 0) /* queue is full */
return NULL;
/* find new ddcb */
pddcb = &queue->ddcb_vaddr[queue->ddcb_next];
/* if it is not completed, we are not allowed to use it */
/* barrier(); */
if ((pddcb->icrc_hsi_shi_32 & DDCB_COMPLETED_BE32) == 0x00000000)
return NULL;
*num = queue->ddcb_next; /* internal DDCB number */
queue->ddcb_next = (queue->ddcb_next + 1) % queue->ddcb_max;
/* clear important DDCB fields */
pu64 = (u64 *)pddcb;
pu64[0] = 0ULL; /* offs 0x00 (ICRC,HSI,SHI,...) */
pu64[1] = 0ULL; /* offs 0x01 (ACFUNC,CMD...) */
/* destroy previous results in ASV */
pu64[0x80/8] = 0ULL; /* offs 0x80 (ASV + 0) */
pu64[0x88/8] = 0ULL; /* offs 0x88 (ASV + 0x08) */
pu64[0x90/8] = 0ULL; /* offs 0x90 (ASV + 0x10) */
pu64[0x98/8] = 0ULL; /* offs 0x98 (ASV + 0x18) */
pu64[0xd0/8] = 0ULL; /* offs 0xd0 (RETC,ATTN...) */
pddcb->pre = DDCB_PRESET_PRE; /* 128 */
pddcb->seqnum_16 = cpu_to_be16(queue->ddcb_seq++);
return pddcb;
}
/**
* __genwqe_purge_ddcb() - Remove a DDCB from the workqueue
* @cd: genwqe device descriptor
* @req: DDCB request
*
* This will fail when the request was already FETCHED. In this case
* we need to wait until it is finished. Else the DDCB can be
* reused. This function also ensures that the request data structure
* is removed from ddcb_req[].
*
* Do not forget to call this function when genwqe_wait_ddcb() fails,
* such that the request gets really removed from ddcb_req[].
*
* Return: 0 success
*/
int __genwqe_purge_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req)
{
struct ddcb *pddcb = NULL;
unsigned int t;
unsigned long flags;
struct ddcb_queue *queue = req->queue;
struct pci_dev *pci_dev = cd->pci_dev;
u64 queue_status;
__be32 icrc_hsi_shi = 0x0000;
__be32 old, new;
/* unsigned long flags; */
if (GENWQE_DDCB_SOFTWARE_TIMEOUT <= 0) {
dev_err(&pci_dev->dev,
"[%s] err: software timeout is not set!\n", __func__);
return -EFAULT;
}
pddcb = &queue->ddcb_vaddr[req->num];
for (t = 0; t < GENWQE_DDCB_SOFTWARE_TIMEOUT * 10; t++) {
spin_lock_irqsave(&queue->ddcb_lock, flags);
/* Check if req was meanwhile finished */
if (ddcb_requ_get_state(req) == GENWQE_REQU_FINISHED)
goto go_home;
/* try to set PURGE bit if FETCHED/COMPLETED are not set */
old = pddcb->icrc_hsi_shi_32; /* read SHI/HSI in BE32 */
if ((old & DDCB_FETCHED_BE32) == 0x00000000) {
new = (old | DDCB_PURGE_BE32);
icrc_hsi_shi = cmpxchg(&pddcb->icrc_hsi_shi_32,
old, new);
if (icrc_hsi_shi == old)
goto finish_ddcb;
}
/* normal finish with HSI bit */
barrier();
icrc_hsi_shi = pddcb->icrc_hsi_shi_32;
if (icrc_hsi_shi & DDCB_COMPLETED_BE32)
goto finish_ddcb;
spin_unlock_irqrestore(&queue->ddcb_lock, flags);
		/*
		 * Here the check_ddcb() function will most likely
		 * discover this DDCB to be finished at some point in
		 * time. It will mark the req finished and free it up
		 * in the list.
		 */
copy_ddcb_results(req, req->num); /* for the failing case */
msleep(100); /* sleep for 1/10 second and try again */
continue;
finish_ddcb:
copy_ddcb_results(req, req->num);
ddcb_requ_set_state(req, GENWQE_REQU_FINISHED);
queue->ddcbs_in_flight--;
queue->ddcb_req[req->num] = NULL; /* delete from array */
ddcb_mark_cleared(pddcb);
/* Move active DDCB further; Nothing to do here anymore. */
		/*
		 * We need to ensure that there is at least one free
		 * DDCB in the queue. To do that, we must update
		 * ddcb_act only if the COMPLETED bit is set for the
		 * DDCB we are working on; else we treat that DDCB,
		 * even if we PURGED it, as occupied (the hardware has
		 * not set the COMPLETED bit yet!).
		 */
icrc_hsi_shi = pddcb->icrc_hsi_shi_32;
if ((icrc_hsi_shi & DDCB_COMPLETED_BE32) &&
(queue->ddcb_act == req->num)) {
queue->ddcb_act = ((queue->ddcb_act + 1) %
queue->ddcb_max);
}
go_home:
spin_unlock_irqrestore(&queue->ddcb_lock, flags);
return 0;
}
/*
* If the card is dead and the queue is forced to stop, we
* might see this in the queue status register.
*/
queue_status = __genwqe_readq(cd, queue->IO_QUEUE_STATUS);
dev_dbg(&pci_dev->dev, "UN/FINISHED DDCB#%d\n", req->num);
genwqe_hexdump(pci_dev, pddcb, sizeof(*pddcb));
dev_err(&pci_dev->dev,
"[%s] err: DDCB#%d not purged and not completed after %d seconds QSTAT=%016llx!!\n",
__func__, req->num, GENWQE_DDCB_SOFTWARE_TIMEOUT,
queue_status);
print_ddcb_info(cd, req->queue);
return -EFAULT;
}
int genwqe_init_debug_data(struct genwqe_dev *cd, struct genwqe_debug_data *d)
{
int len;
struct pci_dev *pci_dev = cd->pci_dev;
if (d == NULL) {
dev_err(&pci_dev->dev,
"[%s] err: invalid memory for debug data!\n",
__func__);
return -EFAULT;
}
len = sizeof(d->driver_version);
snprintf(d->driver_version, len, "%s", DRV_VERSION);
d->slu_unitcfg = cd->slu_unitcfg;
d->app_unitcfg = cd->app_unitcfg;
return 0;
}
/**
* __genwqe_enqueue_ddcb() - Enqueue a DDCB
* @cd: pointer to genwqe device descriptor
* @req: pointer to DDCB execution request
* @f_flags: file mode: blocking, non-blocking
*
* Return: 0 if enqueuing succeeded
* -EIO if card is unusable/PCIe problems
* -EBUSY if enqueuing failed
*/
int __genwqe_enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req,
unsigned int f_flags)
{
struct ddcb *pddcb;
unsigned long flags;
struct ddcb_queue *queue;
struct pci_dev *pci_dev = cd->pci_dev;
u16 icrc;
retry:
if (cd->card_state != GENWQE_CARD_USED) {
printk_ratelimited(KERN_ERR
"%s %s: [%s] Card is unusable/PCIe problem Req#%d\n",
GENWQE_DEVNAME, dev_name(&pci_dev->dev),
__func__, req->num);
return -EIO;
}
queue = req->queue = &cd->queue;
	/* FIXME circumvention to improve performance when no irq is
	 * available.
	 */
if (GENWQE_POLLING_ENABLED)
genwqe_check_ddcb_queue(cd, queue);
/*
* It must be ensured to process all DDCBs in successive
* order. Use a lock here in order to prevent nested DDCB
* enqueuing.
*/
spin_lock_irqsave(&queue->ddcb_lock, flags);
pddcb = get_next_ddcb(cd, queue, &req->num); /* get ptr and num */
if (pddcb == NULL) {
int rc;
spin_unlock_irqrestore(&queue->ddcb_lock, flags);
if (f_flags & O_NONBLOCK) {
queue->return_on_busy++;
return -EBUSY;
}
queue->wait_on_busy++;
rc = wait_event_interruptible(queue->busy_waitq,
queue_free_ddcbs(queue) != 0);
dev_dbg(&pci_dev->dev, "[%s] waiting for free DDCB: rc=%d\n",
__func__, rc);
if (rc == -ERESTARTSYS)
return rc; /* interrupted by a signal */
goto retry;
}
if (queue->ddcb_req[req->num] != NULL) {
spin_unlock_irqrestore(&queue->ddcb_lock, flags);
dev_err(&pci_dev->dev,
"[%s] picked DDCB %d with req=%p still in use!!\n",
__func__, req->num, req);
return -EFAULT;
}
ddcb_requ_set_state(req, GENWQE_REQU_ENQUEUED);
queue->ddcb_req[req->num] = req;
pddcb->cmdopts_16 = cpu_to_be16(req->cmd.cmdopts);
pddcb->cmd = req->cmd.cmd;
pddcb->acfunc = req->cmd.acfunc; /* functional unit */
/*
* We know that we can get retc 0x104 with CRC error, do not
* stop the queue in those cases for this command. XDIR = 1
* does not work for old SLU versions.
*
* Last bitstream with the old XDIR behavior had SLU_ID
* 0x34199.
*/
if ((cd->slu_unitcfg & 0xFFFF0ull) > 0x34199ull)
pddcb->xdir = 0x1;
else
pddcb->xdir = 0x0;
pddcb->psp = (((req->cmd.asiv_length / 8) << 4) |
((req->cmd.asv_length / 8)));
pddcb->disp_ts_64 = cpu_to_be64(req->cmd.disp_ts);
/*
* If copying the whole DDCB_ASIV_LENGTH is impacting
* performance we need to change it to
* req->cmd.asiv_length. But simulation benefits from some
* non-architectured bits behind the architectured content.
*
* How much data is copied depends on the availability of the
* ATS field, which was introduced late. If the ATS field is
* supported ASIV is 8 bytes shorter than it used to be. Since
* the ATS field is copied too, the code should do exactly
* what it did before, but I wanted to make copying of the ATS
* field very explicit.
*/
if (genwqe_get_slu_id(cd) <= 0x2) {
memcpy(&pddcb->__asiv[0], /* destination */
&req->cmd.__asiv[0], /* source */
DDCB_ASIV_LENGTH); /* req->cmd.asiv_length */
} else {
pddcb->n.ats_64 = cpu_to_be64(req->cmd.ats);
memcpy(&pddcb->n.asiv[0], /* destination */
&req->cmd.asiv[0], /* source */
DDCB_ASIV_LENGTH_ATS); /* req->cmd.asiv_length */
}
pddcb->icrc_hsi_shi_32 = cpu_to_be32(0x00000000); /* for crc */
/*
* Calculate CRC_16 for corresponding range PSP(7:4). Include
* empty 4 bytes prior to the data.
*/
icrc = genwqe_crc16((const u8 *)pddcb,
ICRC_LENGTH(req->cmd.asiv_length), 0xffff);
pddcb->icrc_hsi_shi_32 = cpu_to_be32((u32)icrc << 16);
/* enable DDCB completion irq */
if (!GENWQE_POLLING_ENABLED)
pddcb->icrc_hsi_shi_32 |= DDCB_INTR_BE32;
dev_dbg(&pci_dev->dev, "INPUT DDCB#%d\n", req->num);
genwqe_hexdump(pci_dev, pddcb, sizeof(*pddcb));
if (ddcb_requ_collect_debug_data(req)) {
/* use the kernel copy of debug data. copying back to
user buffer happens later */
genwqe_init_debug_data(cd, &req->debug_data);
memcpy(&req->debug_data.ddcb_before, pddcb,
sizeof(req->debug_data.ddcb_before));
}
enqueue_ddcb(cd, queue, pddcb, req->num);
queue->ddcbs_in_flight++;
if (queue->ddcbs_in_flight > queue->ddcbs_max_in_flight)
queue->ddcbs_max_in_flight = queue->ddcbs_in_flight;
ddcb_requ_set_state(req, GENWQE_REQU_TAPPED);
spin_unlock_irqrestore(&queue->ddcb_lock, flags);
wake_up_interruptible(&cd->queue_waitq);
return 0;
}
/**
* __genwqe_execute_raw_ddcb() - Setup and execute DDCB
* @cd: pointer to genwqe device descriptor
* @cmd: user provided DDCB command
* @f_flags: file mode: blocking, non-blocking
*/
int __genwqe_execute_raw_ddcb(struct genwqe_dev *cd,
struct genwqe_ddcb_cmd *cmd,
unsigned int f_flags)
{
int rc = 0;
struct pci_dev *pci_dev = cd->pci_dev;
struct ddcb_requ *req = container_of(cmd, struct ddcb_requ, cmd);
if (cmd->asiv_length > DDCB_ASIV_LENGTH) {
dev_err(&pci_dev->dev, "[%s] err: wrong asiv_length of %d\n",
__func__, cmd->asiv_length);
return -EINVAL;
}
if (cmd->asv_length > DDCB_ASV_LENGTH) {
dev_err(&pci_dev->dev, "[%s] err: wrong asv_length of %d\n",
			__func__, cmd->asv_length);
return -EINVAL;
}
rc = __genwqe_enqueue_ddcb(cd, req, f_flags);
if (rc != 0)
return rc;
rc = __genwqe_wait_ddcb(cd, req);
if (rc < 0) /* error or signal interrupt */
goto err_exit;
if (ddcb_requ_collect_debug_data(req)) {
if (copy_to_user((struct genwqe_debug_data __user *)
(unsigned long)cmd->ddata_addr,
&req->debug_data,
sizeof(struct genwqe_debug_data)))
return -EFAULT;
}
/*
* Higher values than 0x102 indicate completion with faults,
* lower values than 0x102 indicate processing faults. Note
	 * that the DDCB might have been purged, e.g. by Ctrl+C.
*/
if (cmd->retc != DDCB_RETC_COMPLETE) {
/* This might happen e.g. flash read, and needs to be
handled by the upper layer code. */
rc = -EBADMSG; /* not processed/error retc */
}
return rc;
err_exit:
__genwqe_purge_ddcb(cd, req);
if (ddcb_requ_collect_debug_data(req)) {
if (copy_to_user((struct genwqe_debug_data __user *)
(unsigned long)cmd->ddata_addr,
&req->debug_data,
sizeof(struct genwqe_debug_data)))
return -EFAULT;
}
return rc;
}
/**
* genwqe_next_ddcb_ready() - Figure out if the next DDCB is already finished
* @cd: pointer to genwqe device descriptor
*
* We use this as condition for our wait-queue code.
*/
static int genwqe_next_ddcb_ready(struct genwqe_dev *cd)
{
unsigned long flags;
struct ddcb *pddcb;
struct ddcb_queue *queue = &cd->queue;
spin_lock_irqsave(&queue->ddcb_lock, flags);
if (queue_empty(queue)) { /* empty queue */
spin_unlock_irqrestore(&queue->ddcb_lock, flags);
return 0;
}
pddcb = &queue->ddcb_vaddr[queue->ddcb_act];
if (pddcb->icrc_hsi_shi_32 & DDCB_COMPLETED_BE32) { /* ddcb ready */
spin_unlock_irqrestore(&queue->ddcb_lock, flags);
return 1;
}
spin_unlock_irqrestore(&queue->ddcb_lock, flags);
return 0;
}
/**
* genwqe_ddcbs_in_flight() - Check how many DDCBs are in flight
* @cd: pointer to genwqe device descriptor
*
 * Keep track of the number of DDCBs which are currently in the
 * queue. This is needed for statistics as well as for deciding
 * whether to wait or to poll when no interrupts are available.
*/
int genwqe_ddcbs_in_flight(struct genwqe_dev *cd)
{
unsigned long flags;
int ddcbs_in_flight = 0;
struct ddcb_queue *queue = &cd->queue;
spin_lock_irqsave(&queue->ddcb_lock, flags);
ddcbs_in_flight += queue->ddcbs_in_flight;
spin_unlock_irqrestore(&queue->ddcb_lock, flags);
return ddcbs_in_flight;
}
static int setup_ddcb_queue(struct genwqe_dev *cd, struct ddcb_queue *queue)
{
int rc, i;
struct ddcb *pddcb;
u64 val64;
unsigned int queue_size;
struct pci_dev *pci_dev = cd->pci_dev;
if (GENWQE_DDCB_MAX < 2)
return -EINVAL;
queue_size = roundup(GENWQE_DDCB_MAX * sizeof(struct ddcb), PAGE_SIZE);
queue->ddcbs_in_flight = 0; /* statistics */
queue->ddcbs_max_in_flight = 0;
queue->ddcbs_completed = 0;
queue->return_on_busy = 0;
queue->wait_on_busy = 0;
queue->ddcb_seq = 0x100; /* start sequence number */
queue->ddcb_max = GENWQE_DDCB_MAX;
queue->ddcb_vaddr = __genwqe_alloc_consistent(cd, queue_size,
&queue->ddcb_daddr);
if (queue->ddcb_vaddr == NULL) {
dev_err(&pci_dev->dev,
"[%s] **err: could not allocate DDCB **\n", __func__);
return -ENOMEM;
}
queue->ddcb_req = kcalloc(queue->ddcb_max, sizeof(struct ddcb_requ *),
GFP_KERNEL);
if (!queue->ddcb_req) {
rc = -ENOMEM;
goto free_ddcbs;
}
queue->ddcb_waitqs = kcalloc(queue->ddcb_max,
sizeof(wait_queue_head_t),
GFP_KERNEL);
if (!queue->ddcb_waitqs) {
rc = -ENOMEM;
goto free_requs;
}
for (i = 0; i < queue->ddcb_max; i++) {
pddcb = &queue->ddcb_vaddr[i]; /* DDCBs */
pddcb->icrc_hsi_shi_32 = DDCB_COMPLETED_BE32;
pddcb->retc_16 = cpu_to_be16(0xfff);
queue->ddcb_req[i] = NULL; /* requests */
init_waitqueue_head(&queue->ddcb_waitqs[i]); /* waitqueues */
}
queue->ddcb_act = 0;
queue->ddcb_next = 0; /* queue is empty */
spin_lock_init(&queue->ddcb_lock);
init_waitqueue_head(&queue->busy_waitq);
val64 = ((u64)(queue->ddcb_max - 1) << 8); /* lastptr */
__genwqe_writeq(cd, queue->IO_QUEUE_CONFIG, 0x07); /* iCRC/vCRC */
__genwqe_writeq(cd, queue->IO_QUEUE_SEGMENT, queue->ddcb_daddr);
__genwqe_writeq(cd, queue->IO_QUEUE_INITSQN, queue->ddcb_seq);
__genwqe_writeq(cd, queue->IO_QUEUE_WRAP, val64);
return 0;
free_requs:
kfree(queue->ddcb_req);
queue->ddcb_req = NULL;
free_ddcbs:
__genwqe_free_consistent(cd, queue_size, queue->ddcb_vaddr,
queue->ddcb_daddr);
queue->ddcb_vaddr = NULL;
queue->ddcb_daddr = 0ull;
return rc;
}
static int ddcb_queue_initialized(struct ddcb_queue *queue)
{
return queue->ddcb_vaddr != NULL;
}
static void free_ddcb_queue(struct genwqe_dev *cd, struct ddcb_queue *queue)
{
unsigned int queue_size;
queue_size = roundup(queue->ddcb_max * sizeof(struct ddcb), PAGE_SIZE);
kfree(queue->ddcb_req);
queue->ddcb_req = NULL;
if (queue->ddcb_vaddr) {
__genwqe_free_consistent(cd, queue_size, queue->ddcb_vaddr,
queue->ddcb_daddr);
queue->ddcb_vaddr = NULL;
queue->ddcb_daddr = 0ull;
}
}
static irqreturn_t genwqe_pf_isr(int irq, void *dev_id)
{
u64 gfir;
struct genwqe_dev *cd = (struct genwqe_dev *)dev_id;
struct pci_dev *pci_dev = cd->pci_dev;
/*
* In case of fatal FIR error the queue is stopped, such that
* we can safely check it without risking anything.
*/
cd->irqs_processed++;
wake_up_interruptible(&cd->queue_waitq);
/*
* Checking for errors before kicking the queue might be
* safer, but slower for the good-case ... See above.
*/
gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
if (((gfir & GFIR_ERR_TRIGGER) != 0x0) &&
!pci_channel_offline(pci_dev)) {
if (cd->use_platform_recovery) {
/*
* Since we use raw accessors, EEH errors won't be
* detected by the platform until we do a non-raw
* MMIO or config space read
*/
readq(cd->mmio + IO_SLC_CFGREG_GFIR);
/* Don't do anything if the PCI channel is frozen */
if (pci_channel_offline(pci_dev))
goto exit;
}
wake_up_interruptible(&cd->health_waitq);
/*
		 * By default GFIRs cause recovery actions. This
* count is just for debug when recovery is masked.
*/
dev_err_ratelimited(&pci_dev->dev,
"[%s] GFIR=%016llx\n",
__func__, gfir);
}
exit:
return IRQ_HANDLED;
}
static irqreturn_t genwqe_vf_isr(int irq, void *dev_id)
{
struct genwqe_dev *cd = (struct genwqe_dev *)dev_id;
cd->irqs_processed++;
wake_up_interruptible(&cd->queue_waitq);
return IRQ_HANDLED;
}
/**
* genwqe_card_thread() - Work thread for the DDCB queue
* @data: pointer to genwqe device descriptor
*
* The idea is to check if there are DDCBs in processing. If there are
 * some finished DDCBs, we process them and wake up the
* requestors. Otherwise we give other processes time using
* cond_resched().
*/
static int genwqe_card_thread(void *data)
{
int should_stop = 0;
struct genwqe_dev *cd = (struct genwqe_dev *)data;
while (!kthread_should_stop()) {
genwqe_check_ddcb_queue(cd, &cd->queue);
if (GENWQE_POLLING_ENABLED) {
wait_event_interruptible_timeout(
cd->queue_waitq,
genwqe_ddcbs_in_flight(cd) ||
(should_stop = kthread_should_stop()), 1);
} else {
wait_event_interruptible_timeout(
cd->queue_waitq,
genwqe_next_ddcb_ready(cd) ||
(should_stop = kthread_should_stop()), HZ);
}
if (should_stop)
break;
/*
* Avoid soft lockups on heavy loads; we do not want
* to disable our interrupts.
*/
cond_resched();
}
return 0;
}
/**
* genwqe_setup_service_layer() - Setup DDCB queue
* @cd: pointer to genwqe device descriptor
*
* Allocate DDCBs. Configure Service Layer Controller (SLC).
*
* Return: 0 success
*/
int genwqe_setup_service_layer(struct genwqe_dev *cd)
{
int rc;
struct ddcb_queue *queue;
struct pci_dev *pci_dev = cd->pci_dev;
if (genwqe_is_privileged(cd)) {
rc = genwqe_card_reset(cd);
if (rc < 0) {
dev_err(&pci_dev->dev,
"[%s] err: reset failed.\n", __func__);
return rc;
}
genwqe_read_softreset(cd);
}
queue = &cd->queue;
queue->IO_QUEUE_CONFIG = IO_SLC_QUEUE_CONFIG;
queue->IO_QUEUE_STATUS = IO_SLC_QUEUE_STATUS;
queue->IO_QUEUE_SEGMENT = IO_SLC_QUEUE_SEGMENT;
queue->IO_QUEUE_INITSQN = IO_SLC_QUEUE_INITSQN;
queue->IO_QUEUE_OFFSET = IO_SLC_QUEUE_OFFSET;
queue->IO_QUEUE_WRAP = IO_SLC_QUEUE_WRAP;
queue->IO_QUEUE_WTIME = IO_SLC_QUEUE_WTIME;
queue->IO_QUEUE_ERRCNTS = IO_SLC_QUEUE_ERRCNTS;
queue->IO_QUEUE_LRW = IO_SLC_QUEUE_LRW;
rc = setup_ddcb_queue(cd, queue);
if (rc != 0) {
rc = -ENODEV;
goto err_out;
}
init_waitqueue_head(&cd->queue_waitq);
cd->card_thread = kthread_run(genwqe_card_thread, cd,
GENWQE_DEVNAME "%d_thread",
cd->card_idx);
if (IS_ERR(cd->card_thread)) {
rc = PTR_ERR(cd->card_thread);
cd->card_thread = NULL;
goto stop_free_queue;
}
rc = genwqe_set_interrupt_capability(cd, GENWQE_MSI_IRQS);
if (rc)
goto stop_kthread;
/*
* We must have all wait-queues initialized when we enable the
* interrupts. Otherwise we might crash if we get an early
* irq.
*/
init_waitqueue_head(&cd->health_waitq);
if (genwqe_is_privileged(cd)) {
rc = request_irq(pci_dev->irq, genwqe_pf_isr, IRQF_SHARED,
GENWQE_DEVNAME, cd);
} else {
rc = request_irq(pci_dev->irq, genwqe_vf_isr, IRQF_SHARED,
GENWQE_DEVNAME, cd);
}
if (rc < 0) {
dev_err(&pci_dev->dev, "irq %d not free.\n", pci_dev->irq);
goto stop_irq_cap;
}
cd->card_state = GENWQE_CARD_USED;
return 0;
stop_irq_cap:
genwqe_reset_interrupt_capability(cd);
stop_kthread:
kthread_stop(cd->card_thread);
cd->card_thread = NULL;
stop_free_queue:
free_ddcb_queue(cd, queue);
err_out:
return rc;
}
/**
* queue_wake_up_all() - Handles fatal error case
* @cd: pointer to genwqe device descriptor
*
 * The PCI device became unusable and we have to stop all pending
* requests as fast as we can. The code after this must purge the
* DDCBs in question and ensure that all mappings are freed.
*/
static int queue_wake_up_all(struct genwqe_dev *cd)
{
unsigned int i;
unsigned long flags;
struct ddcb_queue *queue = &cd->queue;
spin_lock_irqsave(&queue->ddcb_lock, flags);
for (i = 0; i < queue->ddcb_max; i++)
		wake_up_interruptible(&queue->ddcb_waitqs[i]);
wake_up_interruptible(&queue->busy_waitq);
spin_unlock_irqrestore(&queue->ddcb_lock, flags);
return 0;
}
/**
* genwqe_finish_queue() - Remove any genwqe devices and user-interfaces
* @cd: pointer to genwqe device descriptor
*
* Relies on the pre-condition that there are no users of the card
* device anymore e.g. with open file-descriptors.
*
* This function must be robust enough to be called twice.
*/
int genwqe_finish_queue(struct genwqe_dev *cd)
{
int i, rc = 0, in_flight;
int waitmax = GENWQE_DDCB_SOFTWARE_TIMEOUT;
struct pci_dev *pci_dev = cd->pci_dev;
struct ddcb_queue *queue = &cd->queue;
if (!ddcb_queue_initialized(queue))
return 0;
/* Do not wipe out the error state. */
if (cd->card_state == GENWQE_CARD_USED)
cd->card_state = GENWQE_CARD_UNUSED;
	/* Wake up all requests in the DDCB queue such that they
	   can be removed cleanly. */
queue_wake_up_all(cd);
/* We must wait to get rid of the DDCBs in flight */
for (i = 0; i < waitmax; i++) {
in_flight = genwqe_ddcbs_in_flight(cd);
if (in_flight == 0)
break;
dev_dbg(&pci_dev->dev,
" DEBUG [%d/%d] waiting for queue to get empty: %d requests!\n",
i, waitmax, in_flight);
		/*
		 * Severe error situation: the card itself has
		 * 16 DDCB queues, each queue has e.g. 32 entries,
		 * each DDCB has a hardware timeout of currently 250
		 * msec but the PFs have a hardware timeout of 8 sec
		 * ... so I take something large.
		 */
msleep(1000);
}
if (i == waitmax) {
dev_err(&pci_dev->dev, " [%s] err: queue is not empty!!\n",
__func__);
rc = -EIO;
}
return rc;
}
/**
* genwqe_release_service_layer() - Shutdown DDCB queue
* @cd: genwqe device descriptor
*
* This function must be robust enough to be called twice.
*/
int genwqe_release_service_layer(struct genwqe_dev *cd)
{
struct pci_dev *pci_dev = cd->pci_dev;
if (!ddcb_queue_initialized(&cd->queue))
return 1;
free_irq(pci_dev->irq, cd);
genwqe_reset_interrupt_capability(cd);
if (cd->card_thread != NULL) {
kthread_stop(cd->card_thread);
cd->card_thread = NULL;
}
free_ddcb_queue(cd, &cd->queue);
return 0;
}
/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
/* Copyright(c) 2014 - 2020 Intel Corporation */
#ifndef ADF_CFG_USER_H_
#define ADF_CFG_USER_H_
#include "adf_cfg_common.h"
#include "adf_cfg_strings.h"
struct adf_user_cfg_key_val {
char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
union {
struct adf_user_cfg_key_val *next;
__u64 padding3;
};
enum adf_cfg_val_type type;
} __packed;
struct adf_user_cfg_section {
char name[ADF_CFG_MAX_SECTION_LEN_IN_BYTES];
union {
struct adf_user_cfg_key_val *params;
__u64 padding1;
};
union {
struct adf_user_cfg_section *next;
__u64 padding3;
};
} __packed;
struct adf_user_cfg_ctl_data {
union {
struct adf_user_cfg_section *config_section;
__u64 padding;
};
__u8 device_id;
} __packed;
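/*
 * Illustrative sketch (not part of the ABI): how a user-space caller
 * might chain one section holding a single key/value pair before
 * handing the control block to the driver. The section and key names
 * below are examples only; the real names come from adf_cfg_strings.h.
 *
 *	struct adf_user_cfg_key_val kv = { 0 };
 *	struct adf_user_cfg_section sec = { 0 };
 *	struct adf_user_cfg_ctl_data ctl = { 0 };
 *
 *	snprintf(kv.key, sizeof(kv.key), "NumberCyInstances");
 *	snprintf(kv.val, sizeof(kv.val), "1");
 *	kv.type = ADF_DEC;
 *
 *	snprintf(sec.name, sizeof(sec.name), "GENERAL");
 *	sec.params = &kv;
 *
 *	ctl.config_section = &sec;
 *	ctl.device_id = 0;
 */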
#endif
// SPDX-License-Identifier: GPL-2.0-only
/*
* drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr.c
*
* Samsung MFC (Multi Function Codec - FIMV) driver
* This file contains hw related functions.
*
* Kamil Debski, Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*/
#include "s5p_mfc_debug.h"
#include "s5p_mfc_opr.h"
#include "s5p_mfc_opr_v5.h"
#include "s5p_mfc_opr_v6.h"
void s5p_mfc_init_hw_ops(struct s5p_mfc_dev *dev)
{
if (IS_MFCV6_PLUS(dev)) {
dev->mfc_ops = s5p_mfc_init_hw_ops_v6();
dev->warn_start = S5P_FIMV_ERR_WARNINGS_START_V6;
} else {
dev->mfc_ops = s5p_mfc_init_hw_ops_v5();
dev->warn_start = S5P_FIMV_ERR_WARNINGS_START;
}
}
void s5p_mfc_init_regs(struct s5p_mfc_dev *dev)
{
if (IS_MFCV6_PLUS(dev))
dev->mfc_regs = s5p_mfc_init_regs_v6_plus(dev);
}
int s5p_mfc_alloc_priv_buf(struct s5p_mfc_dev *dev, unsigned int mem_ctx,
struct s5p_mfc_priv_buf *b)
{
unsigned int bits = dev->mem_size >> PAGE_SHIFT;
unsigned int count = b->size >> PAGE_SHIFT;
unsigned int align = (SZ_64K >> PAGE_SHIFT) - 1;
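	/*
	 * The bitmap tracks the reserved region in page-sized slots;
	 * 'align' is the mask handed to bitmap_find_next_zero_area()
	 * below to keep allocations 64 KiB aligned.
	 */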
unsigned int start, offset;
mfc_debug(3, "Allocating priv: %zu\n", b->size);
if (dev->mem_virt) {
start = bitmap_find_next_zero_area(dev->mem_bitmap, bits, 0, count, align);
if (start > bits)
goto no_mem;
bitmap_set(dev->mem_bitmap, start, count);
offset = start << PAGE_SHIFT;
b->virt = dev->mem_virt + offset;
b->dma = dev->mem_base + offset;
} else {
struct device *mem_dev = dev->mem_dev[mem_ctx];
dma_addr_t base = dev->dma_base[mem_ctx];
b->ctx = mem_ctx;
b->virt = dma_alloc_coherent(mem_dev, b->size, &b->dma, GFP_KERNEL);
if (!b->virt)
goto no_mem;
if (b->dma < base) {
mfc_err("Invalid memory configuration - buffer (%pad) is below base memory address(%pad)\n",
&b->dma, &base);
dma_free_coherent(mem_dev, b->size, b->virt, b->dma);
return -ENOMEM;
}
}
mfc_debug(3, "Allocated addr %p %pad\n", b->virt, &b->dma);
return 0;
no_mem:
mfc_err("Allocating private buffer of size %zu failed\n", b->size);
return -ENOMEM;
}
int s5p_mfc_alloc_generic_buf(struct s5p_mfc_dev *dev, unsigned int mem_ctx,
struct s5p_mfc_priv_buf *b)
{
struct device *mem_dev = dev->mem_dev[mem_ctx];
mfc_debug(3, "Allocating generic buf: %zu\n", b->size);
b->ctx = mem_ctx;
b->virt = dma_alloc_coherent(mem_dev, b->size, &b->dma, GFP_KERNEL);
if (!b->virt)
goto no_mem;
mfc_debug(3, "Allocated addr %p %pad\n", b->virt, &b->dma);
return 0;
no_mem:
mfc_err("Allocating generic buffer of size %zu failed\n", b->size);
return -ENOMEM;
}
void s5p_mfc_release_priv_buf(struct s5p_mfc_dev *dev,
struct s5p_mfc_priv_buf *b)
{
if (dev->mem_virt) {
unsigned int start = (b->dma - dev->mem_base) >> PAGE_SHIFT;
unsigned int count = b->size >> PAGE_SHIFT;
bitmap_clear(dev->mem_bitmap, start, count);
} else {
struct device *mem_dev = dev->mem_dev[b->ctx];
dma_free_coherent(mem_dev, b->size, b->virt, b->dma);
}
b->virt = NULL;
b->dma = 0;
b->size = 0;
}
void s5p_mfc_release_generic_buf(struct s5p_mfc_dev *dev,
struct s5p_mfc_priv_buf *b)
{
struct device *mem_dev = dev->mem_dev[b->ctx];
dma_free_coherent(mem_dev, b->size, b->virt, b->dma);
b->virt = NULL;
b->dma = 0;
b->size = 0;
}
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/******************************************************************************
*
* Module Name: nsutils - Utilities for accessing ACPI namespace, accessing
* parents and siblings and Scope manipulation
*
* Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
#include "amlcode.h"
#define _COMPONENT ACPI_NAMESPACE
ACPI_MODULE_NAME("nsutils")
/* Local prototypes */
#ifdef ACPI_OBSOLETE_FUNCTIONS
acpi_name acpi_ns_find_parent_name(struct acpi_namespace_node *node_to_search);
#endif
/*******************************************************************************
*
* FUNCTION: acpi_ns_print_node_pathname
*
* PARAMETERS: node - Object
* message - Prefix message
*
* DESCRIPTION: Print an object's full namespace pathname
* Manages allocation/freeing of a pathname buffer
*
******************************************************************************/
void
acpi_ns_print_node_pathname(struct acpi_namespace_node *node,
const char *message)
{
struct acpi_buffer buffer;
acpi_status status;
if (!node) {
acpi_os_printf("[NULL NAME]");
return;
}
/* Convert handle to full pathname and print it (with supplied message) */
buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
status = acpi_ns_handle_to_pathname(node, &buffer, TRUE);
if (ACPI_SUCCESS(status)) {
if (message) {
acpi_os_printf("%s ", message);
}
acpi_os_printf("%s", (char *)buffer.pointer);
ACPI_FREE(buffer.pointer);
}
}
/*******************************************************************************
*
* FUNCTION: acpi_ns_get_type
*
* PARAMETERS: node - Parent Node to be examined
*
* RETURN: Type field from Node whose handle is passed
*
* DESCRIPTION: Return the type of a Namespace node
*
******************************************************************************/
acpi_object_type acpi_ns_get_type(struct acpi_namespace_node * node)
{
ACPI_FUNCTION_TRACE(ns_get_type);
if (!node) {
ACPI_WARNING((AE_INFO, "Null Node parameter"));
return_UINT8(ACPI_TYPE_ANY);
}
return_UINT8(node->type);
}
/*******************************************************************************
*
* FUNCTION: acpi_ns_local
*
* PARAMETERS: type - A namespace object type
*
* RETURN: LOCAL if names must be found locally in objects of the
* passed type, 0 if enclosing scopes should be searched
*
* DESCRIPTION: Returns scope rule for the given object type.
*
******************************************************************************/
u32 acpi_ns_local(acpi_object_type type)
{
ACPI_FUNCTION_TRACE(ns_local);
if (!acpi_ut_valid_object_type(type)) {
/* Type code out of range */
ACPI_WARNING((AE_INFO, "Invalid Object Type 0x%X", type));
return_UINT32(ACPI_NS_NORMAL);
}
return_UINT32(acpi_gbl_ns_properties[type] & ACPI_NS_LOCAL);
}
/*******************************************************************************
*
* FUNCTION: acpi_ns_get_internal_name_length
*
* PARAMETERS: info - Info struct initialized with the
* external name pointer.
*
* RETURN: None
*
* DESCRIPTION: Calculate the length of the internal (AML) namestring
* corresponding to the external (ASL) namestring.
*
******************************************************************************/
void acpi_ns_get_internal_name_length(struct acpi_namestring_info *info)
{
const char *next_external_char;
u32 i;
ACPI_FUNCTION_ENTRY();
next_external_char = info->external_name;
info->num_carats = 0;
info->num_segments = 0;
info->fully_qualified = FALSE;
/*
* For the internal name, the required length is 4 bytes per segment,
* plus 1 each for root_prefix, multi_name_prefix_op, segment count,
	 * trailing null (which is not really needed, but there's no harm in
* putting it there)
*
* strlen() + 1 covers the first name_seg, which has no path separator
*/
if (ACPI_IS_ROOT_PREFIX(*next_external_char)) {
info->fully_qualified = TRUE;
next_external_char++;
/* Skip redundant root_prefix, like \\_SB.PCI0.SBRG.EC0 */
while (ACPI_IS_ROOT_PREFIX(*next_external_char)) {
next_external_char++;
}
} else {
/* Handle Carat prefixes */
while (ACPI_IS_PARENT_PREFIX(*next_external_char)) {
info->num_carats++;
next_external_char++;
}
}
/*
* Determine the number of ACPI name "segments" by counting the number of
* path separators within the string. Start with one segment since the
* segment count is [(# separators) + 1], and zero separators is ok.
*/
if (*next_external_char) {
info->num_segments = 1;
for (i = 0; next_external_char[i]; i++) {
if (ACPI_IS_PATH_SEPARATOR(next_external_char[i])) {
info->num_segments++;
}
}
}
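	/*
	 * Worked example: "^^_SB_.PCI0" yields num_carats = 2 and
	 * num_segments = 2, so the length computed below is
	 * (4 * 2) + 4 + 2 = 14 bytes; the fixed 4 bytes cover the
	 * worst-case prefix/count bytes plus the trailing null.
	 */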
info->length = (ACPI_NAMESEG_SIZE * info->num_segments) +
4 + info->num_carats;
info->next_external_char = next_external_char;
}
/*******************************************************************************
*
* FUNCTION: acpi_ns_build_internal_name
*
* PARAMETERS: info - Info struct fully initialized
*
* RETURN: Status
*
* DESCRIPTION: Construct the internal (AML) namestring
* corresponding to the external (ASL) namestring.
*
******************************************************************************/
acpi_status acpi_ns_build_internal_name(struct acpi_namestring_info *info)
{
u32 num_segments = info->num_segments;
char *internal_name = info->internal_name;
const char *external_name = info->next_external_char;
char *result = NULL;
u32 i;
ACPI_FUNCTION_TRACE(ns_build_internal_name);
/* Setup the correct prefixes, counts, and pointers */
if (info->fully_qualified) {
internal_name[0] = AML_ROOT_PREFIX;
if (num_segments <= 1) {
result = &internal_name[1];
} else if (num_segments == 2) {
internal_name[1] = AML_DUAL_NAME_PREFIX;
result = &internal_name[2];
} else {
internal_name[1] = AML_MULTI_NAME_PREFIX;
internal_name[2] = (char)num_segments;
result = &internal_name[3];
}
} else {
/*
* Not fully qualified.
* Handle Carats first, then append the name segments
*/
i = 0;
if (info->num_carats) {
for (i = 0; i < info->num_carats; i++) {
internal_name[i] = AML_PARENT_PREFIX;
}
}
if (num_segments <= 1) {
result = &internal_name[i];
} else if (num_segments == 2) {
internal_name[i] = AML_DUAL_NAME_PREFIX;
result = &internal_name[(acpi_size)i + 1];
} else {
internal_name[i] = AML_MULTI_NAME_PREFIX;
internal_name[(acpi_size)i + 1] = (char)num_segments;
result = &internal_name[(acpi_size)i + 2];
}
}
/* Build the name (minus path separators) */
for (; num_segments; num_segments--) {
for (i = 0; i < ACPI_NAMESEG_SIZE; i++) {
if (ACPI_IS_PATH_SEPARATOR(*external_name) ||
(*external_name == 0)) {
/* Pad the segment with underscore(s) if segment is short */
result[i] = '_';
} else {
/* Convert the character to uppercase and save it */
result[i] = (char)toupper((int)*external_name);
external_name++;
}
}
/* Now we must have a path separator, or the pathname is bad */
if (!ACPI_IS_PATH_SEPARATOR(*external_name) &&
(*external_name != 0)) {
return_ACPI_STATUS(AE_BAD_PATHNAME);
}
		/* Move on to the next segment */
external_name++;
result += ACPI_NAMESEG_SIZE;
}
/* Terminate the string */
*result = 0;
if (info->fully_qualified) {
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"Returning [%p] (abs) \"\\%s\"\n",
internal_name, internal_name));
} else {
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Returning [%p] (rel) \"%s\"\n",
internal_name, internal_name));
}
return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_ns_internalize_name
*
* PARAMETERS: *external_name - External representation of name
* **Converted name - Where to return the resulting
 *                                        internal representation of the name
*
* RETURN: Status
*
* DESCRIPTION: Convert an external representation (e.g. "\_PR_.CPU0")
* to internal form (e.g. 5c 2f 02 5f 50 52 5f 43 50 55 30)
*
*******************************************************************************/
acpi_status
acpi_ns_internalize_name(const char *external_name, char **converted_name)
{
char *internal_name;
struct acpi_namestring_info info;
acpi_status status;
ACPI_FUNCTION_TRACE(ns_internalize_name);
if ((!external_name) || (*external_name == 0) || (!converted_name)) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
/* Get the length of the new internal name */
info.external_name = external_name;
acpi_ns_get_internal_name_length(&info);
/* We need a segment to store the internal name */
internal_name = ACPI_ALLOCATE_ZEROED(info.length);
if (!internal_name) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
/* Build the name */
info.internal_name = internal_name;
status = acpi_ns_build_internal_name(&info);
if (ACPI_FAILURE(status)) {
ACPI_FREE(internal_name);
return_ACPI_STATUS(status);
}
*converted_name = internal_name;
return_ACPI_STATUS(AE_OK);
}
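/*
 * Usage sketch (hypothetical caller): the returned buffer is allocated
 * with ACPI_ALLOCATE_ZEROED() and must be released by the caller with
 * ACPI_FREE() once it is no longer needed.
 *
 *	char *internal_path;
 *	acpi_status status;
 *
 *	status = acpi_ns_internalize_name("\\_SB_.PCI0", &internal_path);
 *	if (ACPI_SUCCESS(status)) {
 *		(... use internal_path ...)
 *		ACPI_FREE(internal_path);
 *	}
 */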
/*******************************************************************************
*
* FUNCTION: acpi_ns_externalize_name
*
* PARAMETERS: internal_name_length - Length of the internal name below
* internal_name - Internal representation of name
* converted_name_length - Where the length is returned
* converted_name - Where the resulting external name
* is returned
*
* RETURN: Status
*
* DESCRIPTION: Convert internal name (e.g. 5c 2f 02 5f 50 52 5f 43 50 55 30)
* to its external (printable) form (e.g. "\_PR_.CPU0")
*
******************************************************************************/
acpi_status
acpi_ns_externalize_name(u32 internal_name_length,
const char *internal_name,
u32 * converted_name_length, char **converted_name)
{
u32 names_index = 0;
u32 num_segments = 0;
u32 required_length;
u32 prefix_length = 0;
u32 i = 0;
u32 j = 0;
ACPI_FUNCTION_TRACE(ns_externalize_name);
if (!internal_name_length || !internal_name || !converted_name) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
/* Check for a prefix (one '\' | one or more '^') */
switch (internal_name[0]) {
case AML_ROOT_PREFIX:
prefix_length = 1;
break;
case AML_PARENT_PREFIX:
for (i = 0; i < internal_name_length; i++) {
if (ACPI_IS_PARENT_PREFIX(internal_name[i])) {
prefix_length = i + 1;
} else {
break;
}
}
if (i == internal_name_length) {
prefix_length = i;
}
break;
default:
break;
}
/*
* Check for object names. Note that there could be 0-255 of these
* 4-byte elements.
*/
if (prefix_length < internal_name_length) {
switch (internal_name[prefix_length]) {
case AML_MULTI_NAME_PREFIX:
/* <count> 4-byte names */
names_index = prefix_length + 2;
num_segments = (u8)
internal_name[(acpi_size)prefix_length + 1];
break;
case AML_DUAL_NAME_PREFIX:
/* Two 4-byte names */
names_index = prefix_length + 1;
num_segments = 2;
break;
case 0:
/* null_name */
names_index = 0;
num_segments = 0;
break;
default:
/* one 4-byte name */
names_index = prefix_length;
num_segments = 1;
break;
}
}
/*
* Calculate the length of converted_name, which equals the length
* of the prefix, length of all object names, length of any required
* punctuation ('.') between object names, plus the NULL terminator.
*/
required_length = prefix_length + (4 * num_segments) +
((num_segments > 0) ? (num_segments - 1) : 0) + 1;
/*
* Check to see if we're still in bounds. If not, there's a problem
* with internal_name (invalid format).
*/
if (required_length > internal_name_length) {
ACPI_ERROR((AE_INFO, "Invalid internal name"));
return_ACPI_STATUS(AE_BAD_PATHNAME);
}
/* Build the converted_name */
*converted_name = ACPI_ALLOCATE_ZEROED(required_length);
if (!(*converted_name)) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
j = 0;
for (i = 0; i < prefix_length; i++) {
(*converted_name)[j++] = internal_name[i];
}
if (num_segments > 0) {
for (i = 0; i < num_segments; i++) {
if (i > 0) {
(*converted_name)[j++] = '.';
}
/* Copy and validate the 4-char name segment */
ACPI_COPY_NAMESEG(&(*converted_name)[j],
&internal_name[names_index]);
acpi_ut_repair_name(&(*converted_name)[j]);
j += ACPI_NAMESEG_SIZE;
names_index += ACPI_NAMESEG_SIZE;
}
}
if (converted_name_length) {
*converted_name_length = (u32) required_length;
}
return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_ns_validate_handle
*
* PARAMETERS: handle - Handle to be validated and typecast to a
* namespace node.
*
* RETURN: A pointer to a namespace node
*
* DESCRIPTION: Convert a namespace handle to a namespace node. Handles special
* cases for the root node.
*
* NOTE: Real integer handles would allow for more verification
* and keep all pointers within this subsystem - however this introduces
* more overhead and has not been necessary to this point. Drivers
* holding handles are typically notified before a node becomes invalid
* due to a table unload.
*
******************************************************************************/
struct acpi_namespace_node *acpi_ns_validate_handle(acpi_handle handle)
{
ACPI_FUNCTION_ENTRY();
/* Parameter validation */
if ((!handle) || (handle == ACPI_ROOT_OBJECT)) {
return (acpi_gbl_root_node);
}
/* We can at least attempt to verify the handle */
if (ACPI_GET_DESCRIPTOR_TYPE(handle) != ACPI_DESC_TYPE_NAMED) {
return (NULL);
}
return (ACPI_CAST_PTR(struct acpi_namespace_node, handle));
}
/*******************************************************************************
*
* FUNCTION: acpi_ns_terminate
*
* PARAMETERS: none
*
* RETURN: none
*
* DESCRIPTION: free memory allocated for namespace and ACPI table storage.
*
******************************************************************************/
void acpi_ns_terminate(void)
{
acpi_status status;
ACPI_FUNCTION_TRACE(ns_terminate);
/*
* Free the entire namespace -- all nodes and all objects
* attached to the nodes
*/
acpi_ns_delete_namespace_subtree(acpi_gbl_root_node);
/* Delete any objects attached to the root node */
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
return_VOID;
}
acpi_ns_delete_node(acpi_gbl_root_node);
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Namespace freed\n"));
return_VOID;
}
/*******************************************************************************
*
* FUNCTION: acpi_ns_opens_scope
*
* PARAMETERS: type - A valid namespace type
*
* RETURN: NEWSCOPE if the passed type "opens a name scope" according
* to the ACPI specification, else 0
*
******************************************************************************/
u32 acpi_ns_opens_scope(acpi_object_type type)
{
ACPI_FUNCTION_ENTRY();
if (type > ACPI_TYPE_LOCAL_MAX) {
/* type code out of range */
ACPI_WARNING((AE_INFO, "Invalid Object Type 0x%X", type));
return (ACPI_NS_NORMAL);
}
return (((u32)acpi_gbl_ns_properties[type]) & ACPI_NS_NEWSCOPE);
}
/*******************************************************************************
*
* FUNCTION: acpi_ns_get_node_unlocked
*
* PARAMETERS: *pathname - Name to be found, in external (ASL) format. The
 *                            \ (backslash) and ^ (caret) prefixes, and the
* . (period) to separate segments are supported.
* prefix_node - Root of subtree to be searched, or NS_ALL for the
* root of the name space. If Name is fully
* qualified (first s8 is '\'), the passed value
* of Scope will not be accessed.
* flags - Used to indicate whether to perform upsearch or
* not.
* return_node - Where the Node is returned
*
* DESCRIPTION: Look up a name relative to a given scope and return the
* corresponding Node. NOTE: Scope can be null.
*
 * MUTEX:       Doesn't lock the namespace
*
******************************************************************************/
acpi_status
acpi_ns_get_node_unlocked(struct acpi_namespace_node *prefix_node,
const char *pathname,
u32 flags, struct acpi_namespace_node **return_node)
{
union acpi_generic_state scope_info;
acpi_status status;
char *internal_path;
ACPI_FUNCTION_TRACE_PTR(ns_get_node_unlocked,
ACPI_CAST_PTR(char, pathname));
/* Simplest case is a null pathname */
if (!pathname) {
*return_node = prefix_node;
if (!prefix_node) {
*return_node = acpi_gbl_root_node;
}
return_ACPI_STATUS(AE_OK);
}
/* Quick check for a reference to the root */
if (ACPI_IS_ROOT_PREFIX(pathname[0]) && (!pathname[1])) {
*return_node = acpi_gbl_root_node;
return_ACPI_STATUS(AE_OK);
}
/* Convert path to internal representation */
status = acpi_ns_internalize_name(pathname, &internal_path);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
/* Setup lookup scope (search starting point) */
scope_info.scope.node = prefix_node;
/* Lookup the name in the namespace */
status = acpi_ns_lookup(&scope_info, internal_path, ACPI_TYPE_ANY,
ACPI_IMODE_EXECUTE,
(flags | ACPI_NS_DONT_OPEN_SCOPE), NULL,
return_node);
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%s, %s\n",
pathname, acpi_format_exception(status)));
}
ACPI_FREE(internal_path);
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ns_get_node
*
* PARAMETERS: *pathname - Name to be found, in external (ASL) format. The
 *                            \ (backslash) and ^ (caret) prefixes, and the
* . (period) to separate segments are supported.
* prefix_node - Root of subtree to be searched, or NS_ALL for the
* root of the name space. If Name is fully
* qualified (first s8 is '\'), the passed value
* of Scope will not be accessed.
* flags - Used to indicate whether to perform upsearch or
* not.
* return_node - Where the Node is returned
*
* DESCRIPTION: Look up a name relative to a given scope and return the
* corresponding Node. NOTE: Scope can be null.
*
* MUTEX: Locks namespace
*
******************************************************************************/
acpi_status
acpi_ns_get_node(struct acpi_namespace_node *prefix_node,
const char *pathname,
u32 flags, struct acpi_namespace_node **return_node)
{
acpi_status status;
ACPI_FUNCTION_TRACE_PTR(ns_get_node, ACPI_CAST_PTR(char, pathname));
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
status = acpi_ns_get_node_unlocked(prefix_node, pathname,
flags, return_node);
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return_ACPI_STATUS(status);
}
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-2024 Intel Corporation
*/
#include "ivpu_drv.h"
#include "ivpu_fw.h"
#include "ivpu_hw.h"
#include "ivpu_hw_37xx_reg.h"
#include "ivpu_hw_40xx_reg.h"
#include "ivpu_hw_btrs.h"
#include "ivpu_hw_ip.h"
#include "ivpu_hw_reg_io.h"
#include "ivpu_mmu.h"
#include "ivpu_pm.h"
#define PWR_ISLAND_STATUS_TIMEOUT_US (5 * USEC_PER_MSEC)
#define TIM_SAFE_ENABLE 0xf1d0dead
#define TIM_WATCHDOG_RESET_VALUE 0xffffffff
#define ICB_0_IRQ_MASK_37XX ((REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT)) | \
(REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT)) | \
(REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT)) | \
(REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT)) | \
(REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT)) | \
(REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT)) | \
(REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT)))
#define ICB_1_IRQ_MASK_37XX ((REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_2_INT)) | \
(REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_3_INT)) | \
(REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_4_INT)))
#define ICB_0_1_IRQ_MASK_37XX ((((u64)ICB_1_IRQ_MASK_37XX) << 32) | ICB_0_IRQ_MASK_37XX)
#define ICB_0_IRQ_MASK_40XX ((REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT)) | \
(REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT)) | \
(REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT)) | \
(REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT)) | \
(REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT)) | \
(REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT)) | \
(REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT)))
#define ICB_1_IRQ_MASK_40XX ((REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_2_INT)) | \
(REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_3_INT)) | \
(REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_4_INT)))
#define ICB_0_1_IRQ_MASK_40XX ((((u64)ICB_1_IRQ_MASK_40XX) << 32) | ICB_0_IRQ_MASK_40XX)
#define ITF_FIREWALL_VIOLATION_MASK_37XX ((REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_ROM_CMX)) | \
(REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_DBG)) | \
(REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_CTRL)) | \
(REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, DEC400)) | \
(REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_NCE)) | \
(REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI)) | \
(REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI_CMX)))
#define ITF_FIREWALL_VIOLATION_MASK_40XX ((REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, CSS_ROM_CMX)) | \
(REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, CSS_DBG)) | \
(REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, CSS_CTRL)) | \
(REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, DEC400)) | \
(REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, MSS_NCE)) | \
(REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI)) | \
(REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI_CMX)))
static int wait_for_ip_bar(struct ivpu_device *vdev)
{
return REGV_POLL_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, AON, 0, 100);
}
static void host_ss_rst_clr(struct ivpu_device *vdev)
{
u32 val = 0;
val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, TOP_NOC, val);
val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, DSS_MAS, val);
val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, MSS_MAS, val);
REGV_WR32(VPU_37XX_HOST_SS_CPR_RST_CLR, val);
}
static int host_ss_noc_qreqn_check_37xx(struct ivpu_device *vdev, u32 exp_val)
{
u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QREQN);
if (!REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, exp_val, val))
return -EIO;
return 0;
}
static int host_ss_noc_qreqn_check_40xx(struct ivpu_device *vdev, u32 exp_val)
{
u32 val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QREQN);
if (!REG_TEST_FLD_NUM(VPU_40XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, exp_val, val))
return -EIO;
return 0;
}
static int host_ss_noc_qreqn_check(struct ivpu_device *vdev, u32 exp_val)
{
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
return host_ss_noc_qreqn_check_37xx(vdev, exp_val);
else
return host_ss_noc_qreqn_check_40xx(vdev, exp_val);
}
static int host_ss_noc_qacceptn_check_37xx(struct ivpu_device *vdev, u32 exp_val)
{
u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QACCEPTN);
if (!REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QACCEPTN, TOP_SOCMMIO, exp_val, val))
return -EIO;
return 0;
}
static int host_ss_noc_qacceptn_check_40xx(struct ivpu_device *vdev, u32 exp_val)
{
u32 val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QACCEPTN);
if (!REG_TEST_FLD_NUM(VPU_40XX_HOST_SS_NOC_QACCEPTN, TOP_SOCMMIO, exp_val, val))
return -EIO;
return 0;
}
static int host_ss_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
{
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
return host_ss_noc_qacceptn_check_37xx(vdev, exp_val);
else
return host_ss_noc_qacceptn_check_40xx(vdev, exp_val);
}
static int host_ss_noc_qdeny_check_37xx(struct ivpu_device *vdev, u32 exp_val)
{
u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QDENY);
if (!REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QDENY, TOP_SOCMMIO, exp_val, val))
return -EIO;
return 0;
}
static int host_ss_noc_qdeny_check_40xx(struct ivpu_device *vdev, u32 exp_val)
{
u32 val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QDENY);
if (!REG_TEST_FLD_NUM(VPU_40XX_HOST_SS_NOC_QDENY, TOP_SOCMMIO, exp_val, val))
return -EIO;
return 0;
}
static int host_ss_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val)
{
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
return host_ss_noc_qdeny_check_37xx(vdev, exp_val);
else
return host_ss_noc_qdeny_check_40xx(vdev, exp_val);
}
static int top_noc_qreqn_check_37xx(struct ivpu_device *vdev, u32 exp_val)
{
u32 val = REGV_RD32(VPU_37XX_TOP_NOC_QREQN);
if (!REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QREQN, CPU_CTRL, exp_val, val) ||
!REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, exp_val, val))
return -EIO;
return 0;
}
static int top_noc_qreqn_check_40xx(struct ivpu_device *vdev, u32 exp_val)
{
u32 val = REGV_RD32(VPU_40XX_TOP_NOC_QREQN);
if (!REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QREQN, CPU_CTRL, exp_val, val) ||
!REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, exp_val, val))
return -EIO;
return 0;
}
static int top_noc_qreqn_check(struct ivpu_device *vdev, u32 exp_val)
{
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
		return top_noc_qreqn_check_37xx(vdev, exp_val);
	else
		return top_noc_qreqn_check_40xx(vdev, exp_val);
}
int ivpu_hw_ip_host_ss_configure(struct ivpu_device *vdev)
{
int ret;
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
ret = wait_for_ip_bar(vdev);
if (ret) {
ivpu_err(vdev, "Timed out waiting for NPU IP bar\n");
return ret;
}
host_ss_rst_clr(vdev);
}
ret = host_ss_noc_qreqn_check(vdev, 0x0);
if (ret) {
ivpu_err(vdev, "Failed qreqn check: %d\n", ret);
return ret;
}
ret = host_ss_noc_qacceptn_check(vdev, 0x0);
if (ret) {
ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
return ret;
}
ret = host_ss_noc_qdeny_check(vdev, 0x0);
if (ret)
ivpu_err(vdev, "Failed qdeny check %d\n", ret);
return ret;
}
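/*
 * Note (added commentary): the QREQN/QACCEPTN/QDENY registers used above
 * implement the usual NoC Q-channel handshake: the host drives a request
 * (QREQN) and then polls whether the fabric accepted (QACCEPTN) or denied
 * (QDENY) the transition. During configure all three checks expect 0, i.e.
 * the interface must be quiescent before the NPU is brought up.
 */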
static void idle_gen_drive_37xx(struct ivpu_device *vdev, bool enable)
{
u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_VPU_IDLE_GEN);
if (enable)
val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_VPU_IDLE_GEN, EN, val);
else
val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_VPU_IDLE_GEN, EN, val);
REGV_WR32(VPU_37XX_HOST_SS_AON_VPU_IDLE_GEN, val);
}
static void idle_gen_drive_40xx(struct ivpu_device *vdev, bool enable)
{
u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_IDLE_GEN);
if (enable)
val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_IDLE_GEN, EN, val);
else
val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_IDLE_GEN, EN, val);
REGV_WR32(VPU_40XX_HOST_SS_AON_IDLE_GEN, val);
}
void ivpu_hw_ip_idle_gen_enable(struct ivpu_device *vdev)
{
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
idle_gen_drive_37xx(vdev, true);
else
idle_gen_drive_40xx(vdev, true);
}
void ivpu_hw_ip_idle_gen_disable(struct ivpu_device *vdev)
{
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
idle_gen_drive_37xx(vdev, false);
else
idle_gen_drive_40xx(vdev, false);
}
static void
pwr_island_delay_set_50xx(struct ivpu_device *vdev, u32 post, u32 post1, u32 post2, u32 status)
{
u32 val;
val = REGV_RD32(VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY);
val = REG_SET_FLD_NUM(VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY, POST_DLY, post, val);
val = REG_SET_FLD_NUM(VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY, POST1_DLY, post1, val);
val = REG_SET_FLD_NUM(VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY, POST2_DLY, post2, val);
REGV_WR32(VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY, val);
val = REGV_RD32(VPU_50XX_HOST_SS_AON_PWR_ISLAND_STATUS_DLY);
val = REG_SET_FLD_NUM(VPU_50XX_HOST_SS_AON_PWR_ISLAND_STATUS_DLY, STATUS_DLY, status, val);
REGV_WR32(VPU_50XX_HOST_SS_AON_PWR_ISLAND_STATUS_DLY, val);
}
static void pwr_island_trickle_drive_37xx(struct ivpu_device *vdev, bool enable)
{
u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0);
if (enable)
val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, MSS_CPU, val);
else
val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, MSS_CPU, val);
REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, val);
}
static void pwr_island_trickle_drive_40xx(struct ivpu_device *vdev, bool enable)
{
u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0);
if (enable)
val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, CSS_CPU, val);
else
val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, CSS_CPU, val);
REGV_WR32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, val);
}
static void pwr_island_drive_37xx(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0);
	if (enable)
		val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU, val);
	else
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU, val);
	REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, val);
}
static void pwr_island_drive_40xx(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0);
	if (enable)
		val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0, CSS_CPU, val);
	else
		val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0, CSS_CPU, val);
	REGV_WR32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0, val);
}
static void pwr_island_enable(struct ivpu_device *vdev)
{
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
pwr_island_trickle_drive_37xx(vdev, true);
ndelay(500);
pwr_island_drive_37xx(vdev, true);
} else {
pwr_island_trickle_drive_40xx(vdev, true);
ndelay(500);
pwr_island_drive_40xx(vdev, true);
}
}
static int wait_for_pwr_island_status(struct ivpu_device *vdev, u32 exp_val)
{
if (IVPU_WA(punit_disabled))
return 0;
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
return REGV_POLL_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_STATUS0, MSS_CPU, exp_val,
PWR_ISLAND_STATUS_TIMEOUT_US);
else
return REGV_POLL_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_STATUS0, CSS_CPU, exp_val,
PWR_ISLAND_STATUS_TIMEOUT_US);
}
static void pwr_island_isolation_drive_37xx(struct ivpu_device *vdev, bool enable)
{
u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0);
if (enable)
val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, MSS_CPU, val);
else
val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, MSS_CPU, val);
REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, val);
}
static void pwr_island_isolation_drive_40xx(struct ivpu_device *vdev, bool enable)
{
u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0);
if (enable)
val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0, CSS_CPU, val);
else
val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0, CSS_CPU, val);
REGV_WR32(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0, val);
}
static void pwr_island_isolation_drive(struct ivpu_device *vdev, bool enable)
{
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
pwr_island_isolation_drive_37xx(vdev, enable);
else
pwr_island_isolation_drive_40xx(vdev, enable);
}
static void pwr_island_isolation_disable(struct ivpu_device *vdev)
{
pwr_island_isolation_drive(vdev, false);
}
static void host_ss_clk_drive_37xx(struct ivpu_device *vdev, bool enable)
{
u32 val = REGV_RD32(VPU_37XX_HOST_SS_CPR_CLK_SET);
if (enable) {
val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, TOP_NOC, val);
val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, DSS_MAS, val);
val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, MSS_MAS, val);
} else {
val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, TOP_NOC, val);
val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, DSS_MAS, val);
val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, MSS_MAS, val);
}
REGV_WR32(VPU_37XX_HOST_SS_CPR_CLK_SET, val);
}
static void host_ss_clk_drive_40xx(struct ivpu_device *vdev, bool enable)
{
u32 val = REGV_RD32(VPU_40XX_HOST_SS_CPR_CLK_EN);
if (enable) {
val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, TOP_NOC, val);
val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, DSS_MAS, val);
val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, CSS_MAS, val);
} else {
val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, TOP_NOC, val);
val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, DSS_MAS, val);
val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, CSS_MAS, val);
}
REGV_WR32(VPU_40XX_HOST_SS_CPR_CLK_EN, val);
}
static void host_ss_clk_drive(struct ivpu_device *vdev, bool enable)
{
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
host_ss_clk_drive_37xx(vdev, enable);
else
host_ss_clk_drive_40xx(vdev, enable);
}
static void host_ss_clk_enable(struct ivpu_device *vdev)
{
host_ss_clk_drive(vdev, true);
}
static void host_ss_rst_drive_37xx(struct ivpu_device *vdev, bool enable)
{
u32 val = REGV_RD32(VPU_37XX_HOST_SS_CPR_RST_SET);
if (enable) {
val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, TOP_NOC, val);
val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, DSS_MAS, val);
val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, MSS_MAS, val);
} else {
val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, TOP_NOC, val);
val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, DSS_MAS, val);
val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, MSS_MAS, val);
}
REGV_WR32(VPU_37XX_HOST_SS_CPR_RST_SET, val);
}
static void host_ss_rst_drive_40xx(struct ivpu_device *vdev, bool enable)
{
u32 val = REGV_RD32(VPU_40XX_HOST_SS_CPR_RST_EN);
if (enable) {
val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, TOP_NOC, val);
val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, DSS_MAS, val);
val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, CSS_MAS, val);
} else {
val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, TOP_NOC, val);
val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, DSS_MAS, val);
val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, CSS_MAS, val);
}
REGV_WR32(VPU_40XX_HOST_SS_CPR_RST_EN, val);
}
static void host_ss_rst_drive(struct ivpu_device *vdev, bool enable)
{
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
host_ss_rst_drive_37xx(vdev, enable);
else
host_ss_rst_drive_40xx(vdev, enable);
}
static void host_ss_rst_enable(struct ivpu_device *vdev)
{
host_ss_rst_drive(vdev, true);
}
static void host_ss_noc_qreqn_top_socmmio_drive_37xx(struct ivpu_device *vdev, bool enable)
{
u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QREQN);
if (enable)
val = REG_SET_FLD(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
else
val = REG_CLR_FLD(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
REGV_WR32(VPU_37XX_HOST_SS_NOC_QREQN, val);
}
static void host_ss_noc_qreqn_top_socmmio_drive_40xx(struct ivpu_device *vdev, bool enable)
{
u32 val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QREQN);
if (enable)
val = REG_SET_FLD(VPU_40XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
else
val = REG_CLR_FLD(VPU_40XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
REGV_WR32(VPU_40XX_HOST_SS_NOC_QREQN, val);
}
static void host_ss_noc_qreqn_top_socmmio_drive(struct ivpu_device *vdev, bool enable)
{
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
host_ss_noc_qreqn_top_socmmio_drive_37xx(vdev, enable);
else
host_ss_noc_qreqn_top_socmmio_drive_40xx(vdev, enable);
}
static int host_ss_axi_drive(struct ivpu_device *vdev, bool enable)
{
int ret;
host_ss_noc_qreqn_top_socmmio_drive(vdev, enable);
ret = host_ss_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0);
if (ret) {
ivpu_err(vdev, "Failed HOST SS NOC QACCEPTN check: %d\n", ret);
return ret;
}
ret = host_ss_noc_qdeny_check(vdev, 0x0);
if (ret)
ivpu_err(vdev, "Failed HOST SS NOC QDENY check: %d\n", ret);
return ret;
}
static void top_noc_qreqn_drive_40xx(struct ivpu_device *vdev, bool enable)
{
u32 val = REGV_RD32(VPU_40XX_TOP_NOC_QREQN);
if (enable) {
val = REG_SET_FLD(VPU_40XX_TOP_NOC_QREQN, CPU_CTRL, val);
val = REG_SET_FLD(VPU_40XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
} else {
val = REG_CLR_FLD(VPU_40XX_TOP_NOC_QREQN, CPU_CTRL, val);
val = REG_CLR_FLD(VPU_40XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
}
REGV_WR32(VPU_40XX_TOP_NOC_QREQN, val);
}
static void top_noc_qreqn_drive_37xx(struct ivpu_device *vdev, bool enable)
{
u32 val = REGV_RD32(VPU_37XX_TOP_NOC_QREQN);
if (enable) {
val = REG_SET_FLD(VPU_37XX_TOP_NOC_QREQN, CPU_CTRL, val);
val = REG_SET_FLD(VPU_37XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
} else {
val = REG_CLR_FLD(VPU_37XX_TOP_NOC_QREQN, CPU_CTRL, val);
val = REG_CLR_FLD(VPU_37XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
}
REGV_WR32(VPU_37XX_TOP_NOC_QREQN, val);
}
static void top_noc_qreqn_drive(struct ivpu_device *vdev, bool enable)
{
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
top_noc_qreqn_drive_37xx(vdev, enable);
else
top_noc_qreqn_drive_40xx(vdev, enable);
}
int ivpu_hw_ip_host_ss_axi_enable(struct ivpu_device *vdev)
{
return host_ss_axi_drive(vdev, true);
}
static int top_noc_qacceptn_check_37xx(struct ivpu_device *vdev, u32 exp_val)
{
u32 val = REGV_RD32(VPU_37XX_TOP_NOC_QACCEPTN);
if (!REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QACCEPTN, CPU_CTRL, exp_val, val) ||
!REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QACCEPTN, HOSTIF_L2CACHE, exp_val, val))
return -EIO;
return 0;
}
static int top_noc_qacceptn_check_40xx(struct ivpu_device *vdev, u32 exp_val)
{
u32 val = REGV_RD32(VPU_40XX_TOP_NOC_QACCEPTN);
if (!REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QACCEPTN, CPU_CTRL, exp_val, val) ||
!REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QACCEPTN, HOSTIF_L2CACHE, exp_val, val))
return -EIO;
return 0;
}
static int top_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
{
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
return top_noc_qacceptn_check_37xx(vdev, exp_val);
else
return top_noc_qacceptn_check_40xx(vdev, exp_val);
}
static int top_noc_qdeny_check_37xx(struct ivpu_device *vdev, u32 exp_val)
{
u32 val = REGV_RD32(VPU_37XX_TOP_NOC_QDENY);
if (!REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QDENY, CPU_CTRL, exp_val, val) ||
!REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QDENY, HOSTIF_L2CACHE, exp_val, val))
return -EIO;
return 0;
}
static int top_noc_qdeny_check_40xx(struct ivpu_device *vdev, u32 exp_val)
{
u32 val = REGV_RD32(VPU_40XX_TOP_NOC_QDENY);
if (!REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QDENY, CPU_CTRL, exp_val, val) ||
!REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QDENY, HOSTIF_L2CACHE, exp_val, val))
return -EIO;
return 0;
}
static int top_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val)
{
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
return top_noc_qdeny_check_37xx(vdev, exp_val);
else
return top_noc_qdeny_check_40xx(vdev, exp_val);
}
static int top_noc_drive(struct ivpu_device *vdev, bool enable)
{
int ret;
top_noc_qreqn_drive(vdev, enable);
ret = top_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0);
if (ret) {
ivpu_err(vdev, "Failed TOP NOC QACCEPTN check: %d\n", ret);
return ret;
}
ret = top_noc_qdeny_check(vdev, 0x0);
if (ret)
ivpu_err(vdev, "Failed TOP NOC QDENY check: %d\n", ret);
return ret;
}
int ivpu_hw_ip_top_noc_enable(struct ivpu_device *vdev)
{
return top_noc_drive(vdev, true);
}
static void dpu_active_drive_37xx(struct ivpu_device *vdev, bool enable)
{
u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_DPU_ACTIVE);
if (enable)
val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, DPU_ACTIVE, val);
else
val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, DPU_ACTIVE, val);
REGV_WR32(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, val);
}
static void pwr_island_delay_set(struct ivpu_device *vdev)
{
bool high = vdev->hw->pll.profiling_freq == PLL_PROFILING_FREQ_HIGH;
u32 post, post1, post2, status;
if (ivpu_hw_ip_gen(vdev) < IVPU_HW_IP_50XX)
return;
switch (ivpu_device_id(vdev)) {
case PCI_DEVICE_ID_PTL_P:
post = high ? 18 : 0;
post1 = 0;
post2 = 0;
status = high ? 46 : 3;
break;
default:
dump_stack();
ivpu_err(vdev, "Unknown device ID\n");
return;
}
pwr_island_delay_set_50xx(vdev, post, post1, post2, status);
}
int ivpu_hw_ip_pwr_domain_enable(struct ivpu_device *vdev)
{
int ret;
pwr_island_delay_set(vdev);
pwr_island_enable(vdev);
ret = wait_for_pwr_island_status(vdev, 0x1);
if (ret) {
ivpu_err(vdev, "Timed out waiting for power island status\n");
return ret;
}
ret = top_noc_qreqn_check(vdev, 0x0);
if (ret) {
ivpu_err(vdev, "Failed TOP NOC QREQN check %d\n", ret);
return ret;
}
host_ss_clk_enable(vdev);
pwr_island_isolation_disable(vdev);
host_ss_rst_enable(vdev);
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
dpu_active_drive_37xx(vdev, true);
return ret;
}
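/*
 * Note (added commentary): the power-up order above is deliberate: program
 * the island delays, enable the power island, wait for its status, verify
 * the TOP NOC is quiescent, then ungate the host clocks, drop isolation and
 * finally release the resets. On 37xx the DPU is additionally marked active
 * as the last step.
 */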
u64 ivpu_hw_ip_read_perf_timer_counter(struct ivpu_device *vdev)
{
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
return REGV_RD64(VPU_37XX_CPU_SS_TIM_PERF_FREE_CNT);
else
return REGV_RD64(VPU_40XX_CPU_SS_TIM_PERF_EXT_FREE_CNT);
}
static void ivpu_hw_ip_snoop_disable_37xx(struct ivpu_device *vdev)
{
u32 val = REGV_RD32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES);
val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, NOSNOOP_OVERRIDE_EN, val);
val = REG_CLR_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AW_NOSNOOP_OVERRIDE, val);
if (ivpu_is_force_snoop_enabled(vdev))
val = REG_CLR_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AR_NOSNOOP_OVERRIDE, val);
else
val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AR_NOSNOOP_OVERRIDE, val);
REGV_WR32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, val);
}
static void ivpu_hw_ip_snoop_disable_40xx(struct ivpu_device *vdev)
{
u32 val = REGV_RD32(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES);
val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, SNOOP_OVERRIDE_EN, val);
val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AW_SNOOP_OVERRIDE, val);
if (ivpu_is_force_snoop_enabled(vdev))
val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AR_SNOOP_OVERRIDE, val);
else
val = REG_CLR_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AR_SNOOP_OVERRIDE, val);
REGV_WR32(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, val);
}
void ivpu_hw_ip_snoop_disable(struct ivpu_device *vdev)
{
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
return ivpu_hw_ip_snoop_disable_37xx(vdev);
else
return ivpu_hw_ip_snoop_disable_40xx(vdev);
}
static void ivpu_hw_ip_tbu_mmu_enable_37xx(struct ivpu_device *vdev)
{
u32 val = REGV_RD32(VPU_37XX_HOST_IF_TBU_MMUSSIDV);
val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU0_AWMMUSSIDV, val);
val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU0_ARMMUSSIDV, val);
val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU2_AWMMUSSIDV, val);
val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU2_ARMMUSSIDV, val);
REGV_WR32(VPU_37XX_HOST_IF_TBU_MMUSSIDV, val);
}
static void ivpu_hw_ip_tbu_mmu_enable_40xx(struct ivpu_device *vdev)
{
u32 val = REGV_RD32(VPU_40XX_HOST_IF_TBU_MMUSSIDV);
val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU0_AWMMUSSIDV, val);
val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU0_ARMMUSSIDV, val);
val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU1_AWMMUSSIDV, val);
val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU1_ARMMUSSIDV, val);
val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU2_AWMMUSSIDV, val);
val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU2_ARMMUSSIDV, val);
REGV_WR32(VPU_40XX_HOST_IF_TBU_MMUSSIDV, val);
}
void ivpu_hw_ip_tbu_mmu_enable(struct ivpu_device *vdev)
{
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
return ivpu_hw_ip_tbu_mmu_enable_37xx(vdev);
else
return ivpu_hw_ip_tbu_mmu_enable_40xx(vdev);
}
static int soc_cpu_boot_37xx(struct ivpu_device *vdev)
{
u32 val;
val = REGV_RD32(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC);
val = REG_SET_FLD(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RSTRUN0, val);
val = REG_CLR_FLD(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RSTVEC, val);
REGV_WR32(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val);
val = REG_SET_FLD(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RESUME0, val);
REGV_WR32(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val);
val = REG_CLR_FLD(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RESUME0, val);
REGV_WR32(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val);
val = vdev->fw->entry_point >> 9;
REGV_WR32(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, val);
val = REG_SET_FLD(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, DONE, val);
REGV_WR32(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, val);
ivpu_dbg(vdev, PM, "Booting firmware, mode: %s\n",
vdev->fw->entry_point == vdev->fw->cold_boot_entry_point ? "cold boot" : "resume");
return 0;
}
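/*
 * Note (added commentary): the ">> 9" above assumes the firmware entry
 * point is 512-byte aligned, since LOADING_ADDRESS_LO apparently holds the
 * address in 512-byte units. For example, an entry point of 0x80000000
 * would be written as 0x00400000 before the DONE bit is set.
 */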
static int cpu_noc_qacceptn_check_40xx(struct ivpu_device *vdev, u32 exp_val)
{
u32 val = REGV_RD32(VPU_40XX_CPU_SS_CPR_NOC_QACCEPTN);
if (!REG_TEST_FLD_NUM(VPU_40XX_CPU_SS_CPR_NOC_QACCEPTN, TOP_MMIO, exp_val, val))
return -EIO;
return 0;
}
static int cpu_noc_qdeny_check_40xx(struct ivpu_device *vdev, u32 exp_val)
{
u32 val = REGV_RD32(VPU_40XX_CPU_SS_CPR_NOC_QDENY);
if (!REG_TEST_FLD_NUM(VPU_40XX_CPU_SS_CPR_NOC_QDENY, TOP_MMIO, exp_val, val))
return -EIO;
return 0;
}
static void cpu_noc_top_mmio_drive_40xx(struct ivpu_device *vdev, bool enable)
{
u32 val = REGV_RD32(VPU_40XX_CPU_SS_CPR_NOC_QREQN);
if (enable)
val = REG_SET_FLD(VPU_40XX_CPU_SS_CPR_NOC_QREQN, TOP_MMIO, val);
else
val = REG_CLR_FLD(VPU_40XX_CPU_SS_CPR_NOC_QREQN, TOP_MMIO, val);
REGV_WR32(VPU_40XX_CPU_SS_CPR_NOC_QREQN, val);
}
static int soc_cpu_drive_40xx(struct ivpu_device *vdev, bool enable)
{
int ret;
cpu_noc_top_mmio_drive_40xx(vdev, enable);
ret = cpu_noc_qacceptn_check_40xx(vdev, enable ? 0x1 : 0x0);
if (ret) {
ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
return ret;
}
ret = cpu_noc_qdeny_check_40xx(vdev, 0x0);
if (ret)
ivpu_err(vdev, "Failed qdeny check: %d\n", ret);
return ret;
}
static int soc_cpu_enable(struct ivpu_device *vdev)
{
return soc_cpu_drive_40xx(vdev, true);
}
static int soc_cpu_boot_40xx(struct ivpu_device *vdev)
{
int ret;
u32 val;
u64 val64;
ret = soc_cpu_enable(vdev);
if (ret) {
ivpu_err(vdev, "Failed to enable SOC CPU: %d\n", ret);
return ret;
}
val64 = vdev->fw->entry_point;
val64 <<= ffs(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO_IMAGE_LOCATION_MASK) - 1;
REGV_WR64(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO, val64);
val = REGV_RD32(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO);
val = REG_SET_FLD(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO, DONE, val);
REGV_WR32(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO, val);
ivpu_dbg(vdev, PM, "Booting firmware, mode: %s\n",
ivpu_fw_is_cold_boot(vdev) ? "cold boot" : "resume");
return 0;
}
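/*
 * Note (added commentary): "ffs(mask) - 1" is the bit position of the
 * lowest set bit of the IMAGE_LOCATION field mask, so the shift places the
 * entry point into that field wherever it sits in the register. E.g. for a
 * hypothetical mask of 0xFFFFFE00, ffs() returns 10 and the entry point is
 * shifted left by 9 bits.
 */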
int ivpu_hw_ip_soc_cpu_boot(struct ivpu_device *vdev)
{
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
return soc_cpu_boot_37xx(vdev);
else
return soc_cpu_boot_40xx(vdev);
}
static void wdt_disable_37xx(struct ivpu_device *vdev)
{
u32 val;
/* Enable writing and set non-zero WDT value */
REGV_WR32(VPU_37XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
REGV_WR32(VPU_37XX_CPU_SS_TIM_WATCHDOG, TIM_WATCHDOG_RESET_VALUE);
/* Enable writing and disable watchdog timer */
REGV_WR32(VPU_37XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
REGV_WR32(VPU_37XX_CPU_SS_TIM_WDOG_EN, 0);
/* Now clear the timeout interrupt */
val = REGV_RD32(VPU_37XX_CPU_SS_TIM_GEN_CONFIG);
val = REG_CLR_FLD(VPU_37XX_CPU_SS_TIM_GEN_CONFIG, WDOG_TO_INT_CLR, val);
REGV_WR32(VPU_37XX_CPU_SS_TIM_GEN_CONFIG, val);
}
static void wdt_disable_40xx(struct ivpu_device *vdev)
{
	u32 val;
	/* Enable writing and set non-zero WDT value */
	REGV_WR32(VPU_40XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
	REGV_WR32(VPU_40XX_CPU_SS_TIM_WATCHDOG, TIM_WATCHDOG_RESET_VALUE);
	/* Enable writing and disable watchdog timer */
	REGV_WR32(VPU_40XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
	REGV_WR32(VPU_40XX_CPU_SS_TIM_WDOG_EN, 0);
	/* Now clear the timeout interrupt */
	val = REGV_RD32(VPU_40XX_CPU_SS_TIM_GEN_CONFIG);
	val = REG_CLR_FLD(VPU_40XX_CPU_SS_TIM_GEN_CONFIG, WDOG_TO_INT_CLR, val);
	REGV_WR32(VPU_40XX_CPU_SS_TIM_GEN_CONFIG, val);
}
void ivpu_hw_ip_wdt_disable(struct ivpu_device *vdev)
{
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
return wdt_disable_37xx(vdev);
else
return wdt_disable_40xx(vdev);
}
static u32 ipc_rx_count_get_37xx(struct ivpu_device *vdev)
{
u32 count = REGV_RD32_SILENT(VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT);
return REG_GET_FLD(VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT, FILL_LEVEL, count);
}
static u32 ipc_rx_count_get_40xx(struct ivpu_device *vdev)
{
u32 count = REGV_RD32_SILENT(VPU_40XX_HOST_SS_TIM_IPC_FIFO_STAT);
return REG_GET_FLD(VPU_40XX_HOST_SS_TIM_IPC_FIFO_STAT, FILL_LEVEL, count);
}
u32 ivpu_hw_ip_ipc_rx_count_get(struct ivpu_device *vdev)
{
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
return ipc_rx_count_get_37xx(vdev);
else
return ipc_rx_count_get_40xx(vdev);
}
void ivpu_hw_ip_irq_enable(struct ivpu_device *vdev)
{
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
REGV_WR32(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, ITF_FIREWALL_VIOLATION_MASK_37XX);
REGV_WR64(VPU_37XX_HOST_SS_ICB_ENABLE_0, ICB_0_1_IRQ_MASK_37XX);
} else {
REGV_WR32(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, ITF_FIREWALL_VIOLATION_MASK_40XX);
REGV_WR64(VPU_40XX_HOST_SS_ICB_ENABLE_0, ICB_0_1_IRQ_MASK_40XX);
}
}
void ivpu_hw_ip_irq_disable(struct ivpu_device *vdev)
{
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
REGV_WR64(VPU_37XX_HOST_SS_ICB_ENABLE_0, 0x0ull);
REGV_WR32(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, 0x0);
} else {
REGV_WR64(VPU_40XX_HOST_SS_ICB_ENABLE_0, 0x0ull);
REGV_WR32(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, 0x0ul);
}
}
static void diagnose_failure_37xx(struct ivpu_device *vdev)
{
u32 reg = REGV_RD32(VPU_37XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK_37XX;
if (ipc_rx_count_get_37xx(vdev))
ivpu_err(vdev, "IPC FIFO queue not empty, missed IPC IRQ");
if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, reg))
ivpu_err(vdev, "WDT MSS timeout detected\n");
if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, reg))
ivpu_err(vdev, "WDT NCE timeout detected\n");
if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, reg))
ivpu_err(vdev, "NOC Firewall irq detected\n");
}
static void diagnose_failure_40xx(struct ivpu_device *vdev)
{
u32 reg = REGV_RD32(VPU_40XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK_40XX;
if (ipc_rx_count_get_40xx(vdev))
ivpu_err(vdev, "IPC FIFO queue not empty, missed IPC IRQ");
if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, reg))
ivpu_err(vdev, "WDT MSS timeout detected\n");
if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, reg))
ivpu_err(vdev, "WDT NCE timeout detected\n");
if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, reg))
ivpu_err(vdev, "NOC Firewall irq detected\n");
}
void ivpu_hw_ip_diagnose_failure(struct ivpu_device *vdev)
{
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
diagnose_failure_37xx(vdev);
else
diagnose_failure_40xx(vdev);
}
void ivpu_hw_ip_irq_clear(struct ivpu_device *vdev)
{
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
REGV_WR64(VPU_37XX_HOST_SS_ICB_CLEAR_0, ICB_0_1_IRQ_MASK_37XX);
else
REGV_WR64(VPU_40XX_HOST_SS_ICB_CLEAR_0, ICB_0_1_IRQ_MASK_40XX);
}
static void irq_wdt_nce_handler(struct ivpu_device *vdev)
{
ivpu_pm_trigger_recovery(vdev, "WDT NCE IRQ");
}
static void irq_wdt_mss_handler(struct ivpu_device *vdev)
{
ivpu_hw_ip_wdt_disable(vdev);
ivpu_pm_trigger_recovery(vdev, "WDT MSS IRQ");
}
static void irq_noc_firewall_handler(struct ivpu_device *vdev)
{
atomic_inc(&vdev->hw->firewall_irq_counter);
ivpu_dbg(vdev, IRQ, "NOC Firewall interrupt detected, counter %d\n",
atomic_read(&vdev->hw->firewall_irq_counter));
}
/* Handler for IRQs from NPU core */
bool ivpu_hw_ip_irq_handler_37xx(struct ivpu_device *vdev, int irq)
{
u32 status = REGV_RD32(VPU_37XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK_37XX;
if (!status)
return false;
REGV_WR32(VPU_37XX_HOST_SS_ICB_CLEAR_0, status);
if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT, status))
ivpu_mmu_irq_evtq_handler(vdev);
if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT, status))
ivpu_ipc_irq_handler(vdev);
if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT, status))
ivpu_dbg(vdev, IRQ, "MMU sync complete\n");
if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT, status))
ivpu_mmu_irq_gerr_handler(vdev);
if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, status))
irq_wdt_mss_handler(vdev);
if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, status))
irq_wdt_nce_handler(vdev);
if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, status))
irq_noc_firewall_handler(vdev);
return true;
}
/* Handler for IRQs from NPU core */
bool ivpu_hw_ip_irq_handler_40xx(struct ivpu_device *vdev, int irq)
{
u32 status = REGV_RD32(VPU_40XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK_40XX;
if (!status)
return false;
REGV_WR32(VPU_40XX_HOST_SS_ICB_CLEAR_0, status);
if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT, status))
ivpu_mmu_irq_evtq_handler(vdev);
if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT, status))
ivpu_ipc_irq_handler(vdev);
if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT, status))
ivpu_dbg(vdev, IRQ, "MMU sync complete\n");
if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT, status))
ivpu_mmu_irq_gerr_handler(vdev);
if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, status))
irq_wdt_mss_handler(vdev);
if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, status))
irq_wdt_nce_handler(vdev);
if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, status))
irq_noc_firewall_handler(vdev);
return true;
}
static void db_set_37xx(struct ivpu_device *vdev, u32 db_id)
{
u32 reg_stride = VPU_37XX_CPU_SS_DOORBELL_1 - VPU_37XX_CPU_SS_DOORBELL_0;
u32 val = REG_FLD(VPU_37XX_CPU_SS_DOORBELL_0, SET);
REGV_WR32I(VPU_37XX_CPU_SS_DOORBELL_0, reg_stride, db_id, val);
}
static void db_set_40xx(struct ivpu_device *vdev, u32 db_id)
{
u32 reg_stride = VPU_40XX_CPU_SS_DOORBELL_1 - VPU_40XX_CPU_SS_DOORBELL_0;
u32 val = REG_FLD(VPU_40XX_CPU_SS_DOORBELL_0, SET);
REGV_WR32I(VPU_40XX_CPU_SS_DOORBELL_0, reg_stride, db_id, val);
}
void ivpu_hw_ip_db_set(struct ivpu_device *vdev, u32 db_id)
{
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
db_set_37xx(vdev, db_id);
else
db_set_40xx(vdev, db_id);
}
u32 ivpu_hw_ip_ipc_rx_addr_get(struct ivpu_device *vdev)
{
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
return REGV_RD32(VPU_37XX_HOST_SS_TIM_IPC_FIFO_ATM);
else
return REGV_RD32(VPU_40XX_HOST_SS_TIM_IPC_FIFO_ATM);
}
void ivpu_hw_ip_ipc_tx_set(struct ivpu_device *vdev, u32 vpu_addr)
{
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
REGV_WR32(VPU_37XX_CPU_SS_TIM_IPC_FIFO, vpu_addr);
else
REGV_WR32(VPU_40XX_CPU_SS_TIM_IPC_FIFO, vpu_addr);
}
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* clkgen-mux.c: ST GEN-MUX Clock driver
*
* Copyright (C) 2014 STMicroelectronics (R&D) Limited
*
* Authors: Stephen Gallimore <[email protected]>
* Pankaj Dev <[email protected]>
*/
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include "clkgen.h"
static const char ** __init clkgen_mux_get_parents(struct device_node *np,
int *num_parents)
{
const char **parents;
unsigned int nparents;
nparents = of_clk_get_parent_count(np);
if (WARN_ON(!nparents))
return ERR_PTR(-EINVAL);
parents = kcalloc(nparents, sizeof(const char *), GFP_KERNEL);
if (!parents)
return ERR_PTR(-ENOMEM);
*num_parents = of_clk_parent_fill(np, parents, nparents);
return parents;
}
struct clkgen_mux_data {
u32 offset;
u8 shift;
u8 width;
spinlock_t *lock;
unsigned long clk_flags;
u8 mux_flags;
};
static struct clkgen_mux_data stih407_a9_mux_data = {
.offset = 0x1a4,
.shift = 0,
.width = 2,
.lock = &clkgen_a9_lock,
};
static void __init st_of_clkgen_mux_setup(struct device_node *np,
struct clkgen_mux_data *data)
{
struct clk *clk;
void __iomem *reg;
const char **parents;
int num_parents = 0;
struct device_node *parent_np;
/*
* First check for reg property within the node to keep backward
* compatibility, then if reg doesn't exist look at the parent node
*/
reg = of_iomap(np, 0);
if (!reg) {
parent_np = of_get_parent(np);
reg = of_iomap(parent_np, 0);
of_node_put(parent_np);
if (!reg) {
pr_err("%s: Failed to get base address\n", __func__);
return;
}
}
parents = clkgen_mux_get_parents(np, &num_parents);
if (IS_ERR(parents)) {
pr_err("%s: Failed to get parents (%ld)\n",
__func__, PTR_ERR(parents));
goto err_parents;
}
clk = clk_register_mux(NULL, np->name, parents, num_parents,
data->clk_flags | CLK_SET_RATE_PARENT,
reg + data->offset,
data->shift, data->width, data->mux_flags,
data->lock);
if (IS_ERR(clk))
goto err;
pr_debug("%s: parent %s rate %u\n",
__clk_get_name(clk),
__clk_get_name(clk_get_parent(clk)),
(unsigned int)clk_get_rate(clk));
kfree(parents);
of_clk_add_provider(np, of_clk_src_simple_get, clk);
return;
err:
kfree(parents);
err_parents:
iounmap(reg);
}
static void __init st_of_clkgen_a9_mux_setup(struct device_node *np)
{
st_of_clkgen_mux_setup(np, &stih407_a9_mux_data);
}
CLK_OF_DECLARE(clkgen_a9mux, "st,stih407-clkgen-a9-mux",
st_of_clkgen_a9_mux_setup);
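/*
 * Example (added for illustration; the node name, registers and parent
 * clocks below are hypothetical): a device-tree user of this driver could
 * look like:
 *
 *	clk-m-a9@92b0000 {
 *		compatible = "st,stih407-clkgen-a9-mux";
 *		reg = <0x92b0000 0x10000>;
 *		#clock-cells = <0>;
 *		clocks = <&clockgen_a9_pll 0>,
 *			 <&clockgen_a9_pll 0>,
 *			 <&clk_s_c0_flexgen 13>,
 *			 <&clk_m_a9_ext2f_div2>;
 *	};
 *
 * st_of_clkgen_mux_setup() then registers a 2-bit-wide mux at offset 0x1a4
 * of that register block, with the four "clocks" entries as parents.
 */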
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_ENERGY_MODEL_H
#define _LINUX_ENERGY_MODEL_H
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/jump_label.h>
#include <linux/kobject.h>
#include <linux/kref.h>
#include <linux/rcupdate.h>
#include <linux/sched/cpufreq.h>
#include <linux/sched/topology.h>
#include <linux/types.h>
/**
* struct em_perf_state - Performance state of a performance domain
* @performance: CPU performance (capacity) at a given frequency
* @frequency: The frequency in KHz, for consistency with CPUFreq
* @power: The power consumed at this level (by 1 CPU or by a registered
* device). It can be a total power: static and dynamic.
* @cost: The cost coefficient associated with this level, used during
* energy calculation. Equal to: power * max_frequency / frequency
* @flags: see "em_perf_state flags" description below.
*/
struct em_perf_state {
unsigned long performance;
unsigned long frequency;
unsigned long power;
unsigned long cost;
unsigned long flags;
};
/*
* em_perf_state flags:
*
 * EM_PERF_STATE_INEFFICIENT: The performance state is inefficient: there is
 * another performance state in this em_perf_domain with a higher frequency
 * but a lower or equal power cost. Such inefficient states are ignored when
 * using the em_pd_get_efficient_*() functions.
*/
#define EM_PERF_STATE_INEFFICIENT BIT(0)
/**
* struct em_perf_table - Performance states table
* @rcu: RCU used for safe access and destruction
* @kref: Reference counter to track the users
* @state: List of performance states, in ascending order
*/
struct em_perf_table {
struct rcu_head rcu;
struct kref kref;
struct em_perf_state state[];
};
/**
* struct em_perf_domain - Performance domain
* @em_table: Pointer to the runtime modifiable em_perf_table
* @nr_perf_states: Number of performance states
* @min_perf_state: Minimum allowed Performance State index
* @max_perf_state: Maximum allowed Performance State index
* @flags: See "em_perf_domain flags"
* @cpus: Cpumask covering the CPUs of the domain. It's here
* for performance reasons to avoid potential cache
* misses during energy calculations in the scheduler
* and simplifies allocating/freeing that memory region.
*
* In case of CPU device, a "performance domain" represents a group of CPUs
* whose performance is scaled together. All CPUs of a performance domain
* must have the same micro-architecture. Performance domains often have
* a 1-to-1 mapping with CPUFreq policies. In case of other devices the @cpus
* field is unused.
*/
struct em_perf_domain {
struct em_perf_table __rcu *em_table;
int nr_perf_states;
int min_perf_state;
int max_perf_state;
unsigned long flags;
unsigned long cpus[];
};
/*
* em_perf_domain flags:
*
* EM_PERF_DOMAIN_MICROWATTS: The power values are in micro-Watts or some
* other scale.
*
* EM_PERF_DOMAIN_SKIP_INEFFICIENCIES: Skip inefficient states when estimating
* energy consumption.
*
* EM_PERF_DOMAIN_ARTIFICIAL: The power values are artificial and might be
* created by platform missing real power information
*/
#define EM_PERF_DOMAIN_MICROWATTS BIT(0)
#define EM_PERF_DOMAIN_SKIP_INEFFICIENCIES BIT(1)
#define EM_PERF_DOMAIN_ARTIFICIAL BIT(2)
#define em_span_cpus(em) (to_cpumask((em)->cpus))
#define em_is_artificial(em) ((em)->flags & EM_PERF_DOMAIN_ARTIFICIAL)
#ifdef CONFIG_ENERGY_MODEL
/*
 * The max power value in micro-Watts. The limit of 64 Watts is set as
 * a safety net to avoid overflowing multiplications on 32bit platforms.
 * The 32bit limit on the total performance-domain power implies a maximum
 * of 64 CPUs in such a domain.
 */
#define EM_MAX_POWER (64000000) /* 64 Watts */
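/*
 * Added sanity check of the bound above: with at most 64 CPUs at
 * EM_MAX_POWER each, the total domain power is 64 * 64,000,000 uW =
 * 4,096,000,000 uW, which still fits in a u32 (max ~4,294,967,295).
 */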
/*
 * To avoid possible energy-estimation overflow on 32bit machines, limit
 * the number of CPUs in a performance domain. 64bit machines are safe,
 * hence the large value there.
 */
#ifdef CONFIG_64BIT
#define EM_MAX_NUM_CPUS 4096
#else
#define EM_MAX_NUM_CPUS 16
#endif
struct em_data_callback {
/**
* active_power() - Provide power at the next performance state of
* a device
* @dev : Device for which we do this operation (can be a CPU)
* @power : Active power at the performance state
* (modified)
* @freq : Frequency at the performance state in kHz
* (modified)
*
* active_power() must find the lowest performance state of 'dev' above
* 'freq' and update 'power' and 'freq' to the matching active power
* and frequency.
*
* In case of CPUs, the power is the one of a single CPU in the domain,
* expressed in micro-Watts or an abstract scale. It is expected to
* fit in the [0, EM_MAX_POWER] range.
*
* Return 0 on success.
*/
int (*active_power)(struct device *dev, unsigned long *power,
unsigned long *freq);
/**
* get_cost() - Provide the cost at the given performance state of
* a device
* @dev : Device for which we do this operation (can be a CPU)
* @freq : Frequency at the performance state in kHz
* @cost : The cost value for the performance state
* (modified)
*
* In case of CPUs, the cost is the one of a single CPU in the domain.
* It is expected to fit in the [0, EM_MAX_POWER] range due to internal
* usage in EAS calculation.
*
* Return 0 on success, or appropriate error value in case of failure.
*/
int (*get_cost)(struct device *dev, unsigned long freq,
unsigned long *cost);
};
#define EM_SET_ACTIVE_POWER_CB(em_cb, cb) ((em_cb).active_power = cb)
#define EM_ADV_DATA_CB(_active_power_cb, _cost_cb) \
{ .active_power = _active_power_cb, \
.get_cost = _cost_cb }
#define EM_DATA_CB(_active_power_cb) \
EM_ADV_DATA_CB(_active_power_cb, NULL)
struct em_perf_domain *em_cpu_get(int cpu);
struct em_perf_domain *em_pd_get(struct device *dev);
int em_dev_update_perf_domain(struct device *dev,
struct em_perf_table __rcu *new_table);
int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
struct em_data_callback *cb, cpumask_t *span,
bool microwatts);
void em_dev_unregister_perf_domain(struct device *dev);
struct em_perf_table __rcu *em_table_alloc(struct em_perf_domain *pd);
void em_table_free(struct em_perf_table __rcu *table);
int em_dev_compute_costs(struct device *dev, struct em_perf_state *table,
int nr_states);
int em_dev_update_chip_binning(struct device *dev);
int em_update_performance_limits(struct em_perf_domain *pd,
unsigned long freq_min_khz, unsigned long freq_max_khz);
/**
* em_pd_get_efficient_state() - Get an efficient performance state from the EM
* @table: List of performance states, in ascending order
* @pd: performance domain for which this must be done
* @max_util: Max utilization to map with the EM
*
* It is called from the scheduler code quite frequently and as a consequence
* doesn't implement any check.
*
* Return: An efficient performance state id, high enough to meet @max_util
* requirement.
*/
static inline int
em_pd_get_efficient_state(struct em_perf_state *table,
struct em_perf_domain *pd, unsigned long max_util)
{
unsigned long pd_flags = pd->flags;
int min_ps = pd->min_perf_state;
int max_ps = pd->max_perf_state;
struct em_perf_state *ps;
int i;
for (i = min_ps; i <= max_ps; i++) {
ps = &table[i];
if (ps->performance >= max_util) {
if (pd_flags & EM_PERF_DOMAIN_SKIP_INEFFICIENCIES &&
ps->flags & EM_PERF_STATE_INEFFICIENT)
continue;
return i;
}
}
return max_ps;
}
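/*
 * Worked example (added, with illustrative values): given table[].performance
 * of {100, 200, 300, 400}, min/max_perf_state spanning the whole table and
 * max_util = 250, the loop returns index 2 (performance 300). If state 2
 * were flagged EM_PERF_STATE_INEFFICIENT and the domain had
 * EM_PERF_DOMAIN_SKIP_INEFFICIENCIES set, index 3 would be returned instead.
 */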
/**
* em_cpu_energy() - Estimates the energy consumed by the CPUs of a
* performance domain
* @pd : performance domain for which energy has to be estimated
* @max_util : highest utilization among CPUs of the domain
* @sum_util : sum of the utilization of all CPUs in the domain
* @allowed_cpu_cap : maximum allowed CPU capacity for the @pd, which
* might reflect reduced frequency (due to thermal)
*
 * This function must be used only for CPU devices. There is no validation
 * that the EM is of CPU type or that its cpumask is allocated: it is called
 * from the scheduler code quite frequently, which is why there are no checks.
*
* Return: the sum of the energy consumed by the CPUs of the domain assuming
* a capacity state satisfying the max utilization of the domain.
*/
static inline unsigned long em_cpu_energy(struct em_perf_domain *pd,
unsigned long max_util, unsigned long sum_util,
unsigned long allowed_cpu_cap)
{
struct em_perf_table *em_table;
struct em_perf_state *ps;
int i;
#ifdef CONFIG_SCHED_DEBUG
WARN_ONCE(!rcu_read_lock_held(), "EM: rcu read lock needed\n");
#endif
if (!sum_util)
return 0;
/*
* In order to predict the performance state, map the utilization of
* the most utilized CPU of the performance domain to a requested
* performance, like schedutil. Take also into account that the real
* performance might be set lower (due to thermal capping). Thus, clamp
* max utilization to the allowed CPU capacity before calculating
* effective performance.
*/
max_util = min(max_util, allowed_cpu_cap);
/*
* Find the lowest performance state of the Energy Model above the
* requested performance.
*/
em_table = rcu_dereference(pd->em_table);
i = em_pd_get_efficient_state(em_table->state, pd, max_util);
ps = &em_table->state[i];
/*
* The performance (capacity) of a CPU in the domain at the performance
* state (ps) can be computed as:
*
* ps->freq * scale_cpu
* ps->performance = -------------------- (1)
* cpu_max_freq
*
* So, ignoring the costs of idle states (which are not available in
* the EM), the energy consumed by this CPU at that performance state
* is estimated as:
*
* ps->power * cpu_util
* cpu_nrg = -------------------- (2)
* ps->performance
*
* since 'cpu_util / ps->performance' represents its percentage of busy
* time.
*
* NOTE: Although the result of this computation actually is in
* units of power, it can be manipulated as an energy value
* over a scheduling period, since it is assumed to be
* constant during that interval.
*
* By injecting (1) in (2), 'cpu_nrg' can be re-expressed as a product
* of two terms:
*
* ps->power * cpu_max_freq
* cpu_nrg = ------------------------ * cpu_util (3)
* ps->freq * scale_cpu
*
* The first term is static, and is stored in the em_perf_state struct
* as 'ps->cost'.
*
* Since all CPUs of the domain have the same micro-architecture, they
* share the same 'ps->cost', and the same CPU capacity. Hence, the
* total energy of the domain (which is the simple sum of the energy of
* all of its CPUs) can be factorized as:
*
* pd_nrg = ps->cost * \Sum cpu_util (4)
*/
return ps->cost * sum_util;
}
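/*
 * Worked example (added, with illustrative numbers): if the selected state
 * has ps->cost = 5000 and the domain's CPUs contribute utilizations of
 * 200 + 100 + 100 (sum_util = 400), equation (4) estimates
 * 5000 * 400 = 2,000,000 in the EM's abstract energy units for the period.
 */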
/**
* em_pd_nr_perf_states() - Get the number of performance states of a perf.
* domain
* @pd : performance domain for which this must be done
*
* Return: the number of performance states in the performance domain table
*/
static inline int em_pd_nr_perf_states(struct em_perf_domain *pd)
{
return pd->nr_perf_states;
}
/**
* em_perf_state_from_pd() - Get the performance states table of perf.
* domain
* @pd : performance domain for which this must be done
*
 * To use this function, rcu_read_lock() must be held. Once use of the
 * performance states table is finished, rcu_read_unlock() should be
 * called.
*
* Return: the pointer to performance states table of the performance domain
*/
static inline
struct em_perf_state *em_perf_state_from_pd(struct em_perf_domain *pd)
{
return rcu_dereference(pd->em_table)->state;
}
#else
struct em_data_callback {};
#define EM_ADV_DATA_CB(_active_power_cb, _cost_cb) { }
#define EM_DATA_CB(_active_power_cb) { }
#define EM_SET_ACTIVE_POWER_CB(em_cb, cb) do { } while (0)
static inline
int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
struct em_data_callback *cb, cpumask_t *span,
bool microwatts)
{
return -EINVAL;
}
static inline void em_dev_unregister_perf_domain(struct device *dev)
{
}
static inline struct em_perf_domain *em_cpu_get(int cpu)
{
return NULL;
}
static inline struct em_perf_domain *em_pd_get(struct device *dev)
{
return NULL;
}
static inline unsigned long em_cpu_energy(struct em_perf_domain *pd,
unsigned long max_util, unsigned long sum_util,
unsigned long allowed_cpu_cap)
{
return 0;
}
static inline int em_pd_nr_perf_states(struct em_perf_domain *pd)
{
return 0;
}
static inline
struct em_perf_table __rcu *em_table_alloc(struct em_perf_domain *pd)
{
return NULL;
}
static inline void em_table_free(struct em_perf_table __rcu *table) {}
static inline
int em_dev_update_perf_domain(struct device *dev,
struct em_perf_table __rcu *new_table)
{
return -EINVAL;
}
static inline
struct em_perf_state *em_perf_state_from_pd(struct em_perf_domain *pd)
{
return NULL;
}
static inline
int em_dev_compute_costs(struct device *dev, struct em_perf_state *table,
int nr_states)
{
return -EINVAL;
}
static inline int em_dev_update_chip_binning(struct device *dev)
{
return -EINVAL;
}
static inline
int em_update_performance_limits(struct em_perf_domain *pd,
unsigned long freq_min_khz, unsigned long freq_max_khz)
{
return -EINVAL;
}
#endif
#endif
// SPDX-License-Identifier: GPL-2.0+
#include <linux/ptp_classify.h>
#include "lan966x_main.h"
#include "vcap_api.h"
#include "vcap_api_client.h"
#define LAN966X_MAX_PTP_ID 512
/* Represents 1ppm adjustment in 2^59 format with 6.037735849ns as reference
 * The value is calculated as follows: (1/1000000)/((2^-59)/6.037735849)
 */
#define LAN966X_1PPM_FORMAT 3480517749723LL
/* Represents 1ppb adjustment in 2^59 format with 6.037735849ns as reference
 * The value is calculated as follows: (1/1000000000)/((2^-59)/6.037735849)
 */
#define LAN966X_1PPB_FORMAT 3480517749LL
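/* Added sanity check of the two constants above: the nominal increment is
 * 6.037735849 ns in 5.59 fixed point, i.e. about 6.037735849 * 2^59. One ppm
 * of that is 6.037735849e-6 * 2^59 ~= 3480517749723, and one ppb is a
 * thousand times smaller, ~3480517749 -- matching the defines.
 */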
#define TOD_ACC_PIN 0x7
/* This represents the base rule ID for the PTP rules that are added in the
* VCAP to trap frames to CPU. This number needs to be bigger than the maximum
* number of entries that can exist in the VCAP.
*/
#define LAN966X_VCAP_PTP_RULE_ID 1000000
#define LAN966X_VCAP_L2_PTP_TRAP (LAN966X_VCAP_PTP_RULE_ID + 0)
#define LAN966X_VCAP_IPV4_EV_PTP_TRAP (LAN966X_VCAP_PTP_RULE_ID + 1)
#define LAN966X_VCAP_IPV4_GEN_PTP_TRAP (LAN966X_VCAP_PTP_RULE_ID + 2)
#define LAN966X_VCAP_IPV6_EV_PTP_TRAP (LAN966X_VCAP_PTP_RULE_ID + 3)
#define LAN966X_VCAP_IPV6_GEN_PTP_TRAP (LAN966X_VCAP_PTP_RULE_ID + 4)
enum {
PTP_PIN_ACTION_IDLE = 0,
PTP_PIN_ACTION_LOAD,
PTP_PIN_ACTION_SAVE,
PTP_PIN_ACTION_CLOCK,
PTP_PIN_ACTION_DELTA,
PTP_PIN_ACTION_TOD
};
static u64 lan966x_ptp_get_nominal_value(void)
{
	/* This is the default value by which the time of day is increased on
	 * each system clock cycle. It is in 5.59 fixed-point nanosecond format.
	 */
return 0x304d4873ecade305;
}
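/* Note (added commentary): 0x304d4873ecade305 / 2^59 ~= 6.037735849, so the
 * nominal increment corresponds to a reference clock of roughly 165.625 MHz
 * (1 / 6.037735849 ns).
 */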
static int lan966x_ptp_add_trap(struct lan966x_port *port,
int (*add_ptp_key)(struct vcap_rule *vrule,
struct lan966x_port*),
u32 rule_id,
u16 proto)
{
struct lan966x *lan966x = port->lan966x;
struct vcap_rule *vrule;
int err;
vrule = vcap_get_rule(lan966x->vcap_ctrl, rule_id);
if (!IS_ERR(vrule)) {
u32 value, mask;
/* Just modify the ingress port mask and exit */
vcap_rule_get_key_u32(vrule, VCAP_KF_IF_IGR_PORT_MASK,
&value, &mask);
mask &= ~BIT(port->chip_port);
vcap_rule_mod_key_u32(vrule, VCAP_KF_IF_IGR_PORT_MASK,
value, mask);
err = vcap_mod_rule(vrule);
goto free_rule;
}
vrule = vcap_alloc_rule(lan966x->vcap_ctrl, port->dev,
LAN966X_VCAP_CID_IS2_L0,
VCAP_USER_PTP, 0, rule_id);
if (IS_ERR(vrule))
return PTR_ERR(vrule);
err = add_ptp_key(vrule, port);
if (err)
goto free_rule;
err = vcap_rule_add_action_bit(vrule, VCAP_AF_CPU_COPY_ENA, VCAP_BIT_1);
err |= vcap_rule_add_action_u32(vrule, VCAP_AF_MASK_MODE, LAN966X_PMM_REPLACE);
err |= vcap_val_rule(vrule, proto);
if (err)
goto free_rule;
err = vcap_add_rule(vrule);
free_rule:
/* Free the local copy of the rule */
vcap_free_rule(vrule);
return err;
}
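/*
 * Note (added commentary): PTP traps are shared between ports. The first
 * port to request a trap allocates the VCAP rule; later ports only update
 * the ingress-port-mask key of the existing rule (clearing their bit in
 * add, setting it back in del). lan966x_ptp_del_trap() deletes the rule
 * once every port bit is set again, i.e. when no port uses the trap.
 */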
static int lan966x_ptp_del_trap(struct lan966x_port *port,
u32 rule_id)
{
struct lan966x *lan966x = port->lan966x;
struct vcap_rule *vrule;
u32 value, mask;
int err;
vrule = vcap_get_rule(lan966x->vcap_ctrl, rule_id);
if (IS_ERR(vrule))
return -EEXIST;
vcap_rule_get_key_u32(vrule, VCAP_KF_IF_IGR_PORT_MASK, &value, &mask);
mask |= BIT(port->chip_port);
/* No other port requires this trap, so it is safe to remove it */
if (mask == GENMASK(lan966x->num_phys_ports, 0)) {
err = vcap_del_rule(lan966x->vcap_ctrl, port->dev, rule_id);
goto free_rule;
}
vcap_rule_mod_key_u32(vrule, VCAP_KF_IF_IGR_PORT_MASK, value, mask);
err = vcap_mod_rule(vrule);
free_rule:
vcap_free_rule(vrule);
return err;
}
static int lan966x_ptp_add_l2_key(struct vcap_rule *vrule,
struct lan966x_port *port)
{
return vcap_rule_add_key_u32(vrule, VCAP_KF_ETYPE, ETH_P_1588, ~0);
}
static int lan966x_ptp_add_ip_event_key(struct vcap_rule *vrule,
struct lan966x_port *port)
{
return vcap_rule_add_key_u32(vrule, VCAP_KF_L4_DPORT, PTP_EV_PORT, ~0) ||
vcap_rule_add_key_bit(vrule, VCAP_KF_TCP_IS, VCAP_BIT_0);
}
static int lan966x_ptp_add_ip_general_key(struct vcap_rule *vrule,
struct lan966x_port *port)
{
return vcap_rule_add_key_u32(vrule, VCAP_KF_L4_DPORT, PTP_GEN_PORT, ~0) ||
vcap_rule_add_key_bit(vrule, VCAP_KF_TCP_IS, VCAP_BIT_0);
}
static int lan966x_ptp_add_l2_rule(struct lan966x_port *port)
{
return lan966x_ptp_add_trap(port, lan966x_ptp_add_l2_key,
LAN966X_VCAP_L2_PTP_TRAP, ETH_P_ALL);
}
static int lan966x_ptp_add_ipv4_rules(struct lan966x_port *port)
{
int err;
err = lan966x_ptp_add_trap(port, lan966x_ptp_add_ip_event_key,
LAN966X_VCAP_IPV4_EV_PTP_TRAP, ETH_P_IP);
if (err)
return err;
err = lan966x_ptp_add_trap(port, lan966x_ptp_add_ip_general_key,
LAN966X_VCAP_IPV4_GEN_PTP_TRAP, ETH_P_IP);
if (err)
lan966x_ptp_del_trap(port, LAN966X_VCAP_IPV4_EV_PTP_TRAP);
return err;
}
static int lan966x_ptp_add_ipv6_rules(struct lan966x_port *port)
{
int err;
err = lan966x_ptp_add_trap(port, lan966x_ptp_add_ip_event_key,
LAN966X_VCAP_IPV6_EV_PTP_TRAP, ETH_P_IPV6);
if (err)
return err;
err = lan966x_ptp_add_trap(port, lan966x_ptp_add_ip_general_key,
LAN966X_VCAP_IPV6_GEN_PTP_TRAP, ETH_P_IPV6);
if (err)
lan966x_ptp_del_trap(port, LAN966X_VCAP_IPV6_EV_PTP_TRAP);
return err;
}
static int lan966x_ptp_del_l2_rule(struct lan966x_port *port)
{
return lan966x_ptp_del_trap(port, LAN966X_VCAP_L2_PTP_TRAP);
}
static int lan966x_ptp_del_ipv4_rules(struct lan966x_port *port)
{
int err;
err = lan966x_ptp_del_trap(port, LAN966X_VCAP_IPV4_EV_PTP_TRAP);
err |= lan966x_ptp_del_trap(port, LAN966X_VCAP_IPV4_GEN_PTP_TRAP);
return err;
}
static int lan966x_ptp_del_ipv6_rules(struct lan966x_port *port)
{
int err;
err = lan966x_ptp_del_trap(port, LAN966X_VCAP_IPV6_EV_PTP_TRAP);
err |= lan966x_ptp_del_trap(port, LAN966X_VCAP_IPV6_GEN_PTP_TRAP);
return err;
}
static int lan966x_ptp_add_traps(struct lan966x_port *port)
{
int err;
err = lan966x_ptp_add_l2_rule(port);
if (err)
goto err_l2;
err = lan966x_ptp_add_ipv4_rules(port);
if (err)
goto err_ipv4;
err = lan966x_ptp_add_ipv6_rules(port);
if (err)
goto err_ipv6;
return err;
err_ipv6:
lan966x_ptp_del_ipv4_rules(port);
err_ipv4:
lan966x_ptp_del_l2_rule(port);
err_l2:
return err;
}
int lan966x_ptp_del_traps(struct lan966x_port *port)
{
int err;
err = lan966x_ptp_del_l2_rule(port);
err |= lan966x_ptp_del_ipv4_rules(port);
err |= lan966x_ptp_del_ipv6_rules(port);
return err;
}
int lan966x_ptp_setup_traps(struct lan966x_port *port,
struct kernel_hwtstamp_config *cfg)
{
if (cfg->rx_filter == HWTSTAMP_FILTER_NONE)
return lan966x_ptp_del_traps(port);
else
return lan966x_ptp_add_traps(port);
}
int lan966x_ptp_hwtstamp_set(struct lan966x_port *port,
struct kernel_hwtstamp_config *cfg,
struct netlink_ext_ack *extack)
{
struct lan966x *lan966x = port->lan966x;
struct lan966x_phc *phc;
switch (cfg->tx_type) {
case HWTSTAMP_TX_ON:
port->ptp_tx_cmd = IFH_REW_OP_TWO_STEP_PTP;
break;
case HWTSTAMP_TX_ONESTEP_SYNC:
port->ptp_tx_cmd = IFH_REW_OP_ONE_STEP_PTP;
break;
case HWTSTAMP_TX_OFF:
port->ptp_tx_cmd = IFH_REW_OP_NOOP;
break;
default:
return -ERANGE;
}
switch (cfg->rx_filter) {
case HWTSTAMP_FILTER_NONE:
port->ptp_rx_cmd = false;
break;
case HWTSTAMP_FILTER_ALL:
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
case HWTSTAMP_FILTER_NTP_ALL:
port->ptp_rx_cmd = true;
cfg->rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
return -ERANGE;
}
/* Commit back the result & save it */
mutex_lock(&lan966x->ptp_lock);
phc = &lan966x->phc[LAN966X_PHC_PORT];
phc->hwtstamp_config = *cfg;
mutex_unlock(&lan966x->ptp_lock);
return 0;
}
void lan966x_ptp_hwtstamp_get(struct lan966x_port *port,
struct kernel_hwtstamp_config *cfg)
{
struct lan966x *lan966x = port->lan966x;
struct lan966x_phc *phc;
phc = &lan966x->phc[LAN966X_PHC_PORT];
*cfg = phc->hwtstamp_config;
}
static int lan966x_ptp_classify(struct lan966x_port *port, struct sk_buff *skb)
{
struct ptp_header *header;
u8 msgtype;
int type;
if (port->ptp_tx_cmd == IFH_REW_OP_NOOP)
return IFH_REW_OP_NOOP;
type = ptp_classify_raw(skb);
if (type == PTP_CLASS_NONE)
return IFH_REW_OP_NOOP;
header = ptp_parse_header(skb, type);
if (!header)
return IFH_REW_OP_NOOP;
if (port->ptp_tx_cmd == IFH_REW_OP_TWO_STEP_PTP)
return IFH_REW_OP_TWO_STEP_PTP;
/* If it is a sync message and one-step is configured, use the one-step
 * operation; otherwise fall back to two-step
 */
msgtype = ptp_get_msgtype(header, type);
if ((msgtype & 0xf) == 0)
return IFH_REW_OP_ONE_STEP_PTP;
return IFH_REW_OP_TWO_STEP_PTP;
}
static void lan966x_ptp_txtstamp_old_release(struct lan966x_port *port)
{
struct sk_buff *skb, *skb_tmp;
unsigned long flags;
spin_lock_irqsave(&port->tx_skbs.lock, flags);
skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) {
if (time_after(LAN966X_SKB_CB(skb)->jiffies + LAN966X_PTP_TIMEOUT,
jiffies))
break;
__skb_unlink(skb, &port->tx_skbs);
dev_kfree_skb_any(skb);
}
spin_unlock_irqrestore(&port->tx_skbs.lock, flags);
}
int lan966x_ptp_txtstamp_request(struct lan966x_port *port,
struct sk_buff *skb)
{
struct lan966x *lan966x = port->lan966x;
unsigned long flags;
u8 rew_op;
rew_op = lan966x_ptp_classify(port, skb);
LAN966X_SKB_CB(skb)->rew_op = rew_op;
if (rew_op != IFH_REW_OP_TWO_STEP_PTP)
return 0;
lan966x_ptp_txtstamp_old_release(port);
spin_lock_irqsave(&lan966x->ptp_ts_id_lock, flags);
if (lan966x->ptp_skbs == LAN966X_MAX_PTP_ID) {
spin_unlock_irqrestore(&lan966x->ptp_ts_id_lock, flags);
return -EBUSY;
}
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
skb_queue_tail(&port->tx_skbs, skb);
LAN966X_SKB_CB(skb)->ts_id = port->ts_id;
LAN966X_SKB_CB(skb)->jiffies = jiffies;
lan966x->ptp_skbs++;
port->ts_id++;
if (port->ts_id == LAN966X_MAX_PTP_ID)
port->ts_id = 0;
spin_unlock_irqrestore(&lan966x->ptp_ts_id_lock, flags);
return 0;
}
void lan966x_ptp_txtstamp_release(struct lan966x_port *port,
struct sk_buff *skb)
{
struct lan966x *lan966x = port->lan966x;
unsigned long flags;
spin_lock_irqsave(&lan966x->ptp_ts_id_lock, flags);
port->ts_id--;
lan966x->ptp_skbs--;
skb_unlink(skb, &port->tx_skbs);
spin_unlock_irqrestore(&lan966x->ptp_ts_id_lock, flags);
}
static void lan966x_get_hwtimestamp(struct lan966x *lan966x,
struct timespec64 *ts,
u32 nsec)
{
/* Read current PTP time to get seconds */
unsigned long flags;
u32 curr_nsec;
spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_SAVE) |
PTP_PIN_CFG_PIN_DOM_SET(LAN966X_PHC_PORT) |
PTP_PIN_CFG_PIN_SYNC_SET(0),
PTP_PIN_CFG_PIN_ACTION |
PTP_PIN_CFG_PIN_DOM |
PTP_PIN_CFG_PIN_SYNC,
lan966x, PTP_PIN_CFG(TOD_ACC_PIN));
ts->tv_sec = lan_rd(lan966x, PTP_TOD_SEC_LSB(TOD_ACC_PIN));
curr_nsec = lan_rd(lan966x, PTP_TOD_NSEC(TOD_ACC_PIN));
ts->tv_nsec = nsec;
/* The seconds counter has rolled over since the timestamp was latched */
if (curr_nsec < nsec)
ts->tv_sec--;
spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
}
irqreturn_t lan966x_ptp_irq_handler(int irq, void *args)
{
int budget = LAN966X_MAX_PTP_ID;
struct lan966x *lan966x = args;
while (budget--) {
struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
struct skb_shared_hwtstamps shhwtstamps;
struct lan966x_port *port;
struct timespec64 ts;
unsigned long flags;
u32 val, id, txport;
u32 delay;
val = lan_rd(lan966x, PTP_TWOSTEP_CTRL);
/* Check if a timestamp can be retrieved */
if (!(val & PTP_TWOSTEP_CTRL_VLD))
break;
WARN_ON(val & PTP_TWOSTEP_CTRL_OVFL);
if (!(val & PTP_TWOSTEP_CTRL_STAMP_TX))
continue;
/* Retrieve the ts Tx port */
txport = PTP_TWOSTEP_CTRL_STAMP_PORT_GET(val);
/* Retrieve its associated skb */
port = lan966x->ports[txport];
/* Retrieve the delay */
delay = lan_rd(lan966x, PTP_TWOSTEP_STAMP);
delay = PTP_TWOSTEP_STAMP_STAMP_NSEC_GET(delay);
/* Get next timestamp from fifo, which needs to be the
* rx timestamp which represents the id of the frame
*/
lan_rmw(PTP_TWOSTEP_CTRL_NXT_SET(1),
PTP_TWOSTEP_CTRL_NXT,
lan966x, PTP_TWOSTEP_CTRL);
val = lan_rd(lan966x, PTP_TWOSTEP_CTRL);
/* Check if the next timestamp can be retrieved */
if (!(val & PTP_TWOSTEP_CTRL_VLD))
break;
/* Read RX timestamping to get the ID */
id = lan_rd(lan966x, PTP_TWOSTEP_STAMP);
spin_lock_irqsave(&port->tx_skbs.lock, flags);
skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) {
if (LAN966X_SKB_CB(skb)->ts_id != id)
continue;
__skb_unlink(skb, &port->tx_skbs);
skb_match = skb;
break;
}
spin_unlock_irqrestore(&port->tx_skbs.lock, flags);
/* Next ts */
lan_rmw(PTP_TWOSTEP_CTRL_NXT_SET(1),
PTP_TWOSTEP_CTRL_NXT,
lan966x, PTP_TWOSTEP_CTRL);
if (WARN_ON(!skb_match))
continue;
spin_lock_irqsave(&lan966x->ptp_ts_id_lock, flags);
lan966x->ptp_skbs--;
spin_unlock_irqrestore(&lan966x->ptp_ts_id_lock, flags);
/* Get the h/w timestamp */
lan966x_get_hwtimestamp(lan966x, &ts, delay);
/* Set the timestamp into the skb */
shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
skb_tstamp_tx(skb_match, &shhwtstamps);
dev_kfree_skb_any(skb_match);
}
return IRQ_HANDLED;
}
irqreturn_t lan966x_ptp_ext_irq_handler(int irq, void *args)
{
struct lan966x *lan966x = args;
struct lan966x_phc *phc;
unsigned long flags;
u64 time = 0;
time64_t s;
int pin, i;
s64 ns;
if (!(lan_rd(lan966x, PTP_PIN_INTR)))
return IRQ_NONE;
/* Go through all domains and see which pin generated the interrupt */
for (i = 0; i < LAN966X_PHC_COUNT; ++i) {
struct ptp_clock_event ptp_event = {0};
phc = &lan966x->phc[i];
pin = ptp_find_pin_unlocked(phc->clock, PTP_PF_EXTTS, 0);
if (pin == -1)
continue;
if (!(lan_rd(lan966x, PTP_PIN_INTR) & BIT(pin)))
continue;
spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
/* Acknowledge the interrupt so a new one can be raised:
 * writing 1 clears the bit
 */
lan_wr(BIT(pin), lan966x, PTP_PIN_INTR);
/* Get current time */
s = lan_rd(lan966x, PTP_TOD_SEC_MSB(pin));
s <<= 32;
s |= lan_rd(lan966x, PTP_TOD_SEC_LSB(pin));
ns = lan_rd(lan966x, PTP_TOD_NSEC(pin));
ns &= PTP_TOD_NSEC_TOD_NSEC;
spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
if ((ns & 0xFFFFFFF0) == 0x3FFFFFF0) {
s--;
ns &= 0xf;
ns += 999999984;
}
time = ktime_set(s, ns);
ptp_event.index = pin;
ptp_event.timestamp = time;
ptp_event.type = PTP_CLOCK_EXTTS;
ptp_clock_event(phc->clock, &ptp_event);
}
return IRQ_HANDLED;
}
static int lan966x_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info);
struct lan966x *lan966x = phc->lan966x;
unsigned long flags;
bool neg_adj = false;
u64 tod_inc;
u64 ref;
if (!scaled_ppm)
return 0;
if (scaled_ppm < 0) {
neg_adj = true;
scaled_ppm = -scaled_ppm;
}
tod_inc = lan966x_ptp_get_nominal_value();
/* The multiplication is split into two separate additions to avoid
 * overflow: multiplying the full scaled_ppm (which carries a 16-bit
 * fractional part) in one step would overflow 64 bits for adjustments
 * larger than about 20ppm.
 */
ref = LAN966X_1PPM_FORMAT * (scaled_ppm >> 16);
ref += (LAN966X_1PPM_FORMAT * (0xffff & scaled_ppm)) >> 16;
tod_inc = neg_adj ? tod_inc - ref : tod_inc + ref;
spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
lan_rmw(PTP_DOM_CFG_CLKCFG_DIS_SET(BIT(phc->index)),
PTP_DOM_CFG_CLKCFG_DIS,
lan966x, PTP_DOM_CFG);
lan_wr((u32)tod_inc & 0xFFFFFFFF, lan966x,
PTP_CLK_PER_CFG(phc->index, 0));
lan_wr((u32)(tod_inc >> 32), lan966x,
PTP_CLK_PER_CFG(phc->index, 1));
lan_rmw(PTP_DOM_CFG_CLKCFG_DIS_SET(0),
PTP_DOM_CFG_CLKCFG_DIS,
lan966x, PTP_DOM_CFG);
spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
return 0;
}
static int lan966x_ptp_settime64(struct ptp_clock_info *ptp,
const struct timespec64 *ts)
{
struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info);
struct lan966x *lan966x = phc->lan966x;
unsigned long flags;
spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
/* Must be in IDLE mode before the time can be loaded */
lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_IDLE) |
PTP_PIN_CFG_PIN_DOM_SET(phc->index) |
PTP_PIN_CFG_PIN_SYNC_SET(0),
PTP_PIN_CFG_PIN_ACTION |
PTP_PIN_CFG_PIN_DOM |
PTP_PIN_CFG_PIN_SYNC,
lan966x, PTP_PIN_CFG(TOD_ACC_PIN));
/* Set new value */
lan_wr(PTP_TOD_SEC_MSB_TOD_SEC_MSB_SET(upper_32_bits(ts->tv_sec)),
lan966x, PTP_TOD_SEC_MSB(TOD_ACC_PIN));
lan_wr(lower_32_bits(ts->tv_sec),
lan966x, PTP_TOD_SEC_LSB(TOD_ACC_PIN));
lan_wr(ts->tv_nsec, lan966x, PTP_TOD_NSEC(TOD_ACC_PIN));
/* Apply new values */
lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_LOAD) |
PTP_PIN_CFG_PIN_DOM_SET(phc->index) |
PTP_PIN_CFG_PIN_SYNC_SET(0),
PTP_PIN_CFG_PIN_ACTION |
PTP_PIN_CFG_PIN_DOM |
PTP_PIN_CFG_PIN_SYNC,
lan966x, PTP_PIN_CFG(TOD_ACC_PIN));
spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
return 0;
}
int lan966x_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info);
struct lan966x *lan966x = phc->lan966x;
unsigned long flags;
time64_t s;
s64 ns;
spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_SAVE) |
PTP_PIN_CFG_PIN_DOM_SET(phc->index) |
PTP_PIN_CFG_PIN_SYNC_SET(0),
PTP_PIN_CFG_PIN_ACTION |
PTP_PIN_CFG_PIN_DOM |
PTP_PIN_CFG_PIN_SYNC,
lan966x, PTP_PIN_CFG(TOD_ACC_PIN));
s = lan_rd(lan966x, PTP_TOD_SEC_MSB(TOD_ACC_PIN));
s <<= 32;
s |= lan_rd(lan966x, PTP_TOD_SEC_LSB(TOD_ACC_PIN));
ns = lan_rd(lan966x, PTP_TOD_NSEC(TOD_ACC_PIN));
ns &= PTP_TOD_NSEC_TOD_NSEC;
spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
/* Deal with negative values */
if ((ns & 0xFFFFFFF0) == 0x3FFFFFF0) {
s--;
ns &= 0xf;
ns += 999999984;
}
set_normalized_timespec64(ts, s, ns);
return 0;
}
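/* Illustrative sketch (editor's addition, compiled out): the hardware can
 * latch a small negative nanosecond value, encoded as 0x3FFFFFF0..0x3FFFFFFF;
 * the fix-up below mirrors lan966x_ptp_gettime64() above, mapping it to the
 * previous second plus (1e9 - 16 .. 1e9 - 1) ns. Names are hypothetical.
 */
#ifdef LAN966X_PTP_DOC_EXAMPLE
static void example_fixup_negative_ns(long long *s, long long *ns)
{
if ((*ns & 0xFFFFFFF0) == 0x3FFFFFF0) {
(*s)--;
*ns &= 0xf;
*ns += 999999984; /* 1000000000 - 16 */
}
}
#endif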
static int lan966x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info);
struct lan966x *lan966x = phc->lan966x;
if (delta > -(NSEC_PER_SEC / 2) && delta < (NSEC_PER_SEC / 2)) {
unsigned long flags;
spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
/* Must be in IDLE mode before the time can be loaded */
lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_IDLE) |
PTP_PIN_CFG_PIN_DOM_SET(phc->index) |
PTP_PIN_CFG_PIN_SYNC_SET(0),
PTP_PIN_CFG_PIN_ACTION |
PTP_PIN_CFG_PIN_DOM |
PTP_PIN_CFG_PIN_SYNC,
lan966x, PTP_PIN_CFG(TOD_ACC_PIN));
lan_wr(PTP_TOD_NSEC_TOD_NSEC_SET(delta),
lan966x, PTP_TOD_NSEC(TOD_ACC_PIN));
/* Adjust time with the value of PTP_TOD_NSEC */
lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_DELTA) |
PTP_PIN_CFG_PIN_DOM_SET(phc->index) |
PTP_PIN_CFG_PIN_SYNC_SET(0),
PTP_PIN_CFG_PIN_ACTION |
PTP_PIN_CFG_PIN_DOM |
PTP_PIN_CFG_PIN_SYNC,
lan966x, PTP_PIN_CFG(TOD_ACC_PIN));
spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
} else {
/* Fall back to lan966x_ptp_settime64(), which is not exact */
struct timespec64 ts;
u64 now;
lan966x_ptp_gettime64(ptp, &ts);
now = ktime_to_ns(timespec64_to_ktime(ts));
ts = ns_to_timespec64(now + delta);
lan966x_ptp_settime64(ptp, &ts);
}
return 0;
}
static int lan966x_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
enum ptp_pin_function func, unsigned int chan)
{
struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info);
struct lan966x *lan966x = phc->lan966x;
struct ptp_clock_info *info;
int i;
/* Currently only one channel is supported */
if (chan != 0)
return -1;
switch (func) {
case PTP_PF_NONE:
case PTP_PF_PEROUT:
case PTP_PF_EXTTS:
break;
default:
return -1;
}
/* The PTP pins are shared by all the PHCs, so check whether the pin is
 * already connected to another PHC: it is if that pin already has a
 * function assigned on that PHC.
 */
for (i = 0; i < LAN966X_PHC_COUNT; ++i) {
info = &lan966x->phc[i].info;
/* Ignore the check with ourself */
if (ptp == info)
continue;
if (info->pin_config[pin].func == PTP_PF_PEROUT ||
info->pin_config[pin].func == PTP_PF_EXTTS)
return -1;
}
return 0;
}
static int lan966x_ptp_perout(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on)
{
struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info);
struct lan966x *lan966x = phc->lan966x;
struct timespec64 ts_phase, ts_period;
unsigned long flags;
s64 wf_high, wf_low;
bool pps = false;
int pin;
if (rq->perout.flags & ~(PTP_PEROUT_DUTY_CYCLE |
PTP_PEROUT_PHASE))
return -EOPNOTSUPP;
pin = ptp_find_pin(phc->clock, PTP_PF_PEROUT, rq->perout.index);
if (pin == -1 || pin >= LAN966X_PHC_PINS_NUM)
return -EINVAL;
if (!on) {
spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_IDLE) |
PTP_PIN_CFG_PIN_DOM_SET(phc->index) |
PTP_PIN_CFG_PIN_SYNC_SET(0),
PTP_PIN_CFG_PIN_ACTION |
PTP_PIN_CFG_PIN_DOM |
PTP_PIN_CFG_PIN_SYNC,
lan966x, PTP_PIN_CFG(pin));
spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
return 0;
}
if (rq->perout.period.sec == 1 &&
rq->perout.period.nsec == 0)
pps = true;
if (rq->perout.flags & PTP_PEROUT_PHASE) {
ts_phase.tv_sec = rq->perout.phase.sec;
ts_phase.tv_nsec = rq->perout.phase.nsec;
} else {
ts_phase.tv_sec = rq->perout.start.sec;
ts_phase.tv_nsec = rq->perout.start.nsec;
}
if (ts_phase.tv_sec || (ts_phase.tv_nsec && !pps)) {
dev_warn(lan966x->dev,
"Absolute time not supported!\n");
return -EINVAL;
}
if (rq->perout.flags & PTP_PEROUT_DUTY_CYCLE) {
struct timespec64 ts_on;
ts_on.tv_sec = rq->perout.on.sec;
ts_on.tv_nsec = rq->perout.on.nsec;
wf_high = timespec64_to_ns(&ts_on);
} else {
wf_high = 5000;
}
if (pps) {
spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
lan_wr(PTP_WF_LOW_PERIOD_PIN_WFL(ts_phase.tv_nsec),
lan966x, PTP_WF_LOW_PERIOD(pin));
lan_wr(PTP_WF_HIGH_PERIOD_PIN_WFH(wf_high),
lan966x, PTP_WF_HIGH_PERIOD(pin));
lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_CLOCK) |
PTP_PIN_CFG_PIN_DOM_SET(phc->index) |
PTP_PIN_CFG_PIN_SYNC_SET(3),
PTP_PIN_CFG_PIN_ACTION |
PTP_PIN_CFG_PIN_DOM |
PTP_PIN_CFG_PIN_SYNC,
lan966x, PTP_PIN_CFG(pin));
spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
return 0;
}
ts_period.tv_sec = rq->perout.period.sec;
ts_period.tv_nsec = rq->perout.period.nsec;
wf_low = timespec64_to_ns(&ts_period);
wf_low -= wf_high;
spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
lan_wr(PTP_WF_LOW_PERIOD_PIN_WFL(wf_low),
lan966x, PTP_WF_LOW_PERIOD(pin));
lan_wr(PTP_WF_HIGH_PERIOD_PIN_WFH(wf_high),
lan966x, PTP_WF_HIGH_PERIOD(pin));
lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_CLOCK) |
PTP_PIN_CFG_PIN_DOM_SET(phc->index) |
PTP_PIN_CFG_PIN_SYNC_SET(0),
PTP_PIN_CFG_PIN_ACTION |
PTP_PIN_CFG_PIN_DOM |
PTP_PIN_CFG_PIN_SYNC,
lan966x, PTP_PIN_CFG(pin));
spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
return 0;
}
static int lan966x_ptp_extts(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on)
{
struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info);
struct lan966x *lan966x = phc->lan966x;
unsigned long flags;
int pin;
u32 val;
if (lan966x->ptp_ext_irq <= 0)
return -EOPNOTSUPP;
/* Reject requests with unsupported flags */
if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
PTP_RISING_EDGE |
PTP_STRICT_FLAGS))
return -EOPNOTSUPP;
pin = ptp_find_pin(phc->clock, PTP_PF_EXTTS, rq->extts.index);
if (pin == -1 || pin >= LAN966X_PHC_PINS_NUM)
return -EINVAL;
spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_SAVE) |
PTP_PIN_CFG_PIN_SYNC_SET(on ? 3 : 0) |
PTP_PIN_CFG_PIN_DOM_SET(phc->index) |
PTP_PIN_CFG_PIN_SELECT_SET(pin),
PTP_PIN_CFG_PIN_ACTION |
PTP_PIN_CFG_PIN_SYNC |
PTP_PIN_CFG_PIN_DOM |
PTP_PIN_CFG_PIN_SELECT,
lan966x, PTP_PIN_CFG(pin));
val = lan_rd(lan966x, PTP_PIN_INTR_ENA);
if (on)
val |= BIT(pin);
else
val &= ~BIT(pin);
lan_wr(val, lan966x, PTP_PIN_INTR_ENA);
spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
return 0;
}
static int lan966x_ptp_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on)
{
switch (rq->type) {
case PTP_CLK_REQ_PEROUT:
return lan966x_ptp_perout(ptp, rq, on);
case PTP_CLK_REQ_EXTTS:
return lan966x_ptp_extts(ptp, rq, on);
default:
return -EOPNOTSUPP;
}
return 0;
}
static struct ptp_clock_info lan966x_ptp_clock_info = {
.owner = THIS_MODULE,
.name = "lan966x ptp",
.max_adj = 200000,
.gettime64 = lan966x_ptp_gettime64,
.settime64 = lan966x_ptp_settime64,
.adjtime = lan966x_ptp_adjtime,
.adjfine = lan966x_ptp_adjfine,
.verify = lan966x_ptp_verify,
.enable = lan966x_ptp_enable,
.n_per_out = LAN966X_PHC_PINS_NUM,
.n_ext_ts = LAN966X_PHC_PINS_NUM,
.n_pins = LAN966X_PHC_PINS_NUM,
};
static int lan966x_ptp_phc_init(struct lan966x *lan966x,
int index,
struct ptp_clock_info *clock_info)
{
struct lan966x_phc *phc = &lan966x->phc[index];
struct ptp_pin_desc *p;
int i;
for (i = 0; i < LAN966X_PHC_PINS_NUM; i++) {
p = &phc->pins[i];
snprintf(p->name, sizeof(p->name), "pin%d", i);
p->index = i;
p->func = PTP_PF_NONE;
}
phc->info = *clock_info;
phc->info.pin_config = &phc->pins[0];
phc->clock = ptp_clock_register(&phc->info, lan966x->dev);
if (IS_ERR(phc->clock))
return PTR_ERR(phc->clock);
phc->index = index;
phc->lan966x = lan966x;
return 0;
}
int lan966x_ptp_init(struct lan966x *lan966x)
{
u64 tod_adj = lan966x_ptp_get_nominal_value();
struct lan966x_port *port;
int err, i;
if (!lan966x->ptp)
return 0;
for (i = 0; i < LAN966X_PHC_COUNT; ++i) {
err = lan966x_ptp_phc_init(lan966x, i, &lan966x_ptp_clock_info);
if (err)
return err;
}
spin_lock_init(&lan966x->ptp_clock_lock);
spin_lock_init(&lan966x->ptp_ts_id_lock);
mutex_init(&lan966x->ptp_lock);
/* Disable master counters */
lan_wr(PTP_DOM_CFG_ENA_SET(0), lan966x, PTP_DOM_CFG);
/* Configure the nominal TOD increment per clock cycle */
lan_rmw(PTP_DOM_CFG_CLKCFG_DIS_SET(0x7),
PTP_DOM_CFG_CLKCFG_DIS,
lan966x, PTP_DOM_CFG);
for (i = 0; i < LAN966X_PHC_COUNT; ++i) {
lan_wr((u32)tod_adj & 0xFFFFFFFF, lan966x,
PTP_CLK_PER_CFG(i, 0));
lan_wr((u32)(tod_adj >> 32), lan966x,
PTP_CLK_PER_CFG(i, 1));
}
lan_rmw(PTP_DOM_CFG_CLKCFG_DIS_SET(0),
PTP_DOM_CFG_CLKCFG_DIS,
lan966x, PTP_DOM_CFG);
/* Enable master counters */
lan_wr(PTP_DOM_CFG_ENA_SET(0x7), lan966x, PTP_DOM_CFG);
for (i = 0; i < lan966x->num_phys_ports; i++) {
port = lan966x->ports[i];
if (!port)
continue;
skb_queue_head_init(&port->tx_skbs);
}
return 0;
}
void lan966x_ptp_deinit(struct lan966x *lan966x)
{
struct lan966x_port *port;
int i;
if (!lan966x->ptp)
return;
for (i = 0; i < lan966x->num_phys_ports; i++) {
port = lan966x->ports[i];
if (!port)
continue;
skb_queue_purge(&port->tx_skbs);
}
for (i = 0; i < LAN966X_PHC_COUNT; ++i)
ptp_clock_unregister(lan966x->phc[i].clock);
}
void lan966x_ptp_rxtstamp(struct lan966x *lan966x, struct sk_buff *skb,
u64 src_port, u64 timestamp)
{
struct skb_shared_hwtstamps *shhwtstamps;
struct lan966x_phc *phc;
struct timespec64 ts;
u64 full_ts_in_ns;
if (!lan966x->ptp ||
!lan966x->ports[src_port]->ptp_rx_cmd)
return;
phc = &lan966x->phc[LAN966X_PHC_PORT];
lan966x_ptp_gettime64(&phc->info, &ts);
/* Drop the sub-ns precision */
timestamp = timestamp >> 2;
if (ts.tv_nsec < timestamp)
ts.tv_sec--;
ts.tv_nsec = timestamp;
full_ts_in_ns = ktime_set(ts.tv_sec, ts.tv_nsec);
shhwtstamps = skb_hwtstamps(skb);
shhwtstamps->hwtstamp = full_ts_in_ns;
}
u32 lan966x_ptp_get_period_ps(void)
{
/* This represents the system clock period in picoseconds */
return 15125;
}
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 2021 Red Hat GmbH
*
* Author: Florian Westphal <[email protected]>
*/
#include <linux/bpf.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/netlink.h>
#include <linux/slab.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_hook.h>
#include <net/netfilter/nf_tables.h>
#include <net/sock.h>
static const struct nla_policy nfnl_hook_nla_policy[NFNLA_HOOK_MAX + 1] = {
[NFNLA_HOOK_HOOKNUM] = { .type = NLA_U32 },
[NFNLA_HOOK_PRIORITY] = { .type = NLA_U32 },
[NFNLA_HOOK_DEV] = { .type = NLA_STRING,
.len = IFNAMSIZ - 1 },
[NFNLA_HOOK_FUNCTION_NAME] = { .type = NLA_NUL_STRING,
.len = KSYM_NAME_LEN, },
[NFNLA_HOOK_MODULE_NAME] = { .type = NLA_NUL_STRING,
.len = MODULE_NAME_LEN, },
[NFNLA_HOOK_CHAIN_INFO] = { .type = NLA_NESTED, },
};
static int nf_netlink_dump_start_rcu(struct sock *nlsk, struct sk_buff *skb,
const struct nlmsghdr *nlh,
struct netlink_dump_control *c)
{
int err;
if (!try_module_get(THIS_MODULE))
return -EINVAL;
rcu_read_unlock();
err = netlink_dump_start(nlsk, skb, nlh, c);
rcu_read_lock();
module_put(THIS_MODULE);
return err;
}
struct nfnl_dump_hook_data {
char devname[IFNAMSIZ];
unsigned long headv;
u8 hook;
};
static struct nlattr *nfnl_start_info_type(struct sk_buff *nlskb, enum nfnl_hook_chaintype t)
{
struct nlattr *nest = nla_nest_start(nlskb, NFNLA_HOOK_CHAIN_INFO);
int ret;
if (!nest)
return NULL;
ret = nla_put_be32(nlskb, NFNLA_HOOK_INFO_TYPE, htonl(t));
if (ret == 0)
return nest;
nla_nest_cancel(nlskb, nest);
return NULL;
}
static int nfnl_hook_put_bpf_prog_info(struct sk_buff *nlskb,
const struct nfnl_dump_hook_data *ctx,
unsigned int seq,
const struct bpf_prog *prog)
{
struct nlattr *nest, *nest2;
int ret;
if (!IS_ENABLED(CONFIG_NETFILTER_BPF_LINK))
return 0;
if (WARN_ON_ONCE(!prog))
return 0;
nest = nfnl_start_info_type(nlskb, NFNL_HOOK_TYPE_BPF);
if (!nest)
return -EMSGSIZE;
nest2 = nla_nest_start(nlskb, NFNLA_HOOK_INFO_DESC);
if (!nest2)
goto cancel_nest;
ret = nla_put_be32(nlskb, NFNLA_HOOK_BPF_ID, htonl(prog->aux->id));
if (ret)
goto cancel_nest;
nla_nest_end(nlskb, nest2);
nla_nest_end(nlskb, nest);
return 0;
cancel_nest:
nla_nest_cancel(nlskb, nest);
return -EMSGSIZE;
}
static int nfnl_hook_put_nft_chain_info(struct sk_buff *nlskb,
const struct nfnl_dump_hook_data *ctx,
unsigned int seq,
struct nft_chain *chain)
{
struct net *net = sock_net(nlskb->sk);
struct nlattr *nest, *nest2;
int ret = 0;
if (WARN_ON_ONCE(!chain))
return 0;
if (!nft_is_active(net, chain))
return 0;
nest = nfnl_start_info_type(nlskb, NFNL_HOOK_TYPE_NFTABLES);
if (!nest)
return -EMSGSIZE;
nest2 = nla_nest_start(nlskb, NFNLA_HOOK_INFO_DESC);
if (!nest2)
goto cancel_nest;
ret = nla_put_string(nlskb, NFNLA_CHAIN_TABLE, chain->table->name);
if (ret)
goto cancel_nest;
ret = nla_put_string(nlskb, NFNLA_CHAIN_NAME, chain->name);
if (ret)
goto cancel_nest;
ret = nla_put_u8(nlskb, NFNLA_CHAIN_FAMILY, chain->table->family);
if (ret)
goto cancel_nest;
nla_nest_end(nlskb, nest2);
nla_nest_end(nlskb, nest);
return ret;
cancel_nest:
nla_nest_cancel(nlskb, nest);
return -EMSGSIZE;
}
static int nfnl_hook_dump_one(struct sk_buff *nlskb,
const struct nfnl_dump_hook_data *ctx,
const struct nf_hook_ops *ops,
int family, unsigned int seq)
{
u16 event = nfnl_msg_type(NFNL_SUBSYS_HOOK, NFNL_MSG_HOOK_GET);
unsigned int portid = NETLINK_CB(nlskb).portid;
struct nlmsghdr *nlh;
int ret = -EMSGSIZE;
u32 hooknum;
#ifdef CONFIG_KALLSYMS
char sym[KSYM_SYMBOL_LEN];
char *module_name;
#endif
nlh = nfnl_msg_put(nlskb, portid, seq, event,
NLM_F_MULTI, family, NFNETLINK_V0, 0);
if (!nlh)
goto nla_put_failure;
#ifdef CONFIG_KALLSYMS
ret = snprintf(sym, sizeof(sym), "%ps", ops->hook);
if (ret >= sizeof(sym)) {
ret = -EINVAL;
goto nla_put_failure;
}
module_name = strstr(sym, " [");
if (module_name) {
char *end;
*module_name = '\0';
module_name += 2;
end = strchr(module_name, ']');
if (end) {
*end = 0;
ret = nla_put_string(nlskb, NFNLA_HOOK_MODULE_NAME, module_name);
if (ret)
goto nla_put_failure;
}
}
ret = nla_put_string(nlskb, NFNLA_HOOK_FUNCTION_NAME, sym);
if (ret)
goto nla_put_failure;
#endif
if (ops->pf == NFPROTO_INET && ops->hooknum == NF_INET_INGRESS)
hooknum = NF_NETDEV_INGRESS;
else
hooknum = ops->hooknum;
ret = nla_put_be32(nlskb, NFNLA_HOOK_HOOKNUM, htonl(hooknum));
if (ret)
goto nla_put_failure;
ret = nla_put_be32(nlskb, NFNLA_HOOK_PRIORITY, htonl(ops->priority));
if (ret)
goto nla_put_failure;
switch (ops->hook_ops_type) {
case NF_HOOK_OP_NF_TABLES:
ret = nfnl_hook_put_nft_chain_info(nlskb, ctx, seq, ops->priv);
break;
case NF_HOOK_OP_BPF:
ret = nfnl_hook_put_bpf_prog_info(nlskb, ctx, seq, ops->priv);
break;
case NF_HOOK_OP_UNDEFINED:
break;
default:
WARN_ON_ONCE(1);
break;
}
if (ret)
goto nla_put_failure;
nlmsg_end(nlskb, nlh);
return 0;
nla_put_failure:
nlmsg_trim(nlskb, nlh);
return ret;
}
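/* Illustrative sketch (editor's addition, compiled out): "%ps" formats a
 * hook address as "function [module]"; the split below mirrors the parsing
 * in nfnl_hook_dump_one() above. Plain userspace C; names are hypothetical.
 */
#ifdef NFNL_HOOK_DOC_EXAMPLE
#include <string.h>
static void example_split_sym(char *sym, char **func, char **module)
{
char *p = strstr(sym, " [");
*func = sym;
*module = NULL;
if (!p)
return;
*p = '\0'; /* terminate the function name */
*module = p + 2; /* skip " [" */
p = strchr(*module, ']');
if (p)
*p = '\0'; /* drop the trailing ']' */
}
#endif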
static const struct nf_hook_entries *
nfnl_hook_entries_head(u8 pf, unsigned int hook, struct net *net, const char *dev)
{
const struct nf_hook_entries *hook_head = NULL;
#if defined(CONFIG_NETFILTER_INGRESS) || defined(CONFIG_NETFILTER_EGRESS)
struct net_device *netdev;
#endif
switch (pf) {
case NFPROTO_IPV4:
if (hook >= ARRAY_SIZE(net->nf.hooks_ipv4))
return ERR_PTR(-EINVAL);
hook_head = rcu_dereference(net->nf.hooks_ipv4[hook]);
break;
case NFPROTO_IPV6:
if (hook >= ARRAY_SIZE(net->nf.hooks_ipv6))
return ERR_PTR(-EINVAL);
hook_head = rcu_dereference(net->nf.hooks_ipv6[hook]);
break;
case NFPROTO_ARP:
#ifdef CONFIG_NETFILTER_FAMILY_ARP
if (hook >= ARRAY_SIZE(net->nf.hooks_arp))
return ERR_PTR(-EINVAL);
hook_head = rcu_dereference(net->nf.hooks_arp[hook]);
#endif
break;
case NFPROTO_BRIDGE:
#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
if (hook >= ARRAY_SIZE(net->nf.hooks_bridge))
return ERR_PTR(-EINVAL);
hook_head = rcu_dereference(net->nf.hooks_bridge[hook]);
#endif
break;
#if defined(CONFIG_NETFILTER_INGRESS) || defined(CONFIG_NETFILTER_EGRESS)
case NFPROTO_NETDEV:
if (hook >= NF_NETDEV_NUMHOOKS)
return ERR_PTR(-EOPNOTSUPP);
if (!dev)
return ERR_PTR(-ENODEV);
netdev = dev_get_by_name_rcu(net, dev);
if (!netdev)
return ERR_PTR(-ENODEV);
#ifdef CONFIG_NETFILTER_INGRESS
if (hook == NF_NETDEV_INGRESS)
return rcu_dereference(netdev->nf_hooks_ingress);
#endif
#ifdef CONFIG_NETFILTER_EGRESS
if (hook == NF_NETDEV_EGRESS)
return rcu_dereference(netdev->nf_hooks_egress);
#endif
fallthrough;
#endif
default:
return ERR_PTR(-EPROTONOSUPPORT);
}
return hook_head;
}
static int nfnl_hook_dump(struct sk_buff *nlskb,
struct netlink_callback *cb)
{
struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
struct nfnl_dump_hook_data *ctx = cb->data;
int err, family = nfmsg->nfgen_family;
struct net *net = sock_net(nlskb->sk);
struct nf_hook_ops * const *ops;
const struct nf_hook_entries *e;
unsigned int i = cb->args[0];
rcu_read_lock();
e = nfnl_hook_entries_head(family, ctx->hook, net, ctx->devname);
if (!e)
goto done;
if (IS_ERR(e)) {
cb->seq++;
goto done;
}
if ((unsigned long)e != ctx->headv || i >= e->num_hook_entries)
cb->seq++;
ops = nf_hook_entries_get_hook_ops(e);
for (; i < e->num_hook_entries; i++) {
err = nfnl_hook_dump_one(nlskb, ctx, ops[i], family,
cb->nlh->nlmsg_seq);
if (err)
break;
}
done:
nl_dump_check_consistent(cb, nlmsg_hdr(nlskb));
rcu_read_unlock();
cb->args[0] = i;
return nlskb->len;
}
static int nfnl_hook_dump_start(struct netlink_callback *cb)
{
const struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
const struct nlattr * const *nla = cb->data;
struct nfnl_dump_hook_data *ctx = NULL;
struct net *net = sock_net(cb->skb->sk);
u8 family = nfmsg->nfgen_family;
char name[IFNAMSIZ] = "";
const void *head;
u32 hooknum;
hooknum = ntohl(nla_get_be32(nla[NFNLA_HOOK_HOOKNUM]));
if (hooknum > 255)
return -EINVAL;
if (family == NFPROTO_NETDEV) {
if (!nla[NFNLA_HOOK_DEV])
return -EINVAL;
nla_strscpy(name, nla[NFNLA_HOOK_DEV], sizeof(name));
}
rcu_read_lock();
/* Not dereferenced; for consistency check only */
head = nfnl_hook_entries_head(family, hooknum, net, name);
rcu_read_unlock();
if (head && IS_ERR(head))
return PTR_ERR(head);
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
strscpy(ctx->devname, name, sizeof(ctx->devname));
ctx->headv = (unsigned long)head;
ctx->hook = hooknum;
cb->seq = 1;
cb->data = ctx;
return 0;
}
static int nfnl_hook_dump_stop(struct netlink_callback *cb)
{
kfree(cb->data);
return 0;
}
static int nfnl_hook_get(struct sk_buff *skb,
const struct nfnl_info *info,
const struct nlattr * const nla[])
{
if (!nla[NFNLA_HOOK_HOOKNUM])
return -EINVAL;
if (info->nlh->nlmsg_flags & NLM_F_DUMP) {
struct netlink_dump_control c = {
.start = nfnl_hook_dump_start,
.done = nfnl_hook_dump_stop,
.dump = nfnl_hook_dump,
.module = THIS_MODULE,
.data = (void *)nla,
};
return nf_netlink_dump_start_rcu(info->sk, skb, info->nlh, &c);
}
return -EOPNOTSUPP;
}
static const struct nfnl_callback nfnl_hook_cb[NFNL_MSG_HOOK_MAX] = {
[NFNL_MSG_HOOK_GET] = {
.call = nfnl_hook_get,
.type = NFNL_CB_RCU,
.attr_count = NFNLA_HOOK_MAX,
.policy = nfnl_hook_nla_policy
},
};
static const struct nfnetlink_subsystem nfhook_subsys = {
.name = "nfhook",
.subsys_id = NFNL_SUBSYS_HOOK,
.cb_count = NFNL_MSG_HOOK_MAX,
.cb = nfnl_hook_cb,
};
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_HOOK);
static int __init nfnetlink_hook_init(void)
{
return nfnetlink_subsys_register(&nfhook_subsys);
}
static void __exit nfnetlink_hook_exit(void)
{
nfnetlink_subsys_unregister(&nfhook_subsys);
}
module_init(nfnetlink_hook_init);
module_exit(nfnetlink_hook_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Florian Westphal <[email protected]>");
MODULE_DESCRIPTION("nfnetlink_hook: list registered netfilter hooks");
|
/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "reg_helper.h"
#include "resource.h"
#include "mcif_wb.h"
#include "dcn30_mmhubbub.h"
#define REG(reg)\
mcif_wb30->mcif_wb_regs->reg
#define CTX \
mcif_wb30->base.ctx
#undef FN
#define FN(reg_name, field_name) \
mcif_wb30->mcif_wb_shift->field_name, mcif_wb30->mcif_wb_mask->field_name
#define MCIF_ADDR(addr) ((((unsigned long long)(addr) & 0xffffffffff) + 0xFE) >> 8)
#define MCIF_ADDR_HIGH(addr) ((unsigned long long)(addr) >> 40)
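/* Illustrative sketch (editor's addition, compiled out): how the two macros
 * above split a wide frame-buffer address. The low 40 bits are biased by
 * 0xFE and shifted down to a 256-byte-granular index; the bits above 40 go
 * into the matching _HIGH register. Names are hypothetical.
 */
#ifdef DCN30_MMHUBBUB_DOC_EXAMPLE
static void example_mcif_addr_split(unsigned long long addr,
unsigned int *lo, unsigned int *hi)
{
*lo = (unsigned int)(((addr & 0xffffffffffULL) + 0xFE) >> 8);
*hi = (unsigned int)(addr >> 40);
}
#endif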
/* wbif programming guide:
* 1. set up the wbif parameters:
* unsigned long long luma_address[4]; //4 frame buffer
* unsigned long long chroma_address[4];
* unsigned int luma_pitch;
* unsigned int chroma_pitch;
* unsigned int warmup_pitch=0x10; //256B align, the page size is 4KB when it is 0x10
* unsigned int slice_lines; //slice size
* unsigned int time_per_pixel; // time per pixel, in ns
* unsigned int arbitration_slice; // 0: 2048 bytes 1: 4096 bytes 2: 8192 Bytes
* unsigned int max_scaled_time; // used for QOS generation
* unsigned int swlock=0x0;
* unsigned int cli_watermark[4]; //4 group urgent watermark
* unsigned int pstate_watermark[4]; //4 group pstate watermark
* unsigned int sw_int_en; // Software interrupt enable, frame end and overflow
* unsigned int sw_slice_int_en; // slice end interrupt enable
* unsigned int sw_overrun_int_en; // overrun error interrupt enable
* unsigned int vce_int_en; // VCE interrupt enable, frame end and overflow
* unsigned int vce_slice_int_en; // VCE slice end interrupt enable, frame end and overflow
*
* 2. configure wbif register
* a. call mmhubbub_config_wbif()
*
* 3. Enable wbif
* call set_wbif_bufmgr_enable();
*
* 4. wbif_dump_status(), optional, for debug purposes:
*    the bufmgr status shows the progress of the writeback and can be
*    used for debugging
*/
static void mmhubbub3_warmup_mcif(struct mcif_wb *mcif_wb,
struct mcif_warmup_params *params)
{
struct dcn30_mmhubbub *mcif_wb30 = TO_DCN30_MMHUBBUB(mcif_wb);
union large_integer start_address_shift = {.quad_part = params->start_address.quad_part >> 5};
/* Set base address and region size for warmup */
REG_SET(MMHUBBUB_WARMUP_BASE_ADDR_HIGH, 0, MMHUBBUB_WARMUP_BASE_ADDR_HIGH, start_address_shift.high_part);
REG_SET(MMHUBBUB_WARMUP_BASE_ADDR_LOW, 0, MMHUBBUB_WARMUP_BASE_ADDR_LOW, start_address_shift.low_part);
REG_SET(MMHUBBUB_WARMUP_ADDR_REGION, 0, MMHUBBUB_WARMUP_ADDR_REGION, params->region_size >> 5);
// REG_SET(MMHUBBUB_WARMUP_P_VMID, 0, MMHUBBUB_WARMUP_P_VMID, params->p_vmid);
/* Set address increment and enable warmup */
REG_SET_3(MMHUBBUB_WARMUP_CONTROL_STATUS, 0, MMHUBBUB_WARMUP_EN, true,
MMHUBBUB_WARMUP_SW_INT_EN, true,
MMHUBBUB_WARMUP_INC_ADDR, params->address_increment >> 5);
/* Wait for an interrupt to signal warmup is completed */
REG_WAIT(MMHUBBUB_WARMUP_CONTROL_STATUS, MMHUBBUB_WARMUP_SW_INT_STATUS, 1, 20, 100);
/* Acknowledge interrupt */
REG_UPDATE(MMHUBBUB_WARMUP_CONTROL_STATUS, MMHUBBUB_WARMUP_SW_INT_ACK, 1);
/* Disable warmup */
REG_UPDATE(MMHUBBUB_WARMUP_CONTROL_STATUS, MMHUBBUB_WARMUP_EN, false);
}
static void mmhubbub3_config_mcif_buf(struct mcif_wb *mcif_wb,
struct mcif_buf_params *params,
unsigned int dest_height)
{
struct dcn30_mmhubbub *mcif_wb30 = TO_DCN30_MMHUBBUB(mcif_wb);
/* buffer address for packing mode or Luma in planar mode */
REG_UPDATE(MCIF_WB_BUF_1_ADDR_Y, MCIF_WB_BUF_1_ADDR_Y, MCIF_ADDR(params->luma_address[0]));
REG_UPDATE(MCIF_WB_BUF_1_ADDR_Y_HIGH, MCIF_WB_BUF_1_ADDR_Y_HIGH, MCIF_ADDR_HIGH(params->luma_address[0]));
/* buffer address for Chroma in planar mode (unused in packing mode) */
REG_UPDATE(MCIF_WB_BUF_1_ADDR_C, MCIF_WB_BUF_1_ADDR_C, MCIF_ADDR(params->chroma_address[0]));
REG_UPDATE(MCIF_WB_BUF_1_ADDR_C_HIGH, MCIF_WB_BUF_1_ADDR_C_HIGH, MCIF_ADDR_HIGH(params->chroma_address[0]));
/* buffer address for packing mode or Luma in planar mode */
REG_UPDATE(MCIF_WB_BUF_2_ADDR_Y, MCIF_WB_BUF_2_ADDR_Y, MCIF_ADDR(params->luma_address[1]));
REG_UPDATE(MCIF_WB_BUF_2_ADDR_Y_HIGH, MCIF_WB_BUF_2_ADDR_Y_HIGH, MCIF_ADDR_HIGH(params->luma_address[1]));
/* buffer address for Chroma in planar mode (unused in packing mode) */
REG_UPDATE(MCIF_WB_BUF_2_ADDR_C, MCIF_WB_BUF_2_ADDR_C, MCIF_ADDR(params->chroma_address[1]));
REG_UPDATE(MCIF_WB_BUF_2_ADDR_C_HIGH, MCIF_WB_BUF_2_ADDR_C_HIGH, MCIF_ADDR_HIGH(params->chroma_address[1]));
/* buffer address for packing mode or Luma in planar mode */
REG_UPDATE(MCIF_WB_BUF_3_ADDR_Y, MCIF_WB_BUF_3_ADDR_Y, MCIF_ADDR(params->luma_address[2]));
REG_UPDATE(MCIF_WB_BUF_3_ADDR_Y_HIGH, MCIF_WB_BUF_3_ADDR_Y_HIGH, MCIF_ADDR_HIGH(params->luma_address[2]));
/* buffer address for Chroma in planar mode (unused in packing mode) */
REG_UPDATE(MCIF_WB_BUF_3_ADDR_C, MCIF_WB_BUF_3_ADDR_C, MCIF_ADDR(params->chroma_address[2]));
REG_UPDATE(MCIF_WB_BUF_3_ADDR_C_HIGH, MCIF_WB_BUF_3_ADDR_C_HIGH, MCIF_ADDR_HIGH(params->chroma_address[2]));
/* buffer address for packing mode or Luma in planar mode */
REG_UPDATE(MCIF_WB_BUF_4_ADDR_Y, MCIF_WB_BUF_4_ADDR_Y, MCIF_ADDR(params->luma_address[3]));
REG_UPDATE(MCIF_WB_BUF_4_ADDR_Y_HIGH, MCIF_WB_BUF_4_ADDR_Y_HIGH, MCIF_ADDR_HIGH(params->luma_address[3]));
/* buffer address for Chroma in planar mode (unused in packing mode) */
REG_UPDATE(MCIF_WB_BUF_4_ADDR_C, MCIF_WB_BUF_4_ADDR_C, MCIF_ADDR(params->chroma_address[3]));
REG_UPDATE(MCIF_WB_BUF_4_ADDR_C_HIGH, MCIF_WB_BUF_4_ADDR_C_HIGH, MCIF_ADDR_HIGH(params->chroma_address[3]));
/* set up luma & chroma size;
 * it should be large enough to contain a whole frame of luma data,
 * the programmed value is the frame buffer size in bits [27:8],
 * 256-byte aligned
 */
REG_UPDATE(MCIF_WB_BUF_LUMA_SIZE, MCIF_WB_BUF_LUMA_SIZE, (params->luma_pitch>>8) * dest_height);
REG_UPDATE(MCIF_WB_BUF_CHROMA_SIZE, MCIF_WB_BUF_CHROMA_SIZE, (params->chroma_pitch>>8) * dest_height);
/* enable address fence */
REG_UPDATE(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUF_ADDR_FENCE_EN, 1);
/* set up pitch; the programmed value is bits [15:8], 256B aligned */
REG_UPDATE_2(MCIF_WB_BUF_PITCH, MCIF_WB_BUF_LUMA_PITCH, params->luma_pitch >> 8,
MCIF_WB_BUF_CHROMA_PITCH, params->chroma_pitch >> 8);
}
static void mmhubbub3_config_mcif_arb(struct mcif_wb *mcif_wb,
struct mcif_arb_params *params)
{
struct dcn30_mmhubbub *mcif_wb30 = TO_DCN30_MMHUBBUB(mcif_wb);
/* Programmed by the video driver based on the CRTC timing (for DWB) */
REG_UPDATE(MCIF_WB_ARBITRATION_CONTROL, MCIF_WB_TIME_PER_PIXEL, params->time_per_pixel);
/* Programming dwb watermark */
/* Watermark to generate urgent in MCIF_WB_CLI, value is determined by MCIF_WB_CLI_WATERMARK_MASK. */
/* Program in ns. A formula will be provided in the pseudo code to calculate the value. */
REG_UPDATE(MCIF_WB_WATERMARK, MCIF_WB_CLI_WATERMARK_MASK, 0x0);
/* urgent_watermarkA */
REG_UPDATE(MCIF_WB_WATERMARK, MCIF_WB_CLI_WATERMARK, params->cli_watermark[0]);
REG_UPDATE(MCIF_WB_WATERMARK, MCIF_WB_CLI_WATERMARK_MASK, 0x1);
/* urgent_watermarkB */
REG_UPDATE(MCIF_WB_WATERMARK, MCIF_WB_CLI_WATERMARK, params->cli_watermark[1]);
REG_UPDATE(MCIF_WB_WATERMARK, MCIF_WB_CLI_WATERMARK_MASK, 0x2);
/* urgent_watermarkC */
REG_UPDATE(MCIF_WB_WATERMARK, MCIF_WB_CLI_WATERMARK, params->cli_watermark[2]);
REG_UPDATE(MCIF_WB_WATERMARK, MCIF_WB_CLI_WATERMARK_MASK, 0x3);
/* urgent_watermarkD */
REG_UPDATE(MCIF_WB_WATERMARK, MCIF_WB_CLI_WATERMARK, params->cli_watermark[3]);
/* Programming nb pstate watermark */
/* nbp_state_change_watermarkA */
REG_UPDATE(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, NB_PSTATE_CHANGE_WATERMARK_MASK, 0x0);
REG_UPDATE(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK,
NB_PSTATE_CHANGE_REFRESH_WATERMARK, params->pstate_watermark[0]);
/* nbp_state_change_watermarkB */
REG_UPDATE(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, NB_PSTATE_CHANGE_WATERMARK_MASK, 0x1);
REG_UPDATE(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK,
NB_PSTATE_CHANGE_REFRESH_WATERMARK, params->pstate_watermark[1]);
/* nbp_state_change_watermarkC */
REG_UPDATE(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, NB_PSTATE_CHANGE_WATERMARK_MASK, 0x2);
REG_UPDATE(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK,
NB_PSTATE_CHANGE_REFRESH_WATERMARK, params->pstate_watermark[2]);
/* nbp_state_change_watermarkD */
REG_UPDATE(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, NB_PSTATE_CHANGE_WATERMARK_MASK, 0x3);
REG_UPDATE(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK,
NB_PSTATE_CHANGE_REFRESH_WATERMARK, params->pstate_watermark[3]);
/* dram_speed_change_duration */
REG_UPDATE(MCIF_WB_DRAM_SPEED_CHANGE_DURATION_VBI,
MCIF_WB_DRAM_SPEED_CHANGE_DURATION_VBI, params->dram_speed_change_duration);
/* max_scaled_time */
REG_UPDATE(MULTI_LEVEL_QOS_CTRL, MAX_SCALED_TIME_TO_URGENT, params->max_scaled_time);
/* slice_lines */
REG_UPDATE(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_SLICE_SIZE, params->slice_lines-1);
/* Set arbitration unit for Luma/Chroma */
/* arb_unit=2 should be chosen for more efficiency */
/* Arbitration size, 0: 2048 bytes 1: 4096 bytes 2: 8192 Bytes */
REG_UPDATE(MCIF_WB_ARBITRATION_CONTROL, MCIF_WB_CLIENT_ARBITRATION_SLICE, params->arbitration_slice);
}
static const struct mcif_wb_funcs dcn30_mmhubbub_funcs = {
.warmup_mcif = mmhubbub3_warmup_mcif,
.enable_mcif = mmhubbub2_enable_mcif,
.disable_mcif = mmhubbub2_disable_mcif,
.config_mcif_buf = mmhubbub3_config_mcif_buf,
.config_mcif_arb = mmhubbub3_config_mcif_arb,
.config_mcif_irq = mmhubbub2_config_mcif_irq,
.dump_frame = mcifwb2_dump_frame,
};
void dcn30_mmhubbub_construct(struct dcn30_mmhubbub *mcif_wb30,
struct dc_context *ctx,
const struct dcn30_mmhubbub_registers *mcif_wb_regs,
const struct dcn30_mmhubbub_shift *mcif_wb_shift,
const struct dcn30_mmhubbub_mask *mcif_wb_mask,
int inst)
{
mcif_wb30->base.ctx = ctx;
mcif_wb30->base.inst = inst;
mcif_wb30->base.funcs = &dcn30_mmhubbub_funcs;
mcif_wb30->mcif_wb_regs = mcif_wb_regs;
mcif_wb30->mcif_wb_shift = mcif_wb_shift;
mcif_wb30->mcif_wb_mask = mcif_wb_mask;
}
|
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2019-2021 Linaro Ltd.
*/
#ifndef _IPA_RESOURCE_H_
#define _IPA_RESOURCE_H_
struct ipa;
struct ipa_resource_data;
/**
* ipa_resource_config() - Configure resources
* @ipa: IPA pointer
* @data: IPA resource configuration data
*
* There is no need for a matching ipa_resource_deconfig() function.
*
* Return: 0 if successful, or a negative error code otherwise
*/
int ipa_resource_config(struct ipa *ipa, const struct ipa_resource_data *data);
#endif /* _IPA_RESOURCE_H_ */
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2013 Boris BREZILLON <[email protected]>
*/
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/clk.h>
#include <linux/clk/at91_pmc.h>
#include <linux/of.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include "pmc.h"
#define MASTER_PRES_MASK 0x7
#define MASTER_PRES_MAX MASTER_PRES_MASK
#define MASTER_DIV_SHIFT 8
#define MASTER_DIV_MASK 0x7
#define PMC_MCR_CSS_SHIFT (16)
#define MASTER_MAX_ID 4
#define to_clk_master(hw) container_of(hw, struct clk_master, hw)
struct clk_master {
struct clk_hw hw;
struct regmap *regmap;
spinlock_t *lock;
const struct clk_master_layout *layout;
const struct clk_master_characteristics *characteristics;
struct at91_clk_pms pms;
u32 *mux_table;
u32 mckr;
int chg_pid;
u8 id;
u8 parent;
u8 div;
u32 safe_div;
};
/* MCK div reference to be used by notifier. */
static struct clk_master *master_div;
static inline bool clk_master_ready(struct clk_master *master)
{
unsigned int bit = master->id ? AT91_PMC_MCKXRDY : AT91_PMC_MCKRDY;
unsigned int status;
regmap_read(master->regmap, AT91_PMC_SR, &status);
return !!(status & bit);
}
static int clk_master_prepare(struct clk_hw *hw)
{
struct clk_master *master = to_clk_master(hw);
unsigned long flags;
spin_lock_irqsave(master->lock, flags);
while (!clk_master_ready(master))
cpu_relax();
spin_unlock_irqrestore(master->lock, flags);
return 0;
}
static int clk_master_is_prepared(struct clk_hw *hw)
{
struct clk_master *master = to_clk_master(hw);
unsigned long flags;
bool status;
spin_lock_irqsave(master->lock, flags);
status = clk_master_ready(master);
spin_unlock_irqrestore(master->lock, flags);
return status;
}
static unsigned long clk_master_div_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
u8 div;
unsigned long flags, rate = parent_rate;
struct clk_master *master = to_clk_master(hw);
const struct clk_master_layout *layout = master->layout;
const struct clk_master_characteristics *characteristics =
master->characteristics;
unsigned int mckr;
spin_lock_irqsave(master->lock, flags);
regmap_read(master->regmap, master->layout->offset, &mckr);
spin_unlock_irqrestore(master->lock, flags);
mckr &= layout->mask;
div = (mckr >> MASTER_DIV_SHIFT) & MASTER_DIV_MASK;
rate /= characteristics->divisors[div];
if (rate < characteristics->output.min)
pr_warn("master clk div is underclocked");
else if (rate > characteristics->output.max)
pr_warn("master clk div is overclocked");
return rate;
}
static int clk_master_div_save_context(struct clk_hw *hw)
{
struct clk_master *master = to_clk_master(hw);
struct clk_hw *parent_hw = clk_hw_get_parent(hw);
unsigned long flags;
unsigned int mckr, div;
spin_lock_irqsave(master->lock, flags);
regmap_read(master->regmap, master->layout->offset, &mckr);
spin_unlock_irqrestore(master->lock, flags);
mckr &= master->layout->mask;
div = (mckr >> MASTER_DIV_SHIFT) & MASTER_DIV_MASK;
div = master->characteristics->divisors[div];
master->pms.parent_rate = clk_hw_get_rate(parent_hw);
master->pms.rate = DIV_ROUND_CLOSEST(master->pms.parent_rate, div);
return 0;
}
static void clk_master_div_restore_context(struct clk_hw *hw)
{
struct clk_master *master = to_clk_master(hw);
unsigned long flags;
unsigned int mckr;
u8 div;
spin_lock_irqsave(master->lock, flags);
regmap_read(master->regmap, master->layout->offset, &mckr);
spin_unlock_irqrestore(master->lock, flags);
mckr &= master->layout->mask;
div = (mckr >> MASTER_DIV_SHIFT) & MASTER_DIV_MASK;
div = master->characteristics->divisors[div];
if (div != DIV_ROUND_CLOSEST(master->pms.parent_rate, master->pms.rate))
pr_warn("MCKR DIV not configured properly by firmware!\n");
}
static const struct clk_ops master_div_ops = {
.prepare = clk_master_prepare,
.is_prepared = clk_master_is_prepared,
.recalc_rate = clk_master_div_recalc_rate,
.save_context = clk_master_div_save_context,
.restore_context = clk_master_div_restore_context,
};
/* This function must be called with lock acquired. */
static int clk_master_div_set(struct clk_master *master,
unsigned long parent_rate, int div)
{
const struct clk_master_characteristics *characteristics =
master->characteristics;
unsigned long rate = parent_rate;
unsigned int max_div = 0, div_index = 0, max_div_index = 0;
unsigned int i, mckr, tmp;
int ret;
for (i = 0; i < ARRAY_SIZE(characteristics->divisors); i++) {
if (!characteristics->divisors[i])
break;
if (div == characteristics->divisors[i])
div_index = i;
if (max_div < characteristics->divisors[i]) {
max_div = characteristics->divisors[i];
max_div_index = i;
}
}
if (div > max_div)
div_index = max_div_index;
ret = regmap_read(master->regmap, master->layout->offset, &mckr);
if (ret)
return ret;
mckr &= master->layout->mask;
tmp = (mckr >> MASTER_DIV_SHIFT) & MASTER_DIV_MASK;
if (tmp == div_index)
return 0;
rate /= characteristics->divisors[div_index];
if (rate < characteristics->output.min)
pr_warn("master clk div is underclocked");
else if (rate > characteristics->output.max)
pr_warn("master clk div is overclocked");
mckr &= ~(MASTER_DIV_MASK << MASTER_DIV_SHIFT);
mckr |= (div_index << MASTER_DIV_SHIFT);
ret = regmap_write(master->regmap, master->layout->offset, mckr);
if (ret)
return ret;
while (!clk_master_ready(master))
cpu_relax();
master->div = characteristics->divisors[div_index];
return 0;
}
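/* Illustrative sketch (editor's addition, compiled out): the divisor lookup
 * performed by clk_master_div_set() above: pick the table index matching
 * the requested divider, falling back to the largest supported divider when
 * the request exceeds it. Names are hypothetical.
 */
#ifdef CLK_MASTER_DOC_EXAMPLE
static unsigned int example_pick_div_index(const unsigned int *divisors,
unsigned int n, unsigned int div)
{
unsigned int i, idx = 0, max_div = 0, max_idx = 0;
for (i = 0; i < n && divisors[i]; i++) {
if (div == divisors[i])
idx = i;
if (divisors[i] > max_div) {
max_div = divisors[i];
max_idx = i;
}
}
return (div > max_div) ? max_idx : idx;
}
#endif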
static unsigned long clk_master_div_recalc_rate_chg(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_master *master = to_clk_master(hw);
return DIV_ROUND_CLOSEST_ULL(parent_rate, master->div);
}
static void clk_master_div_restore_context_chg(struct clk_hw *hw)
{
struct clk_master *master = to_clk_master(hw);
unsigned long flags;
int ret;
spin_lock_irqsave(master->lock, flags);
ret = clk_master_div_set(master, master->pms.parent_rate,
DIV_ROUND_CLOSEST(master->pms.parent_rate,
master->pms.rate));
spin_unlock_irqrestore(master->lock, flags);
if (ret)
pr_warn("Failed to restore MCK DIV clock\n");
}
static const struct clk_ops master_div_ops_chg = {
.prepare = clk_master_prepare,
.is_prepared = clk_master_is_prepared,
.recalc_rate = clk_master_div_recalc_rate_chg,
.save_context = clk_master_div_save_context,
.restore_context = clk_master_div_restore_context_chg,
};
static int clk_master_div_notifier_fn(struct notifier_block *notifier,
unsigned long code, void *data)
{
const struct clk_master_characteristics *characteristics =
master_div->characteristics;
struct clk_notifier_data *cnd = data;
unsigned long flags, new_parent_rate, new_rate;
unsigned int mckr, div, new_div = 0;
int ret, i;
long tmp_diff;
long best_diff = -1;
spin_lock_irqsave(master_div->lock, flags);
switch (code) {
case PRE_RATE_CHANGE:
/*
* We want to avoid any overclocking of the MCK DIV domain, so we
* switch to a safe divider (underclocking is not a concern, as we
* can go as low as 32KHz). The relation between this clock and its
* parents is as follows:
*
* FRAC PLL -> DIV PLL -> MCK DIV
*
* With a proper safe divider we should be fine even with the FRAC
* PLL at its maximum rate.
*/
ret = regmap_read(master_div->regmap, master_div->layout->offset,
&mckr);
if (ret) {
ret = NOTIFY_STOP_MASK;
goto unlock;
}
mckr &= master_div->layout->mask;
div = (mckr >> MASTER_DIV_SHIFT) & MASTER_DIV_MASK;
/* Switch to safe divider. */
clk_master_div_set(master_div,
cnd->old_rate * characteristics->divisors[div],
master_div->safe_div);
break;
case POST_RATE_CHANGE:
/*
* At this point we want to restore the MCK DIV domain to its
* maximum allowed rate.
*/
ret = regmap_read(master_div->regmap, master_div->layout->offset,
&mckr);
if (ret) {
ret = NOTIFY_STOP_MASK;
goto unlock;
}
mckr &= master_div->layout->mask;
div = (mckr >> MASTER_DIV_SHIFT) & MASTER_DIV_MASK;
new_parent_rate = cnd->new_rate * characteristics->divisors[div];
for (i = 0; i < ARRAY_SIZE(characteristics->divisors); i++) {
if (!characteristics->divisors[i])
break;
new_rate = DIV_ROUND_CLOSEST_ULL(new_parent_rate,
characteristics->divisors[i]);
tmp_diff = characteristics->output.max - new_rate;
if (tmp_diff < 0)
continue;
if (best_diff < 0 || best_diff > tmp_diff) {
new_div = characteristics->divisors[i];
best_diff = tmp_diff;
}
if (!tmp_diff)
break;
}
if (!new_div) {
ret = NOTIFY_STOP_MASK;
goto unlock;
}
/* Update the div to preserve MCK DIV clock rate. */
clk_master_div_set(master_div, new_parent_rate,
new_div);
ret = NOTIFY_OK;
break;
default:
ret = NOTIFY_DONE;
break;
}
unlock:
spin_unlock_irqrestore(master_div->lock, flags);
return ret;
}
static struct notifier_block clk_master_div_notifier = {
.notifier_call = clk_master_div_notifier_fn,
};
static void clk_sama7g5_master_best_diff(struct clk_rate_request *req,
struct clk_hw *parent,
unsigned long parent_rate,
long *best_rate,
long *best_diff,
u32 div)
{
unsigned long tmp_rate, tmp_diff;
if (div == MASTER_PRES_MAX)
tmp_rate = parent_rate / 3;
else
tmp_rate = parent_rate >> div;
tmp_diff = abs(req->rate - tmp_rate);
if (*best_diff < 0 || *best_diff >= tmp_diff) {
*best_rate = tmp_rate;
*best_diff = tmp_diff;
req->best_parent_rate = parent_rate;
req->best_parent_hw = parent;
}
}
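/* Illustrative sketch (editor's addition, compiled out): the SAMA7G5 master
 * prescaler divides by a power of two, except that the maximum encoding
 * (MASTER_PRES_MAX, i.e. 7) selects divide-by-3, exactly as handled in
 * clk_sama7g5_master_best_diff() above. Names are hypothetical.
 */
#ifdef CLK_MASTER_DOC_EXAMPLE
static unsigned long example_sama7g5_pres_rate(unsigned long parent_rate,
unsigned int div)
{
return (div == 7) ? parent_rate / 3 : parent_rate >> div;
}
#endif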
static unsigned long clk_master_pres_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_master *master = to_clk_master(hw);
const struct clk_master_characteristics *characteristics =
master->characteristics;
unsigned long flags;
unsigned int val, pres;
spin_lock_irqsave(master->lock, flags);
regmap_read(master->regmap, master->layout->offset, &val);
spin_unlock_irqrestore(master->lock, flags);
val &= master->layout->mask;
pres = (val >> master->layout->pres_shift) & MASTER_PRES_MASK;
if (pres == MASTER_PRES_MAX && characteristics->have_div3_pres)
pres = 3;
else
pres = (1 << pres);
return DIV_ROUND_CLOSEST_ULL(parent_rate, pres);
}
static u8 clk_master_pres_get_parent(struct clk_hw *hw)
{
struct clk_master *master = to_clk_master(hw);
unsigned long flags;
unsigned int mckr;
spin_lock_irqsave(master->lock, flags);
regmap_read(master->regmap, master->layout->offset, &mckr);
spin_unlock_irqrestore(master->lock, flags);
mckr &= master->layout->mask;
return mckr & AT91_PMC_CSS;
}
static int clk_master_pres_save_context(struct clk_hw *hw)
{
struct clk_master *master = to_clk_master(hw);
struct clk_hw *parent_hw = clk_hw_get_parent(hw);
unsigned long flags;
unsigned int val, pres;
spin_lock_irqsave(master->lock, flags);
regmap_read(master->regmap, master->layout->offset, &val);
spin_unlock_irqrestore(master->lock, flags);
val &= master->layout->mask;
pres = (val >> master->layout->pres_shift) & MASTER_PRES_MASK;
if (pres == MASTER_PRES_MAX && master->characteristics->have_div3_pres)
pres = 3;
else
pres = (1 << pres);
master->pms.parent = val & AT91_PMC_CSS;
master->pms.parent_rate = clk_hw_get_rate(parent_hw);
master->pms.rate = DIV_ROUND_CLOSEST_ULL(master->pms.parent_rate, pres);
return 0;
}
static void clk_master_pres_restore_context(struct clk_hw *hw)
{
struct clk_master *master = to_clk_master(hw);
unsigned long flags;
unsigned int val, pres;
spin_lock_irqsave(master->lock, flags);
regmap_read(master->regmap, master->layout->offset, &val);
spin_unlock_irqrestore(master->lock, flags);
val &= master->layout->mask;
pres = (val >> master->layout->pres_shift) & MASTER_PRES_MASK;
if (pres == MASTER_PRES_MAX && master->characteristics->have_div3_pres)
pres = 3;
else
pres = (1 << pres);
if (master->pms.rate !=
DIV_ROUND_CLOSEST_ULL(master->pms.parent_rate, pres) ||
(master->pms.parent != (val & AT91_PMC_CSS)))
pr_warn("MCKR PRES was not configured properly by firmware!\n");
}
static const struct clk_ops master_pres_ops = {
.prepare = clk_master_prepare,
.is_prepared = clk_master_is_prepared,
.recalc_rate = clk_master_pres_recalc_rate,
.get_parent = clk_master_pres_get_parent,
.save_context = clk_master_pres_save_context,
.restore_context = clk_master_pres_restore_context,
};
static struct clk_hw * __init
at91_clk_register_master_internal(struct regmap *regmap,
const char *name, int num_parents,
const char **parent_names,
struct clk_hw **parent_hws,
const struct clk_master_layout *layout,
const struct clk_master_characteristics *characteristics,
const struct clk_ops *ops, spinlock_t *lock, u32 flags)
{
struct clk_master *master;
struct clk_init_data init = {};
struct clk_hw *hw;
unsigned int mckr;
unsigned long irqflags;
int ret;
if (!name || !num_parents || !(parent_names || parent_hws) || !lock)
return ERR_PTR(-EINVAL);
master = kzalloc(sizeof(*master), GFP_KERNEL);
if (!master)
return ERR_PTR(-ENOMEM);
init.name = name;
init.ops = ops;
if (parent_hws)
init.parent_hws = (const struct clk_hw **)parent_hws;
else
init.parent_names = parent_names;
init.num_parents = num_parents;
init.flags = flags;
master->hw.init = &init;
master->layout = layout;
master->characteristics = characteristics;
master->regmap = regmap;
master->lock = lock;
if (ops == &master_div_ops_chg) {
spin_lock_irqsave(master->lock, irqflags);
regmap_read(master->regmap, master->layout->offset, &mckr);
spin_unlock_irqrestore(master->lock, irqflags);
mckr &= layout->mask;
mckr = (mckr >> MASTER_DIV_SHIFT) & MASTER_DIV_MASK;
master->div = characteristics->divisors[mckr];
}
hw = &master->hw;
ret = clk_hw_register(NULL, &master->hw);
if (ret) {
kfree(master);
hw = ERR_PTR(ret);
}
return hw;
}
struct clk_hw * __init
at91_clk_register_master_pres(struct regmap *regmap,
const char *name, int num_parents,
const char **parent_names,
struct clk_hw **parent_hws,
const struct clk_master_layout *layout,
const struct clk_master_characteristics *characteristics,
spinlock_t *lock)
{
return at91_clk_register_master_internal(regmap, name, num_parents,
parent_names, parent_hws, layout,
characteristics,
&master_pres_ops,
lock, CLK_SET_RATE_GATE);
}
struct clk_hw * __init
at91_clk_register_master_div(struct regmap *regmap,
const char *name, const char *parent_name,
struct clk_hw *parent_hw, const struct clk_master_layout *layout,
const struct clk_master_characteristics *characteristics,
spinlock_t *lock, u32 flags, u32 safe_div)
{
const struct clk_ops *ops;
struct clk_hw *hw;
if (flags & CLK_SET_RATE_GATE)
ops = &master_div_ops;
else
ops = &master_div_ops_chg;
hw = at91_clk_register_master_internal(regmap, name, 1,
parent_name ? &parent_name : NULL,
parent_hw ? &parent_hw : NULL, layout,
characteristics, ops,
lock, flags);
if (!IS_ERR(hw) && safe_div) {
master_div = to_clk_master(hw);
master_div->safe_div = safe_div;
clk_notifier_register(hw->clk,
&clk_master_div_notifier);
}
return hw;
}
static unsigned long
clk_sama7g5_master_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_master *master = to_clk_master(hw);
return DIV_ROUND_CLOSEST_ULL(parent_rate, (1 << master->div));
}
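/*
* Pick the best rate by scanning every parent with every supported
* divider; if a changeable parent (chg_pid) exists, also ask it for
* matching rates via __clk_determine_rate().
*/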
static int clk_sama7g5_master_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
struct clk_master *master = to_clk_master(hw);
struct clk_hw *parent;
long best_rate = LONG_MIN, best_diff = LONG_MIN;
unsigned long parent_rate;
unsigned int div, i;
/* First: check the dividers of MCR. */
for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
parent = clk_hw_get_parent_by_index(hw, i);
if (!parent)
continue;
parent_rate = clk_hw_get_rate(parent);
if (!parent_rate)
continue;
for (div = 0; div < MASTER_PRES_MAX + 1; div++) {
clk_sama7g5_master_best_diff(req, parent, parent_rate,
&best_rate, &best_diff,
div);
if (!best_diff)
break;
}
if (!best_diff)
break;
}
/* Second: try to request rate from the changeable parent. */
if (master->chg_pid < 0)
goto end;
parent = clk_hw_get_parent_by_index(hw, master->chg_pid);
if (!parent)
goto end;
for (div = 0; div < MASTER_PRES_MAX + 1; div++) {
struct clk_rate_request req_parent;
unsigned long req_rate;
if (div == MASTER_PRES_MAX)
req_rate = req->rate * 3;
else
req_rate = req->rate << div;
clk_hw_forward_rate_request(hw, req, parent, &req_parent, req_rate);
if (__clk_determine_rate(parent, &req_parent))
continue;
clk_sama7g5_master_best_diff(req, parent, req_parent.rate,
&best_rate, &best_diff, div);
if (!best_diff)
break;
}
end:
pr_debug("MCK: %s, best_rate = %ld, parent clk: %s @ %ld\n",
__func__, best_rate,
__clk_get_name((req->best_parent_hw)->clk),
req->best_parent_rate);
if (best_rate < 0)
return -EINVAL;
req->rate = best_rate;
return 0;
}
static u8 clk_sama7g5_master_get_parent(struct clk_hw *hw)
{
struct clk_master *master = to_clk_master(hw);
unsigned long flags;
u8 index;
spin_lock_irqsave(master->lock, flags);
index = clk_mux_val_to_index(&master->hw, master->mux_table, 0,
master->parent);
spin_unlock_irqrestore(master->lock, flags);
return index;
}
static int clk_sama7g5_master_set_parent(struct clk_hw *hw, u8 index)
{
struct clk_master *master = to_clk_master(hw);
unsigned long flags;
if (index >= clk_hw_get_num_parents(hw))
return -EINVAL;
spin_lock_irqsave(master->lock, flags);
master->parent = clk_mux_index_to_val(master->mux_table, 0, index);
spin_unlock_irqrestore(master->lock, flags);
return 0;
}
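/*
* Program MCR_V2 for this master: select the register bank with the
* master id, then update the enable, source and divider fields in one
* go. Poll clk_master_ready() only when the parent is being changed.
*/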
static void clk_sama7g5_master_set(struct clk_master *master,
unsigned int status)
{
unsigned long flags;
unsigned int val, cparent;
unsigned int enable = status ? AT91_PMC_MCR_V2_EN : 0;
unsigned int parent = master->parent << PMC_MCR_CSS_SHIFT;
unsigned int div = master->div << MASTER_DIV_SHIFT;
spin_lock_irqsave(master->lock, flags);
regmap_write(master->regmap, AT91_PMC_MCR_V2,
AT91_PMC_MCR_V2_ID(master->id));
regmap_read(master->regmap, AT91_PMC_MCR_V2, &val);
regmap_update_bits(master->regmap, AT91_PMC_MCR_V2,
enable | AT91_PMC_MCR_V2_CSS | AT91_PMC_MCR_V2_DIV |
AT91_PMC_MCR_V2_CMD | AT91_PMC_MCR_V2_ID_MSK,
enable | parent | div | AT91_PMC_MCR_V2_CMD |
AT91_PMC_MCR_V2_ID(master->id));
cparent = (val & AT91_PMC_MCR_V2_CSS) >> PMC_MCR_CSS_SHIFT;
/* Wait here only if parent is being changed. */
while ((cparent != master->parent) && !clk_master_ready(master))
cpu_relax();
spin_unlock_irqrestore(master->lock, flags);
}
static int clk_sama7g5_master_enable(struct clk_hw *hw)
{
struct clk_master *master = to_clk_master(hw);
clk_sama7g5_master_set(master, 1);
return 0;
}
static void clk_sama7g5_master_disable(struct clk_hw *hw)
{
struct clk_master *master = to_clk_master(hw);
unsigned long flags;
spin_lock_irqsave(master->lock, flags);
regmap_write(master->regmap, AT91_PMC_MCR_V2, master->id);
regmap_update_bits(master->regmap, AT91_PMC_MCR_V2,
AT91_PMC_MCR_V2_EN | AT91_PMC_MCR_V2_CMD |
AT91_PMC_MCR_V2_ID_MSK,
AT91_PMC_MCR_V2_CMD |
AT91_PMC_MCR_V2_ID(master->id));
spin_unlock_irqrestore(master->lock, flags);
}
static int clk_sama7g5_master_is_enabled(struct clk_hw *hw)
{
struct clk_master *master = to_clk_master(hw);
unsigned long flags;
unsigned int val;
spin_lock_irqsave(master->lock, flags);
regmap_write(master->regmap, AT91_PMC_MCR_V2, master->id);
regmap_read(master->regmap, AT91_PMC_MCR_V2, &val);
spin_unlock_irqrestore(master->lock, flags);
return !!(val & AT91_PMC_MCR_V2_EN);
}
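/*
* Validate the divider (a power of two no larger than
* 1 << (MASTER_PRES_MAX - 1)) and cache its log2; the hardware is only
* updated later, by clk_sama7g5_master_set() on enable/restore.
*/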
static int clk_sama7g5_master_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_master *master = to_clk_master(hw);
unsigned long div, flags;
div = DIV_ROUND_CLOSEST(parent_rate, rate);
if ((div > (1 << (MASTER_PRES_MAX - 1))) || (div & (div - 1)))
return -EINVAL;
if (div == 3)
div = MASTER_PRES_MAX;
else if (div)
div = ffs(div) - 1;
spin_lock_irqsave(master->lock, flags);
master->div = div;
spin_unlock_irqrestore(master->lock, flags);
return 0;
}
static int clk_sama7g5_master_save_context(struct clk_hw *hw)
{
struct clk_master *master = to_clk_master(hw);
master->pms.status = clk_sama7g5_master_is_enabled(hw);
return 0;
}
static void clk_sama7g5_master_restore_context(struct clk_hw *hw)
{
struct clk_master *master = to_clk_master(hw);
if (master->pms.status)
clk_sama7g5_master_set(master, master->pms.status);
}
static const struct clk_ops sama7g5_master_ops = {
.enable = clk_sama7g5_master_enable,
.disable = clk_sama7g5_master_disable,
.is_enabled = clk_sama7g5_master_is_enabled,
.recalc_rate = clk_sama7g5_master_recalc_rate,
.determine_rate = clk_sama7g5_master_determine_rate,
.set_rate = clk_sama7g5_master_set_rate,
.get_parent = clk_sama7g5_master_get_parent,
.set_parent = clk_sama7g5_master_set_parent,
.save_context = clk_sama7g5_master_save_context,
.restore_context = clk_sama7g5_master_restore_context,
};
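/*
* Register a sama7g5 master clock. The current source and divider are
* read back from MCR_V2 so the clock starts from the state left by the
* bootloader/firmware.
*/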
struct clk_hw * __init
at91_clk_sama7g5_register_master(struct regmap *regmap,
const char *name, int num_parents,
const char **parent_names,
struct clk_hw **parent_hws,
u32 *mux_table,
spinlock_t *lock, u8 id,
bool critical, int chg_pid)
{
struct clk_master *master;
struct clk_hw *hw;
struct clk_init_data init = {};
unsigned long flags;
unsigned int val;
int ret;
if (!name || !num_parents || !(parent_names || parent_hws) || !mux_table ||
!lock || id > MASTER_MAX_ID)
return ERR_PTR(-EINVAL);
master = kzalloc(sizeof(*master), GFP_KERNEL);
if (!master)
return ERR_PTR(-ENOMEM);
init.name = name;
init.ops = &sama7g5_master_ops;
if (parent_hws)
init.parent_hws = (const struct clk_hw **)parent_hws;
else
init.parent_names = parent_names;
init.num_parents = num_parents;
init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE;
if (chg_pid >= 0)
init.flags |= CLK_SET_RATE_PARENT;
if (critical)
init.flags |= CLK_IS_CRITICAL;
master->hw.init = &init;
master->regmap = regmap;
master->id = id;
master->chg_pid = chg_pid;
master->lock = lock;
master->mux_table = mux_table;
spin_lock_irqsave(master->lock, flags);
regmap_write(master->regmap, AT91_PMC_MCR_V2, master->id);
regmap_read(master->regmap, AT91_PMC_MCR_V2, &val);
master->parent = (val & AT91_PMC_MCR_V2_CSS) >> PMC_MCR_CSS_SHIFT;
master->div = (val & AT91_PMC_MCR_V2_DIV) >> MASTER_DIV_SHIFT;
spin_unlock_irqrestore(master->lock, flags);
hw = &master->hw;
ret = clk_hw_register(NULL, &master->hw);
if (ret) {
kfree(master);
hw = ERR_PTR(ret);
}
return hw;
}
const struct clk_master_layout at91rm9200_master_layout = {
.mask = 0x31F,
.pres_shift = 2,
.offset = AT91_PMC_MCKR,
};
const struct clk_master_layout at91sam9x5_master_layout = {
.mask = 0x373,
.pres_shift = 4,
.offset = AT91_PMC_MCKR,
};
|
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ALPHA_IRONGATE__H__
#define __ALPHA_IRONGATE__H__
#include <linux/types.h>
#include <asm/compiler.h>
/*
* IRONGATE is the internal name for the AMD-751 K7 core logic chipset
* which provides memory controller and PCI access for NAUTILUS-based
* EV6 (21264) systems.
*
* This file is based on:
*
* IronGate management library, (c) 1999 Alpha Processor, Inc.
* Copyright (C) 1999 Alpha Processor, Inc.,
* (David Daniel, Stig Telfer, Soohoon Lee)
*/
/*
* The 21264 supports, and internally recognizes, a 44-bit physical
* address space that is divided equally between memory address space
* and I/O address space. Memory address space resides in the lower
* half of the physical address space (PA[43]=0) and I/O address space
* resides in the upper half of the physical address space (PA[43]=1).
*/
/*
* Irongate CSR map. Some of the CSRs are 8 or 16 bits, but all access
* through the routines given is 32-bit.
*
* The first 0x40 bytes are standard as per the PCI spec.
*/
typedef volatile __u32 igcsr32;
typedef struct {
igcsr32 dev_vendor; /* 0x00 - device ID, vendor ID */
igcsr32 stat_cmd; /* 0x04 - status, command */
igcsr32 class; /* 0x08 - class code, rev ID */
igcsr32 latency; /* 0x0C - header type, PCI latency */
igcsr32 bar0; /* 0x10 - BAR0 - AGP */
igcsr32 bar1; /* 0x14 - BAR1 - GART */
igcsr32 bar2; /* 0x18 - Power Management reg block */
igcsr32 rsrvd0[6]; /* 0x1C-0x33 reserved */
igcsr32 capptr; /* 0x34 - Capabilities pointer */
igcsr32 rsrvd1[2]; /* 0x38-0x3F reserved */
igcsr32 bacsr10; /* 0x40 - base address chip selects */
igcsr32 bacsr32; /* 0x44 - base address chip selects */
igcsr32 bacsr54_eccms761; /* 0x48 - 751: base addr. chip selects
761: ECC, mode/status */
igcsr32 rsrvd2[1]; /* 0x4C-0x4F reserved */
igcsr32 drammap; /* 0x50 - address mapping control */
igcsr32 dramtm; /* 0x54 - timing, driver strength */
igcsr32 dramms; /* 0x58 - DRAM mode/status */
igcsr32 rsrvd3[1]; /* 0x5C-0x5F reserved */
igcsr32 biu0; /* 0x60 - bus interface unit */
igcsr32 biusip; /* 0x64 - Serial initialisation pkt */
igcsr32 rsrvd4[2]; /* 0x68-0x6F reserved */
igcsr32 mro; /* 0x70 - memory request optimiser */
igcsr32 rsrvd5[3]; /* 0x74-0x7F reserved */
igcsr32 whami; /* 0x80 - who am I */
igcsr32 pciarb; /* 0x84 - PCI arbitration control */
igcsr32 pcicfg; /* 0x88 - PCI config status */
igcsr32 rsrvd6[4]; /* 0x8C-0x9B reserved */
igcsr32 pci_mem; /* 0x9C - PCI top of memory,
761 only */
/* AGP (bus 1) control registers */
igcsr32 agpcap; /* 0xA0 - AGP Capability Identifier */
igcsr32 agpstat; /* 0xA4 - AGP status register */
igcsr32 agpcmd; /* 0xA8 - AGP control register */
igcsr32 agpva; /* 0xAC - AGP Virtual Address Space */
igcsr32 agpmode; /* 0xB0 - AGP/GART mode control */
} Irongate0;
typedef struct {
igcsr32 dev_vendor; /* 0x00 - Device and Vendor IDs */
igcsr32 stat_cmd; /* 0x04 - Status and Command regs */
igcsr32 class; /* 0x08 - subclass, baseclass etc */
igcsr32 htype; /* 0x0C - header type (at 0x0E) */
igcsr32 rsrvd0[2]; /* 0x10-0x17 reserved */
igcsr32 busnos; /* 0x18 - Primary, secondary bus nos */
igcsr32 io_baselim_regs; /* 0x1C - IO base, IO lim, AGP status */
igcsr32 mem_baselim; /* 0x20 - memory base, memory lim */
igcsr32 pfmem_baselim; /* 0x24 - prefetchable base, lim */
igcsr32 rsrvd1[2]; /* 0x28-0x2F reserved */
igcsr32 io_baselim; /* 0x30 - IO base, IO limit */
igcsr32 rsrvd2[2]; /* 0x34-0x3B - reserved */
igcsr32 interrupt; /* 0x3C - interrupt, PCI bridge ctrl */
} Irongate1;
extern igcsr32 *IronECC;
/*
* Memory spaces:
*/
/* Irongate is consistent with a subset of the Tsunami memory map */
#ifdef USE_48_BIT_KSEG
#define IRONGATE_BIAS 0x80000000000UL
#else
#define IRONGATE_BIAS 0x10000000000UL
#endif
#define IRONGATE_MEM (IDENT_ADDR | IRONGATE_BIAS | 0x000000000UL)
#define IRONGATE_IACK_SC (IDENT_ADDR | IRONGATE_BIAS | 0x1F8000000UL)
#define IRONGATE_IO (IDENT_ADDR | IRONGATE_BIAS | 0x1FC000000UL)
#define IRONGATE_CONF (IDENT_ADDR | IRONGATE_BIAS | 0x1FE000000UL)
/*
* PCI Configuration space accesses are formed like so:
*
* 0x1FE << 24 | : 2 2 2 2 1 1 1 1 : 1 1 1 1 1 1 0 0 : 0 0 0 0 0 0 0 0 :
* : 3 2 1 0 9 8 7 6 : 5 4 3 2 1 0 9 8 : 7 6 5 4 3 2 1 0 :
* ---bus number--- -device-- -fun- ---register----
*/
#define IGCSR(dev,fun,reg) ( IRONGATE_CONF | \
((dev)<<11) | \
((fun)<<8) | \
(reg) )
#define IRONGATE0 ((Irongate0 *) IGCSR(0, 0, 0))
#define IRONGATE1 ((Irongate1 *) IGCSR(1, 0, 0))
/*
* Data structure for handling IRONGATE machine checks:
* This is the standard OSF logout frame
*/
#define SCB_Q_SYSERR 0x620 /* OSF definitions */
#define SCB_Q_PROCERR 0x630
#define SCB_Q_SYSMCHK 0x660
#define SCB_Q_PROCMCHK 0x670
struct el_IRONGATE_sysdata_mcheck {
__u32 FrameSize; /* Bytes, including this field */
__u32 FrameFlags; /* <31> = Retry, <30> = Second Error */
__u32 CpuOffset; /* Offset to CPU-specific info */
__u32 SystemOffset; /* Offset to system-specific info */
__u32 MCHK_Code;
__u32 MCHK_Frame_Rev;
__u64 I_STAT;
__u64 DC_STAT;
__u64 C_ADDR;
__u64 DC1_SYNDROME;
__u64 DC0_SYNDROME;
__u64 C_STAT;
__u64 C_STS;
__u64 RESERVED0;
__u64 EXC_ADDR;
__u64 IER_CM;
__u64 ISUM;
__u64 MM_STAT;
__u64 PAL_BASE;
__u64 I_CTL;
__u64 PCTX;
};
#ifdef __KERNEL__
#ifndef __EXTERN_INLINE
#define __EXTERN_INLINE extern inline
#define __IO_EXTERN_INLINE
#endif
/*
* I/O functions:
*
* IRONGATE (AMD-751) PCI/memory support chip for the EV6 (21264) and
* K7 can only use linear accesses to get at PCI memory and I/O spaces.
*/
/*
* Memory functions. All accesses are done through linear space.
*/
__EXTERN_INLINE void __iomem *irongate_ioportmap(unsigned long addr)
{
return (void __iomem *)(addr + IRONGATE_IO);
}
extern void __iomem *irongate_ioremap(unsigned long addr, unsigned long size);
extern void irongate_iounmap(volatile void __iomem *addr);
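/*
* Address predicates: anything at or above the memory window base is a
* bus address; of those, addresses below the port-I/O window or at or
* above the config window are treated as MMIO.
*/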
__EXTERN_INLINE int irongate_is_ioaddr(unsigned long addr)
{
return addr >= IRONGATE_MEM;
}
__EXTERN_INLINE int irongate_is_mmio(const volatile void __iomem *xaddr)
{
unsigned long addr = (unsigned long)xaddr;
return addr < IRONGATE_IO || addr >= IRONGATE_CONF;
}
#undef __IO_PREFIX
#define __IO_PREFIX irongate
#define irongate_trivial_rw_bw 1
#define irongate_trivial_rw_lq 1
#define irongate_trivial_io_bw 1
#define irongate_trivial_io_lq 1
#define irongate_trivial_iounmap 0
#include <asm/io_trivial.h>
#ifdef __IO_EXTERN_INLINE
#undef __EXTERN_INLINE
#undef __IO_EXTERN_INLINE
#endif
#endif /* __KERNEL__ */
#endif /* __ALPHA_IRONGATE__H__ */
|
/*
* include/net/tipc.h: Include file for TIPC message header routines
*
* Copyright (c) 2017 Ericsson AB
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _TIPC_HDR_H
#define _TIPC_HDR_H
#include <linux/random.h>
#define KEEPALIVE_MSG_MASK 0x0e080000 /* LINK_PROTOCOL + MSG_IS_KEEPALIVE */
struct tipc_basic_hdr {
__be32 w[4];
};
static inline __be32 tipc_hdr_rps_key(struct tipc_basic_hdr *hdr)
{
u32 w0 = ntohl(hdr->w[0]);
bool keepalive_msg = (w0 & KEEPALIVE_MSG_MASK) == KEEPALIVE_MSG_MASK;
__be32 key;
/* Return source node identity as key */
if (likely(!keepalive_msg))
return hdr->w[3];
/* Spread PROBE/PROBE_REPLY messages across the cores */
get_random_bytes(&key, sizeof(key));
return key;
}
#endif
|
// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* Copyright 2018
* Lukasz Majewski, DENX Software Engineering, [email protected]
*/
/dts-v1/;
#include "imx6q.dtsi"
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/pwm/pwm.h>
#include <dt-bindings/sound/fsl-imx-audmux.h>
/ {
backlight_lcd: backlight-lcd {
compatible = "pwm-backlight";
pwms = <&pwm1 0 5000000 0>;
brightness-levels = <0 255>;
num-interpolated-steps = <255>;
default-brightness-level = <250>;
};
beeper {
compatible = "pwm-beeper";
pwms = <&pwm2 0 500000 0>;
};
lcd_display: display {
compatible = "fsl,imx-parallel-display";
#address-cells = <1>;
#size-cells = <0>;
interface-pix-fmt = "rgb24";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_ipu1>;
port@0 {
reg = <0>;
lcd_display_in: endpoint {
remote-endpoint = <&ipu1_di0_disp0>;
};
};
port@1 {
reg = <1>;
lcd_display_out: endpoint {
remote-endpoint = <&lcd_panel_in>;
};
};
};
lcd_panel: lcd-panel {
compatible = "auo,g070vvn01";
backlight = <&backlight_lcd>;
power-supply = <®_display>;
port {
lcd_panel_in: endpoint {
remote-endpoint = <&lcd_display_out>;
};
};
};
leds {
compatible = "gpio-leds";
led-green {
label = "led1";
gpios = <&gpio3 16 GPIO_ACTIVE_HIGH>;
linux,default-trigger = "gpio";
default-state = "off";
};
led-red {
label = "led0";
gpios = <&gpio3 23 GPIO_ACTIVE_HIGH>;
linux,default-trigger = "gpio";
default-state = "off";
};
};
reg_3p3v: regulator-3p3v {
compatible = "regulator-fixed";
regulator-name = "3P3V";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
regulator-always-on;
};
reg_audio: regulator-audio {
compatible = "regulator-fixed";
regulator-name = "sgtl5000-supply";
gpio = <&gpio6 31 GPIO_ACTIVE_HIGH>;
enable-active-high;
regulator-always-on;
};
reg_display: regulator-display {
compatible = "regulator-fixed";
regulator-name = "display-supply";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
regulator-always-on;
};
reg_usb_h1_vbus: regulator-usb_h1_vbus {
compatible = "regulator-fixed";
regulator-name = "usb_h1_vbus";
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
enable-active-high;
};
sound {
compatible = "simple-audio-card";
simple-audio-card,name = "imx6q-sgtl5000-audio";
simple-audio-card,format = "i2s";
simple-audio-card,bitclock-master = <&codec_dai>;
simple-audio-card,frame-master = <&codec_dai>;
cpu_dai: simple-audio-card,cpu {
sound-dai = <&ssi1>;
};
codec_dai: simple-audio-card,codec {
sound-dai = <&sgtl5000>;
};
};
};
&audmux {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_audmux>;
status = "okay";
mux-ssi1 {
fsl,audmux-port = <0>;
fsl,port-config = <
(IMX_AUDMUX_V2_PTCR_SYN |
IMX_AUDMUX_V2_PTCR_TFSEL(2) |
IMX_AUDMUX_V2_PTCR_TCSEL(2) |
IMX_AUDMUX_V2_PTCR_TFSDIR |
IMX_AUDMUX_V2_PTCR_TCLKDIR)
IMX_AUDMUX_V2_PDCR_RXDSEL(2)
>;
};
mux-aud3 {
fsl,audmux-port = <2>;
fsl,port-config = <
IMX_AUDMUX_V2_PTCR_SYN
IMX_AUDMUX_V2_PDCR_RXDSEL(0)
>;
};
};
&can1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_flexcan1>;
};
&can2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_flexcan2>;
};
&fec {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_enet>;
phy-mode = "rgmii";
fsl,magic-packet;
status = "okay";
};
&i2c1 {
clock-frequency = <400000>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c1>;
status = "okay";
touchscreen@5d {
compatible = "goodix,gt911";
reg = <0x5d>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_ts>;
interrupt-parent = <&gpio1>;
interrupts = <9 IRQ_TYPE_EDGE_FALLING>;
irq-gpios = <&gpio1 9 GPIO_ACTIVE_HIGH>;
reset-gpios = <&gpio5 2 GPIO_ACTIVE_HIGH>;
};
ds1307: rtc@32 {
compatible = "dallas,ds1307";
reg = <0x32>;
};
};
&i2c2 {
clock-frequency = <400000>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c2>;
status = "okay";
sgtl5000: audio-codec@a {
compatible = "fsl,sgtl5000";
#sound-dai-cells = <0>;
reg = <0x0a>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_codec>;
clocks = <&clks IMX6QDL_CLK_CKO>;
VDDA-supply = <®_3p3v>;
VDDIO-supply = <®_3p3v>;
};
};
&iomuxc {
pinctrl_audmux: audmuxgrp {
fsl,pins = <
MX6QDL_PAD_CSI0_DAT7__AUD3_RXD 0x130b0
MX6QDL_PAD_CSI0_DAT4__AUD3_TXC 0x130b0
MX6QDL_PAD_CSI0_DAT5__AUD3_TXD 0x110b0
MX6QDL_PAD_CSI0_DAT6__AUD3_TXFS 0x130b0
>;
};
pinctrl_codec: codecgrp {
fsl,pins = <
MX6QDL_PAD_EIM_BCLK__GPIO6_IO31 0x1b0b0
/* sgtl5000 sys_mclk clock routed to CLKO1 */
MX6QDL_PAD_GPIO_0__CCM_CLKO1 0x000b0
>;
};
pinctrl_enet: enetgrp {
fsl,pins = <
MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b0b0
MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
MX6QDL_PAD_GPIO_16__ENET_REF_CLK 0x4001b0a8
>;
};
pinctrl_flexcan1: can1grp {
fsl,pins = <
MX6QDL_PAD_GPIO_7__FLEXCAN1_TX 0x1b0b0
MX6QDL_PAD_GPIO_8__FLEXCAN1_RX 0x1b0b0
>;
};
pinctrl_flexcan2: can2grp {
fsl,pins = <
MX6QDL_PAD_KEY_COL4__FLEXCAN2_TX 0x1b0b0
MX6QDL_PAD_KEY_ROW4__FLEXCAN2_RX 0x1b0b0
>;
};
pinctrl_i2c1: i2c1grp {
fsl,pins = <
MX6QDL_PAD_CSI0_DAT8__I2C1_SDA 0x4001b8b1
MX6QDL_PAD_CSI0_DAT9__I2C1_SCL 0x4001b8b1
>;
};
pinctrl_i2c2: i2c2grp {
fsl,pins = <
MX6QDL_PAD_KEY_COL3__I2C2_SCL 0x4001b8b1
MX6QDL_PAD_KEY_ROW3__I2C2_SDA 0x4001b8b1
>;
};
pinctrl_ipu1: ipu1grp {
fsl,pins = <
MX6QDL_PAD_DI0_DISP_CLK__IPU1_DI0_DISP_CLK 0x10
MX6QDL_PAD_DI0_PIN15__IPU1_DI0_PIN15 0x10
MX6QDL_PAD_DI0_PIN2__IPU1_DI0_PIN02 0x10
MX6QDL_PAD_DI0_PIN3__IPU1_DI0_PIN03 0x10
MX6QDL_PAD_DISP0_DAT0__IPU1_DISP0_DATA00 0x10
MX6QDL_PAD_DISP0_DAT1__IPU1_DISP0_DATA01 0x10
MX6QDL_PAD_DISP0_DAT2__IPU1_DISP0_DATA02 0x10
MX6QDL_PAD_DISP0_DAT3__IPU1_DISP0_DATA03 0x10
MX6QDL_PAD_DISP0_DAT4__IPU1_DISP0_DATA04 0x10
MX6QDL_PAD_DISP0_DAT5__IPU1_DISP0_DATA05 0x10
MX6QDL_PAD_DISP0_DAT6__IPU1_DISP0_DATA06 0x10
MX6QDL_PAD_DISP0_DAT7__IPU1_DISP0_DATA07 0x10
MX6QDL_PAD_DISP0_DAT8__IPU1_DISP0_DATA08 0x10
MX6QDL_PAD_DISP0_DAT9__IPU1_DISP0_DATA09 0x10
MX6QDL_PAD_DISP0_DAT10__IPU1_DISP0_DATA10 0x10
MX6QDL_PAD_DISP0_DAT11__IPU1_DISP0_DATA11 0x10
MX6QDL_PAD_DISP0_DAT12__IPU1_DISP0_DATA12 0x10
MX6QDL_PAD_DISP0_DAT13__IPU1_DISP0_DATA13 0x10
MX6QDL_PAD_DISP0_DAT14__IPU1_DISP0_DATA14 0x10
MX6QDL_PAD_DISP0_DAT15__IPU1_DISP0_DATA15 0x10
MX6QDL_PAD_DISP0_DAT16__IPU1_DISP0_DATA16 0x10
MX6QDL_PAD_DISP0_DAT17__IPU1_DISP0_DATA17 0x10
MX6QDL_PAD_DISP0_DAT18__IPU1_DISP0_DATA18 0x10
MX6QDL_PAD_DISP0_DAT19__IPU1_DISP0_DATA19 0x10
MX6QDL_PAD_DISP0_DAT20__IPU1_DISP0_DATA20 0x10
MX6QDL_PAD_DISP0_DAT21__IPU1_DISP0_DATA21 0x10
MX6QDL_PAD_DISP0_DAT22__IPU1_DISP0_DATA22 0x10
MX6QDL_PAD_DISP0_DAT23__IPU1_DISP0_DATA23 0x10
>;
};
pinctrl_pwm1: pwm1grp {
fsl,pins = <
MX6QDL_PAD_SD1_DAT3__PWM1_OUT 0x1b0b1
>;
};
pinctrl_pwm2: pwm2grp {
fsl,pins = <
MX6QDL_PAD_SD1_DAT2__PWM2_OUT 0x1b0b1
>;
};
pinctrl_ts: tsgrp {
fsl,pins = <
MX6QDL_PAD_GPIO_9__GPIO1_IO09 0x1b0b0
MX6QDL_PAD_EIM_A25__GPIO5_IO02 0x1b0b0
>;
};
pinctrl_uart1: uart1grp {
fsl,pins = <
MX6QDL_PAD_SD3_DAT7__UART1_TX_DATA 0x1b0b1
MX6QDL_PAD_SD3_DAT6__UART1_RX_DATA 0x1b0b1
>;
};
pinctrl_uart2: uart2grp {
fsl,pins = <
MX6QDL_PAD_EIM_D26__UART2_TX_DATA 0x1b0b1
MX6QDL_PAD_EIM_D27__UART2_RX_DATA 0x1b0b1
MX6QDL_PAD_EIM_D28__UART2_CTS_B 0x1b0b1
MX6QDL_PAD_EIM_D29__UART2_RTS_B 0x1b0b1
>;
};
pinctrl_usdhc2: usdhc2grp {
fsl,pins = <
MX6QDL_PAD_SD2_CMD__SD2_CMD 0x17059
MX6QDL_PAD_SD2_CLK__SD2_CLK 0x10059
MX6QDL_PAD_SD2_DAT0__SD2_DATA0 0x17059
MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x17059
MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x17059
MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x17059
>;
};
pinctrl_usdhc4: usdhc4grp {
fsl,pins = <
MX6QDL_PAD_SD4_CMD__SD4_CMD 0x17059
MX6QDL_PAD_SD4_CLK__SD4_CLK 0x10059
MX6QDL_PAD_SD4_DAT0__SD4_DATA0 0x17059
MX6QDL_PAD_SD4_DAT1__SD4_DATA1 0x17059
MX6QDL_PAD_SD4_DAT2__SD4_DATA2 0x17059
MX6QDL_PAD_SD4_DAT3__SD4_DATA3 0x17059
MX6QDL_PAD_SD4_DAT4__SD4_DATA4 0x17059
MX6QDL_PAD_SD4_DAT5__SD4_DATA5 0x17059
MX6QDL_PAD_SD4_DAT6__SD4_DATA6 0x17059
MX6QDL_PAD_SD4_DAT7__SD4_DATA7 0x17059
>;
};
};
&pwm1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm1>;
status = "okay";
};
&pwm2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm2>;
status = "okay";
};
&ssi1 {
status = "okay";
};
&uart1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart1>;
status = "okay";
};
&uart2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart2>;
uart-has-rtscts;
};
&usbh1 {
status = "okay";
};
&usdhc2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc2>;
bus-width = <4>;
cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
status = "okay";
};
&usdhc4 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc4>;
bus-width = <8>;
non-removable;
no-1-8-v;
keep-power-in-suspend;
status = "okay";
};
&wdog1 {
status = "okay";
};
|
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* ipmi_si.h
*
* Interface from the device-specific interfaces (OF, DMI, ACPI, PCI,
* etc) to the base ipmi system interface code.
*/
#ifndef __IPMI_SI_H__
#define __IPMI_SI_H__
#include <linux/ipmi.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#define SI_DEVICE_NAME "ipmi_si"
#define DEFAULT_REGSPACING 1
#define DEFAULT_REGSIZE 1
/* Numbers in this enumerator should be mapped to si_to_str[] */
enum si_type {
SI_TYPE_INVALID, SI_KCS, SI_SMIC, SI_BT, SI_TYPE_MAX
};
/* Array is defined in ipmi_si_intf.c */
extern const char *const si_to_str[];
enum ipmi_addr_space {
IPMI_IO_ADDR_SPACE, IPMI_MEM_ADDR_SPACE
};
/*
* The structure for doing I/O in the state machine. The state
* machine doesn't have the actual I/O routines; they are done through
* this interface.
*/
struct si_sm_io {
unsigned char (*inputb)(const struct si_sm_io *io, unsigned int offset);
void (*outputb)(const struct si_sm_io *io,
unsigned int offset,
unsigned char b);
/*
* Generic info used by the actual handling routines; the
* state machine shouldn't touch these.
*/
void __iomem *addr;
unsigned int regspacing;
unsigned int regsize;
unsigned int regshift;
enum ipmi_addr_space addr_space;
unsigned long addr_data;
enum ipmi_addr_src addr_source; /* ACPI, PCI, SMBIOS, hardcode, etc. */
union ipmi_smi_info_union addr_info;
int (*io_setup)(struct si_sm_io *info);
void (*io_cleanup)(struct si_sm_io *info);
unsigned int io_size;
int irq;
int (*irq_setup)(struct si_sm_io *io);
void *irq_handler_data;
void (*irq_cleanup)(struct si_sm_io *io);
u8 slave_addr;
enum si_type si_type;
struct device *dev;
};
int ipmi_si_add_smi(struct si_sm_io *io);
irqreturn_t ipmi_si_irq_handler(int irq, void *data);
void ipmi_irq_start_cleanup(struct si_sm_io *io);
int ipmi_std_irq_setup(struct si_sm_io *io);
void ipmi_irq_finish_setup(struct si_sm_io *io);
void ipmi_si_remove_by_dev(struct device *dev);
struct device *ipmi_si_remove_by_data(int addr_space, enum si_type si_type,
unsigned long addr);
void ipmi_hardcode_init(void);
void ipmi_si_hardcode_exit(void);
void ipmi_si_hotmod_exit(void);
int ipmi_si_hardcode_match(int addr_space, unsigned long addr);
void ipmi_si_platform_init(void);
void ipmi_si_platform_shutdown(void);
void ipmi_remove_platform_device_by_name(char *name);
extern struct platform_driver ipmi_platform_driver;
#ifdef CONFIG_PCI
void ipmi_si_pci_init(void);
void ipmi_si_pci_shutdown(void);
#else
static inline void ipmi_si_pci_init(void) { }
static inline void ipmi_si_pci_shutdown(void) { }
#endif
#ifdef CONFIG_PARISC
void ipmi_si_parisc_init(void);
void ipmi_si_parisc_shutdown(void);
#else
static inline void ipmi_si_parisc_init(void) { }
static inline void ipmi_si_parisc_shutdown(void) { }
#endif
int ipmi_si_port_setup(struct si_sm_io *io);
int ipmi_si_mem_setup(struct si_sm_io *io);
#endif /* __IPMI_SI_H__ */
|
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/*******************************************************************************
*
* Module Name: nsxfobj - Public interfaces to the ACPI subsystem
* ACPI Object oriented interfaces
*
******************************************************************************/
#define EXPORT_ACPI_INTERFACES
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
#define _COMPONENT ACPI_NAMESPACE
ACPI_MODULE_NAME("nsxfobj")
/*******************************************************************************
*
* FUNCTION: acpi_get_type
*
* PARAMETERS: handle - Handle of object whose type is desired
* ret_type - Where the type will be placed
*
* RETURN: Status
*
* DESCRIPTION: This routine returns the type associated with a particular
* handle
*
******************************************************************************/
acpi_status acpi_get_type(acpi_handle handle, acpi_object_type *ret_type)
{
struct acpi_namespace_node *node;
acpi_status status;
/* Parameter Validation */
if (!ret_type) {
return (AE_BAD_PARAMETER);
}
/* Special case for the predefined Root Node (return type ANY) */
if (handle == ACPI_ROOT_OBJECT) {
*ret_type = ACPI_TYPE_ANY;
return (AE_OK);
}
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
return (status);
}
/* Convert and validate the handle */
node = acpi_ns_validate_handle(handle);
if (!node) {
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return (AE_BAD_PARAMETER);
}
*ret_type = node->type;
status = acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return (status);
}
ACPI_EXPORT_SYMBOL(acpi_get_type)
/*******************************************************************************
*
* FUNCTION: acpi_get_parent
*
* PARAMETERS: handle - Handle of object whose parent is desired
* ret_handle - Where the parent handle will be placed
*
* RETURN: Status
*
* DESCRIPTION: Returns a handle to the parent of the object represented by
* Handle.
*
******************************************************************************/
acpi_status acpi_get_parent(acpi_handle handle, acpi_handle *ret_handle)
{
struct acpi_namespace_node *node;
struct acpi_namespace_node *parent_node;
acpi_status status;
if (!ret_handle) {
return (AE_BAD_PARAMETER);
}
/* Special case for the predefined Root Node (no parent) */
if (handle == ACPI_ROOT_OBJECT) {
return (AE_NULL_ENTRY);
}
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
return (status);
}
/* Convert and validate the handle */
node = acpi_ns_validate_handle(handle);
if (!node) {
status = AE_BAD_PARAMETER;
goto unlock_and_exit;
}
/* Get the parent entry */
parent_node = node->parent;
*ret_handle = ACPI_CAST_PTR(acpi_handle, parent_node);
/* Return exception if parent is null */
if (!parent_node) {
status = AE_NULL_ENTRY;
}
unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return (status);
}
ACPI_EXPORT_SYMBOL(acpi_get_parent)
/*******************************************************************************
*
* FUNCTION: acpi_get_next_object
*
* PARAMETERS: type - Type of object to be searched for
* parent - Parent object whose children we are getting
* child - Previous child that was found.
* The NEXT child will be returned
* ret_handle - Where handle to the next object is placed
*
* RETURN: Status
*
* DESCRIPTION: Return the next peer object within the namespace. If Handle is
* valid, Scope is ignored. Otherwise, the first object within
* Scope is returned.
*
******************************************************************************/
acpi_status
acpi_get_next_object(acpi_object_type type,
acpi_handle parent,
acpi_handle child, acpi_handle *ret_handle)
{
acpi_status status;
struct acpi_namespace_node *node;
struct acpi_namespace_node *parent_node = NULL;
struct acpi_namespace_node *child_node = NULL;
/* Parameter validation */
if (type > ACPI_TYPE_EXTERNAL_MAX) {
return (AE_BAD_PARAMETER);
}
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
return (status);
}
/* If null handle, use the parent */
if (!child) {
/* Start search at the beginning of the specified scope */
parent_node = acpi_ns_validate_handle(parent);
if (!parent_node) {
status = AE_BAD_PARAMETER;
goto unlock_and_exit;
}
} else {
/* Non-null handle, ignore the parent */
/* Convert and validate the handle */
child_node = acpi_ns_validate_handle(child);
if (!child_node) {
status = AE_BAD_PARAMETER;
goto unlock_and_exit;
}
}
/* Internal function does the real work */
node = acpi_ns_get_next_node_typed(type, parent_node, child_node);
if (!node) {
status = AE_NOT_FOUND;
goto unlock_and_exit;
}
if (ret_handle) {
*ret_handle = ACPI_CAST_PTR(acpi_handle, node);
}
unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return (status);
}
ACPI_EXPORT_SYMBOL(acpi_get_next_object)
|
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2022, Intel Corporation. */
#ifndef _ICE_DDP_H_
#define _ICE_DDP_H_
#include "ice_type.h"
/* Package minimal version supported */
#define ICE_PKG_SUPP_VER_MAJ 1
#define ICE_PKG_SUPP_VER_MNR 3
/* Package format version */
#define ICE_PKG_FMT_VER_MAJ 1
#define ICE_PKG_FMT_VER_MNR 0
#define ICE_PKG_FMT_VER_UPD 0
#define ICE_PKG_FMT_VER_DFT 0
#define ICE_PKG_CNT 4
#define ICE_FV_OFFSET_INVAL 0x1FF
/* Extraction Sequence (Field Vector) Table */
struct ice_fv_word {
u8 prot_id;
u16 off; /* Offset within the protocol header */
u8 resvrd;
} __packed;
#define ICE_MAX_NUM_PROFILES 256
#define ICE_MAX_FV_WORDS 48
struct ice_fv {
struct ice_fv_word ew[ICE_MAX_FV_WORDS];
};
enum ice_ddp_state {
/* Indicates that this call to ice_init_pkg
* successfully loaded the requested DDP package
*/
ICE_DDP_PKG_SUCCESS = 0,
/* Generic "already loaded" error; it is mapped later to one of the
* three more specific ones that follow
*/
ICE_DDP_PKG_ALREADY_LOADED = -1,
/* Indicates that a DDP package of the same version has already been
* loaded onto the device by a previous call or by another PF
*/
ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED = -2,
/* The device has a DDP package that is not supported by the driver */
ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED = -3,
/* The device has a compatible package
* (but different from the request) already loaded
*/
ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED = -4,
/* The firmware loaded on the device is not compatible with
* the DDP package loaded
*/
ICE_DDP_PKG_FW_MISMATCH = -5,
/* The DDP package file is invalid */
ICE_DDP_PKG_INVALID_FILE = -6,
/* The version of the DDP package provided is higher than
* the driver supports
*/
ICE_DDP_PKG_FILE_VERSION_TOO_HIGH = -7,
/* The version of the DDP package provided is lower than the
* driver supports
*/
ICE_DDP_PKG_FILE_VERSION_TOO_LOW = -8,
/* The signature of the DDP package file provided is invalid */
ICE_DDP_PKG_FILE_SIGNATURE_INVALID = -9,
/* The DDP package file security revision is too low and not
* supported by firmware
*/
ICE_DDP_PKG_FILE_REVISION_TOO_LOW = -10,
/* An error occurred in firmware while loading the DDP package */
ICE_DDP_PKG_LOAD_ERROR = -11,
/* Other errors */
ICE_DDP_PKG_ERR = -12
};
/* Package and segment headers and tables */
struct ice_pkg_hdr {
struct ice_pkg_ver pkg_format_ver;
__le32 seg_count;
__le32 seg_offset[];
};
/* Package signing algorithm types */
#define SEGMENT_SIGN_TYPE_INVALID 0x00000000
#define SEGMENT_SIGN_TYPE_RSA2K 0x00000001
#define SEGMENT_SIGN_TYPE_RSA3K 0x00000002
#define SEGMENT_SIGN_TYPE_RSA3K_SBB 0x00000003 /* Secure Boot Block */
#define SEGMENT_SIGN_TYPE_RSA3K_E825 0x00000005
/* generic segment */
struct ice_generic_seg_hdr {
#define SEGMENT_TYPE_INVALID 0x00000000
#define SEGMENT_TYPE_METADATA 0x00000001
#define SEGMENT_TYPE_ICE_E810 0x00000010
#define SEGMENT_TYPE_SIGNING 0x00001001
#define SEGMENT_TYPE_ICE_RUN_TIME_CFG 0x00000020
#define SEGMENT_TYPE_ICE_E830 0x00000017
__le32 seg_type;
struct ice_pkg_ver seg_format_ver;
__le32 seg_size;
char seg_id[ICE_PKG_NAME_SIZE];
};
/* ice specific segment */
union ice_device_id {
struct {
__le16 device_id;
__le16 vendor_id;
} dev_vend_id;
__le32 id;
};
struct ice_device_id_entry {
union ice_device_id device;
union ice_device_id sub_device;
};
struct ice_seg {
struct ice_generic_seg_hdr hdr;
__le32 device_table_count;
struct ice_device_id_entry device_table[];
};
struct ice_nvm_table {
__le32 table_count;
__le32 vers[];
};
struct ice_buf {
#define ICE_PKG_BUF_SIZE 4096
u8 buf[ICE_PKG_BUF_SIZE];
};
struct ice_buf_table {
__le32 buf_count;
struct ice_buf buf_array[];
};
struct ice_run_time_cfg_seg {
struct ice_generic_seg_hdr hdr;
u8 rsvd[8];
struct ice_buf_table buf_table;
};
/* global metadata specific segment */
struct ice_global_metadata_seg {
struct ice_generic_seg_hdr hdr;
struct ice_pkg_ver pkg_ver;
__le32 rsvd;
char pkg_name[ICE_PKG_NAME_SIZE];
};
#define ICE_MIN_S_OFF 12
#define ICE_MAX_S_OFF 4095
#define ICE_MIN_S_SZ 1
#define ICE_MAX_S_SZ 4084
struct ice_sign_seg {
struct ice_generic_seg_hdr hdr;
__le32 seg_id;
__le32 sign_type;
__le32 signed_seg_idx;
__le32 signed_buf_start;
__le32 signed_buf_count;
#define ICE_SIGN_SEG_FLAGS_VALID 0x80000000
#define ICE_SIGN_SEG_FLAGS_LAST 0x00000001
__le32 flags;
#define ICE_SIGN_SEG_RESERVED_COUNT 40
u8 reserved[ICE_SIGN_SEG_RESERVED_COUNT];
struct ice_buf_table buf_tbl;
};
/* section information */
struct ice_section_entry {
__le32 type;
__le16 offset;
__le16 size;
};
#define ICE_MIN_S_COUNT 1
#define ICE_MAX_S_COUNT 511
#define ICE_MIN_S_DATA_END 12
#define ICE_MAX_S_DATA_END 4096
#define ICE_METADATA_BUF 0x80000000
struct ice_buf_hdr {
__le16 section_count;
__le16 data_end;
struct ice_section_entry section_entry[];
};
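/*
* Number of ent_sz-sized entries that fit in one package buffer after
* the buffer header (with one section entry) and hd_sz bytes of section
* header.
*/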
#define ICE_MAX_ENTRIES_IN_BUF(hd_sz, ent_sz) \
((ICE_PKG_BUF_SIZE - \
struct_size_t(struct ice_buf_hdr, section_entry, 1) - (hd_sz)) / \
(ent_sz))
/* ice package section IDs */
#define ICE_SID_METADATA 1
#define ICE_SID_XLT0_SW 10
#define ICE_SID_XLT_KEY_BUILDER_SW 11
#define ICE_SID_XLT1_SW 12
#define ICE_SID_XLT2_SW 13
#define ICE_SID_PROFID_TCAM_SW 14
#define ICE_SID_PROFID_REDIR_SW 15
#define ICE_SID_FLD_VEC_SW 16
#define ICE_SID_CDID_KEY_BUILDER_SW 17
struct ice_meta_sect {
struct ice_pkg_ver ver;
#define ICE_META_SECT_NAME_SIZE 28
char name[ICE_META_SECT_NAME_SIZE];
__le32 track_id;
};
#define ICE_SID_CDID_REDIR_SW 18
#define ICE_SID_XLT0_ACL 20
#define ICE_SID_XLT_KEY_BUILDER_ACL 21
#define ICE_SID_XLT1_ACL 22
#define ICE_SID_XLT2_ACL 23
#define ICE_SID_PROFID_TCAM_ACL 24
#define ICE_SID_PROFID_REDIR_ACL 25
#define ICE_SID_FLD_VEC_ACL 26
#define ICE_SID_CDID_KEY_BUILDER_ACL 27
#define ICE_SID_CDID_REDIR_ACL 28
#define ICE_SID_XLT0_FD 30
#define ICE_SID_XLT_KEY_BUILDER_FD 31
#define ICE_SID_XLT1_FD 32
#define ICE_SID_XLT2_FD 33
#define ICE_SID_PROFID_TCAM_FD 34
#define ICE_SID_PROFID_REDIR_FD 35
#define ICE_SID_FLD_VEC_FD 36
#define ICE_SID_CDID_KEY_BUILDER_FD 37
#define ICE_SID_CDID_REDIR_FD 38
#define ICE_SID_XLT0_RSS 40
#define ICE_SID_XLT_KEY_BUILDER_RSS 41
#define ICE_SID_XLT1_RSS 42
#define ICE_SID_XLT2_RSS 43
#define ICE_SID_PROFID_TCAM_RSS 44
#define ICE_SID_PROFID_REDIR_RSS 45
#define ICE_SID_FLD_VEC_RSS 46
#define ICE_SID_CDID_KEY_BUILDER_RSS 47
#define ICE_SID_CDID_REDIR_RSS 48
#define ICE_SID_RXPARSER_CAM 50
#define ICE_SID_RXPARSER_NOMATCH_CAM 51
#define ICE_SID_RXPARSER_IMEM 52
#define ICE_SID_RXPARSER_MARKER_PTYPE 55
#define ICE_SID_RXPARSER_BOOST_TCAM 56
#define ICE_SID_RXPARSER_PROTO_GRP 57
#define ICE_SID_RXPARSER_METADATA_INIT 58
#define ICE_SID_TXPARSER_BOOST_TCAM 66
#define ICE_SID_RXPARSER_MARKER_GRP 72
#define ICE_SID_RXPARSER_PG_SPILL 76
#define ICE_SID_RXPARSER_NOMATCH_SPILL 78
#define ICE_SID_XLT0_PE 80
#define ICE_SID_XLT_KEY_BUILDER_PE 81
#define ICE_SID_XLT1_PE 82
#define ICE_SID_XLT2_PE 83
#define ICE_SID_PROFID_TCAM_PE 84
#define ICE_SID_PROFID_REDIR_PE 85
#define ICE_SID_FLD_VEC_PE 86
#define ICE_SID_CDID_KEY_BUILDER_PE 87
#define ICE_SID_CDID_REDIR_PE 88
#define ICE_SID_RXPARSER_FLAG_REDIR 97
/* Label Metadata section IDs */
#define ICE_SID_LBL_FIRST 0x80000010
#define ICE_SID_LBL_RXPARSER_TMEM 0x80000018
/* The following define MUST be updated to reflect the last label section ID */
#define ICE_SID_LBL_LAST 0x80000038
/* Label ICE runtime configuration section IDs */
#define ICE_SID_TX_5_LAYER_TOPO 0x10
enum ice_block {
ICE_BLK_SW = 0,
ICE_BLK_ACL,
ICE_BLK_FD,
ICE_BLK_RSS,
ICE_BLK_PE,
ICE_BLK_COUNT
};
enum ice_sect {
ICE_XLT0 = 0,
ICE_XLT_KB,
ICE_XLT1,
ICE_XLT2,
ICE_PROF_TCAM,
ICE_PROF_REDIR,
ICE_VEC_TBL,
ICE_CDID_KB,
ICE_CDID_REDIR,
ICE_SECT_COUNT
};
/* package labels */
struct ice_label {
__le16 value;
#define ICE_PKG_LABEL_SIZE 64
char name[ICE_PKG_LABEL_SIZE];
};
struct ice_label_section {
__le16 count;
struct ice_label label[];
};
#define ICE_MAX_LABELS_IN_BUF \
ICE_MAX_ENTRIES_IN_BUF(struct_size_t(struct ice_label_section, \
label, 1) - \
sizeof(struct ice_label), \
sizeof(struct ice_label))
struct ice_sw_fv_section {
__le16 count;
__le16 base_offset;
struct ice_fv fv[];
};
struct ice_sw_fv_list_entry {
struct list_head list_entry;
u32 profile_id;
struct ice_fv *fv_ptr;
};
/* The BOOST TCAM stores the match packet header in reverse order, meaning
* the fields are reversed; in addition, this means that the normally big endian
* fields of the packet are now little endian.
*/
struct ice_boost_key_value {
#define ICE_BOOST_REMAINING_HV_KEY 15
u8 remaining_hv_key[ICE_BOOST_REMAINING_HV_KEY];
__le16 hv_dst_port_key;
__le16 hv_src_port_key;
u8 tcam_search_key;
} __packed;
struct ice_boost_key {
struct ice_boost_key_value key;
struct ice_boost_key_value key2;
};
/* package Boost TCAM entry */
struct ice_boost_tcam_entry {
__le16 addr;
__le16 reserved;
/* break up the 40 bytes of key into different fields */
struct ice_boost_key key;
u8 boost_hit_index_group;
/* The following contains bitfields which are not on byte boundaries.
* These fields are currently unused by driver software.
*/
#define ICE_BOOST_BIT_FIELDS 43
u8 bit_fields[ICE_BOOST_BIT_FIELDS];
};
struct ice_boost_tcam_section {
__le16 count;
__le16 reserved;
struct ice_boost_tcam_entry tcam[];
};
#define ICE_MAX_BST_TCAMS_IN_BUF \
ICE_MAX_ENTRIES_IN_BUF(struct_size_t(struct ice_boost_tcam_section, \
tcam, 1) - \
sizeof(struct ice_boost_tcam_entry), \
sizeof(struct ice_boost_tcam_entry))
/* package Marker Ptype TCAM entry */
struct ice_marker_ptype_tcam_entry {
#define ICE_MARKER_PTYPE_TCAM_ADDR_MAX 1024
__le16 addr;
__le16 ptype;
u8 keys[20];
};
struct ice_marker_ptype_tcam_section {
__le16 count;
__le16 reserved;
struct ice_marker_ptype_tcam_entry tcam[];
};
#define ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF \
ICE_MAX_ENTRIES_IN_BUF(struct_size_t(struct ice_marker_ptype_tcam_section, tcam, \
1) - \
sizeof(struct ice_marker_ptype_tcam_entry), \
sizeof(struct ice_marker_ptype_tcam_entry))
struct ice_xlt1_section {
__le16 count;
__le16 offset;
u8 value[];
};
struct ice_xlt2_section {
__le16 count;
__le16 offset;
__le16 value[];
};
struct ice_prof_redir_section {
__le16 count;
__le16 offset;
u8 redir_value[];
};
/* package buffer building */
struct ice_buf_build {
struct ice_buf buf;
u16 reserved_section_table_entries;
};
struct ice_pkg_enum {
struct ice_buf_table *buf_table;
u32 buf_idx;
u32 type;
const struct ice_buf_hdr *buf;
u32 sect_idx;
void *sect;
u32 sect_type;
u32 entry_idx;
void *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset);
};
int ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
u16 buf_size, struct ice_sq_cd *cd);
void *ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size);
struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw);
int ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count);
int ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count);
int ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count);
u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld);
void *
ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
u32 sect_type, u32 *offset,
void *(*handler)(u32 sect_type, void *section,
u32 index, u32 *offset));
void *ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
u32 sect_type);
int ice_cfg_tx_topo(struct ice_hw *hw, const void *buf, u32 len);
#endif
|
// SPDX-License-Identifier: GPL-2.0-or-later
/* Call state changing functions.
*
* Copyright (C) 2022 Red Hat, Inc. All Rights Reserved.
* Written by David Howells ([email protected])
*/
#include "ar-internal.h"
/*
* Transition a call to the complete state.
*/
bool rxrpc_set_call_completion(struct rxrpc_call *call,
enum rxrpc_call_completion compl,
u32 abort_code,
int error)
{
if (__rxrpc_call_state(call) == RXRPC_CALL_COMPLETE)
return false;
call->abort_code = abort_code;
call->error = error;
call->completion = compl;
/* Allow reader of completion state to operate locklessly */
rxrpc_set_call_state(call, RXRPC_CALL_COMPLETE);
trace_rxrpc_call_complete(call);
wake_up(&call->waitq);
rxrpc_notify_socket(call);
return true;
}
/*
* Record that a call successfully completed.
*/
bool rxrpc_call_completed(struct rxrpc_call *call)
{
return rxrpc_set_call_completion(call, RXRPC_CALL_SUCCEEDED, 0, 0);
}
/*
* Record that a call is locally aborted.
*/
bool rxrpc_abort_call(struct rxrpc_call *call, rxrpc_seq_t seq,
u32 abort_code, int error, enum rxrpc_abort_reason why)
{
trace_rxrpc_abort(call->debug_id, why, call->cid, call->call_id, seq,
abort_code, error);
if (!rxrpc_set_call_completion(call, RXRPC_CALL_LOCALLY_ABORTED,
abort_code, error))
return false;
if (test_bit(RXRPC_CALL_EXPOSED, &call->flags))
rxrpc_send_abort_packet(call);
return true;
}
/*
* Record that a call errored out before even getting off the ground, thereby
* setting the state to allow it to be destroyed.
*/
void rxrpc_prefail_call(struct rxrpc_call *call, enum rxrpc_call_completion compl,
int error)
{
call->abort_code = RX_CALL_DEAD;
call->error = error;
call->completion = compl;
call->_state = RXRPC_CALL_COMPLETE;
trace_rxrpc_call_complete(call);
WARN_ON_ONCE(__test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags));
}
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright 2015 Freescale Semiconductor, Inc.
*
* Freescale DCU drm device driver
*/
#ifndef __FSL_DCU_DRM_DRV_H__
#define __FSL_DCU_DRM_DRV_H__
#include <drm/drm_encoder.h>
#include "fsl_dcu_drm_crtc.h"
#include "fsl_dcu_drm_output.h"
#include "fsl_dcu_drm_plane.h"
#define DCU_DCU_MODE 0x0010
#define DCU_MODE_BLEND_ITER(x) ((x) << 20)
#define DCU_MODE_RASTER_EN BIT(14)
#define DCU_MODE_DCU_MODE(x) (x)
#define DCU_MODE_DCU_MODE_MASK 0x03
#define DCU_MODE_OFF 0
#define DCU_MODE_NORMAL 1
#define DCU_MODE_TEST 2
#define DCU_MODE_COLORBAR 3
#define DCU_BGND 0x0014
#define DCU_BGND_R(x) ((x) << 16)
#define DCU_BGND_G(x) ((x) << 8)
#define DCU_BGND_B(x) (x)
#define DCU_DISP_SIZE 0x0018
#define DCU_DISP_SIZE_DELTA_Y(x) ((x) << 16)
/* Register value is 1/16 of the horizontal resolution */
#define DCU_DISP_SIZE_DELTA_X(x) ((x) >> 4)
#define DCU_HSYN_PARA 0x001c
#define DCU_HSYN_PARA_BP(x) ((x) << 22)
#define DCU_HSYN_PARA_PW(x) ((x) << 11)
#define DCU_HSYN_PARA_FP(x) (x)
#define DCU_VSYN_PARA 0x0020
#define DCU_VSYN_PARA_BP(x) ((x) << 22)
#define DCU_VSYN_PARA_PW(x) ((x) << 11)
#define DCU_VSYN_PARA_FP(x) (x)
#define DCU_SYN_POL 0x0024
#define DCU_SYN_POL_INV_PXCK BIT(6)
#define DCU_SYN_POL_NEG BIT(5)
#define DCU_SYN_POL_INV_VS_LOW BIT(1)
#define DCU_SYN_POL_INV_HS_LOW BIT(0)
#define DCU_THRESHOLD 0x0028
#define DCU_THRESHOLD_LS_BF_VS(x) ((x) << 16)
#define DCU_THRESHOLD_OUT_BUF_HIGH(x) ((x) << 8)
#define DCU_THRESHOLD_OUT_BUF_LOW(x) (x)
#define BF_VS_VAL 0x03
#define BUF_MAX_VAL 0x78
#define BUF_MIN_VAL 0x0a
#define DCU_INT_STATUS 0x002C
#define DCU_INT_STATUS_VSYNC BIT(0)
#define DCU_INT_STATUS_UNDRUN BIT(1)
#define DCU_INT_STATUS_LSBFVS BIT(2)
#define DCU_INT_STATUS_VBLANK BIT(3)
#define DCU_INT_STATUS_CRCREADY BIT(4)
#define DCU_INT_STATUS_CRCOVERFLOW BIT(5)
#define DCU_INT_STATUS_P1FIFOLO BIT(6)
#define DCU_INT_STATUS_P1FIFOHI BIT(7)
#define DCU_INT_STATUS_P2FIFOLO BIT(8)
#define DCU_INT_STATUS_P2FIFOHI BIT(9)
#define DCU_INT_STATUS_PROGEND BIT(10)
#define DCU_INT_STATUS_IPMERROR BIT(11)
#define DCU_INT_STATUS_LYRTRANS BIT(12)
#define DCU_INT_STATUS_DMATRANS BIT(14)
#define DCU_INT_STATUS_P3FIFOLO BIT(16)
#define DCU_INT_STATUS_P3FIFOHI BIT(17)
#define DCU_INT_STATUS_P4FIFOLO BIT(18)
#define DCU_INT_STATUS_P4FIFOHI BIT(19)
#define DCU_INT_STATUS_P1EMPTY BIT(26)
#define DCU_INT_STATUS_P2EMPTY BIT(27)
#define DCU_INT_STATUS_P3EMPTY BIT(28)
#define DCU_INT_STATUS_P4EMPTY BIT(29)
#define DCU_INT_MASK 0x0030
#define DCU_INT_MASK_VSYNC BIT(0)
#define DCU_INT_MASK_UNDRUN BIT(1)
#define DCU_INT_MASK_LSBFVS BIT(2)
#define DCU_INT_MASK_VBLANK BIT(3)
#define DCU_INT_MASK_CRCREADY BIT(4)
#define DCU_INT_MASK_CRCOVERFLOW BIT(5)
#define DCU_INT_MASK_P1FIFOLO BIT(6)
#define DCU_INT_MASK_P1FIFOHI BIT(7)
#define DCU_INT_MASK_P2FIFOLO BIT(8)
#define DCU_INT_MASK_P2FIFOHI BIT(9)
#define DCU_INT_MASK_PROGEND BIT(10)
#define DCU_INT_MASK_IPMERROR BIT(11)
#define DCU_INT_MASK_LYRTRANS BIT(12)
#define DCU_INT_MASK_DMATRANS BIT(14)
#define DCU_INT_MASK_P3FIFOLO BIT(16)
#define DCU_INT_MASK_P3FIFOHI BIT(17)
#define DCU_INT_MASK_P4FIFOLO BIT(18)
#define DCU_INT_MASK_P4FIFOHI BIT(19)
#define DCU_INT_MASK_P1EMPTY BIT(26)
#define DCU_INT_MASK_P2EMPTY BIT(27)
#define DCU_INT_MASK_P3EMPTY BIT(28)
#define DCU_INT_MASK_P4EMPTY BIT(29)
#define DCU_DIV_RATIO 0x0054
#define DCU_UPDATE_MODE 0x00cc
#define DCU_UPDATE_MODE_MODE BIT(31)
#define DCU_UPDATE_MODE_READREG BIT(30)
#define DCU_DCFB_MAX 0x300
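/* control descriptor register (1-based) of a layer; layers are 0x40 apart */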
#define DCU_CTRLDESCLN(layer, reg) (0x200 + (reg - 1) * 4 + (layer) * 0x40)
#define DCU_LAYER_HEIGHT(x) ((x) << 16)
#define DCU_LAYER_WIDTH(x) (x)
#define DCU_LAYER_POSY(x) ((x) << 16)
#define DCU_LAYER_POSX(x) (x)
#define DCU_LAYER_EN BIT(31)
#define DCU_LAYER_TILE_EN BIT(30)
#define DCU_LAYER_DATA_SEL_CLUT BIT(29)
#define DCU_LAYER_SAFETY_EN BIT(28)
#define DCU_LAYER_TRANS(x) ((x) << 20)
#define DCU_LAYER_BPP(x) ((x) << 16)
#define DCU_LAYER_RLE_EN BIT(15)
#define DCU_LAYER_LUOFFS(x) ((x) << 4)
#define DCU_LAYER_BB_ON BIT(2)
#define DCU_LAYER_AB_NONE 0
#define DCU_LAYER_AB_CHROMA_KEYING 1
#define DCU_LAYER_AB_WHOLE_FRAME 2
#define DCU_LAYER_CKMAX_R(x) ((x) << 16)
#define DCU_LAYER_CKMAX_G(x) ((x) << 8)
#define DCU_LAYER_CKMAX_B(x) (x)
#define DCU_LAYER_CKMIN_R(x) ((x) << 16)
#define DCU_LAYER_CKMIN_G(x) ((x) << 8)
#define DCU_LAYER_CKMIN_B(x) (x)
#define DCU_LAYER_TILE_VER(x) ((x) << 16)
#define DCU_LAYER_TILE_HOR(x) (x)
#define DCU_LAYER_FG_FCOLOR(x) (x)
#define DCU_LAYER_BG_BCOLOR(x) (x)
#define DCU_LAYER_POST_SKIP(x) ((x) << 16)
#define DCU_LAYER_PRE_SKIP(x) (x)
#define FSL_DCU_RGB565 4
#define FSL_DCU_RGB888 5
#define FSL_DCU_ARGB8888 6
#define FSL_DCU_ARGB1555 11
#define FSL_DCU_ARGB4444 12
#define FSL_DCU_YUV422 14
#define SCFG_PIXCLKCR 0x28
#define SCFG_PIXCLKCR_PXCEN BIT(31)
#define VF610_LAYER_REG_NUM 9
#define LS1021A_LAYER_REG_NUM 10
struct clk;
struct device;
struct drm_device;
struct fsl_dcu_soc_data {
const char *name;
/* total number of layers */
unsigned int total_layer;
/* max number of layers supported by the DCU */
unsigned int max_layer;
unsigned int layer_regs;
};
struct fsl_dcu_drm_device {
struct device *dev;
struct device_node *np;
struct regmap *regmap;
int irq;
struct clk *clk;
struct clk *pix_clk;
struct fsl_tcon *tcon;
/* protects hardware registers */
spinlock_t irq_lock;
struct drm_device *drm;
struct drm_crtc crtc;
struct drm_encoder encoder;
struct fsl_dcu_drm_connector connector;
const struct fsl_dcu_soc_data *soc;
};
int fsl_dcu_drm_modeset_init(struct fsl_dcu_drm_device *fsl_dev);
#endif /* __FSL_DCU_DRM_DRV_H__ */
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Goodix Berlin Touchscreen Driver
*
* Copyright (C) 2020 - 2021 Goodix, Inc.
* Copyright (C) 2023 Linaro Ltd.
*
* Based on goodix_ts_berlin driver.
*/
#include <linux/unaligned.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>
#include <linux/input.h>
#include "goodix_berlin.h"
#define GOODIX_BERLIN_SPI_TRANS_PREFIX_LEN 1
#define GOODIX_BERLIN_REGISTER_WIDTH 4
#define GOODIX_BERLIN_SPI_READ_DUMMY_LEN 3
#define GOODIX_BERLIN_SPI_READ_PREFIX_LEN (GOODIX_BERLIN_SPI_TRANS_PREFIX_LEN + \
GOODIX_BERLIN_REGISTER_WIDTH + \
GOODIX_BERLIN_SPI_READ_DUMMY_LEN)
#define GOODIX_BERLIN_SPI_WRITE_PREFIX_LEN (GOODIX_BERLIN_SPI_TRANS_PREFIX_LEN + \
GOODIX_BERLIN_REGISTER_WIDTH)
#define GOODIX_BERLIN_SPI_WRITE_FLAG 0xF0
#define GOODIX_BERLIN_SPI_READ_FLAG 0xF1
static int goodix_berlin_spi_read(void *context, const void *reg_buf,
size_t reg_size, void *val_buf,
size_t val_size)
{
struct spi_device *spi = context;
struct spi_transfer xfers;
struct spi_message spi_msg;
const u32 *reg = reg_buf; /* reg is stored as native u32 at start of buffer */
int error;
if (reg_size != GOODIX_BERLIN_REGISTER_WIDTH)
return -EINVAL;
u8 *buf __free(kfree) =
kzalloc(GOODIX_BERLIN_SPI_READ_PREFIX_LEN + val_size,
GFP_KERNEL);
if (!buf)
return -ENOMEM;
spi_message_init(&spi_msg);
memset(&xfers, 0, sizeof(xfers));
/* buffer format: 0xF1 + addr(4bytes) + dummy(3bytes) + data */
buf[0] = GOODIX_BERLIN_SPI_READ_FLAG;
put_unaligned_be32(*reg, buf + GOODIX_BERLIN_SPI_TRANS_PREFIX_LEN);
memset(buf + GOODIX_BERLIN_SPI_TRANS_PREFIX_LEN + GOODIX_BERLIN_REGISTER_WIDTH,
0xff, GOODIX_BERLIN_SPI_READ_DUMMY_LEN);
xfers.tx_buf = buf;
xfers.rx_buf = buf;
xfers.len = GOODIX_BERLIN_SPI_READ_PREFIX_LEN + val_size;
xfers.cs_change = 0;
spi_message_add_tail(&xfers, &spi_msg);
error = spi_sync(spi, &spi_msg);
if (error < 0) {
dev_err(&spi->dev, "spi transfer error, %d", error);
return error;
}
memcpy(val_buf, buf + GOODIX_BERLIN_SPI_READ_PREFIX_LEN, val_size);
return error;
}
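/* buffer format: 0xF0 + addr(4bytes) + data */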
static int goodix_berlin_spi_write(void *context, const void *data,
size_t count)
{
unsigned int len = count - GOODIX_BERLIN_REGISTER_WIDTH;
struct spi_device *spi = context;
struct spi_transfer xfers;
struct spi_message spi_msg;
const u32 *reg = data; /* reg is stored as native u32 at start of buffer */
int error;
u8 *buf __free(kfree) =
kzalloc(GOODIX_BERLIN_SPI_WRITE_PREFIX_LEN + len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
spi_message_init(&spi_msg);
memset(&xfers, 0, sizeof(xfers));
buf[0] = GOODIX_BERLIN_SPI_WRITE_FLAG;
put_unaligned_be32(*reg, buf + GOODIX_BERLIN_SPI_TRANS_PREFIX_LEN);
memcpy(buf + GOODIX_BERLIN_SPI_WRITE_PREFIX_LEN,
data + GOODIX_BERLIN_REGISTER_WIDTH, len);
xfers.tx_buf = buf;
xfers.len = GOODIX_BERLIN_SPI_WRITE_PREFIX_LEN + len;
xfers.cs_change = 0;
spi_message_add_tail(&xfers, &spi_msg);
error = spi_sync(spi, &spi_msg);
if (error < 0) {
dev_err(&spi->dev, "spi transfer error, %d", error);
return error;
}
return 0;
}
static const struct regmap_config goodix_berlin_spi_regmap_conf = {
.reg_bits = 32,
.val_bits = 8,
.read = goodix_berlin_spi_read,
.write = goodix_berlin_spi_write,
};
/* vendor & product left unassigned here, should probably be updated from fw info */
static const struct input_id goodix_berlin_spi_input_id = {
.bustype = BUS_SPI,
};
static int goodix_berlin_spi_probe(struct spi_device *spi)
{
struct regmap_config regmap_config;
struct regmap *regmap;
size_t max_size;
int error = 0;
spi->mode = SPI_MODE_0;
spi->bits_per_word = 8;
error = spi_setup(spi);
if (error)
return error;
max_size = spi_max_transfer_size(spi);
regmap_config = goodix_berlin_spi_regmap_conf;
regmap_config.max_raw_read = max_size - GOODIX_BERLIN_SPI_READ_PREFIX_LEN;
regmap_config.max_raw_write = max_size - GOODIX_BERLIN_SPI_WRITE_PREFIX_LEN;
regmap = devm_regmap_init(&spi->dev, NULL, spi, &regmap_config);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
error = goodix_berlin_probe(&spi->dev, spi->irq,
&goodix_berlin_spi_input_id, regmap);
if (error)
return error;
return 0;
}
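/*
 * Note on the max_raw_* values above (worked example, assuming a controller
 * limit of 4096 bytes from spi_max_transfer_size()):
 *
 *	max_raw_read  = 4096 - 8 = 4088	(1 flag + 4 addr + 3 dummy bytes)
 *	max_raw_write = 4096 - 5 = 4091	(1 flag + 4 addr bytes)
 *
 * so regmap splits larger raw accesses into chunks before they reach the
 * SPI layer, and the per-transfer protocol prefix always fits.
 */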
static const struct spi_device_id goodix_berlin_spi_ids[] = {
{ "gt9916" },
{ },
};
MODULE_DEVICE_TABLE(spi, goodix_berlin_spi_ids);
static const struct of_device_id goodix_berlin_spi_of_match[] = {
{ .compatible = "goodix,gt9916", },
{ }
};
MODULE_DEVICE_TABLE(of, goodix_berlin_spi_of_match);
static struct spi_driver goodix_berlin_spi_driver = {
.driver = {
.name = "goodix-berlin-spi",
.of_match_table = goodix_berlin_spi_of_match,
.pm = pm_sleep_ptr(&goodix_berlin_pm_ops),
.dev_groups = goodix_berlin_groups,
},
.probe = goodix_berlin_spi_probe,
.id_table = goodix_berlin_spi_ids,
};
module_spi_driver(goodix_berlin_spi_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Goodix Berlin SPI Touchscreen driver");
MODULE_AUTHOR("Neil Armstrong <[email protected]>");
|
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _FCOE_LIBFCOE_H_
#define _FCOE_LIBFCOE_H_
extern unsigned int libfcoe_debug_logging;
#define LIBFCOE_LOGGING 0x01 /* General logging, not categorized */
#define LIBFCOE_FIP_LOGGING 0x02 /* FIP logging */
#define LIBFCOE_TRANSPORT_LOGGING 0x04 /* FCoE transport logging */
#define LIBFCOE_SYSFS_LOGGING 0x08 /* fcoe_sysfs logging */
#define LIBFCOE_CHECK_LOGGING(LEVEL, CMD) \
do { \
if (unlikely(libfcoe_debug_logging & LEVEL)) \
do { \
CMD; \
} while (0); \
} while (0)
#define LIBFCOE_DBG(fmt, args...) \
LIBFCOE_CHECK_LOGGING(LIBFCOE_LOGGING, \
pr_info("libfcoe: " fmt, ##args);)
#define LIBFCOE_FIP_DBG(fip, fmt, args...) \
LIBFCOE_CHECK_LOGGING(LIBFCOE_FIP_LOGGING, \
pr_info("host%d: fip: " fmt, \
(fip)->lp->host->host_no, ##args);)
#define LIBFCOE_TRANSPORT_DBG(fmt, args...) \
LIBFCOE_CHECK_LOGGING(LIBFCOE_TRANSPORT_LOGGING, \
pr_info("%s: " fmt, __func__, ##args);)
#define LIBFCOE_SYSFS_DBG(cdev, fmt, args...) \
LIBFCOE_CHECK_LOGGING(LIBFCOE_SYSFS_LOGGING, \
pr_info("ctlr_%d: " fmt, cdev->id, ##args);)
#endif /* _FCOE_LIBFCOE_H_ */
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2016 Socionext Inc.
* Author: Masahiro Yamada <[email protected]>
*/
#include "clk-uniphier.h"
#define UNIPHIER_PERI_CLK_UART(idx, ch) \
UNIPHIER_CLK_GATE("uart" #ch, (idx), "uart", 0x24, 19 + (ch))
#define UNIPHIER_PERI_CLK_I2C_COMMON \
UNIPHIER_CLK_GATE("i2c-common", -1, "i2c", 0x20, 1)
#define UNIPHIER_PERI_CLK_I2C(idx, ch) \
UNIPHIER_CLK_GATE("i2c" #ch, (idx), "i2c-common", 0x24, 5 + (ch))
#define UNIPHIER_PERI_CLK_FI2C(idx, ch) \
UNIPHIER_CLK_GATE("i2c" #ch, (idx), "i2c", 0x24, 24 + (ch))
#define UNIPHIER_PERI_CLK_SCSSI(idx, ch) \
UNIPHIER_CLK_GATE("scssi" #ch, (idx), "spi", 0x20, 17 + (ch))
#define UNIPHIER_PERI_CLK_MCSSI(idx) \
UNIPHIER_CLK_GATE("mcssi", (idx), "spi", 0x24, 14)
const struct uniphier_clk_data uniphier_ld4_peri_clk_data[] = {
UNIPHIER_PERI_CLK_UART(0, 0),
UNIPHIER_PERI_CLK_UART(1, 1),
UNIPHIER_PERI_CLK_UART(2, 2),
UNIPHIER_PERI_CLK_UART(3, 3),
UNIPHIER_PERI_CLK_I2C_COMMON,
UNIPHIER_PERI_CLK_I2C(4, 0),
UNIPHIER_PERI_CLK_I2C(5, 1),
UNIPHIER_PERI_CLK_I2C(6, 2),
UNIPHIER_PERI_CLK_I2C(7, 3),
UNIPHIER_PERI_CLK_I2C(8, 4),
UNIPHIER_PERI_CLK_SCSSI(11, 0),
{ /* sentinel */ }
};
const struct uniphier_clk_data uniphier_pro4_peri_clk_data[] = {
UNIPHIER_PERI_CLK_UART(0, 0),
UNIPHIER_PERI_CLK_UART(1, 1),
UNIPHIER_PERI_CLK_UART(2, 2),
UNIPHIER_PERI_CLK_UART(3, 3),
UNIPHIER_PERI_CLK_FI2C(4, 0),
UNIPHIER_PERI_CLK_FI2C(5, 1),
UNIPHIER_PERI_CLK_FI2C(6, 2),
UNIPHIER_PERI_CLK_FI2C(7, 3),
UNIPHIER_PERI_CLK_FI2C(8, 4),
UNIPHIER_PERI_CLK_FI2C(9, 5),
UNIPHIER_PERI_CLK_FI2C(10, 6),
UNIPHIER_PERI_CLK_SCSSI(11, 0),
UNIPHIER_PERI_CLK_SCSSI(12, 1),
UNIPHIER_PERI_CLK_SCSSI(13, 2),
UNIPHIER_PERI_CLK_SCSSI(14, 3),
UNIPHIER_PERI_CLK_MCSSI(15),
{ /* sentinel */ }
};
|
// SPDX-License-Identifier: MIT
/*
* clock framework for AMD FCH controller block
*
* Copyright 2018 Advanced Micro Devices, Inc.
*/
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/pci.h>
#include <linux/platform_data/clk-fch.h>
#include <linux/platform_device.h>
/* Clock Driving Strength 2 register */
#define CLKDRVSTR2 0x28
/* Clock Control 1 register */
#define MISCCLKCNTL1 0x40
/* Auxiliary clock1 enable bit */
#define OSCCLKENB 2
/* 25Mhz auxiliary output clock freq bit */
#define OSCOUT1CLK25MHZ 16
#define ST_CLK_48M 0
#define ST_CLK_25M 1
#define ST_CLK_MUX 2
#define ST_CLK_GATE 3
#define ST_MAX_CLKS 4
#define CLK_48M_FIXED 0
#define CLK_GATE_FIXED 1
#define CLK_MAX_FIXED 2
/* List of supported CPU ids for clk mux with 25Mhz clk support */
#define AMD_CPU_ID_ST 0x1576
static const char * const clk_oscout1_parents[] = { "clk48MHz", "clk25MHz" };
static struct clk_hw *hws[ST_MAX_CLKS];
static const struct pci_device_id fch_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_ST) },
{ }
};
static int fch_clk_probe(struct platform_device *pdev)
{
struct fch_clk_data *fch_data;
struct pci_dev *rdev;
fch_data = dev_get_platdata(&pdev->dev);
if (!fch_data || !fch_data->base)
return -EINVAL;
rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
if (!rdev) {
dev_err(&pdev->dev, "FCH device not found\n");
return -ENODEV;
}
if (pci_match_id(fch_pci_ids, rdev)) {
hws[ST_CLK_48M] = clk_hw_register_fixed_rate(NULL, "clk48MHz",
NULL, 0, 48000000);
hws[ST_CLK_25M] = clk_hw_register_fixed_rate(NULL, "clk25MHz",
NULL, 0, 25000000);
hws[ST_CLK_MUX] = clk_hw_register_mux(NULL, "oscout1_mux",
clk_oscout1_parents, ARRAY_SIZE(clk_oscout1_parents),
0, fch_data->base + CLKDRVSTR2, OSCOUT1CLK25MHZ, 3, 0,
NULL);
clk_set_parent(hws[ST_CLK_MUX]->clk, hws[ST_CLK_48M]->clk);
hws[ST_CLK_GATE] = clk_hw_register_gate(NULL, "oscout1",
"oscout1_mux", 0, fch_data->base + MISCCLKCNTL1,
OSCCLKENB, CLK_GATE_SET_TO_DISABLE, NULL);
devm_clk_hw_register_clkdev(&pdev->dev, hws[ST_CLK_GATE],
fch_data->name, NULL);
} else {
hws[CLK_48M_FIXED] = clk_hw_register_fixed_rate(NULL, "clk48MHz",
NULL, 0, 48000000);
hws[CLK_GATE_FIXED] = clk_hw_register_gate(NULL, "oscout1",
"clk48MHz", 0, fch_data->base + MISCCLKCNTL1,
OSCCLKENB, 0, NULL);
devm_clk_hw_register_clkdev(&pdev->dev, hws[CLK_GATE_FIXED],
fch_data->name, NULL);
}
pci_dev_put(rdev);
return 0;
}
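/*
 * Resulting clock topology (sketch of the two probe paths above):
 *
 *	ST parts:	clk48MHz --\
 *			            oscout1_mux --- oscout1 (gate)
 *			clk25MHz --/
 *
 *	other FCH:	clk48MHz --- oscout1 (gate)
 *
 * In both cases "oscout1" is the consumer-visible clkdev entry registered
 * under fch_data->name.
 */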
static void fch_clk_remove(struct platform_device *pdev)
{
int i, clks;
struct pci_dev *rdev;
rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
if (!rdev)
return;
/* ST parts register four clocks (48M/25M/mux/gate); other parts two */
clks = pci_match_id(fch_pci_ids, rdev) ? ST_MAX_CLKS : CLK_MAX_FIXED;
for (i = 0; i < clks; i++)
clk_hw_unregister(hws[i]);
pci_dev_put(rdev);
}
static struct platform_driver fch_clk_driver = {
.driver = {
.name = "clk-fch",
.suppress_bind_attrs = true,
},
.probe = fch_clk_probe,
.remove = fch_clk_remove,
};
builtin_platform_driver(fch_clk_driver);
|
/*
* Copyright 2013 Wills Wang
*
* Wills Wang <[email protected]>
*
* This file is dual-licensed: you can use it either under the terms
* of the GPL or the X11 license, at your option. Note that this dual
* licensing only applies to this file, and not this project as a
* whole.
*
* a) This file is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This file is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* Or, alternatively,
*
* b) Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
/dts-v1/;
#include "sun7i-a20.dtsi"
#include "sunxi-common-regulators.dtsi"
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/irq.h>
/ {
model = "Merrii A20 Hummingbird";
compatible = "merrii,a20-hummingbird", "allwinner,sun7i-a20";
aliases {
serial0 = &uart0;
serial1 = &uart2;
serial2 = &uart3;
serial3 = &uart4;
serial4 = &uart5;
};
chosen {
stdout-path = "serial0:115200n8";
};
reg_mmc3_vdd: regulator-mmc3-vdd {
compatible = "regulator-fixed";
regulator-name = "mmc3_vdd";
regulator-min-microvolt = <3000000>;
regulator-max-microvolt = <3000000>;
enable-active-high;
gpio = <&pio 7 9 GPIO_ACTIVE_HIGH>; /* PH9 */
};
reg_gmac_vdd: regulator-gmac-vdd {
compatible = "regulator-fixed";
regulator-name = "gmac_vdd";
regulator-min-microvolt = <3000000>;
regulator-max-microvolt = <3000000>;
enable-active-high;
gpio = <&pio 7 16 GPIO_ACTIVE_HIGH>; /* PH16 */
};
};
&ahci {
target-supply = <&reg_ahci_5v>;
status = "okay";
};
&ehci0 {
status = "okay";
};
&ehci1 {
status = "okay";
};
&gmac {
pinctrl-names = "default";
pinctrl-0 = <&gmac_rgmii_pins>;
phy-handle = <&phy1>;
phy-mode = "rgmii";
phy-supply = <&reg_gmac_vdd>;
status = "okay";
};
&i2c0 {
status = "okay";
axp209: pmic@34 {
compatible = "x-powers,axp209";
reg = <0x34>;
interrupt-parent = <&nmi_intc>;
interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
interrupt-controller;
#interrupt-cells = <1>;
};
};
&i2c1 {
status = "okay";
};
&i2c2 {
status = "okay";
};
&i2c3 {
status = "okay";
};
&ir0 {
pinctrl-names = "default";
pinctrl-0 = <&ir0_rx_pin>;
status = "okay";
};
&gmac_mdio {
phy1: ethernet-phy@1 {
reg = <1>;
reset-gpios = <&pio 0 17 GPIO_ACTIVE_LOW>; /* PA17 */
reset-assert-us = <10000>;
/* wait 1s after reset, otherwise fail to read phy id */
reset-deassert-us = <1000000>;
};
};
&mmc0 {
vmmc-supply = <&reg_vcc3v0>;
bus-width = <4>;
cd-gpios = <&pio 7 1 GPIO_ACTIVE_LOW>; /* PH1 */
status = "okay";
};
&mmc3 {
vmmc-supply = <&reg_mmc3_vdd>;
bus-width = <4>;
non-removable;
status = "okay";
};
&ohci0 {
status = "okay";
};
&ohci1 {
status = "okay";
};
&pwm {
pinctrl-names = "default";
pinctrl-0 = <&pwm0_pin>;
status = "okay";
};
&reg_ahci_5v {
gpio = <&pio 7 15 GPIO_ACTIVE_HIGH>; /* PH15 */
status = "okay";
};
&reg_usb1_vbus {
gpio = <&pio 7 2 GPIO_ACTIVE_HIGH>; /* PH2 */
status = "okay";
};
&reg_usb2_vbus {
status = "okay";
};
&spi2 {
pinctrl-names = "default";
pinctrl-0 = <&spi2_pb_pins>,
<&spi2_cs0_pb_pin>;
status = "okay";
};
&uart0 {
pinctrl-names = "default";
pinctrl-0 = <&uart0_pb_pins>;
status = "okay";
};
&uart2 {
pinctrl-names = "default";
pinctrl-0 = <&uart2_pi_pins>, <&uart2_cts_rts_pi_pins>;
status = "okay";
};
&uart3 {
pinctrl-names = "default";
pinctrl-0 = <&uart3_pg_pins>, <&uart3_cts_rts_pg_pins>;
status = "okay";
};
&uart4 {
pinctrl-names = "default";
pinctrl-0 = <&uart4_pg_pins>;
status = "okay";
};
&uart5 {
pinctrl-names = "default";
pinctrl-0 = <&uart5_pi_pins>;
status = "okay";
};
&usbphy {
usb1_vbus-supply = <&reg_usb1_vbus>;
usb2_vbus-supply = <&reg_usb2_vbus>;
status = "okay";
};
|